From 5e7b6c46bc096f9f42689446a4914da1c5707114 Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Wed, 26 Jun 2019 10:08:59 +0300 Subject: [PATCH 001/234] consensus: consensusGroupSelection subcomponent becomes nodesCoordinator Add entire nodes to shard mapping for management instead of only the nodes for shard Add functionality for getting the signers for a speciffic randomness and bitmap --- consensus/interface.go | 7 +- consensus/mock/consensusDataContainerMock.go | 6 +- consensus/mock/consensusStateMock.go | 4 +- consensus/mock/validatorGroupSelectorMock.go | 30 ++++++- consensus/spos/bls/export_test.go | 2 +- consensus/spos/bn/export_test.go | 2 +- consensus/spos/consensusCore.go | 8 +- consensus/spos/consensusState.go | 2 +- consensus/spos/interface.go | 4 +- .../validators/groupSelectors/export_test.go | 9 -- .../errors.go | 12 ++- .../nodesCoordinator/export_test.go | 9 ++ .../indexHashedNodesCoordinator.go} | 88 ++++++++++++++----- .../indexHashedNodesCoordinator_test.go} | 82 ++++++++++------- node/node.go | 46 +++++----- 15 files changed, 205 insertions(+), 106 deletions(-) delete mode 100644 consensus/validators/groupSelectors/export_test.go rename consensus/validators/{groupSelectors => nodesCoordinator}/errors.go (70%) create mode 100644 consensus/validators/nodesCoordinator/export_test.go rename consensus/validators/{groupSelectors/indexHashedGroup.go => nodesCoordinator/indexHashedNodesCoordinator.go} (60%) rename consensus/validators/{groupSelectors/indexHashedGroup_test.go => nodesCoordinator/indexHashedNodesCoordinator_test.go} (76%) diff --git a/consensus/interface.go b/consensus/interface.go index f4f044de7e5..fdc042ee760 100644 --- a/consensus/interface.go +++ b/consensus/interface.go @@ -55,10 +55,10 @@ type Validator interface { PubKey() []byte } -// ValidatorGroupSelector defines the behaviour of a struct able to do validator group selection -type ValidatorGroupSelector interface { +// NodesCoordinator defines the behaviour of a struct able to do validator group selection +type NodesCoordinator interface { PublicKeysSelector - LoadEligibleList(eligibleList []Validator) error + LoadNodesPerShards(nodes map[uint32][]Validator) error ComputeValidatorsGroup(randomness []byte) (validatorsGroup []Validator, err error) ConsensusGroupSize() int SetConsensusGroupSize(int) error @@ -67,6 +67,7 @@ type ValidatorGroupSelector interface { // PublicKeysSelector allows retrieval of eligible validators public keys selected by a bitmap type PublicKeysSelector interface { GetSelectedPublicKeys(selection []byte) (publicKeys []string, err error) + GetSelectedValidatorsPublicKeys(randomness []byte, bitmap []byte) ([]string, error) } // BroadcastMessenger defines the behaviour of the broadcast messages by the consensus group diff --git a/consensus/mock/consensusDataContainerMock.go b/consensus/mock/consensusDataContainerMock.go index 2acb33a16b0..0986fe9f3dd 100644 --- a/consensus/mock/consensusDataContainerMock.go +++ b/consensus/mock/consensusDataContainerMock.go @@ -26,7 +26,7 @@ type ConsensusCoreMock struct { rounder consensus.Rounder shardCoordinator sharding.Coordinator syncTimer ntp.SyncTimer - validatorGroupSelector consensus.ValidatorGroupSelector + validatorGroupSelector consensus.NodesCoordinator } func (cdc *ConsensusCoreMock) Blockchain() data.ChainHandler { @@ -77,7 +77,7 @@ func (cdc *ConsensusCoreMock) SyncTimer() ntp.SyncTimer { return cdc.syncTimer } -func (cdc *ConsensusCoreMock) ValidatorGroupSelector() consensus.ValidatorGroupSelector { +func (cdc *ConsensusCoreMock) 
ValidatorGroupSelector() consensus.NodesCoordinator { return cdc.validatorGroupSelector } @@ -124,7 +124,7 @@ func (cdc *ConsensusCoreMock) SetSyncTimer(syncTimer ntp.SyncTimer) { cdc.syncTimer = syncTimer } -func (cdc *ConsensusCoreMock) SetValidatorGroupSelector(validatorGroupSelector consensus.ValidatorGroupSelector) { +func (cdc *ConsensusCoreMock) SetValidatorGroupSelector(validatorGroupSelector consensus.NodesCoordinator) { cdc.validatorGroupSelector = validatorGroupSelector } diff --git a/consensus/mock/consensusStateMock.go b/consensus/mock/consensusStateMock.go index 84e17dee6ee..68ea0757558 100644 --- a/consensus/mock/consensusStateMock.go +++ b/consensus/mock/consensusStateMock.go @@ -7,7 +7,7 @@ type ConsensusStateMock struct { IsNodeLeaderInCurrentRoundCalled func(node string) bool IsSelfLeaderInCurrentRoundCalled func() bool GetLeaderCalled func() (string, error) - GetNextConsensusGroupCalled func(randomSource string, vgs consensus.ValidatorGroupSelector) ([]string, error) + GetNextConsensusGroupCalled func(randomSource string, vgs consensus.NodesCoordinator) ([]string, error) IsConsensusDataSetCalled func() bool IsConsensusDataEqualCalled func(data []byte) bool IsJobDoneCalled func(node string, currentSubroundId int) bool @@ -43,7 +43,7 @@ func (cnsm *ConsensusStateMock) GetLeader() (string, error) { } func (cnsm *ConsensusStateMock) GetNextConsensusGroup(randomSource string, - vgs consensus.ValidatorGroupSelector) ([]string, + vgs consensus.NodesCoordinator) ([]string, error) { return cnsm.GetNextConsensusGroupCalled(randomSource, vgs) } diff --git a/consensus/mock/validatorGroupSelectorMock.go b/consensus/mock/validatorGroupSelectorMock.go index f25cdb46405..fc40438d891 100644 --- a/consensus/mock/validatorGroupSelectorMock.go +++ b/consensus/mock/validatorGroupSelectorMock.go @@ -7,7 +7,8 @@ import ( ) type ValidatorGroupSelectorMock struct { - ComputeValidatorsGroupCalled func([]byte) ([]consensus.Validator, error) + ComputeValidatorsGroupCalled func([]byte) ([]consensus.Validator, error) + GetSelectedValidatorsPublicKeysCalled func(randomness []byte, bitmap []byte) ([]string, error) } func (vgsm ValidatorGroupSelectorMock) ComputeValidatorsGroup(randomness []byte) (validatorsGroup []consensus.Validator, err error) { @@ -30,11 +31,36 @@ func (vgsm ValidatorGroupSelectorMock) ComputeValidatorsGroup(randomness []byte) return list, nil } +func (vgsm ValidatorGroupSelectorMock) GetSelectedValidatorsPublicKeys(randomness []byte, bitmap []byte) ([]string, error) { + if vgsm.GetSelectedValidatorsPublicKeysCalled != nil { + return vgsm.GetSelectedValidatorsPublicKeysCalled(randomness, bitmap) + } + + validators, err := vgsm.ComputeValidatorsGroup(randomness) + + if err != nil { + return nil, err + } + + pubKeys := make([]string, 0) + + for i, v := range validators { + isSelected := (bitmap[i/8] & (1 << (uint16(i) % 8))) != 0 + if !isSelected { + continue + } + + pubKeys = append(pubKeys, string(v.PubKey())) + } + + return pubKeys, nil +} + func (vgsm ValidatorGroupSelectorMock) ConsensusGroupSize() int { panic("implement me") } -func (vgsm ValidatorGroupSelectorMock) LoadEligibleList(eligibleList []consensus.Validator) error { +func (vgsm ValidatorGroupSelectorMock) LoadNodesPerShards(map[uint32][]consensus.Validator) error { return nil } diff --git a/consensus/spos/bls/export_test.go b/consensus/spos/bls/export_test.go index fb9ad3ffa6b..feeb24cb1ef 100644 --- a/consensus/spos/bls/export_test.go +++ b/consensus/spos/bls/export_test.go @@ -60,7 +60,7 @@ func (fct *factory) 
SyncTimer() ntp.SyncTimer { return fct.consensusCore.SyncTimer() } -func (fct *factory) ValidatorGroupSelector() consensus.ValidatorGroupSelector { +func (fct *factory) ValidatorGroupSelector() consensus.NodesCoordinator { return fct.consensusCore.ValidatorGroupSelector() } diff --git a/consensus/spos/bn/export_test.go b/consensus/spos/bn/export_test.go index 0ee7fe58897..334d732cb8d 100644 --- a/consensus/spos/bn/export_test.go +++ b/consensus/spos/bn/export_test.go @@ -60,7 +60,7 @@ func (fct *factory) SyncTimer() ntp.SyncTimer { return fct.consensusCore.SyncTimer() } -func (fct *factory) ValidatorGroupSelector() consensus.ValidatorGroupSelector { +func (fct *factory) ValidatorGroupSelector() consensus.NodesCoordinator { return fct.consensusCore.ValidatorGroupSelector() } diff --git a/consensus/spos/consensusCore.go b/consensus/spos/consensusCore.go index f659d393696..797010ee715 100644 --- a/consensus/spos/consensusCore.go +++ b/consensus/spos/consensusCore.go @@ -28,7 +28,7 @@ type ConsensusCore struct { rounder consensus.Rounder shardCoordinator sharding.Coordinator syncTimer ntp.SyncTimer - validatorGroupSelector consensus.ValidatorGroupSelector + validatorGroupSelector consensus.NodesCoordinator } // NewConsensusCore creates a new ConsensusCore instance @@ -47,7 +47,7 @@ func NewConsensusCore( rounder consensus.Rounder, shardCoordinator sharding.Coordinator, syncTimer ntp.SyncTimer, - validatorGroupSelector consensus.ValidatorGroupSelector) (*ConsensusCore, error) { + validatorGroupSelector consensus.NodesCoordinator) (*ConsensusCore, error) { consensusCore := &ConsensusCore{ blockChain, @@ -135,8 +135,8 @@ func (cc *ConsensusCore) SyncTimer() ntp.SyncTimer { return cc.syncTimer } -// ValidatorGroupSelector gets the ValidatorGroupSelector stored in the ConsensusCore -func (cc *ConsensusCore) ValidatorGroupSelector() consensus.ValidatorGroupSelector { +// NodesCoordinator gets the NodesCoordinator stored in the ConsensusCore +func (cc *ConsensusCore) ValidatorGroupSelector() consensus.NodesCoordinator { return cc.validatorGroupSelector } diff --git a/consensus/spos/consensusState.go b/consensus/spos/consensusState.go index db5d6b498af..8f6f3f3249b 100644 --- a/consensus/spos/consensusState.go +++ b/consensus/spos/consensusState.go @@ -94,7 +94,7 @@ func (cns *ConsensusState) GetLeader() (string, error) { // GetNextConsensusGroup gets the new consensus group for the current round based on current eligible list and a random // source for the new selection -func (cns *ConsensusState) GetNextConsensusGroup(randomSource string, vgs consensus.ValidatorGroupSelector) ([]string, +func (cns *ConsensusState) GetNextConsensusGroup(randomSource string, vgs consensus.NodesCoordinator) ([]string, error) { validatorsGroup, err := vgs.ComputeValidatorsGroup([]byte(randomSource)) diff --git a/consensus/spos/interface.go b/consensus/spos/interface.go index 9c80a08310d..e58bea0bb80 100644 --- a/consensus/spos/interface.go +++ b/consensus/spos/interface.go @@ -38,8 +38,8 @@ type ConsensusCoreHandler interface { ShardCoordinator() sharding.Coordinator // SyncTimer gets the SyncTimer stored in the ConsensusCore SyncTimer() ntp.SyncTimer - // ValidatorGroupSelector gets the ValidatorGroupSelector stored in the ConsensusCore - ValidatorGroupSelector() consensus.ValidatorGroupSelector + // NodesCoordinator gets the NodesCoordinator stored in the ConsensusCore + ValidatorGroupSelector() consensus.NodesCoordinator // RandomnessPrivateKey returns the private key stored in the ConsensusStore used for randomness 
generation RandomnessPrivateKey() crypto.PrivateKey // RandomnessSingleSigner returns the single signer stored in the ConsensusStore used for randomness generation diff --git a/consensus/validators/groupSelectors/export_test.go b/consensus/validators/groupSelectors/export_test.go deleted file mode 100644 index ba4f0f77611..00000000000 --- a/consensus/validators/groupSelectors/export_test.go +++ /dev/null @@ -1,9 +0,0 @@ -package groupSelectors - -import ( - "github.com/ElrondNetwork/elrond-go/consensus" -) - -func (ihgs *indexHashedGroupSelector) EligibleList() []consensus.Validator { - return ihgs.eligibleList -} diff --git a/consensus/validators/groupSelectors/errors.go b/consensus/validators/nodesCoordinator/errors.go similarity index 70% rename from consensus/validators/groupSelectors/errors.go rename to consensus/validators/nodesCoordinator/errors.go index 986f109ddea..95df4739f77 100644 --- a/consensus/validators/groupSelectors/errors.go +++ b/consensus/validators/nodesCoordinator/errors.go @@ -1,11 +1,14 @@ -package groupSelectors +package nodesCoordinator import ( "errors" ) -// ErrNilInputSlice signals that a nil slice has been provided -var ErrNilInputSlice = errors.New("nil input slice") +// ErrNilInputNodesMap signals that a nil nodes map was provided +var ErrNilInputNodesMap = errors.New("nil input nodes map") + +// ErrNilInputNodesList signals that a nil nodes list was provided +var ErrNilInputNodesList = errors.New("nil input nodes list") // ErrSmallEligibleListSize signals that the eligible validators list's size is less than the consensus size var ErrSmallEligibleListSize = errors.New("small eligible list size") @@ -16,6 +19,9 @@ var ErrInvalidConsensusGroupSize = errors.New("invalid consensus group size") // ErrEligibleSelectionMismatch signals a mismatch between the eligible list and the group selection bitmap var ErrEligibleSelectionMismatch = errors.New("invalid eligible validator selection") +// ErrLeaderNotSelectedInBitmap signals an invalid validators selection from a consensus group as leader is not marked +var ErrLeaderNotSelectedInBitmap = errors.New("bitmap invalid as leader is not selected") + // ErrEligibleTooManySelections signals an invalid selection for consensus group var ErrEligibleTooManySelections = errors.New("too many selections for consensus group") diff --git a/consensus/validators/nodesCoordinator/export_test.go b/consensus/validators/nodesCoordinator/export_test.go new file mode 100644 index 00000000000..51526ef208a --- /dev/null +++ b/consensus/validators/nodesCoordinator/export_test.go @@ -0,0 +1,9 @@ +package nodesCoordinator + +import ( + "github.com/ElrondNetwork/elrond-go/consensus" +) + +func (ihgs *indexHashedNodesCoordinator) EligibleList() []consensus.Validator { + return ihgs.nodesMap[ihgs.shardId] +} diff --git a/consensus/validators/groupSelectors/indexHashedGroup.go b/consensus/validators/nodesCoordinator/indexHashedNodesCoordinator.go similarity index 60% rename from consensus/validators/groupSelectors/indexHashedGroup.go rename to consensus/validators/nodesCoordinator/indexHashedNodesCoordinator.go index d45e242a691..36a98563922 100644 --- a/consensus/validators/groupSelectors/indexHashedGroup.go +++ b/consensus/validators/nodesCoordinator/indexHashedNodesCoordinator.go @@ -1,4 +1,4 @@ -package groupSelectors +package nodesCoordinator import ( "bytes" @@ -9,22 +9,34 @@ import ( "github.com/ElrondNetwork/elrond-go/hashing" ) -type indexHashedGroupSelector struct { +const defaultRating = 0 +const defaultStake = 0 + +type 
indexHashedNodesCoordinator struct { + nbShards uint32 + shardId uint32 hasher hashing.Hasher - eligibleList []consensus.Validator + nodesMap map[uint32][]consensus.Validator expandedEligibleList []consensus.Validator consensusGroupSize int } // NewIndexHashedGroupSelector creates a new index hashed group selector -func NewIndexHashedGroupSelector(consensusGroupSize int, hasher hashing.Hasher) (*indexHashedGroupSelector, error) { +func NewIndexHashedGroupSelector( + consensusGroupSize int, + hasher hashing.Hasher, + shardId uint32, + nbShards uint32, +) (*indexHashedNodesCoordinator, error) { if hasher == nil { return nil, ErrNilHasher } - ihgs := &indexHashedGroupSelector{ + ihgs := &indexHashedNodesCoordinator{ + nbShards: nbShards, + shardId: shardId, hasher: hasher, - eligibleList: make([]consensus.Validator, 0), + nodesMap: make(map[uint32][]consensus.Validator), expandedEligibleList: make([]consensus.Validator, 0), } @@ -36,14 +48,14 @@ func NewIndexHashedGroupSelector(consensusGroupSize int, hasher hashing.Hasher) return ihgs, nil } -// LoadEligibleList loads the eligible list -func (ihgs *indexHashedGroupSelector) LoadEligibleList(eligibleList []consensus.Validator) error { - if eligibleList == nil { - return ErrNilInputSlice +// LoadNodesPerShards loads the distribution of nodes per shard into the nodes management component +func (ihgs *indexHashedNodesCoordinator) LoadNodesPerShards(nodes map[uint32][]consensus.Validator) error { + if nodes == nil { + return ErrNilInputNodesMap } - ihgs.eligibleList = make([]consensus.Validator, len(eligibleList)) - copy(ihgs.eligibleList, eligibleList) + ihgs.nodesMap = nodes + return nil } @@ -56,8 +68,8 @@ func (ihgs *indexHashedGroupSelector) LoadEligibleList(eligibleList []consensus. // exceed the maximum index value permitted by the validator list), and then recheck against temp validator list until // the item at the new proposed index is not found in the list. This new proposed index will be called checked index // 4. 
the item at the checked index is appended in the temp validator list -func (ihgs *indexHashedGroupSelector) ComputeValidatorsGroup(randomness []byte) (validatorsGroup []consensus.Validator, err error) { - if len(ihgs.eligibleList) < ihgs.consensusGroupSize { +func (ihgs *indexHashedNodesCoordinator) ComputeValidatorsGroup(randomness []byte) (validatorsGroup []consensus.Validator, err error) { + if len(ihgs.nodesMap[ihgs.shardId]) < ihgs.consensusGroupSize { return nil, ErrSmallEligibleListSize } @@ -79,11 +91,39 @@ func (ihgs *indexHashedGroupSelector) ComputeValidatorsGroup(randomness []byte) return tempList, nil } +// GetSelectedValidatorsPublicKeys calculates the validators group for a speciffic randomness and selection bitmap, +// returning their public keys +func (ihgs *indexHashedNodesCoordinator) GetSelectedValidatorsPublicKeys(randomness []byte, bitmap []byte) ([]string, error) { + isLeaderSelected := bitmap[0]&1 != 0 // first bit in bitmap selects leader + // leader always needs to be selected + if !isLeaderSelected { + return nil, ErrLeaderNotSelectedInBitmap + } + + consensusNodes, err := ihgs.ComputeValidatorsGroup(randomness) + if err != nil { + return nil, err + } + + pubKeys := make([]string, 0) + + for i, v := range consensusNodes { + isSelected := (bitmap[i/8] & (1 << (uint16(i) % 8))) != 0 + if !isSelected { + continue + } + + pubKeys = append(pubKeys, string(v.PubKey())) + } + + return pubKeys, nil +} + // GetSelectedPublicKeys returns the stringified public keys of the marked validators in the selection bitmap // TODO: This function needs to be revised when the requirements are clarified -func (ihgs *indexHashedGroupSelector) GetSelectedPublicKeys(selection []byte) (publicKeys []string, err error) { +func (ihgs *indexHashedNodesCoordinator) GetSelectedPublicKeys(selection []byte) (publicKeys []string, err error) { selectionLen := uint16(len(selection) * 8) // 8 selection bits in each byte - shardEligibleLen := uint16(len(ihgs.eligibleList)) + shardEligibleLen := uint16(len(ihgs.nodesMap[ihgs.shardId])) invalidSelection := selectionLen < shardEligibleLen if invalidSelection { @@ -100,7 +140,7 @@ func (ihgs *indexHashedGroupSelector) GetSelectedPublicKeys(selection []byte) (p continue } - publicKeys[cnt] = string(ihgs.eligibleList[i].PubKey()) + publicKeys[cnt] = string(ihgs.nodesMap[ihgs.shardId][i].PubKey()) cnt++ if cnt > ihgs.consensusGroupSize { @@ -115,13 +155,13 @@ func (ihgs *indexHashedGroupSelector) GetSelectedPublicKeys(selection []byte) (p return publicKeys, nil } -func (ihgs *indexHashedGroupSelector) expandEligibleList() []consensus.Validator { +func (ihgs *indexHashedNodesCoordinator) expandEligibleList() []consensus.Validator { //TODO implement an expand eligible list variant - return ihgs.eligibleList + return ihgs.nodesMap[ihgs.shardId] } // computeListIndex computes a proposed index from expanded eligible list -func (ihgs *indexHashedGroupSelector) computeListIndex(currentIndex int, randomSource string) int { +func (ihgs *indexHashedNodesCoordinator) computeListIndex(currentIndex int, randomSource string) int { buffCurrentIndex := make([]byte, 8) binary.BigEndian.PutUint64(buffCurrentIndex, uint64(currentIndex)) @@ -136,7 +176,7 @@ func (ihgs *indexHashedGroupSelector) computeListIndex(currentIndex int, randomS } // checkIndex returns a checked index starting from a proposed index -func (ihgs *indexHashedGroupSelector) checkIndex(proposedIndex int, selectedList []consensus.Validator) int { +func (ihgs *indexHashedNodesCoordinator) 
checkIndex(proposedIndex int, selectedList []consensus.Validator) int { for { v := ihgs.expandedEligibleList[proposedIndex] @@ -152,7 +192,7 @@ func (ihgs *indexHashedGroupSelector) checkIndex(proposedIndex int, selectedList } // validatorIsInList returns true if a validator has been found in provided list -func (ihgs *indexHashedGroupSelector) validatorIsInList(v consensus.Validator, list []consensus.Validator) bool { +func (ihgs *indexHashedNodesCoordinator) validatorIsInList(v consensus.Validator, list []consensus.Validator) bool { for i := 0; i < len(list); i++ { if bytes.Equal(v.PubKey(), list[i].PubKey()) { return true @@ -163,12 +203,12 @@ func (ihgs *indexHashedGroupSelector) validatorIsInList(v consensus.Validator, l } // ConsensusGroupSize returns the consensus group size -func (ihgs *indexHashedGroupSelector) ConsensusGroupSize() int { +func (ihgs *indexHashedNodesCoordinator) ConsensusGroupSize() int { return ihgs.consensusGroupSize } // SetConsensusGroupSize sets the consensus group size -func (ihgs *indexHashedGroupSelector) SetConsensusGroupSize(consensusGroupSize int) error { +func (ihgs *indexHashedNodesCoordinator) SetConsensusGroupSize(consensusGroupSize int) error { if consensusGroupSize < 1 { return ErrInvalidConsensusGroupSize } diff --git a/consensus/validators/groupSelectors/indexHashedGroup_test.go b/consensus/validators/nodesCoordinator/indexHashedNodesCoordinator_test.go similarity index 76% rename from consensus/validators/groupSelectors/indexHashedGroup_test.go rename to consensus/validators/nodesCoordinator/indexHashedNodesCoordinator_test.go index dbc8d0fee89..7272a2d0d7d 100644 --- a/consensus/validators/groupSelectors/indexHashedGroup_test.go +++ b/consensus/validators/nodesCoordinator/indexHashedNodesCoordinator_test.go @@ -1,4 +1,4 @@ -package groupSelectors_test +package nodesCoordinator_test import ( "encoding/binary" @@ -8,7 +8,7 @@ import ( "github.com/ElrondNetwork/elrond-go/consensus" "github.com/ElrondNetwork/elrond-go/consensus/mock" - "github.com/ElrondNetwork/elrond-go/consensus/validators/groupSelectors" + "github.com/ElrondNetwork/elrond-go/consensus/validators/nodesCoordinator" "github.com/stretchr/testify/assert" ) @@ -28,25 +28,25 @@ func uint64ToBytes(value uint64) []byte { func TestNewIndexHashedGroupSelector_NilHasherShouldErr(t *testing.T) { t.Parallel() - ihgs, err := groupSelectors.NewIndexHashedGroupSelector(1, nil) + ihgs, err := nodesCoordinator.NewIndexHashedGroupSelector(1, nil, 0, 1) assert.Nil(t, ihgs) - assert.Equal(t, groupSelectors.ErrNilHasher, err) + assert.Equal(t, nodesCoordinator.ErrNilHasher, err) } func TestNewIndexHashedGroupSelector_InvalidConsensusGroupSizeShouldErr(t *testing.T) { t.Parallel() - ihgs, err := groupSelectors.NewIndexHashedGroupSelector(0, mock.HasherMock{}) + ihgs, err := nodesCoordinator.NewIndexHashedGroupSelector(0, mock.HasherMock{}, 0, 1) assert.Nil(t, ihgs) - assert.Equal(t, groupSelectors.ErrInvalidConsensusGroupSize, err) + assert.Equal(t, nodesCoordinator.ErrInvalidConsensusGroupSize, err) } func TestNewIndexHashedGroupSelector_OkValsShouldWork(t *testing.T) { t.Parallel() - ihgs, err := groupSelectors.NewIndexHashedGroupSelector(1, mock.HasherMock{}) + ihgs, err := nodesCoordinator.NewIndexHashedGroupSelector(1, mock.HasherMock{}, 0, 1) assert.NotNil(t, ihgs) assert.Nil(t, err) @@ -57,22 +57,25 @@ func TestNewIndexHashedGroupSelector_OkValsShouldWork(t *testing.T) { func TestIndexHashedGroupSelector_LoadEligibleListNilListShouldErr(t *testing.T) { t.Parallel() - ihgs, _ := 
groupSelectors.NewIndexHashedGroupSelector(10, mock.HasherMock{}) + ihgs, _ := nodesCoordinator.NewIndexHashedGroupSelector(10, mock.HasherMock{}, 0, 1) - assert.Equal(t, groupSelectors.ErrNilInputSlice, ihgs.LoadEligibleList(nil)) + assert.Equal(t, nodesCoordinator.ErrNilInputNodesMap, ihgs.LoadNodesPerShards(nil)) } func TestIndexHashedGroupSelector_OkValShouldWork(t *testing.T) { t.Parallel() - ihgs, _ := groupSelectors.NewIndexHashedGroupSelector(10, mock.HasherMock{}) + ihgs, _ := nodesCoordinator.NewIndexHashedGroupSelector(10, mock.HasherMock{}, 0, 1) list := []consensus.Validator{ mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0")), mock.NewValidatorMock(big.NewInt(2), 3, []byte("pk1")), } - err := ihgs.LoadEligibleList(list) + nodesMap := make(map[uint32][]consensus.Validator) + nodesMap[0] = list + + err := ihgs.LoadNodesPerShards(nodesMap) assert.Nil(t, err) assert.Equal(t, list, ihgs.EligibleList()) } @@ -82,50 +85,54 @@ func TestIndexHashedGroupSelector_OkValShouldWork(t *testing.T) { func TestIndexHashedGroupSelector_ComputeValidatorsGroup0SizeShouldErr(t *testing.T) { t.Parallel() - ihgs, _ := groupSelectors.NewIndexHashedGroupSelector(1, mock.HasherMock{}) + ihgs, _ := nodesCoordinator.NewIndexHashedGroupSelector(1, mock.HasherMock{}, 0, 1) list := make([]consensus.Validator, 0) list, err := ihgs.ComputeValidatorsGroup([]byte("randomness")) assert.Nil(t, list) - assert.Equal(t, groupSelectors.ErrSmallEligibleListSize, err) + assert.Equal(t, nodesCoordinator.ErrSmallEligibleListSize, err) } func TestIndexHashedGroupSelector_ComputeValidatorsGroupWrongSizeShouldErr(t *testing.T) { t.Parallel() - ihgs, _ := groupSelectors.NewIndexHashedGroupSelector(10, mock.HasherMock{}) + ihgs, _ := nodesCoordinator.NewIndexHashedGroupSelector(10, mock.HasherMock{}, 0, 1) list := []consensus.Validator{ mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0")), mock.NewValidatorMock(big.NewInt(2), 3, []byte("pk1")), } - _ = ihgs.LoadEligibleList(list) + nodesMap := make(map[uint32][]consensus.Validator) + nodesMap[0] = list + _ = ihgs.LoadNodesPerShards(nodesMap) list, err := ihgs.ComputeValidatorsGroup([]byte("randomness")) assert.Nil(t, list) - assert.Equal(t, groupSelectors.ErrSmallEligibleListSize, err) + assert.Equal(t, nodesCoordinator.ErrSmallEligibleListSize, err) } func TestIndexHashedGroupSelector_ComputeValidatorsGroupNilRandomnessShouldErr(t *testing.T) { t.Parallel() - ihgs, _ := groupSelectors.NewIndexHashedGroupSelector(2, mock.HasherMock{}) + ihgs, _ := nodesCoordinator.NewIndexHashedGroupSelector(2, mock.HasherMock{}, 0, 1) list := []consensus.Validator{ mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0")), mock.NewValidatorMock(big.NewInt(2), 3, []byte("pk1")), } - _ = ihgs.LoadEligibleList(list) + nodesMap := make(map[uint32][]consensus.Validator) + nodesMap[0] = list + _ = ihgs.LoadNodesPerShards(nodesMap) list2, err := ihgs.ComputeValidatorsGroup(nil) assert.Nil(t, list2) - assert.Equal(t, groupSelectors.ErrNilRandomness, err) + assert.Equal(t, nodesCoordinator.ErrNilRandomness, err) } //------- functionality tests @@ -133,13 +140,15 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroupNilRandomnessShouldErr(t func TestIndexHashedGroupSelector_ComputeValidatorsGroup1ValidatorShouldReturnSame(t *testing.T) { t.Parallel() - ihgs, _ := groupSelectors.NewIndexHashedGroupSelector(1, mock.HasherMock{}) + ihgs, _ := nodesCoordinator.NewIndexHashedGroupSelector(1, mock.HasherMock{}, 0, 1) list := []consensus.Validator{ mock.NewValidatorMock(big.NewInt(1), 2, 
[]byte("pk0")), } - _ = ihgs.LoadEligibleList(list) + nodesMap := make(map[uint32][]consensus.Validator) + nodesMap[0] = list + _ = ihgs.LoadNodesPerShards(nodesMap) list2, err := ihgs.ComputeValidatorsGroup([]byte("randomness")) @@ -169,14 +178,16 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest2Validators(t *testi return nil } - ihgs, _ := groupSelectors.NewIndexHashedGroupSelector(2, hasher) + ihgs, _ := nodesCoordinator.NewIndexHashedGroupSelector(2, hasher, 0, 1) list := []consensus.Validator{ mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0")), mock.NewValidatorMock(big.NewInt(2), 3, []byte("pk1")), } - _ = ihgs.LoadEligibleList(list) + nodesMap := make(map[uint32][]consensus.Validator) + nodesMap[0] = list + _ = ihgs.LoadNodesPerShards(nodesMap) list2, err := ihgs.ComputeValidatorsGroup([]byte(randomness)) @@ -206,7 +217,7 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest2ValidatorsRevertOrd return nil } - ihgs, _ := groupSelectors.NewIndexHashedGroupSelector(2, hasher) + ihgs, _ := nodesCoordinator.NewIndexHashedGroupSelector(2, hasher, 0, 1) validator0 := mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0")) validator1 := mock.NewValidatorMock(big.NewInt(2), 3, []byte("pk1")) @@ -216,7 +227,9 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest2ValidatorsRevertOrd validator1, } - _ = ihgs.LoadEligibleList(list) + nodesMap := make(map[uint32][]consensus.Validator) + nodesMap[0] = list + _ = ihgs.LoadNodesPerShards(nodesMap) list2, err := ihgs.ComputeValidatorsGroup([]byte(randomness)) @@ -247,14 +260,16 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest2ValidatorsSameIndex return nil } - ihgs, _ := groupSelectors.NewIndexHashedGroupSelector(2, hasher) + ihgs, _ := nodesCoordinator.NewIndexHashedGroupSelector(2, hasher, 0, 1) list := []consensus.Validator{ mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0")), mock.NewValidatorMock(big.NewInt(2), 3, []byte("pk1")), } - _ = ihgs.LoadEligibleList(list) + nodesMap := make(map[uint32][]consensus.Validator) + nodesMap[0] = list + _ = ihgs.LoadNodesPerShards(nodesMap) list2, err := ihgs.ComputeValidatorsGroup([]byte(randomness)) @@ -296,7 +311,7 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest6From10ValidatorsSho return convertBigIntToBytes(val) } - ihgs, _ := groupSelectors.NewIndexHashedGroupSelector(6, hasher) + ihgs, _ := nodesCoordinator.NewIndexHashedGroupSelector(6, hasher, 0, 1) validator0 := mock.NewValidatorMock(big.NewInt(1), 1, []byte("pk0")) validator1 := mock.NewValidatorMock(big.NewInt(2), 2, []byte("pk1")) @@ -322,7 +337,9 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest6From10ValidatorsSho validator9, } - _ = ihgs.LoadEligibleList(list) + nodesMap := make(map[uint32][]consensus.Validator) + nodesMap[0] = list + _ = ihgs.LoadNodesPerShards(nodesMap) list2, err := ihgs.ComputeValidatorsGroup([]byte(randomness)) @@ -341,7 +358,7 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest6From10ValidatorsSho func BenchmarkIndexHashedGroupSelector_ComputeValidatorsGroup21of400(b *testing.B) { consensusGroupSize := 21 - ihgs, _ := groupSelectors.NewIndexHashedGroupSelector(consensusGroupSize, mock.HasherMock{}) + ihgs, _ := nodesCoordinator.NewIndexHashedGroupSelector(consensusGroupSize, mock.HasherMock{}, 0, 1) list := make([]consensus.Validator, 0) @@ -349,7 +366,10 @@ func BenchmarkIndexHashedGroupSelector_ComputeValidatorsGroup21of400(b *testing. 
for i := 0; i < 400; i++ { list = append(list, mock.NewValidatorMock(big.NewInt(0), 0, []byte("pk"+strconv.Itoa(i)))) } - _ = ihgs.LoadEligibleList(list) + + nodesMap := make(map[uint32][]consensus.Validator) + nodesMap[0] = list + _ = ihgs.LoadNodesPerShards(nodesMap) b.ResetTimer() diff --git a/node/node.go b/node/node.go index 779369c6520..29b0198aa8c 100644 --- a/node/node.go +++ b/node/node.go @@ -14,8 +14,7 @@ import ( "github.com/ElrondNetwork/elrond-go/consensus/chronology" "github.com/ElrondNetwork/elrond-go/consensus/spos" "github.com/ElrondNetwork/elrond-go/consensus/spos/sposFactory" - "github.com/ElrondNetwork/elrond-go/consensus/validators" - "github.com/ElrondNetwork/elrond-go/consensus/validators/groupSelectors" + "github.com/ElrondNetwork/elrond-go/consensus/validators/nodesCoordinator" "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/core/genesis" "github.com/ElrondNetwork/elrond-go/core/logger" @@ -34,6 +33,7 @@ import ( "github.com/ElrondNetwork/elrond-go/process/factory" "github.com/ElrondNetwork/elrond-go/process/sync" "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/consensus/validators" ) // WaitTime defines the time in milliseconds until node waits the requested info from the network @@ -268,7 +268,7 @@ func (n *Node) StartConsensus() error { return err } - validatorGroupSelector, err := n.createValidatorGroupSelector() + nCoordinator, err := n.createNodesCoordinator() if err != nil { return err } @@ -288,7 +288,7 @@ func (n *Node) StartConsensus() error { n.rounder, n.shardCoordinator, n.syncTimer, - validatorGroupSelector) + nCoordinator) if err != nil { return err } @@ -484,35 +484,41 @@ func (n *Node) createConsensusState() (*spos.ConsensusState, error) { return consensusState, nil } -// createValidatorGroupSelector creates a index hashed group selector object -func (n *Node) createValidatorGroupSelector() (consensus.ValidatorGroupSelector, error) { - validatorGroupSelector, err := groupSelectors.NewIndexHashedGroupSelector(n.consensusGroupSize, n.hasher) +// createNodesCoordinator creates a index hashed group selector object +func (n *Node) createNodesCoordinator() (consensus.NodesCoordinator, error) { + nCoordinator, err := nodesCoordinator.NewIndexHashedGroupSelector( + n.consensusGroupSize, + n.hasher, + n.shardCoordinator.SelfId(), + n.shardCoordinator.NumberOfShards(), + ) if err != nil { return nil, err } - validatorsList := make([]consensus.Validator, 0) - shID := n.shardCoordinator.SelfId() + nodesMap := make(map[uint32][]consensus.Validator) + nbShards := n.shardCoordinator.NumberOfShards() - if len(n.initialNodesPubkeys[shID]) == 0 { - return nil, errors.New("could not create validator group as shardID is out of range") - } + for sh := uint32(0); sh < nbShards; sh++ { + nodesInShard := len(n.initialNodesPubkeys[sh]) + nodesMap[sh] = make([]consensus.Validator, nodesInShard) - for i := 0; i < len(n.initialNodesPubkeys[shID]); i++ { - validator, err := validators.NewValidator(big.NewInt(0), 0, []byte(n.initialNodesPubkeys[shID][i])) - if err != nil { - return nil, err - } + for i := 0; i < nodesInShard; i++ { + validator, err := validators.NewValidator(big.NewInt(0), 0, []byte(n.initialNodesPubkeys[sh][i])) + if err != nil { + return nil, err + } - validatorsList = append(validatorsList, validator) + nodesMap[sh][i] = validator + } } - err = validatorGroupSelector.LoadEligibleList(validatorsList) + err = nCoordinator.LoadNodesPerShards(nodesMap) if err != nil { return nil, err } - return 
validatorGroupSelector, nil + return nCoordinator, nil } // createConsensusTopic creates a consensus topic for node From 83f22c890f7fe7dd7f05dfdb81c19eb095347270 Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Wed, 26 Jun 2019 10:30:58 +0300 Subject: [PATCH 002/234] consensus: remove bitmap from consensus group public key selection --- consensus/interface.go | 4 ++-- consensus/mock/validatorGroupSelectorMock.go | 18 ++++++------------ .../indexHashedNodesCoordinator.go | 17 +++-------------- 3 files changed, 11 insertions(+), 28 deletions(-) diff --git a/consensus/interface.go b/consensus/interface.go index fdc042ee760..f7008e7c4f6 100644 --- a/consensus/interface.go +++ b/consensus/interface.go @@ -64,10 +64,10 @@ type NodesCoordinator interface { SetConsensusGroupSize(int) error } -// PublicKeysSelector allows retrieval of eligible validators public keys selected by a bitmap +// PublicKeysSelector allows retrieval of eligible validators public keys type PublicKeysSelector interface { GetSelectedPublicKeys(selection []byte) (publicKeys []string, err error) - GetSelectedValidatorsPublicKeys(randomness []byte, bitmap []byte) ([]string, error) + GetValidatorsPublicKeys(randomness []byte) ([]string, error) } // BroadcastMessenger defines the behaviour of the broadcast messages by the consensus group diff --git a/consensus/mock/validatorGroupSelectorMock.go b/consensus/mock/validatorGroupSelectorMock.go index fc40438d891..68da32357ce 100644 --- a/consensus/mock/validatorGroupSelectorMock.go +++ b/consensus/mock/validatorGroupSelectorMock.go @@ -7,8 +7,8 @@ import ( ) type ValidatorGroupSelectorMock struct { - ComputeValidatorsGroupCalled func([]byte) ([]consensus.Validator, error) - GetSelectedValidatorsPublicKeysCalled func(randomness []byte, bitmap []byte) ([]string, error) + ComputeValidatorsGroupCalled func([]byte) ([]consensus.Validator, error) + GetValidatorsPublicKeysCalled func(randomness []byte) ([]string, error) } func (vgsm ValidatorGroupSelectorMock) ComputeValidatorsGroup(randomness []byte) (validatorsGroup []consensus.Validator, err error) { @@ -31,25 +31,19 @@ func (vgsm ValidatorGroupSelectorMock) ComputeValidatorsGroup(randomness []byte) return list, nil } -func (vgsm ValidatorGroupSelectorMock) GetSelectedValidatorsPublicKeys(randomness []byte, bitmap []byte) ([]string, error) { - if vgsm.GetSelectedValidatorsPublicKeysCalled != nil { - return vgsm.GetSelectedValidatorsPublicKeysCalled(randomness, bitmap) +func (vgsm ValidatorGroupSelectorMock) GetValidatorsPublicKeys(randomness []byte) ([]string, error) { + if vgsm.GetValidatorsPublicKeysCalled != nil { + return vgsm.GetValidatorsPublicKeysCalled(randomness) } validators, err := vgsm.ComputeValidatorsGroup(randomness) - if err != nil { return nil, err } pubKeys := make([]string, 0) - for i, v := range validators { - isSelected := (bitmap[i/8] & (1 << (uint16(i) % 8))) != 0 - if !isSelected { - continue - } - + for _, v := range validators { pubKeys = append(pubKeys, string(v.PubKey())) } diff --git a/consensus/validators/nodesCoordinator/indexHashedNodesCoordinator.go b/consensus/validators/nodesCoordinator/indexHashedNodesCoordinator.go index 36a98563922..a80e1d8af92 100644 --- a/consensus/validators/nodesCoordinator/indexHashedNodesCoordinator.go +++ b/consensus/validators/nodesCoordinator/indexHashedNodesCoordinator.go @@ -91,15 +91,9 @@ func (ihgs *indexHashedNodesCoordinator) ComputeValidatorsGroup(randomness []byt return tempList, nil } -// GetSelectedValidatorsPublicKeys calculates the validators group for a speciffic 
randomness and selection bitmap, +// GetSelectedValidatorsPublicKeys calculates the validators group for a specific randomness, // returning their public keys -func (ihgs *indexHashedNodesCoordinator) GetSelectedValidatorsPublicKeys(randomness []byte, bitmap []byte) ([]string, error) { - isLeaderSelected := bitmap[0]&1 != 0 // first bit in bitmap selects leader - // leader always needs to be selected - if !isLeaderSelected { - return nil, ErrLeaderNotSelectedInBitmap - } - +func (ihgs *indexHashedNodesCoordinator) GetValidatorsPublicKeys(randomness []byte) ([]string, error) { consensusNodes, err := ihgs.ComputeValidatorsGroup(randomness) if err != nil { return nil, err @@ -107,12 +101,7 @@ func (ihgs *indexHashedNodesCoordinator) GetSelectedValidatorsPublicKeys(randomn pubKeys := make([]string, 0) - for i, v := range consensusNodes { - isSelected := (bitmap[i/8] & (1 << (uint16(i) % 8))) != 0 - if !isSelected { - continue - } - + for _, v := range consensusNodes { pubKeys = append(pubKeys, string(v.PubKey())) } From 38ebacc29516d3af6a53aa01f65dc726869ad256 Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Wed, 26 Jun 2019 15:02:15 +0300 Subject: [PATCH 003/234] consensus, sharding: move validator structure and nodes coordinator from consensus to sharding --- README.md | 4 +- consensus/interface.go | 23 ----- consensus/mock/consensusDataContainerMock.go | 6 +- consensus/mock/consensusStateMock.go | 9 +- consensus/mock/validatorGroupSelectorMock.go | 11 ++- consensus/spos/bls/export_test.go | 2 +- consensus/spos/bn/export_test.go | 2 +- .../commonSubround/subroundStartRound_test.go | 10 +-- consensus/spos/consensusCore.go | 6 +- consensus/spos/consensusState.go | 3 +- consensus/spos/consensusState_test.go | 3 +- consensus/spos/interface.go | 2 +- consensus/validators/errors.go | 14 --- .../validators/nodesCoordinator/errors.go | 35 -------- .../nodesCoordinator/export_test.go | 9 -- node/node.go | 12 ++- sharding/errors.go | 33 +++++++ sharding/export_test.go | 4 + .../indexHashedNodesCoordinator.go | 23 +++-- .../indexHashedNodesCoordinator_test.go | 85 +++++++++---------- sharding/interface.go | 40 +++++++++ sharding/mock/hasherMock.go | 29 +++++++ sharding/mock/hasherStub.go | 22 +++++ sharding/mock/validatorGroupSelectorMock.go | 67 +++++++++++++++ sharding/mock/validatorMock.go | 27 ++++++ sharding/nodesSetup_test.go | 2 +- sharding/sharding.go | 17 ---- .../validators => sharding}/validator.go | 2 +- .../validators => sharding}/validator_test.go | 21 +++-- 29 files changed, 323 insertions(+), 200 deletions(-) delete mode 100644 consensus/validators/errors.go delete mode 100644 consensus/validators/nodesCoordinator/errors.go delete mode 100644 consensus/validators/nodesCoordinator/export_test.go rename {consensus/validators/nodesCoordinator => sharding}/indexHashedNodesCoordinator.go (90%) rename {consensus/validators/nodesCoordinator => sharding}/indexHashedNodesCoordinator_test.go (77%) create mode 100644 sharding/interface.go create mode 100644 sharding/mock/hasherMock.go create mode 100644 sharding/mock/hasherStub.go create mode 100644 sharding/mock/validatorGroupSelectorMock.go create mode 100644 sharding/mock/validatorMock.go delete mode 100644 sharding/sharding.go rename {consensus/validators => sharding}/validator.go (97%) rename {consensus/validators => sharding}/validator_test.go (54%) diff --git a/README.md b/README.md index d0c536dda14..bd43fd30c46 100644 --- a/README.md +++ b/README.md @@ -24,8 +24,8 @@ The main branch that will be used is the master branch. 
Alternatively, an older ``` $ mkdir -p $GOPATH/src/github.com/ElrondNetwork $ cd $GOPATH/src/github.com/ElrondNetwork -$ git clone https://github.com/ElrondNetwork/elrond-go-sandbox -$ cd elrond-go-sandbox && git checkout master +$ git clone https://github.com/ElrondNetwork/elrond-go +$ cd elrond-go && git checkout master $ GO111MODULE=on go mod vendor $ cd cmd/node && go build ``` diff --git a/consensus/interface.go b/consensus/interface.go index f7008e7c4f6..90063b01192 100644 --- a/consensus/interface.go +++ b/consensus/interface.go @@ -1,7 +1,6 @@ package consensus import ( - "math/big" "time" "github.com/ElrondNetwork/elrond-go/data" @@ -48,28 +47,6 @@ type SposFactory interface { GenerateSubrounds() } -// Validator defines what a consensus validator implementation should do. -type Validator interface { - Stake() *big.Int - Rating() int32 - PubKey() []byte -} - -// NodesCoordinator defines the behaviour of a struct able to do validator group selection -type NodesCoordinator interface { - PublicKeysSelector - LoadNodesPerShards(nodes map[uint32][]Validator) error - ComputeValidatorsGroup(randomness []byte) (validatorsGroup []Validator, err error) - ConsensusGroupSize() int - SetConsensusGroupSize(int) error -} - -// PublicKeysSelector allows retrieval of eligible validators public keys -type PublicKeysSelector interface { - GetSelectedPublicKeys(selection []byte) (publicKeys []string, err error) - GetValidatorsPublicKeys(randomness []byte) ([]string, error) -} - // BroadcastMessenger defines the behaviour of the broadcast messages by the consensus group type BroadcastMessenger interface { BroadcastBlock(data.BodyHandler, data.HeaderHandler) error diff --git a/consensus/mock/consensusDataContainerMock.go b/consensus/mock/consensusDataContainerMock.go index 0986fe9f3dd..d16ab18cab3 100644 --- a/consensus/mock/consensusDataContainerMock.go +++ b/consensus/mock/consensusDataContainerMock.go @@ -26,7 +26,7 @@ type ConsensusCoreMock struct { rounder consensus.Rounder shardCoordinator sharding.Coordinator syncTimer ntp.SyncTimer - validatorGroupSelector consensus.NodesCoordinator + validatorGroupSelector sharding.NodesCoordinator } func (cdc *ConsensusCoreMock) Blockchain() data.ChainHandler { @@ -77,7 +77,7 @@ func (cdc *ConsensusCoreMock) SyncTimer() ntp.SyncTimer { return cdc.syncTimer } -func (cdc *ConsensusCoreMock) ValidatorGroupSelector() consensus.NodesCoordinator { +func (cdc *ConsensusCoreMock) ValidatorGroupSelector() sharding.NodesCoordinator { return cdc.validatorGroupSelector } @@ -124,7 +124,7 @@ func (cdc *ConsensusCoreMock) SetSyncTimer(syncTimer ntp.SyncTimer) { cdc.syncTimer = syncTimer } -func (cdc *ConsensusCoreMock) SetValidatorGroupSelector(validatorGroupSelector consensus.NodesCoordinator) { +func (cdc *ConsensusCoreMock) SetValidatorGroupSelector(validatorGroupSelector sharding.NodesCoordinator) { cdc.validatorGroupSelector = validatorGroupSelector } diff --git a/consensus/mock/consensusStateMock.go b/consensus/mock/consensusStateMock.go index 68ea0757558..6ba2b9cd610 100644 --- a/consensus/mock/consensusStateMock.go +++ b/consensus/mock/consensusStateMock.go @@ -1,13 +1,16 @@ package mock -import "github.com/ElrondNetwork/elrond-go/consensus" +import ( + "github.com/ElrondNetwork/elrond-go/consensus" + "github.com/ElrondNetwork/elrond-go/sharding" +) type ConsensusStateMock struct { ResetConsensusStateCalled func() IsNodeLeaderInCurrentRoundCalled func(node string) bool IsSelfLeaderInCurrentRoundCalled func() bool GetLeaderCalled func() (string, error) - 
GetNextConsensusGroupCalled func(randomSource string, vgs consensus.NodesCoordinator) ([]string, error) + GetNextConsensusGroupCalled func(randomSource string, vgs sharding.NodesCoordinator) ([]string, error) IsConsensusDataSetCalled func() bool IsConsensusDataEqualCalled func(data []byte) bool IsJobDoneCalled func(node string, currentSubroundId int) bool @@ -43,7 +46,7 @@ func (cnsm *ConsensusStateMock) GetLeader() (string, error) { } func (cnsm *ConsensusStateMock) GetNextConsensusGroup(randomSource string, - vgs consensus.NodesCoordinator) ([]string, + vgs sharding.NodesCoordinator) ([]string, error) { return cnsm.GetNextConsensusGroupCalled(randomSource, vgs) } diff --git a/consensus/mock/validatorGroupSelectorMock.go b/consensus/mock/validatorGroupSelectorMock.go index 68da32357ce..8e0af6f606c 100644 --- a/consensus/mock/validatorGroupSelectorMock.go +++ b/consensus/mock/validatorGroupSelectorMock.go @@ -2,21 +2,20 @@ package mock import ( "math/big" - - "github.com/ElrondNetwork/elrond-go/consensus" + "github.com/ElrondNetwork/elrond-go/sharding" ) type ValidatorGroupSelectorMock struct { - ComputeValidatorsGroupCalled func([]byte) ([]consensus.Validator, error) + ComputeValidatorsGroupCalled func([]byte) ([]sharding.Validator, error) GetValidatorsPublicKeysCalled func(randomness []byte) ([]string, error) } -func (vgsm ValidatorGroupSelectorMock) ComputeValidatorsGroup(randomness []byte) (validatorsGroup []consensus.Validator, err error) { +func (vgsm ValidatorGroupSelectorMock) ComputeValidatorsGroup(randomness []byte) (validatorsGroup []sharding.Validator, err error) { if vgsm.ComputeValidatorsGroupCalled != nil { return vgsm.ComputeValidatorsGroupCalled(randomness) } - list := []consensus.Validator{ + list := []sharding.Validator{ NewValidatorMock(big.NewInt(0), 0, []byte("A")), NewValidatorMock(big.NewInt(0), 0, []byte("B")), NewValidatorMock(big.NewInt(0), 0, []byte("C")), @@ -54,7 +53,7 @@ func (vgsm ValidatorGroupSelectorMock) ConsensusGroupSize() int { panic("implement me") } -func (vgsm ValidatorGroupSelectorMock) LoadNodesPerShards(map[uint32][]consensus.Validator) error { +func (vgsm ValidatorGroupSelectorMock) LoadNodesPerShards(map[uint32][]sharding.Validator) error { return nil } diff --git a/consensus/spos/bls/export_test.go b/consensus/spos/bls/export_test.go index feeb24cb1ef..30c56804ce1 100644 --- a/consensus/spos/bls/export_test.go +++ b/consensus/spos/bls/export_test.go @@ -60,7 +60,7 @@ func (fct *factory) SyncTimer() ntp.SyncTimer { return fct.consensusCore.SyncTimer() } -func (fct *factory) ValidatorGroupSelector() consensus.NodesCoordinator { +func (fct *factory) ValidatorGroupSelector() sharding.NodesCoordinator { return fct.consensusCore.ValidatorGroupSelector() } diff --git a/consensus/spos/bn/export_test.go b/consensus/spos/bn/export_test.go index 334d732cb8d..eccda486ab1 100644 --- a/consensus/spos/bn/export_test.go +++ b/consensus/spos/bn/export_test.go @@ -60,7 +60,7 @@ func (fct *factory) SyncTimer() ntp.SyncTimer { return fct.consensusCore.SyncTimer() } -func (fct *factory) ValidatorGroupSelector() consensus.NodesCoordinator { +func (fct *factory) ValidatorGroupSelector() sharding.NodesCoordinator { return fct.consensusCore.ValidatorGroupSelector() } diff --git a/consensus/spos/commonSubround/subroundStartRound_test.go b/consensus/spos/commonSubround/subroundStartRound_test.go index 640fa71893b..3ff5707f065 100644 --- a/consensus/spos/commonSubround/subroundStartRound_test.go +++ b/consensus/spos/commonSubround/subroundStartRound_test.go @@ -5,11 
+5,11 @@ import ( "testing" "time" - "github.com/ElrondNetwork/elrond-go/consensus" "github.com/ElrondNetwork/elrond-go/consensus/mock" "github.com/ElrondNetwork/elrond-go/consensus/spos" "github.com/ElrondNetwork/elrond-go/consensus/spos/commonSubround" "github.com/stretchr/testify/assert" + "github.com/ElrondNetwork/elrond-go/sharding" ) func defaultSubroundStartRoundFromSubround(sr *spos.Subround) (*commonSubround.SubroundStartRound, error) { @@ -321,7 +321,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldReturnFalseWhenGenerateNextCon validatorGroupSelector := mock.ValidatorGroupSelectorMock{} err := errors.New("error") - validatorGroupSelector.ComputeValidatorsGroupCalled = func(bytes []byte) ([]consensus.Validator, error) { + validatorGroupSelector.ComputeValidatorsGroupCalled = func(bytes []byte) ([]sharding.Validator, error) { return nil, err } container := mock.InitConsensusCore() @@ -337,8 +337,8 @@ func TestSubroundStartRound_InitCurrentRoundShouldReturnFalseWhenGetLeaderErr(t t.Parallel() validatorGroupSelector := mock.ValidatorGroupSelectorMock{} - validatorGroupSelector.ComputeValidatorsGroupCalled = func(bytes []byte) ([]consensus.Validator, error) { - return make([]consensus.Validator, 0), nil + validatorGroupSelector.ComputeValidatorsGroupCalled = func(bytes []byte) ([]sharding.Validator, error) { + return make([]sharding.Validator, 0), nil } container := mock.InitConsensusCore() @@ -426,7 +426,7 @@ func TestSubroundStartRound_GenerateNextConsensusGroupShouldReturnErr(t *testing validatorGroupSelector := mock.ValidatorGroupSelectorMock{} err := errors.New("error") - validatorGroupSelector.ComputeValidatorsGroupCalled = func(bytes []byte) ([]consensus.Validator, error) { + validatorGroupSelector.ComputeValidatorsGroupCalled = func(bytes []byte) ([]sharding.Validator, error) { return nil, err } container := mock.InitConsensusCore() diff --git a/consensus/spos/consensusCore.go b/consensus/spos/consensusCore.go index 797010ee715..5acab2254f9 100644 --- a/consensus/spos/consensusCore.go +++ b/consensus/spos/consensusCore.go @@ -28,7 +28,7 @@ type ConsensusCore struct { rounder consensus.Rounder shardCoordinator sharding.Coordinator syncTimer ntp.SyncTimer - validatorGroupSelector consensus.NodesCoordinator + validatorGroupSelector sharding.NodesCoordinator } // NewConsensusCore creates a new ConsensusCore instance @@ -47,7 +47,7 @@ func NewConsensusCore( rounder consensus.Rounder, shardCoordinator sharding.Coordinator, syncTimer ntp.SyncTimer, - validatorGroupSelector consensus.NodesCoordinator) (*ConsensusCore, error) { + validatorGroupSelector sharding.NodesCoordinator) (*ConsensusCore, error) { consensusCore := &ConsensusCore{ blockChain, @@ -136,7 +136,7 @@ func (cc *ConsensusCore) SyncTimer() ntp.SyncTimer { } // NodesCoordinator gets the NodesCoordinator stored in the ConsensusCore -func (cc *ConsensusCore) ValidatorGroupSelector() consensus.NodesCoordinator { +func (cc *ConsensusCore) ValidatorGroupSelector() sharding.NodesCoordinator { return cc.validatorGroupSelector } diff --git a/consensus/spos/consensusState.go b/consensus/spos/consensusState.go index 8f6f3f3249b..cfc08163216 100644 --- a/consensus/spos/consensusState.go +++ b/consensus/spos/consensusState.go @@ -8,6 +8,7 @@ import ( "github.com/ElrondNetwork/elrond-go/consensus" "github.com/ElrondNetwork/elrond-go/core/logger" "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/sharding" ) var log = logger.DefaultLogger() @@ -94,7 +95,7 @@ func (cns *ConsensusState) GetLeader() 
(string, error) { // GetNextConsensusGroup gets the new consensus group for the current round based on current eligible list and a random // source for the new selection -func (cns *ConsensusState) GetNextConsensusGroup(randomSource string, vgs consensus.NodesCoordinator) ([]string, +func (cns *ConsensusState) GetNextConsensusGroup(randomSource string, vgs sharding.NodesCoordinator) ([]string, error) { validatorsGroup, err := vgs.ComputeValidatorsGroup([]byte(randomSource)) diff --git a/consensus/spos/consensusState_test.go b/consensus/spos/consensusState_test.go index e66a2800167..6c3f8047561 100644 --- a/consensus/spos/consensusState_test.go +++ b/consensus/spos/consensusState_test.go @@ -11,6 +11,7 @@ import ( "github.com/ElrondNetwork/elrond-go/consensus/spos/bn" "github.com/ElrondNetwork/elrond-go/data/block" "github.com/stretchr/testify/assert" + "github.com/ElrondNetwork/elrond-go/sharding" ) func internalInitConsensusState() *spos.ConsensusState { @@ -140,7 +141,7 @@ func TestConsensusState_GetNextConsensusGroupShouldFailWhenComputeValidatorsGrou vgs := mock.ValidatorGroupSelectorMock{} err := errors.New("error") - vgs.ComputeValidatorsGroupCalled = func(randomness []byte) ([]consensus.Validator, error) { + vgs.ComputeValidatorsGroupCalled = func(randomness []byte) ([]sharding.Validator, error) { return nil, err } diff --git a/consensus/spos/interface.go b/consensus/spos/interface.go index e58bea0bb80..71737728ff8 100644 --- a/consensus/spos/interface.go +++ b/consensus/spos/interface.go @@ -39,7 +39,7 @@ type ConsensusCoreHandler interface { // SyncTimer gets the SyncTimer stored in the ConsensusCore SyncTimer() ntp.SyncTimer // NodesCoordinator gets the NodesCoordinator stored in the ConsensusCore - ValidatorGroupSelector() consensus.NodesCoordinator + ValidatorGroupSelector() sharding.NodesCoordinator // RandomnessPrivateKey returns the private key stored in the ConsensusStore used for randomness generation RandomnessPrivateKey() crypto.PrivateKey // RandomnessSingleSigner returns the single signer stored in the ConsensusStore used for randomness generation diff --git a/consensus/validators/errors.go b/consensus/validators/errors.go deleted file mode 100644 index 9276c1ca7f4..00000000000 --- a/consensus/validators/errors.go +++ /dev/null @@ -1,14 +0,0 @@ -package validators - -import ( - "errors" -) - -// ErrNilStake signals that a nil stake structure has been provided -var ErrNilStake = errors.New("nil stake") - -// ErrNegativeStake signals that the stake is negative -var ErrNegativeStake = errors.New("negative stake") - -// ErrNilPubKey signals that the public key is nil -var ErrNilPubKey = errors.New("nil public key") diff --git a/consensus/validators/nodesCoordinator/errors.go b/consensus/validators/nodesCoordinator/errors.go deleted file mode 100644 index 95df4739f77..00000000000 --- a/consensus/validators/nodesCoordinator/errors.go +++ /dev/null @@ -1,35 +0,0 @@ -package nodesCoordinator - -import ( - "errors" -) - -// ErrNilInputNodesMap signals that a nil nodes map was provided -var ErrNilInputNodesMap = errors.New("nil input nodes map") - -// ErrNilInputNodesList signals that a nil nodes list was provided -var ErrNilInputNodesList = errors.New("nil input nodes list") - -// ErrSmallEligibleListSize signals that the eligible validators list's size is less than the consensus size -var ErrSmallEligibleListSize = errors.New("small eligible list size") - -// ErrInvalidConsensusGroupSize signals that the consensus size is invalid (e.g. 
value is negative) -var ErrInvalidConsensusGroupSize = errors.New("invalid consensus group size") - -// ErrEligibleSelectionMismatch signals a mismatch between the eligible list and the group selection bitmap -var ErrEligibleSelectionMismatch = errors.New("invalid eligible validator selection") - -// ErrLeaderNotSelectedInBitmap signals an invalid validators selection from a consensus group as leader is not marked -var ErrLeaderNotSelectedInBitmap = errors.New("bitmap invalid as leader is not selected") - -// ErrEligibleTooManySelections signals an invalid selection for consensus group -var ErrEligibleTooManySelections = errors.New("too many selections for consensus group") - -// ErrEligibleTooFewSelections signals an invalid selection for consensus group -var ErrEligibleTooFewSelections = errors.New("too few selections for consensus group") - -// ErrNilRandomness signals that a nil randomness source has been provided -var ErrNilRandomness = errors.New("nil randomness source") - -// ErrNilHasher signals that a nil hasher has been provided -var ErrNilHasher = errors.New("nil hasher") diff --git a/consensus/validators/nodesCoordinator/export_test.go b/consensus/validators/nodesCoordinator/export_test.go deleted file mode 100644 index 51526ef208a..00000000000 --- a/consensus/validators/nodesCoordinator/export_test.go +++ /dev/null @@ -1,9 +0,0 @@ -package nodesCoordinator - -import ( - "github.com/ElrondNetwork/elrond-go/consensus" -) - -func (ihgs *indexHashedNodesCoordinator) EligibleList() []consensus.Validator { - return ihgs.nodesMap[ihgs.shardId] -} diff --git a/node/node.go b/node/node.go index 29b0198aa8c..a7a643fa8e4 100644 --- a/node/node.go +++ b/node/node.go @@ -14,7 +14,6 @@ import ( "github.com/ElrondNetwork/elrond-go/consensus/chronology" "github.com/ElrondNetwork/elrond-go/consensus/spos" "github.com/ElrondNetwork/elrond-go/consensus/spos/sposFactory" - "github.com/ElrondNetwork/elrond-go/consensus/validators/nodesCoordinator" "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/core/genesis" "github.com/ElrondNetwork/elrond-go/core/logger" @@ -33,7 +32,6 @@ import ( "github.com/ElrondNetwork/elrond-go/process/factory" "github.com/ElrondNetwork/elrond-go/process/sync" "github.com/ElrondNetwork/elrond-go/sharding" - "github.com/ElrondNetwork/elrond-go/consensus/validators" ) // WaitTime defines the time in milliseconds until node waits the requested info from the network @@ -485,8 +483,8 @@ func (n *Node) createConsensusState() (*spos.ConsensusState, error) { } // createNodesCoordinator creates a index hashed group selector object -func (n *Node) createNodesCoordinator() (consensus.NodesCoordinator, error) { - nCoordinator, err := nodesCoordinator.NewIndexHashedGroupSelector( +func (n *Node) createNodesCoordinator() (sharding.NodesCoordinator, error) { + nCoordinator, err := sharding.NewIndexHashedGroupSelector( n.consensusGroupSize, n.hasher, n.shardCoordinator.SelfId(), @@ -496,15 +494,15 @@ func (n *Node) createNodesCoordinator() (consensus.NodesCoordinator, error) { return nil, err } - nodesMap := make(map[uint32][]consensus.Validator) + nodesMap := make(map[uint32][]sharding.Validator) nbShards := n.shardCoordinator.NumberOfShards() for sh := uint32(0); sh < nbShards; sh++ { nodesInShard := len(n.initialNodesPubkeys[sh]) - nodesMap[sh] = make([]consensus.Validator, nodesInShard) + nodesMap[sh] = make([]sharding.Validator, nodesInShard) for i := 0; i < nodesInShard; i++ { - validator, err := validators.NewValidator(big.NewInt(0), 0, 
[]byte(n.initialNodesPubkeys[sh][i])) + validator, err := sharding.NewValidator(big.NewInt(0), 0, []byte(n.initialNodesPubkeys[sh][i])) if err != nil { return nil, err } diff --git a/sharding/errors.go b/sharding/errors.go index 2f803ac9fee..e324ab5e5ca 100644 --- a/sharding/errors.go +++ b/sharding/errors.go @@ -36,3 +36,36 @@ var ErrMinNodesPerShardSmallerThanConsensusSize = errors.New("minimum nodes per // ErrNodesSizeSmallerThanMinNoOfNodes signals that there are not enough nodes defined in genesis file var ErrNodesSizeSmallerThanMinNoOfNodes = errors.New("length of nodes defined is smaller than min nodes per shard required") + +// ErrNilStake signals that a nil stake structure has been provided +var ErrNilStake = errors.New("nil stake") + +// ErrNegativeStake signals that the stake is negative +var ErrNegativeStake = errors.New("negative stake") + +// ErrNilPubKey signals that the public key is nil +var ErrNilPubKey = errors.New("nil public key") + +// ErrNilInputNodesMap signals that a nil nodes map was provided +var ErrNilInputNodesMap = errors.New("nil input nodes map") + +// ErrSmallEligibleListSize signals that the eligible validators list's size is less than the consensus size +var ErrSmallEligibleListSize = errors.New("small eligible list size") + +// ErrInvalidConsensusGroupSize signals that the consensus size is invalid (e.g. value is negative) +var ErrInvalidConsensusGroupSize = errors.New("invalid consensus group size") + +// ErrEligibleSelectionMismatch signals a mismatch between the eligible list and the group selection bitmap +var ErrEligibleSelectionMismatch = errors.New("invalid eligible validator selection") + +// ErrEligibleTooManySelections signals an invalid selection for consensus group +var ErrEligibleTooManySelections = errors.New("too many selections for consensus group") + +// ErrEligibleTooFewSelections signals an invalid selection for consensus group +var ErrEligibleTooFewSelections = errors.New("too few selections for consensus group") + +// ErrNilRandomness signals that a nil randomness source has been provided +var ErrNilRandomness = errors.New("nil randomness source") + +// ErrNilHasher signals that a nil hasher has been provided +var ErrNilHasher = errors.New("nil hasher") diff --git a/sharding/export_test.go b/sharding/export_test.go index 4d6cd6c7e5e..b7f35d44713 100644 --- a/sharding/export_test.go +++ b/sharding/export_test.go @@ -31,3 +31,7 @@ func (ns *NodesSetup) CreateInitialNodesPubKeys() { func CommunicationIdentifierBetweenShards(shardId1 uint32, shardId2 uint32) string { return communicationIdentifierBetweenShards(shardId1, shardId2) } + +func (ihgs *indexHashedNodesCoordinator) EligibleList() []Validator { + return ihgs.nodesMap[ihgs.shardId] +} \ No newline at end of file diff --git a/consensus/validators/nodesCoordinator/indexHashedNodesCoordinator.go b/sharding/indexHashedNodesCoordinator.go similarity index 90% rename from consensus/validators/nodesCoordinator/indexHashedNodesCoordinator.go rename to sharding/indexHashedNodesCoordinator.go index a80e1d8af92..06873cc3206 100644 --- a/consensus/validators/nodesCoordinator/indexHashedNodesCoordinator.go +++ b/sharding/indexHashedNodesCoordinator.go @@ -1,11 +1,10 @@ -package nodesCoordinator +package sharding import ( "bytes" "encoding/binary" "math/big" - "github.com/ElrondNetwork/elrond-go/consensus" "github.com/ElrondNetwork/elrond-go/hashing" ) @@ -16,8 +15,8 @@ type indexHashedNodesCoordinator struct { nbShards uint32 shardId uint32 hasher hashing.Hasher - nodesMap 
map[uint32][]consensus.Validator - expandedEligibleList []consensus.Validator + nodesMap map[uint32][]Validator + expandedEligibleList []Validator consensusGroupSize int } @@ -36,8 +35,8 @@ func NewIndexHashedGroupSelector( nbShards: nbShards, shardId: shardId, hasher: hasher, - nodesMap: make(map[uint32][]consensus.Validator), - expandedEligibleList: make([]consensus.Validator, 0), + nodesMap: make(map[uint32][]Validator), + expandedEligibleList: make([]Validator, 0), } err := ihgs.SetConsensusGroupSize(consensusGroupSize) @@ -49,7 +48,7 @@ func NewIndexHashedGroupSelector( } // LoadNodesPerShards loads the distribution of nodes per shard into the nodes management component -func (ihgs *indexHashedNodesCoordinator) LoadNodesPerShards(nodes map[uint32][]consensus.Validator) error { +func (ihgs *indexHashedNodesCoordinator) LoadNodesPerShards(nodes map[uint32][]Validator) error { if nodes == nil { return ErrNilInputNodesMap } @@ -68,7 +67,7 @@ func (ihgs *indexHashedNodesCoordinator) LoadNodesPerShards(nodes map[uint32][]c // exceed the maximum index value permitted by the validator list), and then recheck against temp validator list until // the item at the new proposed index is not found in the list. This new proposed index will be called checked index // 4. the item at the checked index is appended in the temp validator list -func (ihgs *indexHashedNodesCoordinator) ComputeValidatorsGroup(randomness []byte) (validatorsGroup []consensus.Validator, err error) { +func (ihgs *indexHashedNodesCoordinator) ComputeValidatorsGroup(randomness []byte) (validatorsGroup []Validator, err error) { if len(ihgs.nodesMap[ihgs.shardId]) < ihgs.consensusGroupSize { return nil, ErrSmallEligibleListSize } @@ -79,7 +78,7 @@ func (ihgs *indexHashedNodesCoordinator) ComputeValidatorsGroup(randomness []byt ihgs.expandedEligibleList = ihgs.expandEligibleList() - tempList := make([]consensus.Validator, 0) + tempList := make([]Validator, 0) for startIdx := 0; startIdx < ihgs.consensusGroupSize; startIdx++ { proposedIndex := ihgs.computeListIndex(startIdx, string(randomness)) @@ -144,7 +143,7 @@ func (ihgs *indexHashedNodesCoordinator) GetSelectedPublicKeys(selection []byte) return publicKeys, nil } -func (ihgs *indexHashedNodesCoordinator) expandEligibleList() []consensus.Validator { +func (ihgs *indexHashedNodesCoordinator) expandEligibleList() []Validator { //TODO implement an expand eligible list variant return ihgs.nodesMap[ihgs.shardId] } @@ -165,7 +164,7 @@ func (ihgs *indexHashedNodesCoordinator) computeListIndex(currentIndex int, rand } // checkIndex returns a checked index starting from a proposed index -func (ihgs *indexHashedNodesCoordinator) checkIndex(proposedIndex int, selectedList []consensus.Validator) int { +func (ihgs *indexHashedNodesCoordinator) checkIndex(proposedIndex int, selectedList []Validator) int { for { v := ihgs.expandedEligibleList[proposedIndex] @@ -181,7 +180,7 @@ func (ihgs *indexHashedNodesCoordinator) checkIndex(proposedIndex int, selectedL } // validatorIsInList returns true if a validator has been found in provided list -func (ihgs *indexHashedNodesCoordinator) validatorIsInList(v consensus.Validator, list []consensus.Validator) bool { +func (ihgs *indexHashedNodesCoordinator) validatorIsInList(v Validator, list []Validator) bool { for i := 0; i < len(list); i++ { if bytes.Equal(v.PubKey(), list[i].PubKey()) { return true diff --git a/consensus/validators/nodesCoordinator/indexHashedNodesCoordinator_test.go b/sharding/indexHashedNodesCoordinator_test.go similarity index 77% 
rename from consensus/validators/nodesCoordinator/indexHashedNodesCoordinator_test.go rename to sharding/indexHashedNodesCoordinator_test.go index 7272a2d0d7d..e18029a4b0f 100644 --- a/consensus/validators/nodesCoordinator/indexHashedNodesCoordinator_test.go +++ b/sharding/indexHashedNodesCoordinator_test.go @@ -1,4 +1,4 @@ -package nodesCoordinator_test +package sharding_test import ( "encoding/binary" @@ -6,10 +6,9 @@ import ( "strconv" "testing" - "github.com/ElrondNetwork/elrond-go/consensus" - "github.com/ElrondNetwork/elrond-go/consensus/mock" - "github.com/ElrondNetwork/elrond-go/consensus/validators/nodesCoordinator" "github.com/stretchr/testify/assert" + "github.com/ElrondNetwork/elrond-go/sharding/mock" + "github.com/ElrondNetwork/elrond-go/sharding" ) func convertBigIntToBytes(value *big.Int) []byte { @@ -28,25 +27,25 @@ func uint64ToBytes(value uint64) []byte { func TestNewIndexHashedGroupSelector_NilHasherShouldErr(t *testing.T) { t.Parallel() - ihgs, err := nodesCoordinator.NewIndexHashedGroupSelector(1, nil, 0, 1) + ihgs, err := sharding.NewIndexHashedGroupSelector(1, nil, 0, 1) assert.Nil(t, ihgs) - assert.Equal(t, nodesCoordinator.ErrNilHasher, err) + assert.Equal(t, sharding.ErrNilHasher, err) } func TestNewIndexHashedGroupSelector_InvalidConsensusGroupSizeShouldErr(t *testing.T) { t.Parallel() - ihgs, err := nodesCoordinator.NewIndexHashedGroupSelector(0, mock.HasherMock{}, 0, 1) + ihgs, err := sharding.NewIndexHashedGroupSelector(0, mock.HasherMock{}, 0, 1) assert.Nil(t, ihgs) - assert.Equal(t, nodesCoordinator.ErrInvalidConsensusGroupSize, err) + assert.Equal(t, sharding.ErrInvalidConsensusGroupSize, err) } func TestNewIndexHashedGroupSelector_OkValsShouldWork(t *testing.T) { t.Parallel() - ihgs, err := nodesCoordinator.NewIndexHashedGroupSelector(1, mock.HasherMock{}, 0, 1) + ihgs, err := sharding.NewIndexHashedGroupSelector(1, mock.HasherMock{}, 0, 1) assert.NotNil(t, ihgs) assert.Nil(t, err) @@ -57,22 +56,22 @@ func TestNewIndexHashedGroupSelector_OkValsShouldWork(t *testing.T) { func TestIndexHashedGroupSelector_LoadEligibleListNilListShouldErr(t *testing.T) { t.Parallel() - ihgs, _ := nodesCoordinator.NewIndexHashedGroupSelector(10, mock.HasherMock{}, 0, 1) + ihgs, _ := sharding.NewIndexHashedGroupSelector(10, mock.HasherMock{}, 0, 1) - assert.Equal(t, nodesCoordinator.ErrNilInputNodesMap, ihgs.LoadNodesPerShards(nil)) + assert.Equal(t, sharding.ErrNilInputNodesMap, ihgs.LoadNodesPerShards(nil)) } func TestIndexHashedGroupSelector_OkValShouldWork(t *testing.T) { t.Parallel() - ihgs, _ := nodesCoordinator.NewIndexHashedGroupSelector(10, mock.HasherMock{}, 0, 1) + ihgs, _ := sharding.NewIndexHashedGroupSelector(10, mock.HasherMock{}, 0, 1) - list := []consensus.Validator{ + list := []sharding.Validator{ mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0")), mock.NewValidatorMock(big.NewInt(2), 3, []byte("pk1")), } - nodesMap := make(map[uint32][]consensus.Validator) + nodesMap := make(map[uint32][]sharding.Validator) nodesMap[0] = list err := ihgs.LoadNodesPerShards(nodesMap) @@ -85,54 +84,54 @@ func TestIndexHashedGroupSelector_OkValShouldWork(t *testing.T) { func TestIndexHashedGroupSelector_ComputeValidatorsGroup0SizeShouldErr(t *testing.T) { t.Parallel() - ihgs, _ := nodesCoordinator.NewIndexHashedGroupSelector(1, mock.HasherMock{}, 0, 1) + ihgs, _ := sharding.NewIndexHashedGroupSelector(1, mock.HasherMock{}, 0, 1) - list := make([]consensus.Validator, 0) + list := make([]sharding.Validator, 0) list, err := ihgs.ComputeValidatorsGroup([]byte("randomness")) 
assert.Nil(t, list) - assert.Equal(t, nodesCoordinator.ErrSmallEligibleListSize, err) + assert.Equal(t, sharding.ErrSmallEligibleListSize, err) } func TestIndexHashedGroupSelector_ComputeValidatorsGroupWrongSizeShouldErr(t *testing.T) { t.Parallel() - ihgs, _ := nodesCoordinator.NewIndexHashedGroupSelector(10, mock.HasherMock{}, 0, 1) + ihgs, _ := sharding.NewIndexHashedGroupSelector(10, mock.HasherMock{}, 0, 1) - list := []consensus.Validator{ + list := []sharding.Validator{ mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0")), mock.NewValidatorMock(big.NewInt(2), 3, []byte("pk1")), } - nodesMap := make(map[uint32][]consensus.Validator) + nodesMap := make(map[uint32][]sharding.Validator) nodesMap[0] = list _ = ihgs.LoadNodesPerShards(nodesMap) list, err := ihgs.ComputeValidatorsGroup([]byte("randomness")) assert.Nil(t, list) - assert.Equal(t, nodesCoordinator.ErrSmallEligibleListSize, err) + assert.Equal(t, sharding.ErrSmallEligibleListSize, err) } func TestIndexHashedGroupSelector_ComputeValidatorsGroupNilRandomnessShouldErr(t *testing.T) { t.Parallel() - ihgs, _ := nodesCoordinator.NewIndexHashedGroupSelector(2, mock.HasherMock{}, 0, 1) + ihgs, _ := sharding.NewIndexHashedGroupSelector(2, mock.HasherMock{}, 0, 1) - list := []consensus.Validator{ + list := []sharding.Validator{ mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0")), mock.NewValidatorMock(big.NewInt(2), 3, []byte("pk1")), } - nodesMap := make(map[uint32][]consensus.Validator) + nodesMap := make(map[uint32][]sharding.Validator) nodesMap[0] = list _ = ihgs.LoadNodesPerShards(nodesMap) list2, err := ihgs.ComputeValidatorsGroup(nil) assert.Nil(t, list2) - assert.Equal(t, nodesCoordinator.ErrNilRandomness, err) + assert.Equal(t, sharding.ErrNilRandomness, err) } //------- functionality tests @@ -140,13 +139,13 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroupNilRandomnessShouldErr(t func TestIndexHashedGroupSelector_ComputeValidatorsGroup1ValidatorShouldReturnSame(t *testing.T) { t.Parallel() - ihgs, _ := nodesCoordinator.NewIndexHashedGroupSelector(1, mock.HasherMock{}, 0, 1) + ihgs, _ := sharding.NewIndexHashedGroupSelector(1, mock.HasherMock{}, 0, 1) - list := []consensus.Validator{ + list := []sharding.Validator{ mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0")), } - nodesMap := make(map[uint32][]consensus.Validator) + nodesMap := make(map[uint32][]sharding.Validator) nodesMap[0] = list _ = ihgs.LoadNodesPerShards(nodesMap) @@ -178,14 +177,14 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest2Validators(t *testi return nil } - ihgs, _ := nodesCoordinator.NewIndexHashedGroupSelector(2, hasher, 0, 1) + ihgs, _ := sharding.NewIndexHashedGroupSelector(2, hasher, 0, 1) - list := []consensus.Validator{ + list := []sharding.Validator{ mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0")), mock.NewValidatorMock(big.NewInt(2), 3, []byte("pk1")), } - nodesMap := make(map[uint32][]consensus.Validator) + nodesMap := make(map[uint32][]sharding.Validator) nodesMap[0] = list _ = ihgs.LoadNodesPerShards(nodesMap) @@ -217,17 +216,17 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest2ValidatorsRevertOrd return nil } - ihgs, _ := nodesCoordinator.NewIndexHashedGroupSelector(2, hasher, 0, 1) + ihgs, _ := sharding.NewIndexHashedGroupSelector(2, hasher, 0, 1) validator0 := mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0")) validator1 := mock.NewValidatorMock(big.NewInt(2), 3, []byte("pk1")) - list := []consensus.Validator{ + list := []sharding.Validator{ validator0, validator1, } - nodesMap := 
make(map[uint32][]consensus.Validator) + nodesMap := make(map[uint32][]sharding.Validator) nodesMap[0] = list _ = ihgs.LoadNodesPerShards(nodesMap) @@ -260,14 +259,14 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest2ValidatorsSameIndex return nil } - ihgs, _ := nodesCoordinator.NewIndexHashedGroupSelector(2, hasher, 0, 1) + ihgs, _ := sharding.NewIndexHashedGroupSelector(2, hasher, 0, 1) - list := []consensus.Validator{ + list := []sharding.Validator{ mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0")), mock.NewValidatorMock(big.NewInt(2), 3, []byte("pk1")), } - nodesMap := make(map[uint32][]consensus.Validator) + nodesMap := make(map[uint32][]sharding.Validator) nodesMap[0] = list _ = ihgs.LoadNodesPerShards(nodesMap) @@ -311,7 +310,7 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest6From10ValidatorsSho return convertBigIntToBytes(val) } - ihgs, _ := nodesCoordinator.NewIndexHashedGroupSelector(6, hasher, 0, 1) + ihgs, _ := sharding.NewIndexHashedGroupSelector(6, hasher, 0, 1) validator0 := mock.NewValidatorMock(big.NewInt(1), 1, []byte("pk0")) validator1 := mock.NewValidatorMock(big.NewInt(2), 2, []byte("pk1")) @@ -324,7 +323,7 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest6From10ValidatorsSho validator8 := mock.NewValidatorMock(big.NewInt(9), 9, []byte("pk8")) validator9 := mock.NewValidatorMock(big.NewInt(10), 10, []byte("pk9")) - list := []consensus.Validator{ + list := []sharding.Validator{ validator0, validator1, validator2, @@ -337,7 +336,7 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest6From10ValidatorsSho validator9, } - nodesMap := make(map[uint32][]consensus.Validator) + nodesMap := make(map[uint32][]sharding.Validator) nodesMap[0] = list _ = ihgs.LoadNodesPerShards(nodesMap) @@ -358,16 +357,16 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest6From10ValidatorsSho func BenchmarkIndexHashedGroupSelector_ComputeValidatorsGroup21of400(b *testing.B) { consensusGroupSize := 21 - ihgs, _ := nodesCoordinator.NewIndexHashedGroupSelector(consensusGroupSize, mock.HasherMock{}, 0, 1) + ihgs, _ := sharding.NewIndexHashedGroupSelector(consensusGroupSize, mock.HasherMock{}, 0, 1) - list := make([]consensus.Validator, 0) + list := make([]sharding.Validator, 0) //generate 400 validators for i := 0; i < 400; i++ { list = append(list, mock.NewValidatorMock(big.NewInt(0), 0, []byte("pk"+strconv.Itoa(i)))) } - nodesMap := make(map[uint32][]consensus.Validator) + nodesMap := make(map[uint32][]sharding.Validator) nodesMap[0] = list _ = ihgs.LoadNodesPerShards(nodesMap) diff --git a/sharding/interface.go b/sharding/interface.go new file mode 100644 index 00000000000..de1691f1dd4 --- /dev/null +++ b/sharding/interface.go @@ -0,0 +1,40 @@ +package sharding + +import ( + "github.com/ElrondNetwork/elrond-go/data/state" + "math/big" +) + +// MetachainShardId will be used to identify a shard ID as metachain +const MetachainShardId = uint32(0xFFFFFFFF) + +// Coordinator defines what a shard state coordinator should hold +type Coordinator interface { + NumberOfShards() uint32 + ComputeId(address state.AddressContainer) uint32 + SelfId() uint32 + SameShard(firstAddress, secondAddress state.AddressContainer) bool + CommunicationIdentifier(destShardID uint32) string +} + +// Validator defines what a consensus validator implementation should do. 
+type Validator interface { + Stake() *big.Int + Rating() int32 + PubKey() []byte +} + +// NodesCoordinator defines the behaviour of a struct able to do validator group selection +type NodesCoordinator interface { + PublicKeysSelector + LoadNodesPerShards(nodes map[uint32][]Validator) error + ComputeValidatorsGroup(randomness []byte) (validatorsGroup []Validator, err error) + ConsensusGroupSize() int + SetConsensusGroupSize(int) error +} + +// PublicKeysSelector allows retrieval of eligible validators public keys +type PublicKeysSelector interface { + GetSelectedPublicKeys(selection []byte) (publicKeys []string, err error) + GetValidatorsPublicKeys(randomness []byte) ([]string, error) +} diff --git a/sharding/mock/hasherMock.go b/sharding/mock/hasherMock.go new file mode 100644 index 00000000000..0120c31c30f --- /dev/null +++ b/sharding/mock/hasherMock.go @@ -0,0 +1,29 @@ +package mock + +import "crypto/sha256" + +var sha256EmptyHash []byte + +// HasherMock that will be used for testing +type HasherMock struct { +} + +// Compute will output the SHA's equivalent of the input string +func (sha HasherMock) Compute(s string) []byte { + h := sha256.New() + h.Write([]byte(s)) + return h.Sum(nil) +} + +// EmptyHash will return the equivalent of empty string SHA's +func (sha HasherMock) EmptyHash() []byte { + if len(sha256EmptyHash) == 0 { + sha256EmptyHash = sha.Compute("") + } + return sha256EmptyHash +} + +// Size return the required size in bytes +func (HasherMock) Size() int { + return sha256.Size +} diff --git a/sharding/mock/hasherStub.go b/sharding/mock/hasherStub.go new file mode 100644 index 00000000000..8684b95ecb2 --- /dev/null +++ b/sharding/mock/hasherStub.go @@ -0,0 +1,22 @@ +package mock + +type HasherStub struct { + ComputeCalled func(s string) []byte + EmptyHashCalled func() []byte + SizeCalled func() int +} + +// Compute will output the SHA's equivalent of the input string +func (hs *HasherStub) Compute(s string) []byte { + return hs.ComputeCalled(s) +} + +// EmptyHash will return the equivalent of empty string SHA's +func (hs *HasherStub) EmptyHash() []byte { + return hs.EmptyHashCalled() +} + +// Size returns the required size in bytes +func (hs *HasherStub) Size() int { + return hs.SizeCalled() +} diff --git a/sharding/mock/validatorGroupSelectorMock.go b/sharding/mock/validatorGroupSelectorMock.go new file mode 100644 index 00000000000..eca63085408 --- /dev/null +++ b/sharding/mock/validatorGroupSelectorMock.go @@ -0,0 +1,67 @@ +package mock + +import ( + "math/big" + + "github.com/ElrondNetwork/elrond-go/sharding" +) + +type ValidatorGroupSelectorMock struct { + ComputeValidatorsGroupCalled func([]byte) ([]sharding.Validator, error) + GetValidatorsPublicKeysCalled func(randomness []byte) ([]string, error) +} + +func (vgsm ValidatorGroupSelectorMock) ComputeValidatorsGroup(randomness []byte) (validatorsGroup []sharding.Validator, err error) { + if vgsm.ComputeValidatorsGroupCalled != nil { + return vgsm.ComputeValidatorsGroupCalled(randomness) + } + + list := []sharding.Validator{ + NewValidatorMock(big.NewInt(0), 0, []byte("A")), + NewValidatorMock(big.NewInt(0), 0, []byte("B")), + NewValidatorMock(big.NewInt(0), 0, []byte("C")), + NewValidatorMock(big.NewInt(0), 0, []byte("D")), + NewValidatorMock(big.NewInt(0), 0, []byte("E")), + NewValidatorMock(big.NewInt(0), 0, []byte("F")), + NewValidatorMock(big.NewInt(0), 0, []byte("G")), + NewValidatorMock(big.NewInt(0), 0, []byte("H")), + NewValidatorMock(big.NewInt(0), 0, []byte("I")), + } + + return list, nil +} + +func (vgsm 
ValidatorGroupSelectorMock) GetValidatorsPublicKeys(randomness []byte) ([]string, error) { + if vgsm.GetValidatorsPublicKeysCalled != nil { + return vgsm.GetValidatorsPublicKeysCalled(randomness) + } + + validators, err := vgsm.ComputeValidatorsGroup(randomness) + if err != nil { + return nil, err + } + + pubKeys := make([]string, 0) + + for _, v := range validators { + pubKeys = append(pubKeys, string(v.PubKey())) + } + + return pubKeys, nil +} + +func (vgsm ValidatorGroupSelectorMock) ConsensusGroupSize() int { + panic("implement me") +} + +func (vgsm ValidatorGroupSelectorMock) LoadNodesPerShards(map[uint32][]sharding.Validator) error { + return nil +} + +func (vgsm ValidatorGroupSelectorMock) SetConsensusGroupSize(int) error { + panic("implement me") +} + +func (vgsm ValidatorGroupSelectorMock) GetSelectedPublicKeys(selection []byte) (publicKeys []string, err error) { + panic("implement me") +} diff --git a/sharding/mock/validatorMock.go b/sharding/mock/validatorMock.go new file mode 100644 index 00000000000..cd22a51ce85 --- /dev/null +++ b/sharding/mock/validatorMock.go @@ -0,0 +1,27 @@ +package mock + +import ( + "math/big" +) + +type ValidatorMock struct { + stake *big.Int + rating int32 + pubKey []byte +} + +func NewValidatorMock(stake *big.Int, rating int32, pubKey []byte) *ValidatorMock { + return &ValidatorMock{stake: stake, rating: rating, pubKey: pubKey} +} + +func (vm *ValidatorMock) Stake() *big.Int { + return vm.stake +} + +func (vm *ValidatorMock) Rating() int32 { + return vm.rating +} + +func (vm *ValidatorMock) PubKey() []byte { + return vm.pubKey +} diff --git a/sharding/nodesSetup_test.go b/sharding/nodesSetup_test.go index 4c327c4194b..15f7a1da3ec 100644 --- a/sharding/nodesSetup_test.go +++ b/sharding/nodesSetup_test.go @@ -4,8 +4,8 @@ import ( "encoding/hex" "testing" - "github.com/ElrondNetwork/elrond-go/sharding" "github.com/stretchr/testify/assert" + "github.com/ElrondNetwork/elrond-go/sharding" ) func createNodesSetupOneShardOneNode() *sharding.NodesSetup { diff --git a/sharding/sharding.go b/sharding/sharding.go deleted file mode 100644 index 07b1fdaee17..00000000000 --- a/sharding/sharding.go +++ /dev/null @@ -1,17 +0,0 @@ -package sharding - -import ( - "github.com/ElrondNetwork/elrond-go/data/state" -) - -// MetachainShardId will be used to identify a shard ID as metachain -const MetachainShardId = uint32(0xFFFFFFFF) - -// Coordinator defines what a shard state coordinator should hold -type Coordinator interface { - NumberOfShards() uint32 - ComputeId(address state.AddressContainer) uint32 - SelfId() uint32 - SameShard(firstAddress, secondAddress state.AddressContainer) bool - CommunicationIdentifier(destShardID uint32) string -} diff --git a/consensus/validators/validator.go b/sharding/validator.go similarity index 97% rename from consensus/validators/validator.go rename to sharding/validator.go index 429250f19fe..e6d4a65913f 100644 --- a/consensus/validators/validator.go +++ b/sharding/validator.go @@ -1,4 +1,4 @@ -package validators +package sharding import ( "math/big" diff --git a/consensus/validators/validator_test.go b/sharding/validator_test.go similarity index 54% rename from consensus/validators/validator_test.go rename to sharding/validator_test.go index 9f0cefe817c..98ac93dff76 100644 --- a/consensus/validators/validator_test.go +++ b/sharding/validator_test.go @@ -1,44 +1,43 @@ -package validators_test +package sharding import ( "math/big" "testing" - "github.com/ElrondNetwork/elrond-go/consensus/validators" "github.com/stretchr/testify/assert" 
) func TestValidator_NewValidatorShouldFailOnNilStake(t *testing.T) { t.Parallel() - validator, err := validators.NewValidator(nil, 0, []byte("pk1")) + validator, err := NewValidator(nil, 0, []byte("pk1")) assert.Nil(t, validator) - assert.Equal(t, validators.ErrNilStake, err) + assert.Equal(t, ErrNilStake, err) } func TestValidator_NewValidatorShouldFailOnNegativeStake(t *testing.T) { t.Parallel() - validator, err := validators.NewValidator(big.NewInt(-1), 0, []byte("pk1")) + validator, err := NewValidator(big.NewInt(-1), 0, []byte("pk1")) assert.Nil(t, validator) - assert.Equal(t, validators.ErrNegativeStake, err) + assert.Equal(t, ErrNegativeStake, err) } func TestValidator_NewValidatorShouldFailOnNilPublickKey(t *testing.T) { t.Parallel() - validator, err := validators.NewValidator(big.NewInt(0), 0, nil) + validator, err := NewValidator(big.NewInt(0), 0, nil) assert.Nil(t, validator) - assert.Equal(t, validators.ErrNilPubKey, err) + assert.Equal(t, ErrNilPubKey, err) } func TestValidator_NewValidatorShouldWork(t *testing.T) { t.Parallel() - validator, err := validators.NewValidator(big.NewInt(0), 0, []byte("pk1")) + validator, err := NewValidator(big.NewInt(0), 0, []byte("pk1")) assert.NotNil(t, validator) assert.Nil(t, err) @@ -47,7 +46,7 @@ func TestValidator_NewValidatorShouldWork(t *testing.T) { func TestValidator_StakeShouldWork(t *testing.T) { t.Parallel() - validator, _ := validators.NewValidator(big.NewInt(1), 0, []byte("pk1")) + validator, _ := NewValidator(big.NewInt(1), 0, []byte("pk1")) assert.Equal(t, big.NewInt(1), validator.Stake()) } @@ -55,7 +54,7 @@ func TestValidator_StakeShouldWork(t *testing.T) { func TestValidator_PubKeyShouldWork(t *testing.T) { t.Parallel() - validator, _ := validators.NewValidator(big.NewInt(0), 0, []byte("pk1")) + validator, _ := NewValidator(big.NewInt(0), 0, []byte("pk1")) assert.Equal(t, []byte("pk1"), validator.PubKey()) } From 6ecf8799be87a347587df70e9725b951e19820bb Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Wed, 26 Jun 2019 17:24:10 +0300 Subject: [PATCH 004/234] sharding: move validator back to consensus --- consensus/errors.go | 14 +++++++++++++ consensus/interface.go | 8 +++++++ consensus/mock/validatorGroupSelectorMock.go | 7 +++++-- .../commonSubround/subroundStartRound_test.go | 9 +++++--- consensus/spos/consensusState_test.go | 5 ++--- {sharding => consensus}/validator.go | 2 +- {sharding => consensus}/validator_test.go | 21 ++++++++++--------- node/node.go | 2 +- sharding/errors.go | 9 -------- sharding/export_test.go | 2 +- sharding/indexHashedNodesCoordinator.go | 7 +++---- sharding/indexHashedNodesCoordinator_test.go | 5 ++--- sharding/interface.go | 6 ++++-- 13 files changed, 58 insertions(+), 39 deletions(-) create mode 100644 consensus/errors.go rename {sharding => consensus}/validator.go (97%) rename {sharding => consensus}/validator_test.go (55%) diff --git a/consensus/errors.go b/consensus/errors.go new file mode 100644 index 00000000000..a280f1a5f80 --- /dev/null +++ b/consensus/errors.go @@ -0,0 +1,14 @@ +package consensus + +import ( + "errors" +) + +// ErrNilStake signals that a nil stake structure has been provided +var ErrNilStake = errors.New("nil stake") + +// ErrNegativeStake signals that the stake is negative +var ErrNegativeStake = errors.New("negative stake") + +// ErrNilPubKey signals that the public key is nil +var ErrNilPubKey = errors.New("nil public key") diff --git a/consensus/interface.go b/consensus/interface.go index 90063b01192..8988133187f 100644 --- a/consensus/interface.go +++ 
b/consensus/interface.go @@ -4,6 +4,7 @@ import ( "time" "github.com/ElrondNetwork/elrond-go/data" + "math/big" ) // Rounder defines the actions which should be handled by a round implementation @@ -47,6 +48,13 @@ type SposFactory interface { GenerateSubrounds() } +// Validator defines what a consensus validator implementation should do. +type Validator interface { + Stake() *big.Int + Rating() int32 + PubKey() []byte +} + // BroadcastMessenger defines the behaviour of the broadcast messages by the consensus group type BroadcastMessenger interface { BroadcastBlock(data.BodyHandler, data.HeaderHandler) error diff --git a/consensus/mock/validatorGroupSelectorMock.go b/consensus/mock/validatorGroupSelectorMock.go index 8e0af6f606c..f3e5e05aa73 100644 --- a/consensus/mock/validatorGroupSelectorMock.go +++ b/consensus/mock/validatorGroupSelectorMock.go @@ -1,8 +1,8 @@ package mock import ( - "math/big" "github.com/ElrondNetwork/elrond-go/sharding" + "math/big" ) type ValidatorGroupSelectorMock struct { @@ -10,7 +10,10 @@ type ValidatorGroupSelectorMock struct { GetValidatorsPublicKeysCalled func(randomness []byte) ([]string, error) } -func (vgsm ValidatorGroupSelectorMock) ComputeValidatorsGroup(randomness []byte) (validatorsGroup []sharding.Validator, err error) { +func (vgsm ValidatorGroupSelectorMock) ComputeValidatorsGroup( + randomness []byte, +) (validatorsGroup []sharding.Validator, err error) { + if vgsm.ComputeValidatorsGroupCalled != nil { return vgsm.ComputeValidatorsGroupCalled(randomness) } diff --git a/consensus/spos/commonSubround/subroundStartRound_test.go b/consensus/spos/commonSubround/subroundStartRound_test.go index 3ff5707f065..fe2aaf73cb9 100644 --- a/consensus/spos/commonSubround/subroundStartRound_test.go +++ b/consensus/spos/commonSubround/subroundStartRound_test.go @@ -8,8 +8,8 @@ import ( "github.com/ElrondNetwork/elrond-go/consensus/mock" "github.com/ElrondNetwork/elrond-go/consensus/spos" "github.com/ElrondNetwork/elrond-go/consensus/spos/commonSubround" - "github.com/stretchr/testify/assert" "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/stretchr/testify/assert" ) func defaultSubroundStartRoundFromSubround(sr *spos.Subround) (*commonSubround.SubroundStartRound, error) { @@ -25,8 +25,11 @@ func defaultSubroundStartRoundFromSubround(sr *spos.Subround) (*commonSubround.S return startRound, err } -func defaultSubround(consensusState *spos.ConsensusState, ch chan bool, container spos.ConsensusCoreHandler) (*spos.Subround, - error) { +func defaultSubround( + consensusState *spos.ConsensusState, + ch chan bool, container spos.ConsensusCoreHandler, +) (*spos.Subround, error) { + return spos.NewSubround( -1, int(SrStartRound), diff --git a/consensus/spos/consensusState_test.go b/consensus/spos/consensusState_test.go index 6c3f8047561..e7cbe595463 100644 --- a/consensus/spos/consensusState_test.go +++ b/consensus/spos/consensusState_test.go @@ -5,13 +5,12 @@ import ( "testing" "github.com/ElrondNetwork/elrond-go/consensus" - "github.com/ElrondNetwork/elrond-go/consensus/spos" - "github.com/ElrondNetwork/elrond-go/consensus/mock" + "github.com/ElrondNetwork/elrond-go/consensus/spos" "github.com/ElrondNetwork/elrond-go/consensus/spos/bn" "github.com/ElrondNetwork/elrond-go/data/block" - "github.com/stretchr/testify/assert" "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/stretchr/testify/assert" ) func internalInitConsensusState() *spos.ConsensusState { diff --git a/sharding/validator.go b/consensus/validator.go similarity index 97% rename from 
sharding/validator.go rename to consensus/validator.go index e6d4a65913f..ad182a66154 100644 --- a/sharding/validator.go +++ b/consensus/validator.go @@ -1,4 +1,4 @@ -package sharding +package consensus import ( "math/big" diff --git a/sharding/validator_test.go b/consensus/validator_test.go similarity index 55% rename from sharding/validator_test.go rename to consensus/validator_test.go index 98ac93dff76..73dbf7ff572 100644 --- a/sharding/validator_test.go +++ b/consensus/validator_test.go @@ -1,43 +1,44 @@ -package sharding +package consensus_test import ( "math/big" "testing" + "github.com/ElrondNetwork/elrond-go/consensus" "github.com/stretchr/testify/assert" ) func TestValidator_NewValidatorShouldFailOnNilStake(t *testing.T) { t.Parallel() - validator, err := NewValidator(nil, 0, []byte("pk1")) + validator, err := consensus.NewValidator(nil, 0, []byte("pk1")) assert.Nil(t, validator) - assert.Equal(t, ErrNilStake, err) + assert.Equal(t, consensus.ErrNilStake, err) } func TestValidator_NewValidatorShouldFailOnNegativeStake(t *testing.T) { t.Parallel() - validator, err := NewValidator(big.NewInt(-1), 0, []byte("pk1")) + validator, err := consensus.NewValidator(big.NewInt(-1), 0, []byte("pk1")) assert.Nil(t, validator) - assert.Equal(t, ErrNegativeStake, err) + assert.Equal(t, consensus.ErrNegativeStake, err) } func TestValidator_NewValidatorShouldFailOnNilPublickKey(t *testing.T) { t.Parallel() - validator, err := NewValidator(big.NewInt(0), 0, nil) + validator, err := consensus.NewValidator(big.NewInt(0), 0, nil) assert.Nil(t, validator) - assert.Equal(t, ErrNilPubKey, err) + assert.Equal(t, consensus.ErrNilPubKey, err) } func TestValidator_NewValidatorShouldWork(t *testing.T) { t.Parallel() - validator, err := NewValidator(big.NewInt(0), 0, []byte("pk1")) + validator, err := consensus.NewValidator(big.NewInt(0), 0, []byte("pk1")) assert.NotNil(t, validator) assert.Nil(t, err) @@ -46,7 +47,7 @@ func TestValidator_NewValidatorShouldWork(t *testing.T) { func TestValidator_StakeShouldWork(t *testing.T) { t.Parallel() - validator, _ := NewValidator(big.NewInt(1), 0, []byte("pk1")) + validator, _ := consensus.NewValidator(big.NewInt(1), 0, []byte("pk1")) assert.Equal(t, big.NewInt(1), validator.Stake()) } @@ -54,7 +55,7 @@ func TestValidator_StakeShouldWork(t *testing.T) { func TestValidator_PubKeyShouldWork(t *testing.T) { t.Parallel() - validator, _ := NewValidator(big.NewInt(0), 0, []byte("pk1")) + validator, _ := consensus.NewValidator(big.NewInt(0), 0, []byte("pk1")) assert.Equal(t, []byte("pk1"), validator.PubKey()) } diff --git a/node/node.go b/node/node.go index a7a643fa8e4..6bcbeb943d2 100644 --- a/node/node.go +++ b/node/node.go @@ -502,7 +502,7 @@ func (n *Node) createNodesCoordinator() (sharding.NodesCoordinator, error) { nodesMap[sh] = make([]sharding.Validator, nodesInShard) for i := 0; i < nodesInShard; i++ { - validator, err := sharding.NewValidator(big.NewInt(0), 0, []byte(n.initialNodesPubkeys[sh][i])) + validator, err := consensus.NewValidator(big.NewInt(0), 0, []byte(n.initialNodesPubkeys[sh][i])) if err != nil { return nil, err } diff --git a/sharding/errors.go b/sharding/errors.go index e324ab5e5ca..08ba0eeff57 100644 --- a/sharding/errors.go +++ b/sharding/errors.go @@ -37,15 +37,6 @@ var ErrMinNodesPerShardSmallerThanConsensusSize = errors.New("minimum nodes per // ErrNodesSizeSmallerThanMinNoOfNodes signals that there are not enough nodes defined in genesis file var ErrNodesSizeSmallerThanMinNoOfNodes = errors.New("length of nodes defined is smaller than min nodes 
per shard required") -// ErrNilStake signals that a nil stake structure has been provided -var ErrNilStake = errors.New("nil stake") - -// ErrNegativeStake signals that the stake is negative -var ErrNegativeStake = errors.New("negative stake") - -// ErrNilPubKey signals that the public key is nil -var ErrNilPubKey = errors.New("nil public key") - // ErrNilInputNodesMap signals that a nil nodes map was provided var ErrNilInputNodesMap = errors.New("nil input nodes map") diff --git a/sharding/export_test.go b/sharding/export_test.go index b7f35d44713..85575748413 100644 --- a/sharding/export_test.go +++ b/sharding/export_test.go @@ -34,4 +34,4 @@ func CommunicationIdentifierBetweenShards(shardId1 uint32, shardId2 uint32) stri func (ihgs *indexHashedNodesCoordinator) EligibleList() []Validator { return ihgs.nodesMap[ihgs.shardId] -} \ No newline at end of file +} diff --git a/sharding/indexHashedNodesCoordinator.go b/sharding/indexHashedNodesCoordinator.go index 06873cc3206..c2289893ca0 100644 --- a/sharding/indexHashedNodesCoordinator.go +++ b/sharding/indexHashedNodesCoordinator.go @@ -8,9 +8,6 @@ import ( "github.com/ElrondNetwork/elrond-go/hashing" ) -const defaultRating = 0 -const defaultStake = 0 - type indexHashedNodesCoordinator struct { nbShards uint32 shardId uint32 @@ -157,9 +154,11 @@ func (ihgs *indexHashedNodesCoordinator) computeListIndex(currentIndex int, rand computedLargeIndex := big.NewInt(0) computedLargeIndex.SetBytes(indexHash) + lenExpandedEligibleList := big.NewInt(int64(len(ihgs.expandedEligibleList))) // computedListIndex = computedLargeIndex % len(expandedEligibleList) - computedListIndex := big.NewInt(0).Mod(computedLargeIndex, big.NewInt(int64(len(ihgs.expandedEligibleList)))).Int64() + computedListIndex := big.NewInt(0).Mod(computedLargeIndex, lenExpandedEligibleList).Int64() + return int(computedListIndex) } diff --git a/sharding/indexHashedNodesCoordinator_test.go b/sharding/indexHashedNodesCoordinator_test.go index e18029a4b0f..7610b7f5032 100644 --- a/sharding/indexHashedNodesCoordinator_test.go +++ b/sharding/indexHashedNodesCoordinator_test.go @@ -6,9 +6,9 @@ import ( "strconv" "testing" - "github.com/stretchr/testify/assert" - "github.com/ElrondNetwork/elrond-go/sharding/mock" "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/sharding/mock" + "github.com/stretchr/testify/assert" ) func convertBigIntToBytes(value *big.Int) []byte { @@ -374,7 +374,6 @@ func BenchmarkIndexHashedGroupSelector_ComputeValidatorsGroup21of400(b *testing. for i := 0; i < b.N; i++ { randomness := strconv.Itoa(i) - list2, _ := ihgs.ComputeValidatorsGroup([]byte(randomness)) assert.Equal(b, consensusGroupSize, len(list2)) diff --git a/sharding/interface.go b/sharding/interface.go index de1691f1dd4..0fccc459eb2 100644 --- a/sharding/interface.go +++ b/sharding/interface.go @@ -1,8 +1,9 @@ package sharding import ( - "github.com/ElrondNetwork/elrond-go/data/state" "math/big" + + "github.com/ElrondNetwork/elrond-go/data/state" ) // MetachainShardId will be used to identify a shard ID as metachain @@ -17,7 +18,8 @@ type Coordinator interface { CommunicationIdentifier(destShardID uint32) string } -// Validator defines what a consensus validator implementation should do. 
+// Validator defines a node that can be allocated to a shard for participation in a consensus group as validator +// or block proposer type Validator interface { Stake() *big.Int Rating() int32 From a367d24707bfe79ae9ad0e0c32b2c0566d65206c Mon Sep 17 00:00:00 2001 From: Robert Sasu Date: Fri, 5 Jul 2019 15:46:21 +0300 Subject: [PATCH 005/234] started fee implementation --- data/feeTx/capnp/schema.capnp | 19 ++ data/feeTx/capnp/schema.capnp.go | 360 +++++++++++++++++++++++++++++++ data/feeTx/feeTx.go | 116 ++++++++++ data/feeTx/feeTx_test.go | 106 +++++++++ 4 files changed, 601 insertions(+) create mode 100644 data/feeTx/capnp/schema.capnp create mode 100644 data/feeTx/capnp/schema.capnp.go create mode 100644 data/feeTx/feeTx.go create mode 100644 data/feeTx/feeTx_test.go diff --git a/data/feeTx/capnp/schema.capnp b/data/feeTx/capnp/schema.capnp new file mode 100644 index 00000000000..c3d9ec90949 --- /dev/null +++ b/data/feeTx/capnp/schema.capnp @@ -0,0 +1,19 @@ +@0xff99b03cb6309633; +using Go = import "/go.capnp"; +$Go.package("capnp"); +$Go.import("_"); + + +struct SmartContractResultCapn { + nonce @0: UInt64; + value @1: Data; + rcvAddr @2: Data; + txHash @6: Data; +} + +##compile with: + +## +## +## capnp compile -ogo ./schema.capnp + diff --git a/data/feeTx/capnp/schema.capnp.go b/data/feeTx/capnp/schema.capnp.go new file mode 100644 index 00000000000..bfb8de674fd --- /dev/null +++ b/data/feeTx/capnp/schema.capnp.go @@ -0,0 +1,360 @@ +package capnp + +// AUTO GENERATED - DO NOT EDIT +import ( + "bufio" + "bytes" + "encoding/json" + C "github.com/glycerine/go-capnproto" + "io" +) + +type SmartContractResultCapn C.Struct + +func NewSmartContractResultCapn(s *C.Segment) SmartContractResultCapn { + return SmartContractResultCapn(s.NewStruct(8, 6)) +} +func NewRootSmartContractResultCapn(s *C.Segment) SmartContractResultCapn { + return SmartContractResultCapn(s.NewRootStruct(8, 6)) +} +func AutoNewSmartContractResultCapn(s *C.Segment) SmartContractResultCapn { + return SmartContractResultCapn(s.NewStructAR(8, 6)) +} +func ReadRootSmartContractResultCapn(s *C.Segment) SmartContractResultCapn { + return SmartContractResultCapn(s.Root(0).ToStruct()) +} +func (s SmartContractResultCapn) Nonce() uint64 { return C.Struct(s).Get64(0) } +func (s SmartContractResultCapn) SetNonce(v uint64) { C.Struct(s).Set64(0, v) } +func (s SmartContractResultCapn) Value() []byte { return C.Struct(s).GetObject(0).ToData() } +func (s SmartContractResultCapn) SetValue(v []byte) { C.Struct(s).SetObject(0, s.Segment.NewData(v)) } +func (s SmartContractResultCapn) RcvAddr() []byte { return C.Struct(s).GetObject(1).ToData() } +func (s SmartContractResultCapn) SetRcvAddr(v []byte) { C.Struct(s).SetObject(1, s.Segment.NewData(v)) } +func (s SmartContractResultCapn) SndAddr() []byte { return C.Struct(s).GetObject(2).ToData() } +func (s SmartContractResultCapn) SetSndAddr(v []byte) { C.Struct(s).SetObject(2, s.Segment.NewData(v)) } +func (s SmartContractResultCapn) Code() []byte { return C.Struct(s).GetObject(3).ToData() } +func (s SmartContractResultCapn) SetCode(v []byte) { C.Struct(s).SetObject(3, s.Segment.NewData(v)) } +func (s SmartContractResultCapn) Data() []byte { return C.Struct(s).GetObject(4).ToData() } +func (s SmartContractResultCapn) SetData(v []byte) { C.Struct(s).SetObject(4, s.Segment.NewData(v)) } +func (s SmartContractResultCapn) TxHash() []byte { return C.Struct(s).GetObject(5).ToData() } +func (s SmartContractResultCapn) SetTxHash(v []byte) { C.Struct(s).SetObject(5, s.Segment.NewData(v)) } +func (s 
SmartContractResultCapn) WriteJSON(w io.Writer) error { + b := bufio.NewWriter(w) + var err error + var buf []byte + _ = buf + err = b.WriteByte('{') + if err != nil { + return err + } + _, err = b.WriteString("\"nonce\":") + if err != nil { + return err + } + { + s := s.Nonce() + buf, err = json.Marshal(s) + if err != nil { + return err + } + _, err = b.Write(buf) + if err != nil { + return err + } + } + err = b.WriteByte(',') + if err != nil { + return err + } + _, err = b.WriteString("\"value\":") + if err != nil { + return err + } + { + s := s.Value() + buf, err = json.Marshal(s) + if err != nil { + return err + } + _, err = b.Write(buf) + if err != nil { + return err + } + } + err = b.WriteByte(',') + if err != nil { + return err + } + _, err = b.WriteString("\"rcvAddr\":") + if err != nil { + return err + } + { + s := s.RcvAddr() + buf, err = json.Marshal(s) + if err != nil { + return err + } + _, err = b.Write(buf) + if err != nil { + return err + } + } + err = b.WriteByte(',') + if err != nil { + return err + } + _, err = b.WriteString("\"sndAddr\":") + if err != nil { + return err + } + { + s := s.SndAddr() + buf, err = json.Marshal(s) + if err != nil { + return err + } + _, err = b.Write(buf) + if err != nil { + return err + } + } + err = b.WriteByte(',') + if err != nil { + return err + } + _, err = b.WriteString("\"code\":") + if err != nil { + return err + } + { + s := s.Code() + buf, err = json.Marshal(s) + if err != nil { + return err + } + _, err = b.Write(buf) + if err != nil { + return err + } + } + err = b.WriteByte(',') + if err != nil { + return err + } + _, err = b.WriteString("\"data\":") + if err != nil { + return err + } + { + s := s.Data() + buf, err = json.Marshal(s) + if err != nil { + return err + } + _, err = b.Write(buf) + if err != nil { + return err + } + } + err = b.WriteByte(',') + if err != nil { + return err + } + _, err = b.WriteString("\"txHash\":") + if err != nil { + return err + } + { + s := s.TxHash() + buf, err = json.Marshal(s) + if err != nil { + return err + } + _, err = b.Write(buf) + if err != nil { + return err + } + } + err = b.WriteByte('}') + if err != nil { + return err + } + err = b.Flush() + return err +} +func (s SmartContractResultCapn) MarshalJSON() ([]byte, error) { + b := bytes.Buffer{} + err := s.WriteJSON(&b) + return b.Bytes(), err +} +func (s SmartContractResultCapn) WriteCapLit(w io.Writer) error { + b := bufio.NewWriter(w) + var err error + var buf []byte + _ = buf + err = b.WriteByte('(') + if err != nil { + return err + } + _, err = b.WriteString("nonce = ") + if err != nil { + return err + } + { + s := s.Nonce() + buf, err = json.Marshal(s) + if err != nil { + return err + } + _, err = b.Write(buf) + if err != nil { + return err + } + } + _, err = b.WriteString(", ") + if err != nil { + return err + } + _, err = b.WriteString("value = ") + if err != nil { + return err + } + { + s := s.Value() + buf, err = json.Marshal(s) + if err != nil { + return err + } + _, err = b.Write(buf) + if err != nil { + return err + } + } + _, err = b.WriteString(", ") + if err != nil { + return err + } + _, err = b.WriteString("rcvAddr = ") + if err != nil { + return err + } + { + s := s.RcvAddr() + buf, err = json.Marshal(s) + if err != nil { + return err + } + _, err = b.Write(buf) + if err != nil { + return err + } + } + _, err = b.WriteString(", ") + if err != nil { + return err + } + _, err = b.WriteString("sndAddr = ") + if err != nil { + return err + } + { + s := s.SndAddr() + buf, err = json.Marshal(s) + if err != nil { + return err 
+ } + _, err = b.Write(buf) + if err != nil { + return err + } + } + _, err = b.WriteString(", ") + if err != nil { + return err + } + _, err = b.WriteString("code = ") + if err != nil { + return err + } + { + s := s.Code() + buf, err = json.Marshal(s) + if err != nil { + return err + } + _, err = b.Write(buf) + if err != nil { + return err + } + } + _, err = b.WriteString(", ") + if err != nil { + return err + } + _, err = b.WriteString("data = ") + if err != nil { + return err + } + { + s := s.Data() + buf, err = json.Marshal(s) + if err != nil { + return err + } + _, err = b.Write(buf) + if err != nil { + return err + } + } + _, err = b.WriteString(", ") + if err != nil { + return err + } + _, err = b.WriteString("txHash = ") + if err != nil { + return err + } + { + s := s.TxHash() + buf, err = json.Marshal(s) + if err != nil { + return err + } + _, err = b.Write(buf) + if err != nil { + return err + } + } + err = b.WriteByte(')') + if err != nil { + return err + } + err = b.Flush() + return err +} +func (s SmartContractResultCapn) MarshalCapLit() ([]byte, error) { + b := bytes.Buffer{} + err := s.WriteCapLit(&b) + return b.Bytes(), err +} + +type SmartContractResultCapn_List C.PointerList + +func NewSmartContractResultCapnList(s *C.Segment, sz int) SmartContractResultCapn_List { + return SmartContractResultCapn_List(s.NewCompositeList(8, 6, sz)) +} +func (s SmartContractResultCapn_List) Len() int { return C.PointerList(s).Len() } +func (s SmartContractResultCapn_List) At(i int) SmartContractResultCapn { + return SmartContractResultCapn(C.PointerList(s).At(i).ToStruct()) +} +func (s SmartContractResultCapn_List) ToArray() []SmartContractResultCapn { + n := s.Len() + a := make([]SmartContractResultCapn, n) + for i := 0; i < n; i++ { + a[i] = s.At(i) + } + return a +} +func (s SmartContractResultCapn_List) Set(i int, item SmartContractResultCapn) { + C.PointerList(s).Set(i, C.Object(item)) +} diff --git a/data/feeTx/feeTx.go b/data/feeTx/feeTx.go new file mode 100644 index 00000000000..5550ba2dd87 --- /dev/null +++ b/data/feeTx/feeTx.go @@ -0,0 +1,116 @@ +package feeTx + +import ( + "io" + "math/big" + + "github.com/ElrondNetwork/elrond-go/data/feeTx/capnp" + "github.com/glycerine/go-capnproto" +) + +// FeeTx holds all the data needed for a value transfer +type FeeTx struct { + Nonce uint64 `capid:"0" json:"nonce"` + Value *big.Int `capid:"1" json:"value"` + RcvAddr []byte `capid:"2" json:"receiver"` + TxHash []byte `capid:"6" json:"txHash"` +} + +// Save saves the serialized data of a FeeTx into a stream through Capnp protocol +func (scr *FeeTx) Save(w io.Writer) error { + seg := capn.NewBuffer(nil) + FeeTxGoToCapn(seg, scr) + _, err := seg.WriteTo(w) + return err +} + +// Load loads the data from the stream into a FeeTx object through Capnp protocol +func (scr *FeeTx) Load(r io.Reader) error { + capMsg, err := capn.ReadFromStream(r, nil) + if err != nil { + return err + } + + z := capnp.ReadRootFeeTxCapn(capMsg) + FeeTxCapnToGo(z, scr) + return nil +} + +// FeeTxCapnToGo is a helper function to copy fields from a FeeTxCapn object to a FeeTx object +func FeeTxCapnToGo(src capnp.FeeTxCapn, dest *FeeTx) *FeeTx { + if dest == nil { + dest = &FeeTx{} + } + + if dest.Value == nil { + dest.Value = big.NewInt(0) + } + + dest.Nonce = src.Nonce() + err := dest.Value.GobDecode(src.Value()) + + if err != nil { + return nil + } + + dest.RcvAddr = src.RcvAddr() + dest.TxHash = src.TxHash() + + return dest +} + +// FeeTxGoToCapn is a helper function to copy fields from a FeeTx object to a FeeTxCapn 
object +func FeeTxGoToCapn(seg *capn.Segment, src *FeeTx) capnp.FeeTxCapn { + dest := capnp.AutoNewFeeTxCapn(seg) + + value, _ := src.Value.GobEncode() + dest.SetNonce(src.Nonce) + dest.SetValue(value) + dest.SetRcvAddr(src.RcvAddr) + dest.SetTxHash(src.TxHash) + + return dest +} + +// IsInterfaceNil verifies if underlying object is nil +func (scr *FeeTx) IsInterfaceNil() bool { + return scr == nil +} + +// GetValue returns the value of the fee transaction +func (scr *FeeTx) GetValue() *big.Int { + return scr.Value +} + +// GetData returns the data of the fee transaction +func (scr *FeeTx) GetData() []byte { + return nil +} + +// GetRecvAddress returns the receiver address from the fee transaction +func (scr *FeeTx) GetRecvAddress() []byte { + return scr.RcvAddr +} + +// GetSndAddress returns the sender address from the fee transaction +func (scr *FeeTx) GetSndAddress() []byte { + return nil +} + +// SetValue sets the value of the fee transaction +func (scr *FeeTx) SetValue(value *big.Int) { + scr.Value = value +} + +// SetData sets the data of the fee transaction +func (scr *FeeTx) SetData(data []byte) { +} + +// SetRecvAddress sets the receiver address of the fee transaction +func (scr *FeeTx) SetRecvAddress(addr []byte) { + scr.RcvAddr = addr +} + +// SetSndAddress sets the sender address of the fee transaction +func (scr *FeeTx) SetSndAddress(addr []byte) { +} diff --git a/data/feeTx/feeTx_test.go b/data/feeTx/feeTx_test.go new file mode 100644 index 00000000000..68b34a988ad --- /dev/null +++ b/data/feeTx/feeTx_test.go @@ -0,0 +1,106 @@ +package feeTx_test + +import ( + "bytes" + "math/big" + "testing" + + "github.com/ElrondNetwork/elrond-go/data/smartContractResult" + "github.com/stretchr/testify/assert" +) + +func TestSmartContractResult_SaveLoad(t *testing.T) { + smrS := smartContractResult.SmartContractResult{ + Nonce: uint64(1), + Value: big.NewInt(1), + RcvAddr: []byte("receiver_address"), + SndAddr: []byte("sender_address"), + Data: []byte("scr_data"), + Code: []byte("code"), + TxHash: []byte("scrHash"), + } + + var b bytes.Buffer + smrS.Save(&b) + + loadSMR := smartContractResult.SmartContractResult{} + loadSMR.Load(&b) + + assert.Equal(t, smrS, loadSMR) +} + +func TestSmartContractResult_GetData(t *testing.T) { + t.Parallel() + + data := []byte("data") + scr := &smartContractResult.SmartContractResult{Data: data} + + assert.Equal(t, data, scr.Data) +} + +func TestSmartContractResult_GetRecvAddr(t *testing.T) { + t.Parallel() + + data := []byte("data") + scr := &smartContractResult.SmartContractResult{RcvAddr: data} + + assert.Equal(t, data, scr.RcvAddr) +} + +func TestSmartContractResult_GetSndAddr(t *testing.T) { + t.Parallel() + + data := []byte("data") + scr := &smartContractResult.SmartContractResult{SndAddr: data} + + assert.Equal(t, data, scr.SndAddr) +} + +func TestSmartContractResult_GetValue(t *testing.T) { + t.Parallel() + + value := big.NewInt(10) + scr := &smartContractResult.SmartContractResult{Value: value} + + assert.Equal(t, value, scr.Value) +} + +func TestSmartContractResult_SetData(t *testing.T) { + t.Parallel() + + data := []byte("data") + scr := &smartContractResult.SmartContractResult{} + scr.SetData(data) + + assert.Equal(t, data, scr.Data) +} + +func TestSmartContractResult_SetRecvAddr(t *testing.T) { + t.Parallel() + + data := []byte("data") + scr := &smartContractResult.SmartContractResult{} + scr.SetRecvAddress(data) + + assert.Equal(t, data, scr.RcvAddr) +} + +func TestSmartContractResult_SetSndAddr(t *testing.T) { + t.Parallel() + + data := 
[]byte("data") + scr := &smartContractResult.SmartContractResult{} + scr.SetSndAddress(data) + + assert.Equal(t, data, scr.SndAddr) +} + +func TestSmartContractResult_SetValue(t *testing.T) { + t.Parallel() + + value := big.NewInt(10) + scr := &smartContractResult.SmartContractResult{} + scr.SetValue(value) + + assert.Equal(t, value, scr.Value) +} From 7ab354205c70e93166a5fae2556bab64b2632599 Mon Sep 17 00:00:00 2001 From: ccorcoveanu Date: Fri, 5 Jul 2019 15:49:52 +0300 Subject: [PATCH 006/234] Update capnp schema --- data/feeTx/capnp/schema.capnp | 6 +- data/feeTx/capnp/schema.capnp.go | 183 +++++-------------------------- 2 files changed, 29 insertions(+), 160 deletions(-) diff --git a/data/feeTx/capnp/schema.capnp b/data/feeTx/capnp/schema.capnp index c3d9ec90949..a21ed154286 100644 --- a/data/feeTx/capnp/schema.capnp +++ b/data/feeTx/capnp/schema.capnp @@ -4,16 +4,16 @@ $Go.package("capnp"); $Go.import("_"); -struct SmartContractResultCapn { +struct FeeTxCapn { nonce @0: UInt64; value @1: Data; rcvAddr @2: Data; - txHash @6: Data; + txHash @3: Data; } ##compile with: ## ## -## capnp compile -ogo ./schema.capnp +## capnpc -I$GOPATH/src/github.com/glycerine/go-capnproto -ogo $GOPATH/src/github.com/ElrondNetwork/elrond-go/data/feeTx/capnp/schema.capnp diff --git a/data/feeTx/capnp/schema.capnp.go b/data/feeTx/capnp/schema.capnp.go index bfb8de674fd..dc5edc6f192 100644 --- a/data/feeTx/capnp/schema.capnp.go +++ b/data/feeTx/capnp/schema.capnp.go @@ -1,6 +1,7 @@ package capnp // AUTO GENERATED - DO NOT EDIT + import ( "bufio" "bytes" @@ -9,35 +10,21 @@ import ( "io" ) -type SmartContractResultCapn C.Struct +type FeeTxCapn C.Struct -func NewSmartContractResultCapn(s *C.Segment) SmartContractResultCapn { - return SmartContractResultCapn(s.NewStruct(8, 6)) -} -func NewRootSmartContractResultCapn(s *C.Segment) SmartContractResultCapn { - return SmartContractResultCapn(s.NewRootStruct(8, 6)) -} -func AutoNewSmartContractResultCapn(s *C.Segment) SmartContractResultCapn { - return SmartContractResultCapn(s.NewStructAR(8, 6)) -} -func ReadRootSmartContractResultCapn(s *C.Segment) SmartContractResultCapn { - return SmartContractResultCapn(s.Root(0).ToStruct()) -} -func (s SmartContractResultCapn) Nonce() uint64 { return C.Struct(s).Get64(0) } -func (s SmartContractResultCapn) SetNonce(v uint64) { C.Struct(s).Set64(0, v) } -func (s SmartContractResultCapn) Value() []byte { return C.Struct(s).GetObject(0).ToData() } -func (s SmartContractResultCapn) SetValue(v []byte) { C.Struct(s).SetObject(0, s.Segment.NewData(v)) } -func (s SmartContractResultCapn) RcvAddr() []byte { return C.Struct(s).GetObject(1).ToData() } -func (s SmartContractResultCapn) SetRcvAddr(v []byte) { C.Struct(s).SetObject(1, s.Segment.NewData(v)) } -func (s SmartContractResultCapn) SndAddr() []byte { return C.Struct(s).GetObject(2).ToData() } -func (s SmartContractResultCapn) SetSndAddr(v []byte) { C.Struct(s).SetObject(2, s.Segment.NewData(v)) } -func (s SmartContractResultCapn) Code() []byte { return C.Struct(s).GetObject(3).ToData() } -func (s SmartContractResultCapn) SetCode(v []byte) { C.Struct(s).SetObject(3, s.Segment.NewData(v)) } -func (s SmartContractResultCapn) Data() []byte { return C.Struct(s).GetObject(4).ToData() } -func (s SmartContractResultCapn) SetData(v []byte) { C.Struct(s).SetObject(4, s.Segment.NewData(v)) } -func (s SmartContractResultCapn) TxHash() []byte { return C.Struct(s).GetObject(5).ToData() } -func (s SmartContractResultCapn) SetTxHash(v []byte) { C.Struct(s).SetObject(5, s.Segment.NewData(v)) } -func 
(s SmartContractResultCapn) WriteJSON(w io.Writer) error { +func NewFeeTxCapn(s *C.Segment) FeeTxCapn { return FeeTxCapn(s.NewStruct(8, 3)) } +func NewRootFeeTxCapn(s *C.Segment) FeeTxCapn { return FeeTxCapn(s.NewRootStruct(8, 3)) } +func AutoNewFeeTxCapn(s *C.Segment) FeeTxCapn { return FeeTxCapn(s.NewStructAR(8, 3)) } +func ReadRootFeeTxCapn(s *C.Segment) FeeTxCapn { return FeeTxCapn(s.Root(0).ToStruct()) } +func (s FeeTxCapn) Nonce() uint64 { return C.Struct(s).Get64(0) } +func (s FeeTxCapn) SetNonce(v uint64) { C.Struct(s).Set64(0, v) } +func (s FeeTxCapn) Value() []byte { return C.Struct(s).GetObject(0).ToData() } +func (s FeeTxCapn) SetValue(v []byte) { C.Struct(s).SetObject(0, s.Segment.NewData(v)) } +func (s FeeTxCapn) RcvAddr() []byte { return C.Struct(s).GetObject(1).ToData() } +func (s FeeTxCapn) SetRcvAddr(v []byte) { C.Struct(s).SetObject(1, s.Segment.NewData(v)) } +func (s FeeTxCapn) TxHash() []byte { return C.Struct(s).GetObject(2).ToData() } +func (s FeeTxCapn) SetTxHash(v []byte) { C.Struct(s).SetObject(2, s.Segment.NewData(v)) } +func (s FeeTxCapn) WriteJSON(w io.Writer) error { b := bufio.NewWriter(w) var err error var buf []byte @@ -103,63 +90,6 @@ func (s SmartContractResultCapn) WriteJSON(w io.Writer) error { if err != nil { return err } - _, err = b.WriteString("\"sndAddr\":") - if err != nil { - return err - } - { - s := s.SndAddr() - buf, err = json.Marshal(s) - if err != nil { - return err - } - _, err = b.Write(buf) - if err != nil { - return err - } - } - err = b.WriteByte(',') - if err != nil { - return err - } - _, err = b.WriteString("\"code\":") - if err != nil { - return err - } - { - s := s.Code() - buf, err = json.Marshal(s) - if err != nil { - return err - } - _, err = b.Write(buf) - if err != nil { - return err - } - } - err = b.WriteByte(',') - if err != nil { - return err - } - _, err = b.WriteString("\"data\":") - if err != nil { - return err - } - { - s := s.Data() - buf, err = json.Marshal(s) - if err != nil { - return err - } - _, err = b.Write(buf) - if err != nil { - return err - } - } - err = b.WriteByte(',') - if err != nil { - return err - } _, err = b.WriteString("\"txHash\":") if err != nil { return err @@ -182,12 +112,12 @@ func (s SmartContractResultCapn) WriteJSON(w io.Writer) error { err = b.Flush() return err } -func (s SmartContractResultCapn) MarshalJSON() ([]byte, error) { +func (s FeeTxCapn) MarshalJSON() ([]byte, error) { b := bytes.Buffer{} err := s.WriteJSON(&b) return b.Bytes(), err } -func (s SmartContractResultCapn) WriteCapLit(w io.Writer) error { +func (s FeeTxCapn) WriteCapLit(w io.Writer) error { b := bufio.NewWriter(w) var err error var buf []byte @@ -253,63 +183,6 @@ func (s SmartContractResultCapn) WriteCapLit(w io.Writer) error { if err != nil { return err } - _, err = b.WriteString("sndAddr = ") - if err != nil { - return err - } - { - s := s.SndAddr() - buf, err = json.Marshal(s) - if err != nil { - return err - } - _, err = b.Write(buf) - if err != nil { - return err - } - } - _, err = b.WriteString(", ") - if err != nil { - return err - } - _, err = b.WriteString("code = ") - if err != nil { - return err - } - { - s := s.Code() - buf, err = json.Marshal(s) - if err != nil { - return err - } - _, err = b.Write(buf) - if err != nil { - return err - } - } - _, err = b.WriteString(", ") - if err != nil { - return err - } - _, err = b.WriteString("data = ") - if err != nil { - return err - } - { - s := s.Data() - buf, err = json.Marshal(s) - if err != nil { - return err - } - _, err = b.Write(buf) - if err != nil { - 
return err - } - } - _, err = b.WriteString(", ") - if err != nil { - return err - } _, err = b.WriteString("txHash = ") if err != nil { return err @@ -332,29 +205,25 @@ func (s SmartContractResultCapn) WriteCapLit(w io.Writer) error { err = b.Flush() return err } -func (s SmartContractResultCapn) MarshalCapLit() ([]byte, error) { +func (s FeeTxCapn) MarshalCapLit() ([]byte, error) { b := bytes.Buffer{} err := s.WriteCapLit(&b) return b.Bytes(), err } -type SmartContractResultCapn_List C.PointerList +type FeeTxCapn_List C.PointerList -func NewSmartContractResultCapnList(s *C.Segment, sz int) SmartContractResultCapn_List { - return SmartContractResultCapn_List(s.NewCompositeList(8, 6, sz)) -} -func (s SmartContractResultCapn_List) Len() int { return C.PointerList(s).Len() } -func (s SmartContractResultCapn_List) At(i int) SmartContractResultCapn { - return SmartContractResultCapn(C.PointerList(s).At(i).ToStruct()) +func NewFeeTxCapnList(s *C.Segment, sz int) FeeTxCapn_List { + return FeeTxCapn_List(s.NewCompositeList(8, 3, sz)) } -func (s SmartContractResultCapn_List) ToArray() []SmartContractResultCapn { +func (s FeeTxCapn_List) Len() int { return C.PointerList(s).Len() } +func (s FeeTxCapn_List) At(i int) FeeTxCapn { return FeeTxCapn(C.PointerList(s).At(i).ToStruct()) } +func (s FeeTxCapn_List) ToArray() []FeeTxCapn { n := s.Len() - a := make([]SmartContractResultCapn, n) + a := make([]FeeTxCapn, n) for i := 0; i < n; i++ { a[i] = s.At(i) } return a } -func (s SmartContractResultCapn_List) Set(i int, item SmartContractResultCapn) { - C.PointerList(s).Set(i, C.Object(item)) -} +func (s FeeTxCapn_List) Set(i int, item FeeTxCapn) { C.PointerList(s).Set(i, C.Object(item)) } From a11451ae2130278c1758e6ff212e88f5f6462177 Mon Sep 17 00:00:00 2001 From: Robert Sasu Date: Mon, 8 Jul 2019 09:22:10 +0300 Subject: [PATCH 007/234] started fee implementation --- data/feeTx/feeTx_test.go | 71 ++++++--------------------- process/errors.go | 6 +++ process/interface.go | 17 +++++++ process/smartContract/process.go | 6 +++ process/transaction/process.go | 52 ++++++++++++++++++++ process/unsigned/feeTxHandler.go | 54 ++++++++++++++++++++ process/unsigned/feeTxHandler_test.go | 1 + 7 files changed, 152 insertions(+), 55 deletions(-) create mode 100644 process/unsigned/feeTxHandler.go create mode 100644 process/unsigned/feeTxHandler_test.go diff --git a/data/feeTx/feeTx_test.go b/data/feeTx/feeTx_test.go index 68b34a988ad..886593bf278 100644 --- a/data/feeTx/feeTx_test.go +++ b/data/feeTx/feeTx_test.go @@ -5,101 +5,62 @@ import ( "math/big" "testing" - "github.com/ElrondNetwork/elrond-go/data/smartContractResult" + "github.com/ElrondNetwork/elrond-go/data/feeTx" "github.com/stretchr/testify/assert" ) -func TestSmartContractResult_SaveLoad(t *testing.T) { - smrS := smartContractResult.SmartContractResult{ +func TestFeeTx_SaveLoad(t *testing.T) { + smrS := feeTx.FeeTx{ Nonce: uint64(1), Value: big.NewInt(1), RcvAddr: []byte("receiver_address"), - SndAddr: []byte("sender_address"), - Data: []byte("scr_data"), - Code: []byte("code"), TxHash: []byte("scrHash"), } var b bytes.Buffer - smrS.Save(&b) + err := smrS.Save(&b) + assert.Nil(t, err) - loadSMR := smartContractResult.SmartContractResult{} - loadSMR.Load(&b) + loadSMR := feeTx.FeeTx{} + err = loadSMR.Load(&b) + assert.Nil(t, err) assert.Equal(t, smrS, loadSMR) } -func TestSmartContractResult_GetData(t *testing.T) { +func TestFeeTx_GetRecvAddr(t *testing.T) { t.Parallel() data := []byte("data") - scr := &smartContractResult.SmartContractResult{Data: data} - - 
assert.Equal(t, data, scr.Data) -} - -func TestSmartContractResult_GetRecvAddr(t *testing.T) { - t.Parallel() - - data := []byte("data") - scr := &smartContractResult.SmartContractResult{RcvAddr: data} + scr := &feeTx.FeeTx{RcvAddr: data} assert.Equal(t, data, scr.RcvAddr) } -func TestSmartContractResult_GetSndAddr(t *testing.T) { - t.Parallel() - - data := []byte("data") - scr := &smartContractResult.SmartContractResult{SndAddr: data} - - assert.Equal(t, data, scr.SndAddr) -} - -func TestSmartContractResult_GetValue(t *testing.T) { +func TestFeeTx_GetValue(t *testing.T) { t.Parallel() value := big.NewInt(10) - scr := &smartContractResult.SmartContractResult{Value: value} + scr := &feeTx.FeeTx{Value: value} assert.Equal(t, value, scr.Value) } -func TestSmartContractResult_SetData(t *testing.T) { +func TestFeeTx_SetRecvAddr(t *testing.T) { t.Parallel() data := []byte("data") - scr := &smartContractResult.SmartContractResult{} - scr.SetData(data) - - assert.Equal(t, data, scr.Data) -} - -func TestSmartContractResult_SetRecvAddr(t *testing.T) { - t.Parallel() - - data := []byte("data") - scr := &smartContractResult.SmartContractResult{} + scr := &feeTx.FeeTx{} scr.SetRecvAddress(data) assert.Equal(t, data, scr.RcvAddr) } -func TestSmartContractResult_SetSndAddr(t *testing.T) { - t.Parallel() - - data := []byte("data") - scr := &smartContractResult.SmartContractResult{} - scr.SetSndAddress(data) - - assert.Equal(t, data, scr.SndAddr) -} - -func TestSmartContractResult_SetValue(t *testing.T) { +func TestFeeTx_SetValue(t *testing.T) { t.Parallel() value := big.NewInt(10) - scr := &smartContractResult.SmartContractResult{} + scr := &feeTx.FeeTx{} scr.SetValue(value) assert.Equal(t, value, scr.Value) diff --git a/process/errors.go b/process/errors.go index 72dd2f1298f..5c84184bfa5 100644 --- a/process/errors.go +++ b/process/errors.go @@ -402,3 +402,9 @@ var ErrNilIntermediateProcessorContainer = errors.New("intermediate processor co // ErrNilPreProcessorsContainer signals that preprocessors container is nil var ErrNilPreProcessorsContainer = errors.New("preprocessors container is nil") + +// ErrNotEnoughFeeInTransactions signals that the transaction does not enough fee +var ErrNotEnoughFeeInTransactions = errors.New("transaction fee is not enough") + +// ErrNilUnsignedTxHandler signals that the unsigned tx handler is nil +var ErrNilUnsignedTxHandler = errors.New("nil unsigned tx handler") diff --git a/process/interface.go b/process/interface.go index 5d4d3bb5286..9ac0656b05e 100644 --- a/process/interface.go +++ b/process/interface.go @@ -67,6 +67,23 @@ type IntermediateTransactionHandler interface { VerifyInterMiniBlocks(body block.Body) error } +// TransactionVerifier interface validates if the transaction is good and if it should be processed +type TransactionVerifier interface { + IsTransactionValid(tx data.TransactionHandler) error +} + +// UnsignedTxHandler creates and verifies unsigned transactions for current round +type UnsignedTxHandler interface { + CleanProcessedUTxs() + AddProcessedUTx(tx data.TransactionHandler) + CreateAllUTxs() []data.TransactionHandler + VerifyCreatedUTxs(block block.Body) +} + +type SpecialAddressHandler interface { + GetMyOwnAddress() []byte +} + // Preprocessor is an interface used to prepare and process transaction data type PreProcessor interface { CreateBlockStarted() diff --git a/process/smartContract/process.go b/process/smartContract/process.go index c1fce63afb0..26703c1e3d3 100644 --- a/process/smartContract/process.go +++ 
b/process/smartContract/process.go @@ -40,6 +40,7 @@ type scProcessor struct { mapExecState map[uint32]scExecutionState scrForwarder process.IntermediateTransactionHandler + txFeeHandler process.UnsignedTxHandler } var log = logger.DefaultLogger() @@ -55,6 +56,7 @@ func NewSmartContractProcessor( adrConv state.AddressConverter, coordinator sharding.Coordinator, scrForwarder process.IntermediateTransactionHandler, + txFeeHandler process.UnsignedTxHandler, ) (*scProcessor, error) { if vm == nil { return nil, process.ErrNoVM @@ -83,6 +85,9 @@ func NewSmartContractProcessor( if scrForwarder == nil { return nil, process.ErrNilIntermediateTransactionHandler } + if txFeeHandler == nil { + return nil, process.ErrNilUnsignedTxHandler + } return &scProcessor{ vm: vm, @@ -94,6 +99,7 @@ func NewSmartContractProcessor( adrConv: adrConv, shardCoordinator: coordinator, scrForwarder: scrForwarder, + txFeeHandler: txFeeHandler, mapExecState: make(map[uint32]scExecutionState)}, nil } diff --git a/process/transaction/process.go b/process/transaction/process.go index 4dde91cfb64..44fb3f82996 100644 --- a/process/transaction/process.go +++ b/process/transaction/process.go @@ -2,6 +2,7 @@ package transaction import ( "bytes" + "github.com/ElrondNetwork/elrond-go/data/feeTx" "math/big" "github.com/ElrondNetwork/elrond-go/core/logger" @@ -15,6 +16,9 @@ import ( var log = logger.DefaultLogger() +const MinGasPrice = 1 +const MinTxFee = 1 + // txProcessor implements TransactionProcessor interface and can modify account states according to a transaction type txProcessor struct { accounts state.AccountsAdapter @@ -22,6 +26,7 @@ type txProcessor struct { hasher hashing.Hasher scProcessor process.SmartContractProcessor marshalizer marshal.Marshalizer + txFeeHandler process.UnsignedTxHandler shardCoordinator sharding.Coordinator } @@ -32,6 +37,7 @@ func NewTxProcessor( addressConv state.AddressConverter, marshalizer marshal.Marshalizer, shardCoordinator sharding.Coordinator, + txFeeHandler process.UnsignedTxHandler, scProcessor process.SmartContractProcessor, ) (*txProcessor, error) { @@ -53,6 +59,9 @@ func NewTxProcessor( if scProcessor == nil { return nil, process.ErrNilSmartContractProcessor } + if txFeeHandler == nil { + return nil, process.ErrNilUnsignedTxHandler + } return &txProcessor{ accounts: accounts, @@ -61,6 +70,7 @@ func NewTxProcessor( marshalizer: marshalizer, shardCoordinator: shardCoordinator, scProcessor: scProcessor, + txFeeHandler: txFeeHandler, }, nil } @@ -92,6 +102,41 @@ func (txProc *txProcessor) ProcessTransaction(tx *transaction.Transaction, round return process.ErrWrongTransaction } +func (txProc *txProcessor) processTxFee(tx *transaction.Transaction, acntSnd *state.Account) (*feeTx.FeeTx, error) { + if acntSnd == nil { + return nil, nil + } + + cost := big.NewInt(0) + cost = cost.Mul(big.NewInt(0).SetUint64(tx.GasPrice), big.NewInt(0).SetUint64(tx.GasLimit)) + + txDataLen := int64(len(tx.Data)) + 1 + minFee := big.NewInt(0) + minFee = minFee.Mul(big.NewInt(txDataLen), big.NewInt(MinGasPrice)) + minFee = minFee.Add(minFee, big.NewInt(MinTxFee)) + + if minFee.Cmp(cost) < 0 { + return nil, process.ErrNotEnoughFeeInTransactions + } + + if acntSnd.Balance.Cmp(cost) < 0 { + return nil, process.ErrInsufficientFunds + } + + operation := big.NewInt(0) + err := acntSnd.SetBalanceWithJournal(operation.Sub(acntSnd.Balance, cost)) + if err != nil { + return nil, err + } + + currFeeTx := &feeTx.FeeTx{ + Nonce: tx.Nonce, + Value: cost, + } + + return currFeeTx, nil +} + func (txProc *txProcessor) 
processMoveBalance( tx *transaction.Transaction, adrSrc, adrDst state.AddressContainer, @@ -104,6 +149,11 @@ func (txProc *txProcessor) processMoveBalance( return err } + currFeeTx, err := txProc.processTxFee(tx, acntSrc) + if err != nil { + return err + } + value := tx.Value // is sender address in node shard if acntSrc != nil { @@ -126,6 +176,8 @@ func (txProc *txProcessor) processMoveBalance( } } + txProc.txFeeHandler.AddProcessedUTx(currFeeTx) + return nil } diff --git a/process/unsigned/feeTxHandler.go b/process/unsigned/feeTxHandler.go new file mode 100644 index 00000000000..b32b121a2be --- /dev/null +++ b/process/unsigned/feeTxHandler.go @@ -0,0 +1,54 @@ +package unsigned + +import ( + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/feeTx" + "github.com/ElrondNetwork/elrond-go/process" + "sync" +) + +type feeTxHandler struct { + address process.SpecialAddressHandler + mutTxs sync.Mutex + feeTxs []*feeTx.FeeTx +} + +func NewFeeTxHandler(address process.SpecialAddressHandler) (*feeTxHandler, error) { + ftxh := &feeTxHandler{ + address: address, + } + ftxh.feeTxs = make([]*feeTx.FeeTx, 0) + + return ftxh, nil +} + +// CleanProcessedUTxs deletes the cached data +func (ftxh *feeTxHandler) CleanProcessedUTxs() { + ftxh.mutTxs.Lock() + ftxh.feeTxs = make([]*feeTx.FeeTx, 0) + ftxh.mutTxs.Unlock() +} + +// AddProcessedUTx adds a new feeTx to the cache +func (ftxh *feeTxHandler) AddProcessedUTx(tx data.TransactionHandler) { + currFeeTx, ok := tx.(*feeTx.FeeTx) + if !ok { + log.Debug(process.ErrWrongTypeAssertion.Error()) + } + + ftxh.mutTxs.Lock() + ftxh.feeTxs = append(ftxh.feeTxs, currFeeTx) + ftxh.mutTxs.Unlock() +} + +// CreateAllUtxs creates all the needed fee transactions +// According to economic paper 50% burn, 40% to the leader, 10% to Elrond community fund +func (ftxh *feeTxHandler) CreateAllUTxs() []data.TransactionHandler { + panic("implement me") +} + +// VerifyCreatedUTxs +func (ftxh *feeTxHandler) VerifyCreatedUTxs(block block.Body) { + panic("implement me") +} diff --git a/process/unsigned/feeTxHandler_test.go b/process/unsigned/feeTxHandler_test.go new file mode 100644 index 00000000000..02e9d600f77 --- /dev/null +++ b/process/unsigned/feeTxHandler_test.go @@ -0,0 +1 @@ +package unsigned From 6f21f0b4409f9d9229e94a796e7b845ee5378336 Mon Sep 17 00:00:00 2001 From: Robert Sasu Date: Mon, 8 Jul 2019 18:35:23 +0300 Subject: [PATCH 008/234] fixing unit tests and integration tests. 
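
The updated tests assume the fee charged for a move-balance transaction is gasPrice * gasLimit,
checked against a minimum derived from the data length and the MinGasPrice / MinTxFee constants
added in the previous patch. The snippet below is only an illustrative sketch of that computation,
not code from this patch: computeTxFee, the constant values and the main function are placeholders
that mirror the intent of processTxFee in process/transaction/process.go.

    // Illustrative sketch only: fee computation assumed by the balance assertions in the tests.
    package main

    import (
    	"errors"
    	"fmt"
    	"math/big"
    )

    const (
    	minGasPrice = 1 // assumed placeholder mirroring MinGasPrice
    	minTxFee    = 1 // assumed placeholder mirroring MinTxFee
    )

    var errNotEnoughFee = errors.New("transaction fee is not enough")

    // computeTxFee returns gasPrice*gasLimit and rejects fees below the minimum
    // implied by the data length, which is what the new unit tests exercise.
    func computeTxFee(gasPrice, gasLimit uint64, data []byte) (*big.Int, error) {
    	cost := new(big.Int).Mul(new(big.Int).SetUint64(gasPrice), new(big.Int).SetUint64(gasLimit))

    	txDataLen := int64(len(data)) + 1
    	minFee := new(big.Int).Mul(big.NewInt(txDataLen), big.NewInt(minGasPrice))
    	minFee.Add(minFee, big.NewInt(minTxFee))

    	if cost.Cmp(minFee) < 0 {
    		return nil, errNotEnoughFee
    	}
    	return cost, nil
    }

    func main() {
    	// With gasPrice = gasLimit = 2 and empty data, the fee is 4 and covers
    	// the minimum (1*1 + 1 = 2), so a sender balance is expected to drop by
    	// value + 4 per transaction in the adjusted tests.
    	fee, err := computeTxFee(2, 2, nil)
    	fmt.Println(fee, err) // 4 <nil>
    }

With gasPrice and gasLimit both set to 2, the multi-shard integration test appears to expect the
sender balance to decrease by the transferred value plus 4 per generated transaction, which is the
consumedFee it subtracts before asserting the remaining balance.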
--- data/feeTx/feeTx.go | 2 +- .../mock/unsignedTxHandlerMock.go | 45 +++ .../block/executingMiniblocks_test.go | 35 ++- .../multiShard/block/testInitializer.go | 1 + .../state/stateExecTransaction_test.go | 86 ++++-- integrationTests/vm/testInitializer.go | 22 +- process/interface.go | 3 +- process/mock/unsignedTxHandlerMock.go | 45 +++ process/smartContract/export_test.go | 15 +- process/smartContract/process.go | 54 ++-- process/smartContract/process_test.go | 280 +++++++++++++----- process/transaction/export_test.go | 15 + process/transaction/process.go | 8 +- process/transaction/process_test.go | 77 ++++- process/unsigned/feeTxHandler.go | 68 ++++- 15 files changed, 600 insertions(+), 156 deletions(-) create mode 100644 integrationTests/mock/unsignedTxHandlerMock.go create mode 100644 process/mock/unsignedTxHandlerMock.go diff --git a/data/feeTx/feeTx.go b/data/feeTx/feeTx.go index 5550ba2dd87..440af8b8448 100644 --- a/data/feeTx/feeTx.go +++ b/data/feeTx/feeTx.go @@ -13,7 +13,7 @@ type FeeTx struct { Nonce uint64 `capid:"0" json:"nonce"` Value *big.Int `capid:"1" json:"value"` RcvAddr []byte `capid:"2" json:"receiver"` - TxHash []byte `capid:"6" json:"txHash"` + TxHash []byte `capid:"3" json:"txHash"` } // Save saves the serialized data of a FeeTx into a stream through Capnp protocol diff --git a/integrationTests/mock/unsignedTxHandlerMock.go b/integrationTests/mock/unsignedTxHandlerMock.go new file mode 100644 index 00000000000..02288975fc0 --- /dev/null +++ b/integrationTests/mock/unsignedTxHandlerMock.go @@ -0,0 +1,45 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/block" +) + +type UnsignedTxHandlerMock struct { + CleanProcessedUtxsCalled func() + AddProcessedUTxCalled func(tx data.TransactionHandler) + CreateAllUTxsCalled func() []data.TransactionHandler + VerifyCreatedUTxsCalled func(header data.HeaderHandler, body block.Body) error +} + +func (ut *UnsignedTxHandlerMock) CleanProcessedUTxs() { + if ut.CleanProcessedUtxsCalled == nil { + return + } + + ut.CleanProcessedUtxsCalled() + return +} + +func (ut *UnsignedTxHandlerMock) AddProcessedUTx(tx data.TransactionHandler) { + if ut.AddProcessedUTxCalled == nil { + return + } + + ut.AddProcessedUTxCalled(tx) + return +} + +func (ut *UnsignedTxHandlerMock) CreateAllUTxs() []data.TransactionHandler { + if ut.CreateAllUTxsCalled == nil { + return nil + } + return ut.CreateAllUTxsCalled() +} + +func (ut *UnsignedTxHandlerMock) VerifyCreatedUTxs(header data.HeaderHandler, body block.Body) error { + if ut.VerifyCreatedUTxsCalled == nil { + return nil + } + return ut.VerifyCreatedUTxsCalled(header, body) +} diff --git a/integrationTests/multiShard/block/executingMiniblocks_test.go b/integrationTests/multiShard/block/executingMiniblocks_test.go index aed9f093dd5..de167cb1419 100644 --- a/integrationTests/multiShard/block/executingMiniblocks_test.go +++ b/integrationTests/multiShard/block/executingMiniblocks_test.go @@ -34,6 +34,8 @@ func TestShouldProcessBlocksInMultiShardArchitecture(t *testing.T) { valMinting := big.NewInt(100) valToTransferPerTx := big.NewInt(2) + gasPricePerTx := uint64(2) + gasLimitPerTx := uint64(2) advertiser := createMessengerWithKadDht(context.Background(), "") advertiser.Bootstrap() @@ -79,7 +81,7 @@ func TestShouldProcessBlocksInMultiShardArchitecture(t *testing.T) { } fmt.Println("Step 3. 
Generating transactions...") - generateAndDisseminateTxs(proposerNode.node, sendersPrivateKeys, receiversPrivateKeys, valToTransferPerTx) + generateAndDisseminateTxs(proposerNode.node, sendersPrivateKeys, receiversPrivateKeys, valToTransferPerTx, gasPricePerTx, gasPricePerTx) fmt.Println("Delaying for disseminating transactions...") time.Sleep(time.Second * 5) @@ -170,6 +172,12 @@ func TestShouldProcessBlocksInMultiShardArchitecture(t *testing.T) { for _, sk := range sendersPrivateKeys { valTransferred := big.NewInt(0).Mul(valToTransferPerTx, big.NewInt(int64(len(receiversPrivateKeys)))) valRemaining := big.NewInt(0).Sub(valMinting, valTransferred) + + consumedFee := big.NewInt(0) + consumedFee = consumedFee.Mul(big.NewInt(0).SetUint64(gasPricePerTx), big.NewInt(0).SetUint64(gasLimitPerTx)) + consumedFee = consumedFee.Mul(consumedFee, big.NewInt(int64(len(receiversPrivateKeys)))) + + valRemaining = valRemaining.Sub(valRemaining, consumedFee) testPrivateKeyHasBalance(t, n, sk, valRemaining) } //test receiver balances from same shard @@ -378,6 +386,8 @@ func generateAndDisseminateTxs( senders []crypto.PrivateKey, receiversPrivateKeys map[uint32][]crypto.PrivateKey, valToTransfer *big.Int, + gasPrice uint64, + gasLimit uint64, ) { for i := 0; i < len(senders); i++ { @@ -385,14 +395,14 @@ func generateAndDisseminateTxs( incrementalNonce := uint64(0) for _, recvPrivateKeys := range receiversPrivateKeys { receiverKey := recvPrivateKeys[i] - tx := generateTransferTx(incrementalNonce, senderKey, receiverKey, valToTransfer) + tx := generateTransferTx(incrementalNonce, senderKey, receiverKey, valToTransfer, gasPrice, gasLimit) n.SendTransaction( tx.Nonce, hex.EncodeToString(tx.SndAddr), hex.EncodeToString(tx.RcvAddr), tx.Value, - 0, - 0, + tx.GasPrice, + tx.GasLimit, string(tx.Data), tx.Signature, ) @@ -405,14 +415,19 @@ func generateTransferTx( nonce uint64, sender crypto.PrivateKey, receiver crypto.PrivateKey, - valToTransfer *big.Int) *transaction.Transaction { + valToTransfer *big.Int, + gasPrice uint64, + gasLimit uint64, +) *transaction.Transaction { tx := transaction.Transaction{ - Nonce: nonce, - Value: valToTransfer, - RcvAddr: skToPk(receiver), - SndAddr: skToPk(sender), - Data: make([]byte, 0), + Nonce: nonce, + Value: valToTransfer, + RcvAddr: skToPk(receiver), + SndAddr: skToPk(sender), + Data: make([]byte, 0), + GasLimit: gasLimit, + GasPrice: gasPrice, } txBuff, _ := testMarshalizer.Marshal(&tx) signer := &singlesig.SchnorrSigner{} diff --git a/integrationTests/multiShard/block/testInitializer.go b/integrationTests/multiShard/block/testInitializer.go index 461117b5208..856bd700369 100644 --- a/integrationTests/multiShard/block/testInitializer.go +++ b/integrationTests/multiShard/block/testInitializer.go @@ -255,6 +255,7 @@ func createNetNode( testMarshalizer, shardCoordinator, &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, ) fact, _ := shard.NewPreProcessorsContainerFactory( diff --git a/integrationTests/state/stateExecTransaction_test.go b/integrationTests/state/stateExecTransaction_test.go index 644a9da2e55..98f71ce4c5c 100644 --- a/integrationTests/state/stateExecTransaction_test.go +++ b/integrationTests/state/stateExecTransaction_test.go @@ -32,7 +32,15 @@ func TestExecTransaction_SelfTransactionShouldWork(t *testing.T) { shardCoordinator := mock.NewMultiShardsCoordinatorMock(1) addrConv, _ := addressConverters.NewPlainAddressConverter(32, "0x") - txProcessor, _ := transaction.NewTxProcessor(accnts, hasher, addrConv, marshalizer, shardCoordinator, 
&mock.SCProcessorMock{}) + txProcessor, _ := transaction.NewTxProcessor( + accnts, + hasher, + addrConv, + marshalizer, + shardCoordinator, + &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, + ) nonce := uint64(6) balance := big.NewInt(10000) @@ -40,17 +48,19 @@ func TestExecTransaction_SelfTransactionShouldWork(t *testing.T) { //Step 1. create account with a nonce and a balance address, _ := addrConv.CreateAddressFromHex(string(pubKeyBuff)) account, _ := accnts.GetAccountWithJournal(address) - account.(*state.Account).SetNonceWithJournal(nonce) - account.(*state.Account).SetBalanceWithJournal(balance) + _ = account.(*state.Account).SetNonceWithJournal(nonce) + _ = account.(*state.Account).SetBalanceWithJournal(balance) hashCreated, _ := accnts.Commit() //Step 2. create a tx moving 1 from pubKeyBuff to pubKeyBuff tx := &transaction2.Transaction{ - Nonce: nonce, - Value: big.NewInt(1), - SndAddr: address.Bytes(), - RcvAddr: address.Bytes(), + Nonce: nonce, + Value: big.NewInt(1), + GasLimit: 2, + GasPrice: 1, + SndAddr: address.Bytes(), + RcvAddr: address.Bytes(), } err := txProcessor.ProcessTransaction(tx, 0) @@ -59,6 +69,8 @@ func TestExecTransaction_SelfTransactionShouldWork(t *testing.T) { hashAfterExec, _ := accnts.Commit() assert.NotEqual(t, hashCreated, hashAfterExec) + balance = balance.Sub(balance, big.NewInt(0).SetUint64(tx.GasPrice*tx.GasLimit)) + accountAfterExec, _ := accnts.GetAccountWithJournal(address) assert.Equal(t, nonce+1, accountAfterExec.(*state.Account).Nonce) assert.Equal(t, balance, accountAfterExec.(*state.Account).Balance) @@ -76,7 +88,15 @@ func TestExecTransaction_SelfTransactionWithRevertShouldWork(t *testing.T) { shardCoordinator := mock.NewMultiShardsCoordinatorMock(1) addrConv, _ := addressConverters.NewPlainAddressConverter(32, "0x") - txProcessor, _ := transaction.NewTxProcessor(accnts, hasher, addrConv, marshalizer, shardCoordinator, &mock.SCProcessorMock{}) + txProcessor, _ := transaction.NewTxProcessor( + accnts, + hasher, + addrConv, + marshalizer, + shardCoordinator, + &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, + ) nonce := uint64(6) balance := big.NewInt(10000) @@ -84,17 +104,19 @@ func TestExecTransaction_SelfTransactionWithRevertShouldWork(t *testing.T) { //Step 1. create account with a nonce and a balance address, _ := addrConv.CreateAddressFromHex(string(pubKeyBuff)) account, _ := accnts.GetAccountWithJournal(address) - account.(*state.Account).SetNonceWithJournal(nonce) - account.(*state.Account).SetBalanceWithJournal(balance) + _ = account.(*state.Account).SetNonceWithJournal(nonce) + _ = account.(*state.Account).SetBalanceWithJournal(balance) - accnts.Commit() + _, _ = accnts.Commit() //Step 2. 
create a tx moving 1 from pubKeyBuff to pubKeyBuff tx := &transaction2.Transaction{ - Nonce: nonce, - Value: big.NewInt(1), - SndAddr: address.Bytes(), - RcvAddr: address.Bytes(), + Nonce: nonce, + Value: big.NewInt(1), + SndAddr: address.Bytes(), + RcvAddr: address.Bytes(), + GasLimit: 2, + GasPrice: 2, } err := txProcessor.ProcessTransaction(tx, 0) @@ -124,8 +146,8 @@ func TestExecTransaction_MoreTransactionsWithRevertShouldWork(t *testing.T) { receiver, _ := addrConv.CreateAddressFromHex(string(pubKeyBuff)) account, _ := accnts.GetAccountWithJournal(sender) - account.(*state.Account).SetNonceWithJournal(nonce) - account.(*state.Account).SetBalanceWithJournal(balance) + _ = account.(*state.Account).SetNonceWithJournal(nonce) + _ = account.(*state.Account).SetBalanceWithJournal(balance) initialHash, _ := accnts.Commit() fmt.Printf("Initial hash: %s\n", base64.StdEncoding.EncodeToString(initialHash)) @@ -147,17 +169,29 @@ func testExecTransactionsMoreTxWithRevert( shardCoordinator := mock.NewMultiShardsCoordinatorMock(1) addrConv, _ := addressConverters.NewPlainAddressConverter(32, "0x") - txProcessor, _ := transaction.NewTxProcessor(accnts, hasher, addrConv, marshalizer, shardCoordinator, &mock.SCProcessorMock{}) + txProcessor, _ := transaction.NewTxProcessor( + accnts, + hasher, + addrConv, + marshalizer, + shardCoordinator, + &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, + ) txToGenerate := 15000 - + gasPrice := uint64(2) + gasLimit := uint64(2) + value := uint64(1) //Step 1. execute a lot moving transactions from pubKeyBuff to another pubKeyBuff for i := 0; i < txToGenerate; i++ { tx := &transaction2.Transaction{ - Nonce: initialNonce + uint64(i), - Value: big.NewInt(1), - SndAddr: sender.Bytes(), - RcvAddr: receiver.Bytes(), + Nonce: initialNonce + uint64(i), + Value: big.NewInt(int64(value)), + GasPrice: gasPrice, + GasLimit: gasLimit, + SndAddr: sender.Bytes(), + RcvAddr: receiver.Bytes(), } err := txProcessor.ProcessTransaction(tx, 0) @@ -172,7 +206,7 @@ func testExecTransactionsMoreTxWithRevert( newAccount, _ := accnts.GetAccountWithJournal(receiver) account, _ := accnts.GetAccountWithJournal(sender) - assert.Equal(t, account.(*state.Account).Balance, big.NewInt(initialBalance-int64(txToGenerate))) + assert.Equal(t, account.(*state.Account).Balance, big.NewInt(initialBalance-int64(uint64(txToGenerate)*(gasPrice*gasLimit+value)))) assert.Equal(t, account.(*state.Account).Nonce, uint64(txToGenerate)+initialNonce) assert.Equal(t, newAccount.(*state.Account).Balance, big.NewInt(int64(txToGenerate))) @@ -218,8 +252,8 @@ func TestExecTransaction_MoreTransactionsMoreIterationsWithRevertShouldWork(t *t receiver, _ := addrConv.CreateAddressFromHex(string(pubKeyBuff)) account, _ := accnts.GetAccountWithJournal(sender) - account.(*state.Account).SetNonceWithJournal(nonce) - account.(*state.Account).SetBalanceWithJournal(balance) + _ = account.(*state.Account).SetNonceWithJournal(nonce) + _ = account.(*state.Account).SetBalanceWithJournal(balance) initialHash, _ := accnts.Commit() fmt.Printf("Initial hash: %s\n", base64.StdEncoding.EncodeToString(initialHash)) diff --git a/integrationTests/vm/testInitializer.go b/integrationTests/vm/testInitializer.go index e7d1c2975f9..bce462685ea 100644 --- a/integrationTests/vm/testInitializer.go +++ b/integrationTests/vm/testInitializer.go @@ -84,8 +84,17 @@ func CreateTxProcessorWithOneSCExecutorMockVM(accnts state.AccountsAdapter, opGa addrConv, oneShardCoordinator, &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, 
+ ) + txProcessor, _ := transaction.NewTxProcessor( + accnts, + testHasher, + addrConv, + testMarshalizer, + oneShardCoordinator, + scProcessor, + &mock.UnsignedTxHandlerMock{}, ) - txProcessor, _ := transaction.NewTxProcessor(accnts, testHasher, addrConv, testMarshalizer, oneShardCoordinator, scProcessor) return txProcessor } @@ -115,8 +124,17 @@ func CreateTxProcessorWithOneSCExecutorIeleVM(accnts state.AccountsAdapter) proc addrConv, oneShardCoordinator, &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) + txProcessor, _ := transaction.NewTxProcessor( + accnts, + testHasher, + addrConv, + testMarshalizer, + oneShardCoordinator, + scProcessor, + &mock.UnsignedTxHandlerMock{}, ) - txProcessor, _ := transaction.NewTxProcessor(accnts, testHasher, addrConv, testMarshalizer, oneShardCoordinator, scProcessor) return txProcessor } diff --git a/process/interface.go b/process/interface.go index 9ac0656b05e..985639f08dc 100644 --- a/process/interface.go +++ b/process/interface.go @@ -77,11 +77,12 @@ type UnsignedTxHandler interface { CleanProcessedUTxs() AddProcessedUTx(tx data.TransactionHandler) CreateAllUTxs() []data.TransactionHandler - VerifyCreatedUTxs(block block.Body) + VerifyCreatedUTxs(header data.HeaderHandler, block block.Body) error } type SpecialAddressHandler interface { GetMyOwnAddress() []byte + GetElrondCommunityAddress() []byte } // Preprocessor is an interface used to prepare and process transaction data diff --git a/process/mock/unsignedTxHandlerMock.go b/process/mock/unsignedTxHandlerMock.go new file mode 100644 index 00000000000..02288975fc0 --- /dev/null +++ b/process/mock/unsignedTxHandlerMock.go @@ -0,0 +1,45 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/block" +) + +type UnsignedTxHandlerMock struct { + CleanProcessedUtxsCalled func() + AddProcessedUTxCalled func(tx data.TransactionHandler) + CreateAllUTxsCalled func() []data.TransactionHandler + VerifyCreatedUTxsCalled func(header data.HeaderHandler, body block.Body) error +} + +func (ut *UnsignedTxHandlerMock) CleanProcessedUTxs() { + if ut.CleanProcessedUtxsCalled == nil { + return + } + + ut.CleanProcessedUtxsCalled() + return +} + +func (ut *UnsignedTxHandlerMock) AddProcessedUTx(tx data.TransactionHandler) { + if ut.AddProcessedUTxCalled == nil { + return + } + + ut.AddProcessedUTxCalled(tx) + return +} + +func (ut *UnsignedTxHandlerMock) CreateAllUTxs() []data.TransactionHandler { + if ut.CreateAllUTxsCalled == nil { + return nil + } + return ut.CreateAllUTxsCalled() +} + +func (ut *UnsignedTxHandlerMock) VerifyCreatedUTxs(header data.HeaderHandler, body block.Body) error { + if ut.VerifyCreatedUTxsCalled == nil { + return nil + } + return ut.VerifyCreatedUTxsCalled(header, body) +} diff --git a/process/smartContract/export_test.go b/process/smartContract/export_test.go index 28bb3921f57..3e8d664ff35 100644 --- a/process/smartContract/export_test.go +++ b/process/smartContract/export_test.go @@ -2,6 +2,7 @@ package smartContract import ( "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/feeTx" "github.com/ElrondNetwork/elrond-go/data/smartContractResult" "math/big" @@ -22,11 +23,21 @@ func (sc *scProcessor) CreateVMInput(tx *transaction.Transaction) (*vmcommon.VMI return sc.createVMInput(tx) } -func (sc *scProcessor) ProcessVMOutput(vmOutput *vmcommon.VMOutput, tx *transaction.Transaction, acntSnd state.AccountHandler, round uint32) ([]data.TransactionHandler, error) { +func (sc 
*scProcessor) ProcessVMOutput( + vmOutput *vmcommon.VMOutput, + tx *transaction.Transaction, + acntSnd state.AccountHandler, + round uint32, +) ([]data.TransactionHandler, *feeTx.FeeTx, error) { return sc.processVMOutput(vmOutput, tx, acntSnd, round) } -func (sc *scProcessor) RefundGasToSender(gasRefund *big.Int, tx *transaction.Transaction, txHash []byte, acntSnd state.AccountHandler) (*smartContractResult.SmartContractResult, error) { +func (sc *scProcessor) RefundGasToSender( + gasRefund *big.Int, + tx *transaction.Transaction, + txHash []byte, + acntSnd state.AccountHandler, +) (*smartContractResult.SmartContractResult, *big.Int, error) { return sc.refundGasToSender(gasRefund, tx, txHash, acntSnd) } diff --git a/process/smartContract/process.go b/process/smartContract/process.go index 26703c1e3d3..c930f2eabeb 100644 --- a/process/smartContract/process.go +++ b/process/smartContract/process.go @@ -5,6 +5,7 @@ import ( "encoding/hex" "fmt" "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/feeTx" "math/big" "sync" @@ -186,7 +187,7 @@ func (sc *scProcessor) ExecuteSmartContractTransaction( } // VM is formally verified and the output is correct - crossTxs, err := sc.processVMOutput(vmOutput, tx, acntSnd, round) + crossTxs, consumedFee, err := sc.processVMOutput(vmOutput, tx, acntSnd, round) if err != nil { return err } @@ -196,6 +197,8 @@ func (sc *scProcessor) ExecuteSmartContractTransaction( return err } + sc.txFeeHandler.AddProcessedUTx(consumedFee) + return nil } @@ -255,7 +258,7 @@ func (sc *scProcessor) DeploySmartContract( } // VM is formally verified, the output is correct - crossTxs, err := sc.processVMOutput(vmOutput, tx, acntSnd, round) + crossTxs, consumedFee, err := sc.processVMOutput(vmOutput, tx, acntSnd, round) if err != nil { return err } @@ -265,6 +268,8 @@ func (sc *scProcessor) DeploySmartContract( return err } + sc.txFeeHandler.AddProcessedUTx(consumedFee) + return nil } @@ -367,40 +372,40 @@ func (sc *scProcessor) processVMOutput( tx *transaction.Transaction, acntSnd state.AccountHandler, round uint32, -) ([]data.TransactionHandler, error) { +) ([]data.TransactionHandler, *feeTx.FeeTx, error) { if vmOutput == nil { - return nil, process.ErrNilVMOutput + return nil, nil, process.ErrNilVMOutput } if tx == nil { - return nil, process.ErrNilTransaction + return nil, nil, process.ErrNilTransaction } txBytes, err := sc.marshalizer.Marshal(tx) if err != nil { - return nil, err + return nil, nil, err } txHash := sc.hasher.Compute(string(txBytes)) err = sc.saveSCOutputToCurrentState(vmOutput, round, txHash) if err != nil { - return nil, err + return nil, nil, err } crossOutAccs, err := sc.processSCOutputAccounts(vmOutput.OutputAccounts) if err != nil { - return nil, err + return nil, nil, err } crossTxs, err := sc.createCrossShardTransactions(crossOutAccs, tx, txHash) if err != nil { - return nil, err + return nil, nil, err } totalGasRefund := big.NewInt(0) totalGasRefund = totalGasRefund.Add(vmOutput.GasRefund, vmOutput.GasRemaining) - scrIfCrossShard, err := sc.refundGasToSender(totalGasRefund, tx, txHash, acntSnd) + scrIfCrossShard, consumedFee, err := sc.refundGasToSender(totalGasRefund, tx, txHash, acntSnd) if err != nil { - return nil, err + return nil, nil, err } if scrIfCrossShard != nil { @@ -409,15 +414,20 @@ func (sc *scProcessor) processVMOutput( err = sc.deleteAccounts(vmOutput.DeletedAccounts) if err != nil { - return nil, err + return nil, nil, err } err = sc.processTouchedAccounts(vmOutput.TouchedAccounts) if err != nil { - 
return nil, err + return nil, nil, err + } + + currFeeTx := &feeTx.FeeTx{ + Nonce: tx.Nonce, + Value: consumedFee, } - return crossTxs, nil + return crossTxs, currFeeTx, nil } func (sc *scProcessor) createSmartContractResult( @@ -459,14 +469,18 @@ func (sc *scProcessor) refundGasToSender( tx *transaction.Transaction, txHash []byte, acntSnd state.AccountHandler, -) (*smartContractResult.SmartContractResult, error) { +) (*smartContractResult.SmartContractResult, *big.Int, error) { + consumedFee := big.NewInt(0) + consumedFee = consumedFee.Mul(big.NewInt(0).SetUint64(tx.GasPrice), big.NewInt(0).SetUint64(tx.GasLimit)) if gasRefund == nil || gasRefund.Cmp(big.NewInt(0)) <= 0 { - return nil, nil + return nil, consumedFee, nil } refundErd := big.NewInt(0) refundErd = refundErd.Mul(gasRefund, big.NewInt(int64(tx.GasPrice))) + consumedFee = consumedFee.Sub(consumedFee, refundErd) + if acntSnd == nil || acntSnd.IsInterfaceNil() { scTx := &smartContractResult.SmartContractResult{} scTx.Value = refundErd @@ -474,21 +488,21 @@ func (sc *scProcessor) refundGasToSender( scTx.SndAddr = tx.RcvAddr scTx.Nonce = tx.Nonce scTx.TxHash = txHash - return scTx, nil + return scTx, consumedFee, nil } stAcc, ok := acntSnd.(*state.Account) if !ok { - return nil, process.ErrWrongTypeAssertion + return nil, nil, process.ErrWrongTypeAssertion } newBalance := big.NewInt(0).Add(stAcc.Balance, refundErd) err := stAcc.SetBalanceWithJournal(newBalance) if err != nil { - return nil, err + return nil, nil, err } - return nil, nil + return nil, consumedFee, nil } // save account changes in state from vmOutput - protected by VM - every output can be treated as is. diff --git a/process/smartContract/process_test.go b/process/smartContract/process_test.go index bdfab1b3811..ae787a013cf 100644 --- a/process/smartContract/process_test.go +++ b/process/smartContract/process_test.go @@ -65,7 +65,9 @@ func TestNewSmartContractProcessorNilVM(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.Nil(t, sc) assert.Equal(t, process.ErrNoVM, err) @@ -83,7 +85,9 @@ func TestNewSmartContractProcessorNilArgsParser(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.Nil(t, sc) assert.Equal(t, process.ErrNilArgumentParser, err) @@ -101,7 +105,9 @@ func TestNewSmartContractProcessorNilHasher(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.Nil(t, sc) assert.Equal(t, process.ErrNilHasher, err) @@ -119,7 +125,9 @@ func TestNewSmartContractProcessorNilMarshalizer(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.Nil(t, sc) assert.Equal(t, process.ErrNilMarshalizer, err) @@ -137,7 +145,9 @@ func TestNewSmartContractProcessorNilAccountsDB(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, 
mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.Nil(t, sc) assert.Equal(t, process.ErrNilAccountsAdapter, err) @@ -155,7 +165,9 @@ func TestNewSmartContractProcessorNilAdrConv(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, nil, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.Nil(t, sc) assert.Equal(t, process.ErrNilAddressConverter, err) @@ -173,7 +185,9 @@ func TestNewSmartContractProcessorNilShardCoordinator(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, nil, - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.Nil(t, sc) assert.Equal(t, process.ErrNilShardCoordinator, err) @@ -191,7 +205,9 @@ func TestNewSmartContractProcessorNilFakeAccountsHandler(t *testing.T) { nil, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.Nil(t, sc) assert.Equal(t, process.ErrNilTemporaryAccountsHandler, err) @@ -209,7 +225,9 @@ func TestNewSmartContractProcessor_NilIntermediateMock(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - nil) + nil, + &mock.UnsignedTxHandlerMock{}, + ) assert.Nil(t, sc) assert.Equal(t, process.ErrNilIntermediateTransactionHandler, err) @@ -227,7 +245,9 @@ func TestNewSmartContractProcessor(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -245,7 +265,9 @@ func TestScProcessor_ComputeTransactionTypeNil(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -266,7 +288,9 @@ func TestScProcessor_ComputeTransactionTypeNilTx(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -294,7 +318,9 @@ func TestScProcessor_ComputeTransactionTypeErrWrongTransaction(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -322,7 +348,9 @@ func TestScProcessor_ComputeTransactionTypeScDeployment(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, addressConverter, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -364,7 +392,9 @@ func TestScProcessor_ComputeTransactionTypeScInvoking(t 
*testing.T) { &mock.TemporaryAccountsHandlerMock{}, addrConverter, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -398,7 +428,9 @@ func TestScProcessor_ComputeTransactionTypeMoveBalance(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, addrConverter, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -423,7 +455,9 @@ func TestScProcessor_DeploySmartContractBadParse(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, addrConverter, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -458,7 +492,9 @@ func TestScProcessor_DeploySmartContractRunError(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, addrConverter, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -492,7 +528,9 @@ func TestScProcessor_DeploySmartContractWrongTx(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -523,7 +561,9 @@ func TestScProcessor_DeploySmartContract(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, addrConverter, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -553,7 +593,9 @@ func TestScProcessor_ExecuteSmartContractTransactionNilTx(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -583,7 +625,9 @@ func TestScProcessor_ExecuteSmartContractTransactionNilAccount(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -621,7 +665,9 @@ func TestScProcessor_ExecuteSmartContractTransactionBadParser(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -656,7 +702,9 @@ func TestScProcessor_ExecuteSmartContractTransactionVMRunError(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -691,7 +739,9 @@ func 
TestScProcessor_ExecuteSmartContractTransaction(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -722,7 +772,9 @@ func TestScProcessor_CreateVMCallInputWrongCode(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -756,7 +808,9 @@ func TestScProcessor_CreateVMCallInput(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -786,7 +840,9 @@ func TestScProcessor_CreateVMDeployInputBadFunction(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -821,7 +877,9 @@ func TestScProcessor_CreateVMDeployInput(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -851,7 +909,9 @@ func TestScProcessor_CreateVMInputWrongArgument(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -885,7 +945,9 @@ func TestScProcessor_CreateVMInput(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -928,13 +990,15 @@ func TestScProcessor_processVMOutputNilVMOutput(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) acntSrc, _, tx := createAccountsAndTransaction() - _, err = sc.processVMOutput(nil, tx, acntSrc, 10) + _, _, err = sc.processVMOutput(nil, tx, acntSrc, 10) assert.Equal(t, process.ErrNilVMOutput, err) } @@ -952,14 +1016,16 @@ func TestScProcessor_processVMOutputNilTx(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) acntSrc, _, _ := createAccountsAndTransaction() vmOutput := &vmcommon.VMOutput{} - _, err = sc.processVMOutput(vmOutput, nil, acntSrc, 10) + _, _, err = 
sc.processVMOutput(vmOutput, nil, acntSrc, 10) assert.Equal(t, process.ErrNilTransaction, err) } @@ -977,7 +1043,9 @@ func TestScProcessor_processVMOutputNilSndAcc(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -987,7 +1055,7 @@ func TestScProcessor_processVMOutputNilSndAcc(t *testing.T) { GasRefund: big.NewInt(0), GasRemaining: big.NewInt(0), } - _, err = sc.processVMOutput(vmOutput, tx, nil, 10) + _, _, err = sc.processVMOutput(vmOutput, tx, nil, 10) assert.Nil(t, err) } @@ -1005,7 +1073,9 @@ func TestScProcessor_processVMOutputNilDstAcc(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1015,7 +1085,7 @@ func TestScProcessor_processVMOutputNilDstAcc(t *testing.T) { GasRefund: big.NewInt(0), GasRemaining: big.NewInt(0), } - _, err = sc.processVMOutput(vmOutput, tx, acntSnd, 10) + _, _, err = sc.processVMOutput(vmOutput, tx, acntSnd, 10) assert.Nil(t, err) } @@ -1044,7 +1114,9 @@ func TestScProcessor_GetAccountFromAddressAccNotFound(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, addrConv, shardCoordinator, - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1082,7 +1154,9 @@ func TestScProcessor_GetAccountFromAddrFaildAddressConv(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, addrConv, shardCoordinator, - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1120,7 +1194,9 @@ func TestScProcessor_GetAccountFromAddrFailedGetExistingAccount(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, addrConv, shardCoordinator, - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1158,7 +1234,9 @@ func TestScProcessor_GetAccountFromAddrAccNotInShard(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, addrConv, shardCoordinator, - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1197,7 +1275,9 @@ func TestScProcessor_GetAccountFromAddr(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, addrConv, shardCoordinator, - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1237,7 +1317,9 @@ func TestScProcessor_DeleteAccountsFailedAtRemove(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, addrConv, shardCoordinator, - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1277,7 +1359,9 @@ func TestScProcessor_DeleteAccountsNotInShard(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, addrConv, shardCoordinator, - &mock.IntermediateTransactionHandlerMock{}) + 
&mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1322,7 +1406,9 @@ func TestScProcessor_DeleteAccountsInShard(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, addrConv, shardCoordinator, - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1346,7 +1432,9 @@ func TestScProcessor_ProcessSCPaymentAccNotInShardShouldNotReturnError(t *testin &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1376,7 +1464,9 @@ func TestScProcessor_ProcessSCPaymentWrongTypeAssertion(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1408,7 +1498,9 @@ func TestScProcessor_ProcessSCPaymentNotEnoughBalance(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1445,7 +1537,9 @@ func TestScProcessor_ProcessSCPayment(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1480,7 +1574,9 @@ func TestScProcessor_RefundGasToSenderNilAndZeroRefund(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1499,11 +1595,11 @@ func TestScProcessor_RefundGasToSenderNilAndZeroRefund(t *testing.T) { acntSrc, _ := createAccounts(tx) currBalance := acntSrc.(*state.Account).Balance.Uint64() - _, err = sc.refundGasToSender(nil, tx, txHash, acntSrc) + _, _, err = sc.refundGasToSender(nil, tx, txHash, acntSrc) assert.Nil(t, err) assert.Equal(t, currBalance, acntSrc.(*state.Account).Balance.Uint64()) - _, err = sc.refundGasToSender(big.NewInt(0), tx, txHash, acntSrc) + _, _, err = sc.refundGasToSender(big.NewInt(0), tx, txHash, acntSrc) assert.Nil(t, err) assert.Equal(t, currBalance, acntSrc.(*state.Account).Balance.Uint64()) } @@ -1520,7 +1616,9 @@ func TestScProcessor_RefundGasToSenderAccNotInShard(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1536,17 +1634,19 @@ func TestScProcessor_RefundGasToSenderAccNotInShard(t *testing.T) { txHash := []byte("txHash") acntSrc, _ := createAccounts(tx) - sctx, err := sc.refundGasToSender(big.NewInt(10), tx, txHash, nil) + sctx, consumed, err := sc.refundGasToSender(big.NewInt(10), tx, 
txHash, nil) assert.Nil(t, err) assert.NotNil(t, sctx) + assert.Equal(t, 0, consumed.Cmp(big.NewInt(0))) acntSrc = nil - sctx, err = sc.refundGasToSender(big.NewInt(10), tx, txHash, acntSrc) + sctx, consumed, err = sc.refundGasToSender(big.NewInt(10), tx, txHash, acntSrc) assert.Nil(t, err) assert.NotNil(t, sctx) + assert.Equal(t, 0, consumed.Cmp(big.NewInt(0))) badAcc := &mock.AccountWrapMock{} - sctx, err = sc.refundGasToSender(big.NewInt(10), tx, txHash, badAcc) + sctx, consumed, err = sc.refundGasToSender(big.NewInt(10), tx, txHash, badAcc) assert.Equal(t, process.ErrWrongTypeAssertion, err) assert.Nil(t, sctx) } @@ -1563,7 +1663,9 @@ func TestScProcessor_RefundGasToSender(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1581,7 +1683,7 @@ func TestScProcessor_RefundGasToSender(t *testing.T) { currBalance := acntSrc.(*state.Account).Balance.Uint64() refundGas := big.NewInt(10) - _, err = sc.refundGasToSender(refundGas, tx, txHash, acntSrc) + _, _, err = sc.refundGasToSender(refundGas, tx, txHash, acntSrc) assert.Nil(t, err) totalRefund := refundGas.Uint64() * tx.GasPrice @@ -1603,11 +1705,13 @@ func TestScProcessor_processVMOutputNilOutput(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) - _, err = sc.ProcessVMOutput(nil, tx, acntSrc, round) + _, _, err = sc.ProcessVMOutput(nil, tx, acntSrc, round) assert.Equal(t, process.ErrNilVMOutput, err) } @@ -1627,12 +1731,14 @@ func TestScProcessor_processVMOutputNilTransaction(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) vmOutput := &vmcommon.VMOutput{} - _, err = sc.ProcessVMOutput(vmOutput, nil, acntSrc, round) + _, _, err = sc.ProcessVMOutput(vmOutput, nil, acntSrc, round) assert.Equal(t, process.ErrNilTransaction, err) } @@ -1652,7 +1758,9 @@ func TestScProcessor_processVMOutput(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1660,7 +1768,7 @@ func TestScProcessor_processVMOutput(t *testing.T) { GasRefund: big.NewInt(0), GasRemaining: big.NewInt(0), } - _, err = sc.ProcessVMOutput(vmOutput, tx, acntSrc, round) + _, _, err = sc.ProcessVMOutput(vmOutput, tx, acntSrc, round) assert.Nil(t, err) } @@ -1680,7 +1788,9 @@ func TestScProcessor_processSCOutputAccounts(t *testing.T) { fakeAccountsHandler, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1762,7 +1872,9 @@ func TestScProcessor_processSCOutputAccountsNotInShard(t *testing.T) { fakeAccountsHandler, &mock.AddressConverterMock{}, 
shardCoordinator, - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1802,7 +1914,9 @@ func TestScProcessor_CreateCrossShardTransactions(t *testing.T) { fakeAccountsHandler, &mock.AddressConverterMock{}, shardCoordinator, - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1845,7 +1959,9 @@ func TestScProcessor_ProcessSmartContractResultNilScr(t *testing.T) { fakeAccountsHandler, &mock.AddressConverterMock{}, shardCoordinator, - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1871,7 +1987,9 @@ func TestScProcessor_ProcessSmartContractResultErrGetAccount(t *testing.T) { fakeAccountsHandler, &mock.AddressConverterMock{}, shardCoordinator, - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1895,7 +2013,9 @@ func TestScProcessor_ProcessSmartContractResultAccNotInShard(t *testing.T) { fakeAccountsHandler, &mock.AddressConverterMock{}, shardCoordinator, - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1924,7 +2044,9 @@ func TestScProcessor_ProcessSmartContractResultBadAccType(t *testing.T) { fakeAccountsHandler, &mock.AddressConverterMock{}, shardCoordinator, - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1956,7 +2078,9 @@ func TestScProcessor_ProcessSmartContractResultOutputBalanceNil(t *testing.T) { fakeAccountsHandler, &mock.AddressConverterMock{}, shardCoordinator, - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1994,7 +2118,9 @@ func TestScProcessor_ProcessSmartContractResultWithCode(t *testing.T) { fakeAccountsHandler, &mock.AddressConverterMock{}, shardCoordinator, - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -2036,7 +2162,9 @@ func TestScProcessor_ProcessSmartContractResultWithData(t *testing.T) { fakeAccountsHandler, &mock.AddressConverterMock{}, shardCoordinator, - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) diff --git a/process/transaction/export_test.go b/process/transaction/export_test.go index 6aeb800c890..b7d4684c65f 100644 --- a/process/transaction/export_test.go +++ b/process/transaction/export_test.go @@ -2,6 +2,7 @@ package transaction import ( "math/big" + "sync" "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/data/transaction" @@ -9,6 +10,8 @@ import ( type TxProcessor *txProcessor +var mutex sync.Mutex + func (txProc *txProcessor) GetAddresses(tx *transaction.Transaction) (adrSrc, adrDst state.AddressContainer, err error) { return txProc.getAddresses(tx) } @@ -29,3 +32,15 @@ func (txProc *txProcessor) MoveBalances(acntSrc, acntDst *state.Account, 
value * func (txProc *txProcessor) IncreaseNonce(acntSrc *state.Account) error { return txProc.increaseNonce(acntSrc) } + +func (txProc *txProcessor) SetMinTxFee(minTxFee int64) { + mutex.Lock() + MinTxFee = minTxFee + mutex.Unlock() +} + +func (txProc *txProcessor) SetMinGasPrice(minGasPrice int64) { + mutex.Lock() + MinGasPrice = minGasPrice + mutex.Unlock() +} diff --git a/process/transaction/process.go b/process/transaction/process.go index 44fb3f82996..f87acacad38 100644 --- a/process/transaction/process.go +++ b/process/transaction/process.go @@ -16,8 +16,8 @@ import ( var log = logger.DefaultLogger() -const MinGasPrice = 1 -const MinTxFee = 1 +var MinGasPrice = int64(1) +var MinTxFee = int64(1) // txProcessor implements TransactionProcessor interface and can modify account states according to a transaction type txProcessor struct { @@ -37,8 +37,8 @@ func NewTxProcessor( addressConv state.AddressConverter, marshalizer marshal.Marshalizer, shardCoordinator sharding.Coordinator, - txFeeHandler process.UnsignedTxHandler, scProcessor process.SmartContractProcessor, + txFeeHandler process.UnsignedTxHandler, ) (*txProcessor, error) { if accounts == nil { @@ -115,7 +115,7 @@ func (txProc *txProcessor) processTxFee(tx *transaction.Transaction, acntSnd *st minFee = minFee.Mul(big.NewInt(txDataLen), big.NewInt(MinGasPrice)) minFee = minFee.Add(minFee, big.NewInt(MinTxFee)) - if minFee.Cmp(cost) < 0 { + if minFee.Cmp(cost) > 0 { return nil, process.ErrNotEnoughFeeInTransactions } diff --git a/process/transaction/process_test.go b/process/transaction/process_test.go index c8ae7050eeb..d2ed50ddfe0 100644 --- a/process/transaction/process_test.go +++ b/process/transaction/process_test.go @@ -51,6 +51,7 @@ func createTxProcessor() txproc.TxProcessor { &mock.MarshalizerMock{}, mock.NewOneShardCoordinatorMock(), &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, ) return txProc @@ -68,6 +69,7 @@ func TestNewTxProcessor_NilAccountsShouldErr(t *testing.T) { &mock.MarshalizerMock{}, mock.NewOneShardCoordinatorMock(), &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, ) assert.Equal(t, process.ErrNilAccountsAdapter, err) @@ -84,6 +86,7 @@ func TestNewTxProcessor_NilHasherShouldErr(t *testing.T) { &mock.MarshalizerMock{}, mock.NewOneShardCoordinatorMock(), &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, ) assert.Equal(t, process.ErrNilHasher, err) @@ -100,6 +103,7 @@ func TestNewTxProcessor_NilAddressConverterMockShouldErr(t *testing.T) { &mock.MarshalizerMock{}, mock.NewOneShardCoordinatorMock(), &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, ) assert.Equal(t, process.ErrNilAddressConverter, err) @@ -116,6 +120,7 @@ func TestNewTxProcessor_NilMarshalizerMockShouldErr(t *testing.T) { nil, mock.NewOneShardCoordinatorMock(), &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, ) assert.Equal(t, process.ErrNilMarshalizer, err) @@ -132,6 +137,7 @@ func TestNewTxProcessor_NilShardCoordinatorMockShouldErr(t *testing.T) { &mock.MarshalizerMock{}, nil, &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, ) assert.Equal(t, process.ErrNilShardCoordinator, err) @@ -148,12 +154,30 @@ func TestNewTxProcessor_NilSCProcessorShouldErr(t *testing.T) { &mock.MarshalizerMock{}, mock.NewOneShardCoordinatorMock(), nil, + &mock.UnsignedTxHandlerMock{}, ) assert.Equal(t, process.ErrNilSmartContractProcessor, err) assert.Nil(t, txProc) } +func TestNewTxProcessor_NilTxFeeHandlerShouldErr(t *testing.T) { + t.Parallel() + + txProc, err := txproc.NewTxProcessor( + &mock.AccountsStub{}, + 
mock.HasherMock{}, + &mock.AddressConverterMock{}, + &mock.MarshalizerMock{}, + mock.NewOneShardCoordinatorMock(), + &mock.SCProcessorMock{}, + nil, + ) + + assert.Equal(t, process.ErrNilUnsignedTxHandler, err) + assert.Nil(t, txProc) +} + func TestNewTxProcessor_OkValsShouldWork(t *testing.T) { t.Parallel() @@ -164,6 +188,7 @@ func TestNewTxProcessor_OkValsShouldWork(t *testing.T) { &mock.MarshalizerMock{}, mock.NewOneShardCoordinatorMock(), &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, ) assert.Nil(t, err) @@ -184,6 +209,7 @@ func TestTxProcessor_GetAddressErrAddressConvShouldErr(t *testing.T) { &mock.MarshalizerMock{}, mock.NewOneShardCoordinatorMock(), &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, ) addressConv.Fail = true @@ -221,6 +247,7 @@ func TestTxProcessor_GetAccountsShouldErrNilAddressContainer(t *testing.T) { &mock.MarshalizerMock{}, mock.NewOneShardCoordinatorMock(), &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, ) adr1 := mock.NewAddressMock([]byte{65}) @@ -243,6 +270,7 @@ func TestTxProcessor_GetAccountsMalfunctionAccountsShouldErr(t *testing.T) { &mock.MarshalizerMock{}, mock.NewOneShardCoordinatorMock(), &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, ) adr1 := mock.NewAddressMock([]byte{65}) @@ -282,6 +310,7 @@ func TestTxProcessor_GetAccountsOkValsSrcShouldWork(t *testing.T) { &mock.MarshalizerMock{}, shardCoordinator, &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, ) shardCoordinator.ComputeIdCalled = func(container state.AddressContainer) uint32 { @@ -330,6 +359,7 @@ func TestTxProcessor_GetAccountsOkValsDsthouldWork(t *testing.T) { &mock.MarshalizerMock{}, shardCoordinator, &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, ) shardCoordinator.ComputeIdCalled = func(container state.AddressContainer) uint32 { @@ -363,6 +393,7 @@ func TestTxProcessor_GetAccountsOkValsShouldWork(t *testing.T) { &mock.MarshalizerMock{}, mock.NewOneShardCoordinatorMock(), &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, ) a1, a2, err := execTx.GetAccounts(adr1, adr2) @@ -387,6 +418,7 @@ func TestTxProcessor_GetSameAccountShouldWork(t *testing.T) { &mock.MarshalizerMock{}, mock.NewOneShardCoordinatorMock(), &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, ) a1, a2, err := execTx.GetAccounts(adr1, adr1) @@ -608,6 +640,7 @@ func TestTxProcessor_ProcessTransactionErrAddressConvShouldErr(t *testing.T) { &mock.MarshalizerMock{}, mock.NewOneShardCoordinatorMock(), &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, ) addressConv.Fail = true @@ -626,6 +659,7 @@ func TestTxProcessor_ProcessTransactionMalfunctionAccountsShouldErr(t *testing.T &mock.MarshalizerMock{}, mock.NewOneShardCoordinatorMock(), &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, ) tx := transaction.Transaction{} @@ -661,6 +695,7 @@ func TestTxProcessor_ProcessCheckNotPassShouldErr(t *testing.T) { &mock.MarshalizerMock{}, mock.NewOneShardCoordinatorMock(), &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, ) err = execTx.ProcessTransaction(&tx, 4) @@ -710,6 +745,7 @@ func TestTxProcessor_ProcessCheckShouldPassWhenAdrSrcIsNotInNodeShard(t *testing &mock.MarshalizerMock{}, shardCoordinator, &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, ) err = execTx.ProcessTransaction(&tx, 4) @@ -751,12 +787,15 @@ func TestTxProcessor_ProcessMoveBalancesShouldWork(t *testing.T) { &mock.MarshalizerMock{}, mock.NewOneShardCoordinatorMock(), &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, ) + execTx.SetMinTxFee(0) + 
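// SetMinTxFee(0) above and SetMinGasPrice(0) just below zero the package-level fee
// settings so the balance and journal-entry expectations in this test are not skewed
// by the new fee step. For illustration only (numbers are not part of this test): with
// the default MinGasPrice = 1 and MinTxFee = 1, processTxFee requires at least
// minFee = len(tx.Data)*MinGasPrice + MinTxFee, e.g. 4*1 + 1 = 5 for a 4-byte payload,
// and rejects a transaction offering less with process.ErrNotEnoughFeeInTransactions.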
execTx.SetMinGasPrice(0) err = execTx.ProcessTransaction(&tx, 4) assert.Nil(t, err) - assert.Equal(t, 3, journalizeCalled) - assert.Equal(t, 3, saveAccountCalled) + assert.Equal(t, 4, journalizeCalled) + assert.Equal(t, 4, saveAccountCalled) } func TestTxProcessor_ProcessMoveBalancesShouldPassWhenAdrSrcIsNotInNodeShard(t *testing.T) { @@ -802,8 +841,11 @@ func TestTxProcessor_ProcessMoveBalancesShouldPassWhenAdrSrcIsNotInNodeShard(t * &mock.MarshalizerMock{}, shardCoordinator, &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, ) + execTx.SetMinTxFee(0) + execTx.SetMinGasPrice(0) err = execTx.ProcessTransaction(&tx, 4) assert.Nil(t, err) assert.Equal(t, 1, journalizeCalled) @@ -853,6 +895,7 @@ func TestTxProcessor_ProcessIncreaseNonceShouldPassWhenAdrSrcIsNotInNodeShard(t &mock.MarshalizerMock{}, shardCoordinator, &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, ) err = execTx.ProcessTransaction(&tx, 4) @@ -898,15 +941,19 @@ func TestTxProcessor_ProcessOkValsShouldWork(t *testing.T) { &mock.MarshalizerMock{}, mock.NewOneShardCoordinatorMock(), &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, ) + execTx.SetMinTxFee(0) + execTx.SetMinGasPrice(0) + err = execTx.ProcessTransaction(&tx, 4) assert.Nil(t, err) assert.Equal(t, uint64(5), acntSrc.Nonce) assert.Equal(t, big.NewInt(29), acntSrc.Balance) assert.Equal(t, big.NewInt(71), acntDst.Balance) - assert.Equal(t, 3, journalizeCalled) - assert.Equal(t, 3, saveAccountCalled) + assert.Equal(t, 4, journalizeCalled) + assert.Equal(t, 4, saveAccountCalled) } func TestTxProcessor_ProcessTransactionScTxShouldWork(t *testing.T) { @@ -950,7 +997,9 @@ func TestTxProcessor_ProcessTransactionScTxShouldWork(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, addrConverter, mock.NewOneShardCoordinatorMock(), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) scProcessorMock := &mock.SCProcessorMock{} scProcessorMock.ComputeTransactionTypeCalled = scProcessor.ComputeTransactionType @@ -967,6 +1016,7 @@ func TestTxProcessor_ProcessTransactionScTxShouldWork(t *testing.T) { &mock.MarshalizerMock{}, mock.NewOneShardCoordinatorMock(), scProcessorMock, + &mock.UnsignedTxHandlerMock{}, ) err = execTx.ProcessTransaction(&tx, 4) @@ -1015,7 +1065,9 @@ func TestTxProcessor_ProcessTransactionScTxShouldReturnErrWhenExecutionFails(t * &mock.TemporaryAccountsHandlerMock{}, addrConverter, mock.NewOneShardCoordinatorMock(), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) scProcessorMock := &mock.SCProcessorMock{} scProcessorMock.ComputeTransactionTypeCalled = scProcessor.ComputeTransactionType @@ -1032,6 +1084,7 @@ func TestTxProcessor_ProcessTransactionScTxShouldReturnErrWhenExecutionFails(t * &mock.MarshalizerMock{}, mock.NewOneShardCoordinatorMock(), scProcessorMock, + &mock.UnsignedTxHandlerMock{}, ) err = execTx.ProcessTransaction(&tx, 4) @@ -1090,7 +1143,9 @@ func TestTxProcessor_ProcessTransactionScTxShouldNotBeCalledWhenAdrDstIsNotInNod &mock.TemporaryAccountsHandlerMock{}, addrConverter, shardCoordinator, - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) scProcessorMock := &mock.SCProcessorMock{} scProcessorMock.ComputeTransactionTypeCalled = scProcessor.ComputeTransactionType wasCalled := false @@ -1106,11 +1161,15 @@ func TestTxProcessor_ProcessTransactionScTxShouldNotBeCalledWhenAdrDstIsNotInNod 
&mock.MarshalizerMock{}, shardCoordinator, scProcessorMock, + &mock.UnsignedTxHandlerMock{}, ) + execTx.SetMinTxFee(0) + execTx.SetMinGasPrice(0) + err = execTx.ProcessTransaction(&tx, 4) assert.Nil(t, err) assert.False(t, wasCalled) - assert.Equal(t, 2, journalizeCalled) - assert.Equal(t, 2, saveAccountCalled) + assert.Equal(t, 3, journalizeCalled) + assert.Equal(t, 3, saveAccountCalled) } diff --git a/process/unsigned/feeTxHandler.go b/process/unsigned/feeTxHandler.go index b32b121a2be..c55036e426d 100644 --- a/process/unsigned/feeTxHandler.go +++ b/process/unsigned/feeTxHandler.go @@ -2,18 +2,25 @@ package unsigned import ( "github.com/ElrondNetwork/elrond-go/data" - "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/data/feeTx" "github.com/ElrondNetwork/elrond-go/process" + "math/big" "sync" ) +const burnPercentage = 0.5 // 100 = 100%, 0 = 0% +const communityPercentage = 0.1 // 10 = 100%, 0 = 0% +const leaderPercentage = 0.4 // 10 = 100%, 0 = 0% + type feeTxHandler struct { address process.SpecialAddressHandler mutTxs sync.Mutex feeTxs []*feeTx.FeeTx + + createdTxs []*feeTx.FeeTx } +// NewFeeTxHandler constructor for the fx tee handler func NewFeeTxHandler(address process.SpecialAddressHandler) (*feeTxHandler, error) { ftxh := &feeTxHandler{ address: address, @@ -27,6 +34,7 @@ func NewFeeTxHandler(address process.SpecialAddressHandler) (*feeTxHandler, erro func (ftxh *feeTxHandler) CleanProcessedUTxs() { ftxh.mutTxs.Lock() ftxh.feeTxs = make([]*feeTx.FeeTx, 0) + ftxh.createdTxs = make([]*feeTx.FeeTx, 0) ftxh.mutTxs.Unlock() } @@ -42,13 +50,63 @@ func (ftxh *feeTxHandler) AddProcessedUTx(tx data.TransactionHandler) { ftxh.mutTxs.Unlock() } +func getPercentageOfValue(value *big.Int, percentage float64) *big.Int { + x := new(big.Float).SetInt(value) + y := big.NewFloat(percentage) + + z := new(big.Float).Mul(x, y) + + op := big.NewInt(0) + result, _ := z.Int(op) + + return result +} + +func (ftxh *feeTxHandler) createLeaderTx(totalGathered *big.Int) *feeTx.FeeTx { + currTx := &feeTx.FeeTx{} + + currTx.Value = getPercentageOfValue(totalGathered, leaderPercentage) + currTx.RcvAddr = ftxh.address.GetMyOwnAddress() + + return currTx +} + +func (ftxh *feeTxHandler) createCommunityTx(totalGathered *big.Int) *feeTx.FeeTx { + currTx := &feeTx.FeeTx{} + + currTx.Value = getPercentageOfValue(totalGathered, communityPercentage) + currTx.RcvAddr = ftxh.address.GetElrondCommunityAddress() + + return currTx +} + // CreateAllUtxs creates all the needed fee transactions // According to economic paper 50% burn, 40% to the leader, 10% to Elrond community fund func (ftxh *feeTxHandler) CreateAllUTxs() []data.TransactionHandler { - panic("implement me") + ftxh.mutTxs.Lock() + defer ftxh.mutTxs.Unlock() + + totalFee := big.NewInt(0) + for _, val := range ftxh.feeTxs { + totalFee = totalFee.Add(totalFee, val.Value) + } + + if totalFee.Cmp(big.NewInt(1)) < 0 { + return nil + } + + leaderTx := ftxh.createLeaderTx(totalFee) + communityTx := ftxh.createCommunityTx(totalFee) + + currFeeTxs := make([]data.TransactionHandler, 0) + currFeeTxs = append(currFeeTxs, leaderTx) + currFeeTxs = append(currFeeTxs, communityTx) + + return currFeeTxs } -// VerifyCreatedUTxs -func (ftxh *feeTxHandler) VerifyCreatedUTxs(block block.Body) { - panic("implement me") +// VerifyCreatedUTxs creates all fee txs from added values, than verifies if in block the values are the same +func (ftxh *feeTxHandler) VerifyCreatedUTxs() error { + + return nil } From a87bdabeb67ec66648252c17f753d96f6dbee5c7 Mon Sep 
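A concrete reading of the split that CreateAllUTxs implements (50% burn, 40% leader, 10% community, per the constants and the comment above): for a gathered fee of 1000, the handler emits a leader tx of 400 and a community tx of 100, while the remaining 500 is never re-issued, i.e. burned. The sketch below only illustrates that arithmetic with the patch's own getPercentageOfValue helper; it is not part of the commit. Note that the big.Float multiplication truncates towards zero, so totals that do not divide evenly can lose a unit per share.

	total := big.NewInt(1000)
	leaderCut := getPercentageOfValue(total, leaderPercentage)       // 400
	communityCut := getPercentageOfValue(total, communityPercentage) // 100
	burned := new(big.Int).Sub(total, new(big.Int).Add(leaderCut, communityCut)) // 500, not re-issued
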
17 00:00:00 2001 From: Robert Sasu Date: Mon, 8 Jul 2019 19:32:41 +0300 Subject: [PATCH 009/234] feeTxHandler implementation --- process/errors.go | 3 +++ process/interface.go | 4 ++-- process/unsigned/feeTxHandler.go | 31 +++++++++++++++++++++++++++---- 3 files changed, 32 insertions(+), 6 deletions(-) diff --git a/process/errors.go b/process/errors.go index 5c84184bfa5..63146dab537 100644 --- a/process/errors.go +++ b/process/errors.go @@ -408,3 +408,6 @@ var ErrNotEnoughFeeInTransactions = errors.New("transaction fee is not enough") // ErrNilUnsignedTxHandler signals that the unsigned tx handler is nil var ErrNilUnsignedTxHandler = errors.New("nil unsigned tx handler") + +// ErrTxsFeesDoesNotMatch signals that txs fees do not match +var ErrTxsFeesDoesNotMatch = errors.New("calculated tx fees with block tx fee does not match") diff --git a/process/interface.go b/process/interface.go index 985639f08dc..3b4ed2009da 100644 --- a/process/interface.go +++ b/process/interface.go @@ -77,12 +77,12 @@ type UnsignedTxHandler interface { CleanProcessedUTxs() AddProcessedUTx(tx data.TransactionHandler) CreateAllUTxs() []data.TransactionHandler - VerifyCreatedUTxs(header data.HeaderHandler, block block.Body) error + VerifyCreatedUTxs() error } type SpecialAddressHandler interface { - GetMyOwnAddress() []byte GetElrondCommunityAddress() []byte + GetLeaderAddress() []byte } // Preprocessor is an interface used to prepare and process transaction data diff --git a/process/unsigned/feeTxHandler.go b/process/unsigned/feeTxHandler.go index c55036e426d..71a338164ca 100644 --- a/process/unsigned/feeTxHandler.go +++ b/process/unsigned/feeTxHandler.go @@ -8,7 +8,6 @@ import ( "sync" ) -const burnPercentage = 0.5 // 100 = 100%, 0 = 0% const communityPercentage = 0.1 // 10 = 100%, 0 = 0% const leaderPercentage = 0.4 // 10 = 100%, 0 = 0% @@ -17,7 +16,7 @@ type feeTxHandler struct { mutTxs sync.Mutex feeTxs []*feeTx.FeeTx - createdTxs []*feeTx.FeeTx + feeTxsFromBlock map[string]*feeTx.FeeTx } // NewFeeTxHandler constructor for the fx tee handler @@ -26,6 +25,7 @@ func NewFeeTxHandler(address process.SpecialAddressHandler) (*feeTxHandler, erro address: address, } ftxh.feeTxs = make([]*feeTx.FeeTx, 0) + ftxh.feeTxsFromBlock = make(map[string]*feeTx.FeeTx) return ftxh, nil } @@ -34,7 +34,7 @@ func NewFeeTxHandler(address process.SpecialAddressHandler) (*feeTxHandler, erro func (ftxh *feeTxHandler) CleanProcessedUTxs() { ftxh.mutTxs.Lock() ftxh.feeTxs = make([]*feeTx.FeeTx, 0) - ftxh.createdTxs = make([]*feeTx.FeeTx, 0) + ftxh.feeTxsFromBlock = make(map[string]*feeTx.FeeTx) ftxh.mutTxs.Unlock() } @@ -66,7 +66,7 @@ func (ftxh *feeTxHandler) createLeaderTx(totalGathered *big.Int) *feeTx.FeeTx { currTx := &feeTx.FeeTx{} currTx.Value = getPercentageOfValue(totalGathered, leaderPercentage) - currTx.RcvAddr = ftxh.address.GetMyOwnAddress() + currTx.RcvAddr = ftxh.address.GetLeaderAddress() return currTx } @@ -107,6 +107,29 @@ func (ftxh *feeTxHandler) CreateAllUTxs() []data.TransactionHandler { // VerifyCreatedUTxs creates all fee txs from added values, than verifies if in block the values are the same func (ftxh *feeTxHandler) VerifyCreatedUTxs() error { + calculatedFeeTxs := ftxh.CreateAllUTxs() + + ftxh.mutTxs.Lock() + defer ftxh.mutTxs.Unlock() + + totalFeesFromBlock := big.NewInt(0) + for _, value := range ftxh.feeTxsFromBlock { + totalFeesFromBlock = totalFeesFromBlock.Add(totalFeesFromBlock, value.Value) + } + + totalCalculatedFees := big.NewInt(0) + for _, value := range calculatedFeeTxs { + totalCalculatedFees = 
totalCalculatedFees.Add(totalCalculatedFees, value.GetValue()) + + commTxFromBlock := ftxh.feeTxsFromBlock[string(value.GetRecvAddress())] + if commTxFromBlock.Value.Cmp(value.GetValue()) != 0 { + return process.ErrTxsFeesDoesNotMatch + } + } + + if totalCalculatedFees.Cmp(totalFeesFromBlock) != 0 { + return process.ErrTxsFeesDoesNotMatch + } return nil } From 2066f9f4c8167ef436652caa1c539ff050b62ce9 Mon Sep 17 00:00:00 2001 From: Robert Sasu Date: Mon, 8 Jul 2019 21:17:18 +0300 Subject: [PATCH 010/234] feeTxHandler implementation --- data/feeTx/feeTx.go | 4 +- data/feeTx/feeTx_test.go | 1 - process/constants.go | 2 + process/coordinator/transactionType_test.go | 188 ++++++++++++++++++++ process/errors.go | 3 + process/interface.go | 4 +- process/mock/txProcessorMock.go | 7 +- process/mock/txTypeHandlerMock.go | 18 ++ process/mock/unsignedTxHandlerMock.go | 18 +- process/smartContract/process.go | 31 ---- process/smartContract/process_test.go | 187 ------------------- process/transaction/process.go | 61 ++++++- process/transaction/process_test.go | 81 +++++---- process/unsigned/feeTxHandler.go | 11 ++ 14 files changed, 332 insertions(+), 284 deletions(-) create mode 100644 process/coordinator/transactionType_test.go create mode 100644 process/mock/txTypeHandlerMock.go diff --git a/data/feeTx/feeTx.go b/data/feeTx/feeTx.go index 440af8b8448..1482357cee6 100644 --- a/data/feeTx/feeTx.go +++ b/data/feeTx/feeTx.go @@ -13,7 +13,7 @@ type FeeTx struct { Nonce uint64 `capid:"0" json:"nonce"` Value *big.Int `capid:"1" json:"value"` RcvAddr []byte `capid:"2" json:"receiver"` - TxHash []byte `capid:"3" json:"txHash"` + ShardId uint32 `capid:"3" json:"ShardId"` } // Save saves the serialized data of a FeeTx into a stream through Capnp protocol @@ -54,7 +54,6 @@ func FeeTxCapnToGo(src capnp.FeeTxCapn, dest *FeeTx) *FeeTx { } dest.RcvAddr = src.RcvAddr() - dest.TxHash = src.TxHash() return dest } @@ -67,7 +66,6 @@ func FeeTxGoToCapn(seg *capn.Segment, src *FeeTx) capnp.FeeTxCapn { dest.SetNonce(src.Nonce) dest.SetValue(value) dest.SetRcvAddr(src.RcvAddr) - dest.SetTxHash(src.TxHash) return dest } diff --git a/data/feeTx/feeTx_test.go b/data/feeTx/feeTx_test.go index 886593bf278..74013540c97 100644 --- a/data/feeTx/feeTx_test.go +++ b/data/feeTx/feeTx_test.go @@ -14,7 +14,6 @@ func TestFeeTx_SaveLoad(t *testing.T) { Nonce: uint64(1), Value: big.NewInt(1), RcvAddr: []byte("receiver_address"), - TxHash: []byte("scrHash"), } var b bytes.Buffer diff --git a/process/constants.go b/process/constants.go index af476415c18..631651008e7 100644 --- a/process/constants.go +++ b/process/constants.go @@ -22,6 +22,8 @@ const ( SCDeployment // SCInvoking defines ID of a transaction of type smart contract call SCInvoking + // TxFee defines ID of a transaction of type tx fee + TxFee // InvalidTransaction defines unknown transaction type InvalidTransaction ) diff --git a/process/coordinator/transactionType_test.go b/process/coordinator/transactionType_test.go new file mode 100644 index 00000000000..ebffc51dde1 --- /dev/null +++ b/process/coordinator/transactionType_test.go @@ -0,0 +1,188 @@ +package coordinator + +import ( + "crypto/rand" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/data/transaction" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/mock" + "github.com/stretchr/testify/assert" + "math/big" + "testing" +) + +func generateRandomByteSlice(size int) []byte { + buff := make([]byte, size) + _, _ = rand.Reader.Read(buff) + + 
return buff +} + +func createAccounts(tx *transaction.Transaction) (state.AccountHandler, state.AccountHandler) { + journalizeCalled := 0 + saveAccountCalled := 0 + tracker := &mock.AccountTrackerStub{ + JournalizeCalled: func(entry state.JournalEntry) { + journalizeCalled++ + }, + SaveAccountCalled: func(accountHandler state.AccountHandler) error { + saveAccountCalled++ + return nil + }, + } + + acntSrc, _ := state.NewAccount(mock.NewAddressMock(tx.SndAddr), tracker) + acntSrc.Balance = acntSrc.Balance.Add(acntSrc.Balance, tx.Value) + totalFee := big.NewInt(0) + totalFee = totalFee.Mul(big.NewInt(int64(tx.GasLimit)), big.NewInt(int64(tx.GasPrice))) + acntSrc.Balance = acntSrc.Balance.Add(acntSrc.Balance, totalFee) + + acntDst, _ := state.NewAccount(mock.NewAddressMock(tx.RcvAddr), tracker) + + return acntSrc, acntDst +} + +func TestTxTypeHandler_ComputeTransactionTypeNil(t *testing.T) { + t.Parallel() + + tth, err := NewTxTypeHandler( + &mock.AddressConverterMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + ) + + assert.NotNil(t, tth) + assert.Nil(t, err) + + _, err = tth.ComputeTransactionType(nil) + assert.Equal(t, process.ErrNilTransaction, err) +} + +func TestTxTypeHandler_ComputeTransactionTypeNilTx(t *testing.T) { + t.Parallel() + + tth, err := NewTxTypeHandler( + &mock.AddressConverterMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + ) + + assert.NotNil(t, tth) + assert.Nil(t, err) + + tx := &transaction.Transaction{} + tx.Nonce = 0 + tx.SndAddr = []byte("SRC") + tx.RcvAddr = []byte("DST") + tx.Value = big.NewInt(45) + + tx = nil + _, err = tth.ComputeTransactionType(tx) + assert.Equal(t, process.ErrNilTransaction, err) +} + +func TestTxTypeHandler_ComputeTransactionTypeErrWrongTransaction(t *testing.T) { + t.Parallel() + + tth, err := NewTxTypeHandler( + &mock.AddressConverterMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + ) + + assert.NotNil(t, tth) + assert.Nil(t, err) + + tx := &transaction.Transaction{} + tx.Nonce = 0 + tx.SndAddr = []byte("SRC") + tx.RcvAddr = nil + tx.Value = big.NewInt(45) + + _, err = tth.ComputeTransactionType(tx) + assert.Equal(t, process.ErrWrongTransaction, err) +} + +func TestTxTypeHandler_ComputeTransactionTypeScDeployment(t *testing.T) { + t.Parallel() + + addressConverter := &mock.AddressConverterMock{} + tth, err := NewTxTypeHandler( + addressConverter, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + ) + + assert.NotNil(t, tth) + assert.Nil(t, err) + + tx := &transaction.Transaction{} + tx.Nonce = 0 + tx.SndAddr = []byte("SRC") + tx.RcvAddr = make([]byte, addressConverter.AddressLen()) + tx.Data = []byte("data") + tx.Value = big.NewInt(45) + + txType, err := tth.ComputeTransactionType(tx) + assert.Nil(t, err) + assert.Equal(t, process.SCDeployment, txType) +} + +func TestTxTypeHandler_ComputeTransactionTypeScInvoking(t *testing.T) { + t.Parallel() + + addrConverter := &mock.AddressConverterMock{} + tx := &transaction.Transaction{} + tx.Nonce = 0 + tx.SndAddr = []byte("SRC") + tx.RcvAddr = generateRandomByteSlice(addrConverter.AddressLen()) + tx.Data = []byte("data") + tx.Value = big.NewInt(45) + + _, acntDst := createAccounts(tx) + acntDst.SetCode([]byte("code")) + + addressConverter := &mock.AddressConverterMock{} + tth, err := NewTxTypeHandler( + addressConverter, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + ) + + assert.NotNil(t, tth) + assert.Nil(t, err) + + txType, err := tth.ComputeTransactionType(tx) + assert.Nil(t, err) + 
assert.Equal(t, process.SCInvoking, txType) +} + +func TestTxTypeHandler_ComputeTransactionTypeMoveBalance(t *testing.T) { + t.Parallel() + + addrConverter := &mock.AddressConverterMock{} + tx := &transaction.Transaction{} + tx.Nonce = 0 + tx.SndAddr = []byte("SRC") + tx.RcvAddr = generateRandomByteSlice(addrConverter.AddressLen()) + tx.Data = []byte("data") + tx.Value = big.NewInt(45) + + _, acntDst := createAccounts(tx) + + addressConverter := &mock.AddressConverterMock{} + tth, err := NewTxTypeHandler( + addressConverter, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{GetAccountWithJournalCalled: func(addressContainer state.AddressContainer) (handler state.AccountHandler, e error) { + return acntDst, nil + }}, + ) + + assert.NotNil(t, tth) + assert.Nil(t, err) + + txType, err := tth.ComputeTransactionType(tx) + assert.Nil(t, err) + assert.Equal(t, process.MoveBalance, txType) +} diff --git a/process/errors.go b/process/errors.go index 63146dab537..be48da92c05 100644 --- a/process/errors.go +++ b/process/errors.go @@ -411,3 +411,6 @@ var ErrNilUnsignedTxHandler = errors.New("nil unsigned tx handler") // ErrTxsFeesDoesNotMatch signals that txs fees do not match var ErrTxsFeesDoesNotMatch = errors.New("calculated tx fees with block tx fee does not match") + +// ErrNilTxTypeHandler signals that tx type handler is nil +var ErrNilTxTypeHandler = errors.New("nil tx type handler") diff --git a/process/interface.go b/process/interface.go index 3b4ed2009da..9a5993979b4 100644 --- a/process/interface.go +++ b/process/interface.go @@ -17,7 +17,7 @@ import ( // TransactionProcessor is the main interface for transaction execution engine type TransactionProcessor interface { - ProcessTransaction(transaction *transaction.Transaction, round uint32) error + ProcessTransaction(tx data.TransactionHandler, roundIndex uint32) error } // SmartContractResultProcessor is the main interface for smart contract result execution engine @@ -55,7 +55,6 @@ type TransactionCoordinator interface { // SmartContractProcessor is the main interface for the smart contract caller engine type SmartContractProcessor interface { - ComputeTransactionType(tx *transaction.Transaction) (TransactionType, error) ExecuteSmartContractTransaction(tx *transaction.Transaction, acntSrc, acntDst state.AccountHandler, round uint32) error DeploySmartContract(tx *transaction.Transaction, acntSrc state.AccountHandler, round uint32) error } @@ -78,6 +77,7 @@ type UnsignedTxHandler interface { AddProcessedUTx(tx data.TransactionHandler) CreateAllUTxs() []data.TransactionHandler VerifyCreatedUTxs() error + AddTxFeeFromBlock(tx data.TransactionHandler) } type SpecialAddressHandler interface { diff --git a/process/mock/txProcessorMock.go b/process/mock/txProcessorMock.go index f0b55f23a67..50ef78033ff 100644 --- a/process/mock/txProcessorMock.go +++ b/process/mock/txProcessorMock.go @@ -1,19 +1,18 @@ package mock import ( + "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/smartContractResult" "math/big" - - "github.com/ElrondNetwork/elrond-go/data/transaction" ) type TxProcessorMock struct { - ProcessTransactionCalled func(transaction *transaction.Transaction, round uint32) error + ProcessTransactionCalled func(transaction data.TransactionHandler, round uint32) error SetBalancesToTrieCalled func(accBalance map[string]*big.Int) (rootHash []byte, err error) ProcessSmartContractResultCalled func(scr *smartContractResult.SmartContractResult) error } -func (etm *TxProcessorMock) 
ProcessTransaction(transaction *transaction.Transaction, round uint32) error { +func (etm *TxProcessorMock) ProcessTransaction(transaction data.TransactionHandler, round uint32) error { return etm.ProcessTransactionCalled(transaction, round) } diff --git a/process/mock/txTypeHandlerMock.go b/process/mock/txTypeHandlerMock.go new file mode 100644 index 00000000000..2fcaeaf25d3 --- /dev/null +++ b/process/mock/txTypeHandlerMock.go @@ -0,0 +1,18 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/process" +) + +type TxTypeHandlerMock struct { + ComputeTransactionTypeCalled func(tx data.TransactionHandler) (process.TransactionType, error) +} + +func (th *TxTypeHandlerMock) ComputeTransactionType(tx data.TransactionHandler) (process.TransactionType, error) { + if th.ComputeTransactionTypeCalled == nil { + return process.MoveBalance, nil + } + + return th.ComputeTransactionTypeCalled(tx) +} diff --git a/process/mock/unsignedTxHandlerMock.go b/process/mock/unsignedTxHandlerMock.go index 02288975fc0..9ac5fd55a10 100644 --- a/process/mock/unsignedTxHandlerMock.go +++ b/process/mock/unsignedTxHandlerMock.go @@ -2,14 +2,22 @@ package mock import ( "github.com/ElrondNetwork/elrond-go/data" - "github.com/ElrondNetwork/elrond-go/data/block" ) type UnsignedTxHandlerMock struct { CleanProcessedUtxsCalled func() AddProcessedUTxCalled func(tx data.TransactionHandler) CreateAllUTxsCalled func() []data.TransactionHandler - VerifyCreatedUTxsCalled func(header data.HeaderHandler, body block.Body) error + VerifyCreatedUTxsCalled func() error + AddTxFeeFromBlockCalled func(tx data.TransactionHandler) +} + +func (ut *UnsignedTxHandlerMock) AddTxFeeFromBlock(tx data.TransactionHandler) { + if ut.AddTxFeeFromBlockCalled == nil { + return + } + + ut.AddTxFeeFromBlockCalled(tx) } func (ut *UnsignedTxHandlerMock) CleanProcessedUTxs() { @@ -18,7 +26,6 @@ func (ut *UnsignedTxHandlerMock) CleanProcessedUTxs() { } ut.CleanProcessedUtxsCalled() - return } func (ut *UnsignedTxHandlerMock) AddProcessedUTx(tx data.TransactionHandler) { @@ -27,7 +34,6 @@ func (ut *UnsignedTxHandlerMock) AddProcessedUTx(tx data.TransactionHandler) { } ut.AddProcessedUTxCalled(tx) - return } func (ut *UnsignedTxHandlerMock) CreateAllUTxs() []data.TransactionHandler { @@ -37,9 +43,9 @@ func (ut *UnsignedTxHandlerMock) CreateAllUTxs() []data.TransactionHandler { return ut.CreateAllUTxsCalled() } -func (ut *UnsignedTxHandlerMock) VerifyCreatedUTxs(header data.HeaderHandler, body block.Body) error { +func (ut *UnsignedTxHandlerMock) VerifyCreatedUTxs() error { if ut.VerifyCreatedUTxsCalled == nil { return nil } - return ut.VerifyCreatedUTxsCalled(header, body) + return ut.VerifyCreatedUTxsCalled() } diff --git a/process/smartContract/process.go b/process/smartContract/process.go index c930f2eabeb..6fc57b3c852 100644 --- a/process/smartContract/process.go +++ b/process/smartContract/process.go @@ -104,37 +104,6 @@ func NewSmartContractProcessor( mapExecState: make(map[uint32]scExecutionState)}, nil } -// ComputeTransactionType calculates the type of the transaction -func (sc *scProcessor) ComputeTransactionType(tx *transaction.Transaction) (process.TransactionType, error) { - err := sc.checkTxValidity(tx) - if err != nil { - return 0, err - } - - isEmptyAddress := sc.isDestAddressEmpty(tx) - if isEmptyAddress { - if len(tx.Data) > 0 { - return process.SCDeployment, nil - } - return 0, process.ErrWrongTransaction - } - - acntDst, err := sc.getAccountFromAddress(tx.RcvAddr) - if err != nil { - 
return 0, err - } - - if acntDst == nil { - return process.MoveBalance, nil - } - - if !acntDst.IsInterfaceNil() && len(acntDst.GetCode()) > 0 { - return process.SCInvoking, nil - } - - return process.MoveBalance, nil -} - func (sc *scProcessor) checkTxValidity(tx *transaction.Transaction) error { if tx == nil || tx.IsInterfaceNil() { return process.ErrNilTransaction diff --git a/process/smartContract/process_test.go b/process/smartContract/process_test.go index ae787a013cf..5c5be20f7de 100644 --- a/process/smartContract/process_test.go +++ b/process/smartContract/process_test.go @@ -253,193 +253,6 @@ func TestNewSmartContractProcessor(t *testing.T) { assert.Nil(t, err) } -func TestScProcessor_ComputeTransactionTypeNil(t *testing.T) { - t.Parallel() - - sc, err := NewSmartContractProcessor( - &mock.VMExecutionHandlerStub{}, - &mock.ArgumentParserMock{}, - &mock.HasherMock{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{}, - &mock.TemporaryAccountsHandlerMock{}, - &mock.AddressConverterMock{}, - mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}, - &mock.UnsignedTxHandlerMock{}, - ) - - assert.NotNil(t, sc) - assert.Nil(t, err) - - _, err = sc.ComputeTransactionType(nil) - assert.Equal(t, process.ErrNilTransaction, err) -} - -func TestScProcessor_ComputeTransactionTypeNilTx(t *testing.T) { - t.Parallel() - - sc, err := NewSmartContractProcessor( - &mock.VMExecutionHandlerStub{}, - &mock.ArgumentParserMock{}, - &mock.HasherMock{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{}, - &mock.TemporaryAccountsHandlerMock{}, - &mock.AddressConverterMock{}, - mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}, - &mock.UnsignedTxHandlerMock{}, - ) - - assert.NotNil(t, sc) - assert.Nil(t, err) - - tx := &transaction.Transaction{} - tx.Nonce = 0 - tx.SndAddr = []byte("SRC") - tx.RcvAddr = []byte("DST") - tx.Value = big.NewInt(45) - - tx = nil - _, err = sc.ComputeTransactionType(tx) - assert.Equal(t, process.ErrNilTransaction, err) -} - -func TestScProcessor_ComputeTransactionTypeErrWrongTransaction(t *testing.T) { - t.Parallel() - - sc, err := NewSmartContractProcessor( - &mock.VMExecutionHandlerStub{}, - &mock.ArgumentParserMock{}, - &mock.HasherMock{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{}, - &mock.TemporaryAccountsHandlerMock{}, - &mock.AddressConverterMock{}, - mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}, - &mock.UnsignedTxHandlerMock{}, - ) - - assert.NotNil(t, sc) - assert.Nil(t, err) - - tx := &transaction.Transaction{} - tx.Nonce = 0 - tx.SndAddr = []byte("SRC") - tx.RcvAddr = nil - tx.Value = big.NewInt(45) - - _, err = sc.ComputeTransactionType(tx) - assert.Equal(t, process.ErrWrongTransaction, err) -} - -func TestScProcessor_ComputeTransactionTypeScDeployment(t *testing.T) { - t.Parallel() - - addressConverter := &mock.AddressConverterMock{} - sc, err := NewSmartContractProcessor( - &mock.VMExecutionHandlerStub{}, - &mock.ArgumentParserMock{}, - &mock.HasherMock{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{}, - &mock.TemporaryAccountsHandlerMock{}, - addressConverter, - mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}, - &mock.UnsignedTxHandlerMock{}, - ) - - assert.NotNil(t, sc) - assert.Nil(t, err) - - tx := &transaction.Transaction{} - tx.Nonce = 0 - tx.SndAddr = []byte("SRC") - tx.RcvAddr = make([]byte, addressConverter.AddressLen()) - tx.Data = []byte("data") - tx.Value = big.NewInt(45) - - txType, err := 
sc.ComputeTransactionType(tx) - assert.Nil(t, err) - assert.Equal(t, process.SCDeployment, txType) -} - -func TestScProcessor_ComputeTransactionTypeScInvoking(t *testing.T) { - t.Parallel() - - addrConverter := &mock.AddressConverterMock{} - tx := &transaction.Transaction{} - tx.Nonce = 0 - tx.SndAddr = []byte("SRC") - tx.RcvAddr = generateRandomByteSlice(addrConverter.AddressLen()) - tx.Data = []byte("data") - tx.Value = big.NewInt(45) - - _, acntDst := createAccounts(tx) - acntDst.SetCode([]byte("code")) - - sc, err := NewSmartContractProcessor( - &mock.VMExecutionHandlerStub{}, - &mock.ArgumentParserMock{}, - &mock.HasherMock{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{GetAccountWithJournalCalled: func(addressContainer state.AddressContainer) (handler state.AccountHandler, e error) { - return acntDst, nil - }}, - &mock.TemporaryAccountsHandlerMock{}, - addrConverter, - mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}, - &mock.UnsignedTxHandlerMock{}, - ) - - assert.NotNil(t, sc) - assert.Nil(t, err) - - txType, err := sc.ComputeTransactionType(tx) - assert.Nil(t, err) - assert.Equal(t, process.SCInvoking, txType) -} - -func TestScProcessor_ComputeTransactionTypeMoveBalance(t *testing.T) { - t.Parallel() - - addrConverter := &mock.AddressConverterMock{} - tx := &transaction.Transaction{} - tx.Nonce = 0 - tx.SndAddr = []byte("SRC") - tx.RcvAddr = generateRandomByteSlice(addrConverter.AddressLen()) - tx.Data = []byte("data") - tx.Value = big.NewInt(45) - - _, acntDst := createAccounts(tx) - - sc, err := NewSmartContractProcessor( - &mock.VMExecutionHandlerStub{}, - &mock.ArgumentParserMock{}, - &mock.HasherMock{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{GetAccountWithJournalCalled: func(addressContainer state.AddressContainer) (handler state.AccountHandler, e error) { - return acntDst, nil - }}, - &mock.TemporaryAccountsHandlerMock{}, - addrConverter, - mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}, - &mock.UnsignedTxHandlerMock{}, - ) - - assert.NotNil(t, sc) - assert.Nil(t, err) - - txType, err := sc.ComputeTransactionType(tx) - assert.Nil(t, err) - assert.Equal(t, process.MoveBalance, txType) -} - func TestScProcessor_DeploySmartContractBadParse(t *testing.T) { t.Parallel() diff --git a/process/transaction/process.go b/process/transaction/process.go index f87acacad38..4e90697c128 100644 --- a/process/transaction/process.go +++ b/process/transaction/process.go @@ -2,6 +2,7 @@ package transaction import ( "bytes" + "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/feeTx" "math/big" @@ -28,6 +29,7 @@ type txProcessor struct { marshalizer marshal.Marshalizer txFeeHandler process.UnsignedTxHandler shardCoordinator sharding.Coordinator + txTypeHandler process.TxTypeHandler } // NewTxProcessor creates a new txProcessor engine @@ -39,6 +41,7 @@ func NewTxProcessor( shardCoordinator sharding.Coordinator, scProcessor process.SmartContractProcessor, txFeeHandler process.UnsignedTxHandler, + txTypeHandler process.TxTypeHandler, ) (*txProcessor, error) { if accounts == nil { @@ -62,6 +65,9 @@ func NewTxProcessor( if txFeeHandler == nil { return nil, process.ErrNilUnsignedTxHandler } + if txTypeHandler == nil { + return nil, process.ErrNilTxTypeHandler + } return &txProcessor{ accounts: accounts, @@ -71,32 +77,44 @@ func NewTxProcessor( shardCoordinator: shardCoordinator, scProcessor: scProcessor, txFeeHandler: txFeeHandler, + txTypeHandler: txTypeHandler, }, nil } // 
ProcessTransaction modifies the account states in respect with the transaction data -func (txProc *txProcessor) ProcessTransaction(tx *transaction.Transaction, roundIndex uint32) error { - if tx == nil { +func (txProc *txProcessor) ProcessTransaction(tx data.TransactionHandler, roundIndex uint32) error { + if tx == nil || tx.IsInterfaceNil() { return process.ErrNilTransaction } + currTxFee, ok := tx.(*feeTx.FeeTx) + if ok { + txProc.txFeeHandler.AddTxFeeFromBlock(currTxFee) + } + adrSrc, adrDst, err := txProc.getAddresses(tx) if err != nil { return err } - txType, err := txProc.scProcessor.ComputeTransactionType(tx) + txType, err := txProc.txTypeHandler.ComputeTransactionType(tx) if err != nil { return err } switch txType { case process.MoveBalance: - return txProc.processMoveBalance(tx, adrSrc, adrDst) + currTx := tx.(*transaction.Transaction) + return txProc.processMoveBalance(currTx, adrSrc, adrDst) case process.SCDeployment: - return txProc.processSCDeployment(tx, adrSrc, roundIndex) + currTx := tx.(*transaction.Transaction) + return txProc.processSCDeployment(currTx, adrSrc, roundIndex) case process.SCInvoking: - return txProc.processSCInvoking(tx, adrSrc, adrDst, roundIndex) + currTx := tx.(*transaction.Transaction) + return txProc.processSCInvoking(currTx, adrSrc, adrDst, roundIndex) + case process.TxFee: + currTxFee := tx.(*feeTx.FeeTx) + return txProc.processAccumulatedTxFees(currTxFee, adrSrc) } return process.ErrWrongTransaction @@ -137,6 +155,31 @@ func (txProc *txProcessor) processTxFee(tx *transaction.Transaction, acntSnd *st return currFeeTx, nil } +func (txProc *txProcessor) processAccumulatedTxFees( + currTxFee *feeTx.FeeTx, + adrSrc state.AddressContainer, +) error { + acntSrc, _, err := txProc.getAccounts(adrSrc, adrSrc) + if err != nil { + return err + } + + // is sender address in node shard + if acntSrc != nil { + op := big.NewInt(0) + err := acntSrc.SetBalanceWithJournal(op.Add(acntSrc.Balance, currTxFee.Value)) + if err != nil { + return err + } + } + + if currTxFee.ShardId == txProc.shardCoordinator.SelfId() { + txProc.txFeeHandler.AddTxFeeFromBlock(currTxFee) + } + + return nil +} + func (txProc *txProcessor) processMoveBalance( tx *transaction.Transaction, adrSrc, adrDst state.AddressContainer, @@ -213,13 +256,13 @@ func (txProc *txProcessor) processSCInvoking( return err } -func (txProc *txProcessor) getAddresses(tx *transaction.Transaction) (adrSrc, adrDst state.AddressContainer, err error) { +func (txProc *txProcessor) getAddresses(tx data.TransactionHandler) (adrSrc, adrDst state.AddressContainer, err error) { //for now we assume that the address = public key - adrSrc, err = txProc.adrConv.CreateAddressFromPublicKeyBytes(tx.SndAddr) + adrSrc, err = txProc.adrConv.CreateAddressFromPublicKeyBytes(tx.GetSndAddress()) if err != nil { return } - adrDst, err = txProc.adrConv.CreateAddressFromPublicKeyBytes(tx.RcvAddr) + adrDst, err = txProc.adrConv.CreateAddressFromPublicKeyBytes(tx.GetRecvAddress()) return } diff --git a/process/transaction/process_test.go b/process/transaction/process_test.go index d2ed50ddfe0..4028a2f2038 100644 --- a/process/transaction/process_test.go +++ b/process/transaction/process_test.go @@ -4,7 +4,8 @@ import ( "bytes" "crypto/rand" "errors" - "github.com/ElrondNetwork/elrond-go/process/smartContract" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/process/coordinator" "math/big" "testing" @@ -52,6 +53,7 @@ func createTxProcessor() txproc.TxProcessor { mock.NewOneShardCoordinatorMock(), 
&mock.SCProcessorMock{}, &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, ) return txProc @@ -70,6 +72,7 @@ func TestNewTxProcessor_NilAccountsShouldErr(t *testing.T) { mock.NewOneShardCoordinatorMock(), &mock.SCProcessorMock{}, &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, ) assert.Equal(t, process.ErrNilAccountsAdapter, err) @@ -87,6 +90,7 @@ func TestNewTxProcessor_NilHasherShouldErr(t *testing.T) { mock.NewOneShardCoordinatorMock(), &mock.SCProcessorMock{}, &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, ) assert.Equal(t, process.ErrNilHasher, err) @@ -104,6 +108,7 @@ func TestNewTxProcessor_NilAddressConverterMockShouldErr(t *testing.T) { mock.NewOneShardCoordinatorMock(), &mock.SCProcessorMock{}, &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, ) assert.Equal(t, process.ErrNilAddressConverter, err) @@ -121,6 +126,7 @@ func TestNewTxProcessor_NilMarshalizerMockShouldErr(t *testing.T) { mock.NewOneShardCoordinatorMock(), &mock.SCProcessorMock{}, &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, ) assert.Equal(t, process.ErrNilMarshalizer, err) @@ -138,6 +144,7 @@ func TestNewTxProcessor_NilShardCoordinatorMockShouldErr(t *testing.T) { nil, &mock.SCProcessorMock{}, &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, ) assert.Equal(t, process.ErrNilShardCoordinator, err) @@ -155,6 +162,7 @@ func TestNewTxProcessor_NilSCProcessorShouldErr(t *testing.T) { mock.NewOneShardCoordinatorMock(), nil, &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, ) assert.Equal(t, process.ErrNilSmartContractProcessor, err) @@ -172,6 +180,7 @@ func TestNewTxProcessor_NilTxFeeHandlerShouldErr(t *testing.T) { mock.NewOneShardCoordinatorMock(), &mock.SCProcessorMock{}, nil, + &mock.TxTypeHandlerMock{}, ) assert.Equal(t, process.ErrNilUnsignedTxHandler, err) @@ -189,6 +198,7 @@ func TestNewTxProcessor_OkValsShouldWork(t *testing.T) { mock.NewOneShardCoordinatorMock(), &mock.SCProcessorMock{}, &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, ) assert.Nil(t, err) @@ -210,6 +220,7 @@ func TestTxProcessor_GetAddressErrAddressConvShouldErr(t *testing.T) { mock.NewOneShardCoordinatorMock(), &mock.SCProcessorMock{}, &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, ) addressConv.Fail = true @@ -248,6 +259,7 @@ func TestTxProcessor_GetAccountsShouldErrNilAddressContainer(t *testing.T) { mock.NewOneShardCoordinatorMock(), &mock.SCProcessorMock{}, &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, ) adr1 := mock.NewAddressMock([]byte{65}) @@ -271,6 +283,7 @@ func TestTxProcessor_GetAccountsMalfunctionAccountsShouldErr(t *testing.T) { mock.NewOneShardCoordinatorMock(), &mock.SCProcessorMock{}, &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, ) adr1 := mock.NewAddressMock([]byte{65}) @@ -311,6 +324,7 @@ func TestTxProcessor_GetAccountsOkValsSrcShouldWork(t *testing.T) { shardCoordinator, &mock.SCProcessorMock{}, &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, ) shardCoordinator.ComputeIdCalled = func(container state.AddressContainer) uint32 { @@ -360,6 +374,7 @@ func TestTxProcessor_GetAccountsOkValsDsthouldWork(t *testing.T) { shardCoordinator, &mock.SCProcessorMock{}, &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, ) shardCoordinator.ComputeIdCalled = func(container state.AddressContainer) uint32 { @@ -394,6 +409,7 @@ func TestTxProcessor_GetAccountsOkValsShouldWork(t *testing.T) { mock.NewOneShardCoordinatorMock(), &mock.SCProcessorMock{}, &mock.UnsignedTxHandlerMock{}, + 
&mock.TxTypeHandlerMock{}, ) a1, a2, err := execTx.GetAccounts(adr1, adr2) @@ -419,6 +435,7 @@ func TestTxProcessor_GetSameAccountShouldWork(t *testing.T) { mock.NewOneShardCoordinatorMock(), &mock.SCProcessorMock{}, &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, ) a1, a2, err := execTx.GetAccounts(adr1, adr1) @@ -641,6 +658,7 @@ func TestTxProcessor_ProcessTransactionErrAddressConvShouldErr(t *testing.T) { mock.NewOneShardCoordinatorMock(), &mock.SCProcessorMock{}, &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, ) addressConv.Fail = true @@ -660,6 +678,7 @@ func TestTxProcessor_ProcessTransactionMalfunctionAccountsShouldErr(t *testing.T mock.NewOneShardCoordinatorMock(), &mock.SCProcessorMock{}, &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, ) tx := transaction.Transaction{} @@ -696,6 +715,7 @@ func TestTxProcessor_ProcessCheckNotPassShouldErr(t *testing.T) { mock.NewOneShardCoordinatorMock(), &mock.SCProcessorMock{}, &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, ) err = execTx.ProcessTransaction(&tx, 4) @@ -746,6 +766,7 @@ func TestTxProcessor_ProcessCheckShouldPassWhenAdrSrcIsNotInNodeShard(t *testing shardCoordinator, &mock.SCProcessorMock{}, &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, ) err = execTx.ProcessTransaction(&tx, 4) @@ -788,6 +809,7 @@ func TestTxProcessor_ProcessMoveBalancesShouldWork(t *testing.T) { mock.NewOneShardCoordinatorMock(), &mock.SCProcessorMock{}, &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, ) execTx.SetMinTxFee(0) @@ -842,6 +864,7 @@ func TestTxProcessor_ProcessMoveBalancesShouldPassWhenAdrSrcIsNotInNodeShard(t * shardCoordinator, &mock.SCProcessorMock{}, &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, ) execTx.SetMinTxFee(0) @@ -896,6 +919,7 @@ func TestTxProcessor_ProcessIncreaseNonceShouldPassWhenAdrSrcIsNotInNodeShard(t shardCoordinator, &mock.SCProcessorMock{}, &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, ) err = execTx.ProcessTransaction(&tx, 4) @@ -942,6 +966,7 @@ func TestTxProcessor_ProcessOkValsShouldWork(t *testing.T) { mock.NewOneShardCoordinatorMock(), &mock.SCProcessorMock{}, &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, ) execTx.SetMinTxFee(0) @@ -987,22 +1012,8 @@ func TestTxProcessor_ProcessTransactionScTxShouldWork(t *testing.T) { acntDst.SetCode([]byte{65}) accounts := createAccountStub(tx.SndAddr, tx.RcvAddr, acntSrc, acntDst) - - scProcessor, err := smartContract.NewSmartContractProcessor( - &mock.VMExecutionHandlerStub{}, - &mock.ArgumentParserMock{}, - mock.HasherMock{}, - &mock.MarshalizerMock{}, - accounts, - &mock.TemporaryAccountsHandlerMock{}, - addrConverter, - mock.NewOneShardCoordinatorMock(), - &mock.IntermediateTransactionHandlerMock{}, - &mock.UnsignedTxHandlerMock{}, - ) scProcessorMock := &mock.SCProcessorMock{} - scProcessorMock.ComputeTransactionTypeCalled = scProcessor.ComputeTransactionType wasCalled := false scProcessorMock.ExecuteSmartContractTransactionCalled = func(tx *transaction.Transaction, acntSrc, acntDst state.AccountHandler, round uint32) error { wasCalled = true @@ -1017,6 +1028,11 @@ func TestTxProcessor_ProcessTransactionScTxShouldWork(t *testing.T) { mock.NewOneShardCoordinatorMock(), scProcessorMock, &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{ + ComputeTransactionTypeCalled: func(tx data.TransactionHandler) (transactionType process.TransactionType, e error) { + return process.SCInvoking, nil + }, + }, ) err = execTx.ProcessTransaction(&tx, 4) @@ -1056,21 +1072,8 @@ func 
TestTxProcessor_ProcessTransactionScTxShouldReturnErrWhenExecutionFails(t * accounts := createAccountStub(tx.SndAddr, tx.RcvAddr, acntSrc, acntDst) - scProcessor, err := smartContract.NewSmartContractProcessor( - &mock.VMExecutionHandlerStub{}, - &mock.ArgumentParserMock{}, - mock.HasherMock{}, - &mock.MarshalizerMock{}, - accounts, - &mock.TemporaryAccountsHandlerMock{}, - addrConverter, - mock.NewOneShardCoordinatorMock(), - &mock.IntermediateTransactionHandlerMock{}, - &mock.UnsignedTxHandlerMock{}, - ) scProcessorMock := &mock.SCProcessorMock{} - scProcessorMock.ComputeTransactionTypeCalled = scProcessor.ComputeTransactionType wasCalled := false scProcessorMock.ExecuteSmartContractTransactionCalled = func(tx *transaction.Transaction, acntSrc, acntDst state.AccountHandler, round uint32) error { wasCalled = true @@ -1085,6 +1088,9 @@ func TestTxProcessor_ProcessTransactionScTxShouldReturnErrWhenExecutionFails(t * mock.NewOneShardCoordinatorMock(), scProcessorMock, &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{ComputeTransactionTypeCalled: func(tx data.TransactionHandler) (transactionType process.TransactionType, e error) { + return process.SCInvoking, nil + }}, ) err = execTx.ProcessTransaction(&tx, 4) @@ -1134,26 +1140,18 @@ func TestTxProcessor_ProcessTransactionScTxShouldNotBeCalledWhenAdrDstIsNotInNod accounts := createAccountStub(tx.SndAddr, tx.RcvAddr, acntSrc, acntDst) - scProcessor, err := smartContract.NewSmartContractProcessor( - &mock.VMExecutionHandlerStub{}, - &mock.ArgumentParserMock{}, - mock.HasherMock{}, - &mock.MarshalizerMock{}, - accounts, - &mock.TemporaryAccountsHandlerMock{}, - addrConverter, - shardCoordinator, - &mock.IntermediateTransactionHandlerMock{}, - &mock.UnsignedTxHandlerMock{}, - ) scProcessorMock := &mock.SCProcessorMock{} - scProcessorMock.ComputeTransactionTypeCalled = scProcessor.ComputeTransactionType wasCalled := false scProcessorMock.ExecuteSmartContractTransactionCalled = func(tx *transaction.Transaction, acntSrc, acntDst state.AccountHandler, round uint32) error { wasCalled = true return process.ErrNoVM } + computeType, _ := coordinator.NewTxTypeHandler( + &mock.AddressConverterMock{}, + shardCoordinator, + accounts) + execTx, _ := txproc.NewTxProcessor( accounts, mock.HasherMock{}, @@ -1162,6 +1160,7 @@ func TestTxProcessor_ProcessTransactionScTxShouldNotBeCalledWhenAdrDstIsNotInNod shardCoordinator, scProcessorMock, &mock.UnsignedTxHandlerMock{}, + computeType, ) execTx.SetMinTxFee(0) diff --git a/process/unsigned/feeTxHandler.go b/process/unsigned/feeTxHandler.go index 71a338164ca..abdefde735f 100644 --- a/process/unsigned/feeTxHandler.go +++ b/process/unsigned/feeTxHandler.go @@ -38,6 +38,17 @@ func (ftxh *feeTxHandler) CleanProcessedUTxs() { ftxh.mutTxs.Unlock() } +func (ftxh *feeTxHandler) AddTxFeeFromBlock(tx data.TransactionHandler) { + currFeeTx, ok := tx.(*feeTx.FeeTx) + if !ok { + log.Debug(process.ErrWrongTypeAssertion.Error()) + } + + ftxh.mutTxs.Lock() + ftxh.feeTxsFromBlock[string(tx.GetRecvAddress())] = currFeeTx + ftxh.mutTxs.Unlock() +} + // AddProcessedUTx adds a new feeTx to the cache func (ftxh *feeTxHandler) AddProcessedUTx(tx data.TransactionHandler) { currFeeTx, ok := tx.(*feeTx.FeeTx) From 92618a883328cbfcf7cb59704de9880c9e175971 Mon Sep 17 00:00:00 2001 From: Robert Sasu Date: Tue, 9 Jul 2019 10:16:32 +0300 Subject: [PATCH 011/234] fixing tests --- integrationTests/mock/txProcessorMock.go | 7 ++--- integrationTests/mock/txTypeHandlerMock.go | 18 +++++++++++ .../mock/unsignedTxHandlerMock.go | 18 +++++++---- 
.../multiShard/block/testInitializer.go | 3 ++ .../state/stateExecTransaction_test.go | 3 ++ integrationTests/vm/testInitializer.go | 15 +++++++++ process/block/shardblock_test.go | 7 +++-- process/coordinator/process_test.go | 31 ++++++++++--------- process/coordinator/transactionType_test.go | 4 ++- 9 files changed, 77 insertions(+), 29 deletions(-) create mode 100644 integrationTests/mock/txTypeHandlerMock.go diff --git a/integrationTests/mock/txProcessorMock.go b/integrationTests/mock/txProcessorMock.go index f0b55f23a67..50ef78033ff 100644 --- a/integrationTests/mock/txProcessorMock.go +++ b/integrationTests/mock/txProcessorMock.go @@ -1,19 +1,18 @@ package mock import ( + "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/smartContractResult" "math/big" - - "github.com/ElrondNetwork/elrond-go/data/transaction" ) type TxProcessorMock struct { - ProcessTransactionCalled func(transaction *transaction.Transaction, round uint32) error + ProcessTransactionCalled func(transaction data.TransactionHandler, round uint32) error SetBalancesToTrieCalled func(accBalance map[string]*big.Int) (rootHash []byte, err error) ProcessSmartContractResultCalled func(scr *smartContractResult.SmartContractResult) error } -func (etm *TxProcessorMock) ProcessTransaction(transaction *transaction.Transaction, round uint32) error { +func (etm *TxProcessorMock) ProcessTransaction(transaction data.TransactionHandler, round uint32) error { return etm.ProcessTransactionCalled(transaction, round) } diff --git a/integrationTests/mock/txTypeHandlerMock.go b/integrationTests/mock/txTypeHandlerMock.go new file mode 100644 index 00000000000..2fcaeaf25d3 --- /dev/null +++ b/integrationTests/mock/txTypeHandlerMock.go @@ -0,0 +1,18 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/process" +) + +type TxTypeHandlerMock struct { + ComputeTransactionTypeCalled func(tx data.TransactionHandler) (process.TransactionType, error) +} + +func (th *TxTypeHandlerMock) ComputeTransactionType(tx data.TransactionHandler) (process.TransactionType, error) { + if th.ComputeTransactionTypeCalled == nil { + return process.MoveBalance, nil + } + + return th.ComputeTransactionTypeCalled(tx) +} diff --git a/integrationTests/mock/unsignedTxHandlerMock.go b/integrationTests/mock/unsignedTxHandlerMock.go index 02288975fc0..9ac5fd55a10 100644 --- a/integrationTests/mock/unsignedTxHandlerMock.go +++ b/integrationTests/mock/unsignedTxHandlerMock.go @@ -2,14 +2,22 @@ package mock import ( "github.com/ElrondNetwork/elrond-go/data" - "github.com/ElrondNetwork/elrond-go/data/block" ) type UnsignedTxHandlerMock struct { CleanProcessedUtxsCalled func() AddProcessedUTxCalled func(tx data.TransactionHandler) CreateAllUTxsCalled func() []data.TransactionHandler - VerifyCreatedUTxsCalled func(header data.HeaderHandler, body block.Body) error + VerifyCreatedUTxsCalled func() error + AddTxFeeFromBlockCalled func(tx data.TransactionHandler) +} + +func (ut *UnsignedTxHandlerMock) AddTxFeeFromBlock(tx data.TransactionHandler) { + if ut.AddTxFeeFromBlockCalled == nil { + return + } + + ut.AddTxFeeFromBlockCalled(tx) } func (ut *UnsignedTxHandlerMock) CleanProcessedUTxs() { @@ -18,7 +26,6 @@ func (ut *UnsignedTxHandlerMock) CleanProcessedUTxs() { } ut.CleanProcessedUtxsCalled() - return } func (ut *UnsignedTxHandlerMock) AddProcessedUTx(tx data.TransactionHandler) { @@ -27,7 +34,6 @@ func (ut *UnsignedTxHandlerMock) AddProcessedUTx(tx data.TransactionHandler) { } 
ut.AddProcessedUTxCalled(tx) - return } func (ut *UnsignedTxHandlerMock) CreateAllUTxs() []data.TransactionHandler { @@ -37,9 +43,9 @@ func (ut *UnsignedTxHandlerMock) CreateAllUTxs() []data.TransactionHandler { return ut.CreateAllUTxsCalled() } -func (ut *UnsignedTxHandlerMock) VerifyCreatedUTxs(header data.HeaderHandler, body block.Body) error { +func (ut *UnsignedTxHandlerMock) VerifyCreatedUTxs() error { if ut.VerifyCreatedUTxsCalled == nil { return nil } - return ut.VerifyCreatedUTxsCalled(header, body) + return ut.VerifyCreatedUTxsCalled() } diff --git a/integrationTests/multiShard/block/testInitializer.go b/integrationTests/multiShard/block/testInitializer.go index 856bd700369..f08e4accfb0 100644 --- a/integrationTests/multiShard/block/testInitializer.go +++ b/integrationTests/multiShard/block/testInitializer.go @@ -248,6 +248,8 @@ func createNetNode( resolversFinder, _ := containers.NewResolversFinder(resolversContainer, shardCoordinator) requestHandler, _ := requestHandlers.NewShardResolverRequestHandler(resolversFinder, factory.TransactionTopic, factory.UnsignedTransactionTopic, factory.MiniBlocksTopic, factory.MetachainBlocksTopic, 100) + txTypeHandler, _ := coordinator.NewTxTypeHandler(testAddressConverter, shardCoordinator, accntAdapter) + txProcessor, _ := transaction.NewTxProcessor( accntAdapter, testHasher, @@ -256,6 +258,7 @@ func createNetNode( shardCoordinator, &mock.SCProcessorMock{}, &mock.UnsignedTxHandlerMock{}, + txTypeHandler, ) fact, _ := shard.NewPreProcessorsContainerFactory( diff --git a/integrationTests/state/stateExecTransaction_test.go b/integrationTests/state/stateExecTransaction_test.go index 98f71ce4c5c..9d84ece146c 100644 --- a/integrationTests/state/stateExecTransaction_test.go +++ b/integrationTests/state/stateExecTransaction_test.go @@ -40,6 +40,7 @@ func TestExecTransaction_SelfTransactionShouldWork(t *testing.T) { shardCoordinator, &mock.SCProcessorMock{}, &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, ) nonce := uint64(6) @@ -96,6 +97,7 @@ func TestExecTransaction_SelfTransactionWithRevertShouldWork(t *testing.T) { shardCoordinator, &mock.SCProcessorMock{}, &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, ) nonce := uint64(6) @@ -177,6 +179,7 @@ func testExecTransactionsMoreTxWithRevert( shardCoordinator, &mock.SCProcessorMock{}, &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, ) txToGenerate := 15000 diff --git a/integrationTests/vm/testInitializer.go b/integrationTests/vm/testInitializer.go index bce462685ea..82f6ad51382 100644 --- a/integrationTests/vm/testInitializer.go +++ b/integrationTests/vm/testInitializer.go @@ -2,6 +2,7 @@ package vm import ( "fmt" + "github.com/ElrondNetwork/elrond-go/process/coordinator" "math/big" "testing" @@ -86,6 +87,12 @@ func CreateTxProcessorWithOneSCExecutorMockVM(accnts state.AccountsAdapter, opGa &mock.IntermediateTransactionHandlerMock{}, &mock.UnsignedTxHandlerMock{}, ) + + txTypeHandler, _ := coordinator.NewTxTypeHandler( + addrConv, + oneShardCoordinator, + accnts) + txProcessor, _ := transaction.NewTxProcessor( accnts, testHasher, @@ -94,6 +101,7 @@ func CreateTxProcessorWithOneSCExecutorMockVM(accnts state.AccountsAdapter, opGa oneShardCoordinator, scProcessor, &mock.UnsignedTxHandlerMock{}, + txTypeHandler, ) return txProcessor @@ -126,6 +134,12 @@ func CreateTxProcessorWithOneSCExecutorIeleVM(accnts state.AccountsAdapter) proc &mock.IntermediateTransactionHandlerMock{}, &mock.UnsignedTxHandlerMock{}, ) + + txTypeHandler, _ := coordinator.NewTxTypeHandler( + addrConv, 
+ oneShardCoordinator, + accnts) + txProcessor, _ := transaction.NewTxProcessor( accnts, testHasher, @@ -134,6 +148,7 @@ func CreateTxProcessorWithOneSCExecutorIeleVM(accnts state.AccountsAdapter) proc oneShardCoordinator, scProcessor, &mock.UnsignedTxHandlerMock{}, + txTypeHandler, ) return txProcessor diff --git a/process/block/shardblock_test.go b/process/block/shardblock_test.go index 3b70bcc625f..ee97e359a57 100644 --- a/process/block/shardblock_test.go +++ b/process/block/shardblock_test.go @@ -598,7 +598,7 @@ func TestShardProcessor_ProcessBlockWithInvalidTransactionShouldErr(t *testing.T accounts, &mock.RequestHandlerMock{}, &mock.TxProcessorMock{ - ProcessTransactionCalled: func(transaction *transaction.Transaction, round uint32) error { + ProcessTransactionCalled: func(transaction data.TransactionHandler, round uint32) error { return process.ErrHigherNonceInTransaction }, }, @@ -806,7 +806,7 @@ func TestShardProcessor_ProcessBlockWithErrOnProcessBlockTransactionsCallShouldR } err := errors.New("process block transaction error") - txProcess := func(transaction *transaction.Transaction, round uint32) error { + txProcess := func(transaction data.TransactionHandler, round uint32) error { return err } @@ -2425,7 +2425,8 @@ func TestShardProcessor_CreateMiniBlocksShouldWorkWithIntraShardTxs(t *testing.T tx3ExecutionResult := uint64(0) txProcessorMock := &mock.TxProcessorMock{ - ProcessTransactionCalled: func(transaction *transaction.Transaction, round uint32) error { + ProcessTransactionCalled: func(trans data.TransactionHandler, round uint32) error { + transaction, _ := trans.(*transaction.Transaction) //execution, in this context, means moving the tx nonce to itx corresponding execution result variable if bytes.Equal(transaction.Data, txHash1) { tx1ExecutionResult = transaction.Nonce diff --git a/process/coordinator/process_test.go b/process/coordinator/process_test.go index ad6e23013ff..bee6592f189 100644 --- a/process/coordinator/process_test.go +++ b/process/coordinator/process_test.go @@ -373,7 +373,7 @@ func createPreProcessorContainer() process.PreProcessorsContainer { &mock.AccountsStub{}, &mock.RequestHandlerMock{}, &mock.TxProcessorMock{ - ProcessTransactionCalled: func(transaction *transaction.Transaction, round uint32) error { + ProcessTransactionCalled: func(transaction data.TransactionHandler, round uint32) error { return nil }, }, @@ -396,7 +396,7 @@ func createPreProcessorContainerWithDataPool(dataPool dataRetriever.PoolsHolder) &mock.AccountsStub{}, &mock.RequestHandlerMock{}, &mock.TxProcessorMock{ - ProcessTransactionCalled: func(transaction *transaction.Transaction, round uint32) error { + ProcessTransactionCalled: func(transaction data.TransactionHandler, round uint32) error { return nil }, }, @@ -609,7 +609,7 @@ func TestTransactionCoordinator_CreateMbsAndProcessCrossShardTransactions(t *tes &mock.AccountsStub{}, &mock.RequestHandlerMock{}, &mock.TxProcessorMock{ - ProcessTransactionCalled: func(transaction *transaction.Transaction, round uint32) error { + ProcessTransactionCalled: func(transaction data.TransactionHandler, round uint32) error { return nil }, }, @@ -693,7 +693,7 @@ func TestTransactionCoordinator_CreateMbsAndProcessTransactionsFromMeNothingToPr &mock.AccountsStub{}, &mock.RequestHandlerMock{}, &mock.TxProcessorMock{ - ProcessTransactionCalled: func(transaction *transaction.Transaction, round uint32) error { + ProcessTransactionCalled: func(transaction data.TransactionHandler, round uint32) error { return nil }, }, @@ -1117,7 +1117,7 @@ func 
TestTransactionCoordinator_ProcessBlockTransactionProcessTxError(t *testing accounts, &mock.RequestHandlerMock{}, &mock.TxProcessorMock{ - ProcessTransactionCalled: func(transaction *transaction.Transaction, round uint32) error { + ProcessTransactionCalled: func(transaction data.TransactionHandler, round uint32) error { return process.ErrHigherNonceInTransaction }, }, @@ -1235,7 +1235,7 @@ func TestTransactionCoordinator_RequestMiniblocks(t *testing.T) { accounts, requestHandler, &mock.TxProcessorMock{ - ProcessTransactionCalled: func(transaction *transaction.Transaction, round uint32) error { + ProcessTransactionCalled: func(transaction data.TransactionHandler, round uint32) error { return nil }, }, @@ -1337,16 +1337,17 @@ func TestShardProcessor_ProcessMiniBlockCompleteWithOkTxsShouldExecuteThemAndNot accounts, &mock.RequestHandlerMock{}, &mock.TxProcessorMock{ - ProcessTransactionCalled: func(transaction *transaction.Transaction, round uint32) error { + ProcessTransactionCalled: func(trans data.TransactionHandler, round uint32) error { //execution, in this context, means moving the tx nonce to itx corresponding execution result variable - if bytes.Equal(transaction.Data, txHash1) { - tx1ExecutionResult = transaction.Nonce + tx, _ := trans.(*transaction.Transaction) + if bytes.Equal(tx.Data, txHash1) { + tx1ExecutionResult = tx.Nonce } - if bytes.Equal(transaction.Data, txHash2) { - tx2ExecutionResult = transaction.Nonce + if bytes.Equal(tx.Data, txHash2) { + tx2ExecutionResult = tx.Nonce } - if bytes.Equal(transaction.Data, txHash3) { - tx3ExecutionResult = transaction.Nonce + if bytes.Equal(tx.Data, txHash3) { + tx3ExecutionResult = tx.Nonce } return nil @@ -1449,8 +1450,8 @@ func TestShardProcessor_ProcessMiniBlockCompleteWithErrorWhileProcessShouldCallR accounts, &mock.RequestHandlerMock{}, &mock.TxProcessorMock{ - ProcessTransactionCalled: func(transaction *transaction.Transaction, round uint32) error { - if bytes.Equal(transaction.Data, txHash2) { + ProcessTransactionCalled: func(transaction data.TransactionHandler, round uint32) error { + if bytes.Equal(transaction.GetData(), txHash2) { return process.ErrHigherNonceInTransaction } return nil diff --git a/process/coordinator/transactionType_test.go b/process/coordinator/transactionType_test.go index ebffc51dde1..2636fe66b9e 100644 --- a/process/coordinator/transactionType_test.go +++ b/process/coordinator/transactionType_test.go @@ -146,7 +146,9 @@ func TestTxTypeHandler_ComputeTransactionTypeScInvoking(t *testing.T) { tth, err := NewTxTypeHandler( addressConverter, mock.NewMultiShardsCoordinatorMock(3), - &mock.AccountsStub{}, + &mock.AccountsStub{GetAccountWithJournalCalled: func(addressContainer state.AddressContainer) (handler state.AccountHandler, e error) { + return acntDst, nil + }}, ) assert.NotNil(t, tth) From 2d11e0d4c3ef1e8486ab8ff417b3a2ad2fc15608 Mon Sep 17 00:00:00 2001 From: Robert Sasu Date: Tue, 9 Jul 2019 15:11:59 +0300 Subject: [PATCH 012/234] added tests. 
--- data/block/block.go | 4 +- process/block/preprocess/transactions.go | 14 ++- process/coordinator/process.go | 86 ++++++++++++------- process/coordinator/process_test.go | 2 + process/errors.go | 3 + .../intermediateProcessorsContainerFactory.go | 43 ++++++++-- ...rmediateProcessorsContainerFactory_test.go | 8 +- process/interface.go | 2 + process/mock/specialAddressHandlerMock.go | 31 +++++++ process/unsigned/feeTxHandler.go | 75 ++++++++++++++-- 10 files changed, 214 insertions(+), 54 deletions(-) create mode 100644 process/mock/specialAddressHandlerMock.go diff --git a/data/block/block.go b/data/block/block.go index a2a30d342d0..ae55b911d30 100644 --- a/data/block/block.go +++ b/data/block/block.go @@ -34,8 +34,10 @@ const ( PeerBlock Type = 2 // SmartContractResultBlock identifies a miniblock holding smartcontractresults SmartContractResultBlock Type = 3 + // TxFeeBlock identifies a miniblock holding accumulated transaction fees + TxFeeBlock Type = 4 // InvalidBlock identifies identifies an invalid miniblock - InvalidBlock Type = 4 + InvalidBlock Type = 5 ) // String returns the string representation of the Type diff --git a/process/block/preprocess/transactions.go b/process/block/preprocess/transactions.go index 88ee8533db2..9e15bdd839f 100644 --- a/process/block/preprocess/transactions.go +++ b/process/block/preprocess/transactions.go @@ -211,11 +211,7 @@ func (txs *transactions) ProcessBlockTransactions(body block.Body, round uint32, return process.ErrMissingTransaction } - currTx, ok := txInfo.tx.(*transaction.Transaction) - if !ok { - return process.ErrWrongTypeAssertion - } - + currTx := txInfo.tx err := txs.processAndRemoveBadTransaction( txHash, currTx, @@ -298,7 +294,7 @@ func (txs *transactions) computeMissingAndExistingTxsForShards(body block.Body) // processAndRemoveBadTransactions processed transactions, if txs are with error it removes them from pool func (txs *transactions) processAndRemoveBadTransaction( transactionHash []byte, - transaction *transaction.Transaction, + transaction data.TransactionHandler, round uint32, sndShardId uint32, dstShardId uint32, @@ -352,7 +348,7 @@ func (txs *transactions) computeMissingTxsForMiniBlock(mb block.MiniBlock) [][]b func (txs *transactions) getAllTxsFromMiniBlock( mb *block.MiniBlock, haveTime func() bool, -) ([]*transaction.Transaction, [][]byte, error) { +) ([]data.TransactionHandler, [][]byte, error) { strCache := process.ShardCacherIdentifier(mb.SenderShardID, mb.ReceiverShardID) txCache := txs.txPool.ShardDataStore(strCache) @@ -361,7 +357,7 @@ func (txs *transactions) getAllTxsFromMiniBlock( } // verify if all transaction exists - transactions := make([]*transaction.Transaction, 0) + transactions := make([]data.TransactionHandler, 0) txHashes := make([][]byte, 0) for _, txHash := range mb.TxHashes { if !haveTime() { @@ -373,7 +369,7 @@ func (txs *transactions) getAllTxsFromMiniBlock( return nil, nil, process.ErrNilTransaction } - tx, ok := tmp.(*transaction.Transaction) + tx, ok := tmp.(data.TransactionHandler) if !ok { return nil, nil, process.ErrWrongTypeAssertion } diff --git a/process/coordinator/process.go b/process/coordinator/process.go index b3386d035e5..a0a8d9a3ac9 100644 --- a/process/coordinator/process.go +++ b/process/coordinator/process.go @@ -443,33 +443,56 @@ func (tc *transactionCoordinator) CreateMbsAndProcessTransactionsFromMe(maxTxRem miniBlocks = append(miniBlocks, interMBs...) 
} + // add txfee transactions to matching blocks + interimProc := tc.getInterimProcessor(block.TxFeeBlock) + if interimProc == nil { + return miniBlocks + } + + txFeeMbs := interimProc.CreateAllInterMiniBlocks() + for key, mb := range txFeeMbs { + var matchingMBFound bool + for i := 0; i < len(miniBlocks); i++ { + if miniBlocks[i].ReceiverShardID == key && + miniBlocks[i].SenderShardID == tc.shardCoordinator.SelfId() && + miniBlocks[i].Type == block.TxBlock { + miniBlocks[i].TxHashes = append(miniBlocks[i].TxHashes, mb.TxHashes...) + matchingMBFound = true + break + } + } + + if !matchingMBFound { + mb.ReceiverShardID = key + mb.SenderShardID = tc.shardCoordinator.SelfId() + mb.Type = block.TxBlock + + miniBlocks = append(miniBlocks, mb) + } + } + return miniBlocks } func (tc *transactionCoordinator) processAddedInterimTransactions() block.MiniBlockSlice { miniBlocks := make(block.MiniBlockSlice, 0) - tc.mutInterimProcessors.RLock() - - resMutex := sync.Mutex{} - // TODO: think if it is good in parallel or it is needed in sequences - wg := sync.WaitGroup{} - wg.Add(len(tc.interimProcessors)) + tc.mutInterimProcessors.Lock() - for _, interimProc := range tc.interimProcessors { + for key, interimProc := range tc.interimProcessors { + if key == block.TxFeeBlock { + // this has to be processed last + continue + } go func() { currMbs := interimProc.CreateAllInterMiniBlocks() - resMutex.Lock() for _, value := range currMbs { miniBlocks = append(miniBlocks, value) } - resMutex.Unlock() - wg.Done() }() } - wg.Wait() - tc.mutInterimProcessors.RUnlock() + tc.mutInterimProcessors.Unlock() return miniBlocks } @@ -612,28 +635,31 @@ func (tc *transactionCoordinator) processCompleteMiniBlock( } func (tc *transactionCoordinator) VerifyCreatedBlockTransactions(body block.Body) error { - tc.mutInterimProcessors.RLock() + tc.mutInterimProcessors.Lock() - errMutex := sync.Mutex{} var errFound error - // TODO: think if it is good in parallel or it is needed in sequences - wg := sync.WaitGroup{} - wg.Add(len(tc.interimProcessors)) - for _, interimProc := range tc.interimProcessors { - go func() { - err := interimProc.VerifyInterMiniBlocks(body) - if err != nil { - errMutex.Lock() - errFound = err - errMutex.Unlock() - } - wg.Done() - }() + for key, interimProc := range tc.interimProcessors { + if key == block.TxFeeBlock { + // this has to be processed last + continue + } + err := interimProc.VerifyInterMiniBlocks(body) + if err != nil { + errFound = err + } } - wg.Wait() - tc.mutInterimProcessors.RUnlock() + tc.mutInterimProcessors.Unlock() - return errFound + if errFound != nil { + return errFound + } + + interimProc := tc.getInterimProcessor(block.TxFeeBlock) + if interimProc == nil { + return nil + } + + return interimProc.VerifyInterMiniBlocks(body) } diff --git a/process/coordinator/process_test.go b/process/coordinator/process_test.go index bee6592f189..d5e0abd36d4 100644 --- a/process/coordinator/process_test.go +++ b/process/coordinator/process_test.go @@ -1495,6 +1495,7 @@ func TestTransactionCoordinator_VerifyCreatedBlockTransactionsNilOrMiss(t *testi &mock.MarshalizerMock{}, &mock.HasherMock{}, adrConv, + &mock.SpecialAddressHandlerMock{}, ) container, _ := factory.Create() @@ -1537,6 +1538,7 @@ func TestTransactionCoordinator_VerifyCreatedBlockTransactionsOk(t *testing.T) { &mock.MarshalizerMock{}, &mock.HasherMock{}, adrConv, + &mock.SpecialAddressHandlerMock{}, ) container, _ := factory.Create() diff --git a/process/errors.go b/process/errors.go index be48da92c05..5f52220c700 100644 --- 
a/process/errors.go +++ b/process/errors.go @@ -414,3 +414,6 @@ var ErrTxsFeesDoesNotMatch = errors.New("calculated tx fees with block tx fee do // ErrNilTxTypeHandler signals that tx type handler is nil var ErrNilTxTypeHandler = errors.New("nil tx type handler") + +// ErrNilSpecialAddressHandler signals that special address handler is nil +var ErrNilSpecialAddressHandler = errors.New("nil special address handler") diff --git a/process/factory/shard/intermediateProcessorsContainerFactory.go b/process/factory/shard/intermediateProcessorsContainerFactory.go index 5fd12cf0451..1e50237a7d5 100644 --- a/process/factory/shard/intermediateProcessorsContainerFactory.go +++ b/process/factory/shard/intermediateProcessorsContainerFactory.go @@ -8,14 +8,16 @@ import ( "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/block/preprocess" "github.com/ElrondNetwork/elrond-go/process/factory/containers" + "github.com/ElrondNetwork/elrond-go/process/unsigned" "github.com/ElrondNetwork/elrond-go/sharding" ) type intermediateProcessorsContainerFactory struct { - shardCoordinator sharding.Coordinator - marshalizer marshal.Marshalizer - hasher hashing.Hasher - addrConverter state.AddressConverter + shardCoordinator sharding.Coordinator + marshalizer marshal.Marshalizer + hasher hashing.Hasher + addrConverter state.AddressConverter + specialAddressHandler process.SpecialAddressHandler } // NewIntermediateProcessorsContainerFactory is responsible for creating a new intermediate processors factory object @@ -24,6 +26,7 @@ func NewIntermediateProcessorsContainerFactory( marshalizer marshal.Marshalizer, hasher hashing.Hasher, addrConverter state.AddressConverter, + specialAddressHandler process.SpecialAddressHandler, ) (*intermediateProcessorsContainerFactory, error) { if shardCoordinator == nil { @@ -38,12 +41,16 @@ func NewIntermediateProcessorsContainerFactory( if addrConverter == nil { return nil, process.ErrNilAddressConverter } + if specialAddressHandler == nil { + return nil, process.ErrNilSpecialAddressHandler + } return &intermediateProcessorsContainerFactory{ - shardCoordinator: shardCoordinator, - marshalizer: marshalizer, - hasher: hasher, - addrConverter: addrConverter, + shardCoordinator: shardCoordinator, + marshalizer: marshalizer, + hasher: hasher, + addrConverter: addrConverter, + specialAddressHandler: specialAddressHandler, }, nil } @@ -61,6 +68,16 @@ func (ppcm *intermediateProcessorsContainerFactory) Create() (process.Intermedia return nil, err } + interproc, err = ppcm.createTxFeeIntermediateProcessor() + if err != nil { + return nil, err + } + + err = container.Add(block.TxFeeBlock, interproc) + if err != nil { + return nil, err + } + return container, nil } @@ -75,3 +92,13 @@ func (ppcm *intermediateProcessorsContainerFactory) createSmartContractResultsIn return irp, err } + +func (ppcm *intermediateProcessorsContainerFactory) createTxFeeIntermediateProcessor() (process.IntermediateTransactionHandler, error) { + irp, err := unsigned.NewFeeTxHandler( + ppcm.specialAddressHandler, + ppcm.hasher, + ppcm.marshalizer, + ) + + return irp, err +} diff --git a/process/factory/shard/intermediateProcessorsContainerFactory_test.go b/process/factory/shard/intermediateProcessorsContainerFactory_test.go index f446e6c0bc1..da17dda0c0f 100644 --- a/process/factory/shard/intermediateProcessorsContainerFactory_test.go +++ b/process/factory/shard/intermediateProcessorsContainerFactory_test.go @@ -15,6 +15,7 @@ func 
TestNewIntermediateProcessorsContainerFactory_NilShardCoord(t *testing.T) { &mock.MarshalizerMock{}, &mock.HasherMock{}, &mock.AddressConverterMock{}, + &mock.SpecialAddressHandlerMock{}, ) assert.Nil(t, ipcf) @@ -29,6 +30,7 @@ func TestNewIntermediateProcessorsContainerFactory_NilMarshalizer(t *testing.T) nil, &mock.HasherMock{}, &mock.AddressConverterMock{}, + &mock.SpecialAddressHandlerMock{}, ) assert.Nil(t, ipcf) @@ -43,6 +45,7 @@ func TestNewIntermediateProcessorsContainerFactory_NilHasher(t *testing.T) { &mock.MarshalizerMock{}, nil, &mock.AddressConverterMock{}, + &mock.SpecialAddressHandlerMock{}, ) assert.Nil(t, ipcf) @@ -57,6 +60,7 @@ func TestNewIntermediateProcessorsContainerFactory_NilAdrConv(t *testing.T) { &mock.MarshalizerMock{}, &mock.HasherMock{}, nil, + &mock.SpecialAddressHandlerMock{}, ) assert.Nil(t, ipcf) @@ -71,6 +75,7 @@ func TestNewIntermediateProcessorsContainerFactory(t *testing.T) { &mock.MarshalizerMock{}, &mock.HasherMock{}, &mock.AddressConverterMock{}, + &mock.SpecialAddressHandlerMock{}, ) assert.Nil(t, err) @@ -85,6 +90,7 @@ func TestIntermediateProcessorsContainerFactory_Create(t *testing.T) { &mock.MarshalizerMock{}, &mock.HasherMock{}, &mock.AddressConverterMock{}, + &mock.SpecialAddressHandlerMock{}, ) assert.Nil(t, err) @@ -92,5 +98,5 @@ func TestIntermediateProcessorsContainerFactory_Create(t *testing.T) { container, err := ipcf.Create() assert.Nil(t, err) - assert.Equal(t, 1, container.Len()) + assert.Equal(t, 2, container.Len()) } diff --git a/process/interface.go b/process/interface.go index 9a5993979b4..35d01ee3ee7 100644 --- a/process/interface.go +++ b/process/interface.go @@ -80,9 +80,11 @@ type UnsignedTxHandler interface { AddTxFeeFromBlock(tx data.TransactionHandler) } +// SpecialAddressHandler responds with needed special addresses type SpecialAddressHandler interface { GetElrondCommunityAddress() []byte GetLeaderAddress() []byte + ShardIdForAddress([]byte) uint32 } // Preprocessor is an interface used to prepare and process transaction data diff --git a/process/mock/specialAddressHandlerMock.go b/process/mock/specialAddressHandlerMock.go new file mode 100644 index 00000000000..7e6cc1ff027 --- /dev/null +++ b/process/mock/specialAddressHandlerMock.go @@ -0,0 +1,31 @@ +package mock + +type SpecialAddressHandlerMock struct { + GetElrondCommunityAddressCalled func() []byte + GetLeaderAddressCalled func() []byte + ShardIdForAddressCalled func([]byte) uint32 +} + +func (sh *SpecialAddressHandlerMock) GetElrondCommunityAddress() []byte { + if sh.GetElrondCommunityAddressCalled == nil { + return []byte("elrond") + } + + return sh.GetElrondCommunityAddressCalled() +} + +func (sh *SpecialAddressHandlerMock) GetLeaderAddress() []byte { + if sh.GetLeaderAddressCalled == nil { + return []byte("leader") + } + + return sh.GetLeaderAddressCalled() +} + +func (sh *SpecialAddressHandlerMock) ShardIdForAddress(addr []byte) uint32 { + if sh.ShardIdForAddressCalled == nil { + return 0 + } + + return sh.ShardIdForAddressCalled(addr) +} diff --git a/process/unsigned/feeTxHandler.go b/process/unsigned/feeTxHandler.go index abdefde735f..8eafde84d90 100644 --- a/process/unsigned/feeTxHandler.go +++ b/process/unsigned/feeTxHandler.go @@ -1,8 +1,12 @@ package unsigned import ( + "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/data/feeTx" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" 
"github.com/ElrondNetwork/elrond-go/process" "math/big" "sync" @@ -12,17 +16,35 @@ const communityPercentage = 0.1 // 10 = 100%, 0 = 0% const leaderPercentage = 0.4 // 10 = 100%, 0 = 0% type feeTxHandler struct { - address process.SpecialAddressHandler - mutTxs sync.Mutex - feeTxs []*feeTx.FeeTx + address process.SpecialAddressHandler + hasher hashing.Hasher + marshalizer marshal.Marshalizer + mutTxs sync.Mutex + feeTxs []*feeTx.FeeTx feeTxsFromBlock map[string]*feeTx.FeeTx } // NewFeeTxHandler constructor for the fx tee handler -func NewFeeTxHandler(address process.SpecialAddressHandler) (*feeTxHandler, error) { +func NewFeeTxHandler( + address process.SpecialAddressHandler, + hasher hashing.Hasher, + marshalizer marshal.Marshalizer, +) (*feeTxHandler, error) { + if address == nil { + return nil, process.ErrNilSpecialAddressHandler + } + if hasher == nil { + return nil, process.ErrNilHasher + } + if marshalizer == nil { + return nil, process.ErrNilMarshalizer + } + ftxh := &feeTxHandler{ - address: address, + address: address, + hasher: hasher, + marshalizer: marshalizer, } ftxh.feeTxs = make([]*feeTx.FeeTx, 0) ftxh.feeTxsFromBlock = make(map[string]*feeTx.FeeTx) @@ -30,6 +52,46 @@ func NewFeeTxHandler(address process.SpecialAddressHandler) (*feeTxHandler, erro return ftxh, nil } +func (ftxh *feeTxHandler) AddIntermediateTransactions(txs []data.TransactionHandler) error { + return nil +} + +func (ftxh *feeTxHandler) CreateAllInterMiniBlocks() map[uint32]*block.MiniBlock { + calculatedFeeTxs := ftxh.CreateAllUTxs() + + miniBlocks := make(map[uint32]*block.MiniBlock) + for _, value := range calculatedFeeTxs { + dstShId := ftxh.address.ShardIdForAddress(value.GetRecvAddress()) + + txHash, err := core.CalculateHash(ftxh.marshalizer, ftxh.hasher, value) + if err != nil { + log.Debug(err.Error()) + continue + } + + var ok bool + var mb *block.MiniBlock + if mb, ok = miniBlocks[dstShId]; !ok { + mb = &block.MiniBlock{ + ReceiverShardID: dstShId, + } + } + + mb.TxHashes = append(mb.TxHashes, txHash) + miniBlocks[dstShId] = mb + } + + return miniBlocks +} + +// VerifyInterMiniBlocks verifies if transaction fees were correctly handled for the block +func (ftxh *feeTxHandler) VerifyInterMiniBlocks(body block.Body) error { + err := ftxh.VerifyCreatedUTxs() + ftxh.CleanProcessedUTxs() + + return err +} + // CleanProcessedUTxs deletes the cached data func (ftxh *feeTxHandler) CleanProcessedUTxs() { ftxh.mutTxs.Lock() @@ -103,6 +165,7 @@ func (ftxh *feeTxHandler) CreateAllUTxs() []data.TransactionHandler { } if totalFee.Cmp(big.NewInt(1)) < 0 { + ftxh.feeTxs = make([]*feeTx.FeeTx, 0) return nil } @@ -113,6 +176,8 @@ func (ftxh *feeTxHandler) CreateAllUTxs() []data.TransactionHandler { currFeeTxs = append(currFeeTxs, leaderTx) currFeeTxs = append(currFeeTxs, communityTx) + ftxh.feeTxs = make([]*feeTx.FeeTx, 0) + return currFeeTxs } From 75c9e00015fee4fe2975c6d15c6a791ed1c1a73a Mon Sep 17 00:00:00 2001 From: Robert Sasu Date: Tue, 9 Jul 2019 17:15:52 +0300 Subject: [PATCH 013/234] finishing details --- cmd/node/factory/structs.go | 65 +++++++++++++++-------- data/address/specialAddresses.go | 64 ++++++++++++++++++++++ data/errors.go | 12 +++++ process/interface.go | 4 +- process/mock/specialAddressHandlerMock.go | 18 +++---- process/unsigned/feeTxHandler.go | 4 +- 6 files changed, 133 insertions(+), 34 deletions(-) create mode 100644 data/address/specialAddresses.go diff --git a/cmd/node/factory/structs.go b/cmd/node/factory/structs.go index 791f43070ca..b254826e68e 100644 --- 
a/cmd/node/factory/structs.go +++ b/cmd/node/factory/structs.go @@ -7,6 +7,7 @@ import ( "encoding/hex" "errors" "fmt" + "github.com/ElrondNetwork/elrond-go/data/address" "io" "math/big" "path/filepath" @@ -53,7 +54,6 @@ import ( "github.com/ElrondNetwork/elrond-go/p2p/loadBalancer" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/block" - "github.com/ElrondNetwork/elrond-go/process/block/preprocess" "github.com/ElrondNetwork/elrond-go/process/coordinator" "github.com/ElrondNetwork/elrond-go/process/factory" "github.com/ElrondNetwork/elrond-go/process/factory/metachain" @@ -1265,16 +1265,46 @@ func newShardBlockProcessorAndTracker( return nil, nil, err } - intermediateProcessor, err := preprocess.NewIntermediateResultsProcessor( - core.Hasher, - core.Marshalizer, + specialAddressHolder, err := address.NewSpecialAddressHolder( + []byte("elrond"), + []byte("own"), + state.AddressConverter, + shardCoordinator) + if err != nil { + return nil, nil, err + } + + interimProcFactory, err := shard.NewIntermediateProcessorsContainerFactory( shardCoordinator, + core.Marshalizer, + core.Hasher, state.AddressConverter, - dataBlock.SmartContractResultBlock, + specialAddressHolder, ) if err != nil { return nil, nil, err } + + interimProcContainer, err := interimProcFactory.Create() + if err != nil { + return nil, nil, err + } + + scResults, err := interimProcContainer.Get(dataBlock.SmartContractResultBlock) + if err != nil { + return nil, nil, err + } + + feeTxInterim, err := interimProcContainer.Get(dataBlock.TxFeeBlock) + if err != nil { + return nil, nil, err + } + + feeTxHandler, ok := feeTxInterim.(process.UnsignedTxHandler) + if !ok { + return nil, nil, process.ErrWrongTypeAssertion + } + //TODO: change the mock scProcessor, err := smartContract.NewSmartContractProcessor( &mock.VMExecutionHandlerStub{}, @@ -1285,7 +1315,8 @@ func newShardBlockProcessorAndTracker( vmAccountsDB, state.AddressConverter, shardCoordinator, - intermediateProcessor, + scResults, + feeTxHandler, ) if err != nil { return nil, nil, err @@ -1303,6 +1334,11 @@ func newShardBlockProcessorAndTracker( return nil, nil, err } + txTypeHandler, err := coordinator.NewTxTypeHandler(state.AddressConverter, shardCoordinator, state.AccountsAdapter) + if err != nil { + return nil, nil, err + } + transactionProcessor, err := transaction.NewTxProcessor( state.AccountsAdapter, core.Hasher, @@ -1310,6 +1346,8 @@ func newShardBlockProcessorAndTracker( core.Marshalizer, shardCoordinator, scProcessor, + feeTxHandler, + txTypeHandler, ) if err != nil { return nil, nil, errors.New("could not create transaction processor: " + err.Error()) @@ -1347,21 +1385,6 @@ func newShardBlockProcessorAndTracker( return nil, nil, err } - interimProcFactory, err := shard.NewIntermediateProcessorsContainerFactory( - shardCoordinator, - core.Marshalizer, - core.Hasher, - state.AddressConverter, - ) - if err != nil { - return nil, nil, err - } - - interimProcContainer, err := interimProcFactory.Create() - if err != nil { - return nil, nil, err - } - txCoordinator, err := coordinator.NewTransactionCoordinator( shardCoordinator, state.AccountsAdapter, diff --git a/data/address/specialAddresses.go b/data/address/specialAddresses.go new file mode 100644 index 00000000000..19143ce1ebb --- /dev/null +++ b/data/address/specialAddresses.go @@ -0,0 +1,64 @@ +package address + +import ( + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/sharding" +) + +type 
specialAddresses struct { + elrond []byte + ownAddress []byte + adrConv state.AddressConverter + shardCoordinator sharding.Coordinator +} + +// NewSpecialAddressHolder creates a special address holder +func NewSpecialAddressHolder( + elrond []byte, + ownAddress []byte, + adrConv state.AddressConverter, + shardCoordinator sharding.Coordinator, +) (*specialAddresses, error) { + if elrond == nil { + return nil, data.ErrNilElrondAddress + } + if ownAddress == nil { + return nil, data.ErrNilOwnAddress + } + if adrConv == nil { + return nil, data.ErrNilAddressConverter + } + if shardCoordinator == nil { + return nil, data.ErrNilShardCoordinator + } + + sp := &specialAddresses{ + elrond: elrond, + ownAddress: ownAddress, + adrConv: adrConv, + shardCoordinator: shardCoordinator, + } + + return sp, nil +} + +// ElrondCommunityAddress provides elrond address +func (sp *specialAddresses) ElrondCommunityAddress() []byte { + return sp.elrond +} + +// OwnAddress provides own address +func (sp *specialAddresses) OwnAddress() []byte { + return sp.ownAddress +} + +// ShardIdForAddress calculates shard id for address +func (sp *specialAddresses) ShardIdForAddress(pubKey []byte) uint32 { + convAdr, err := sp.adrConv.CreateAddressFromPublicKeyBytes(pubKey) + if err != nil { + return sp.shardCoordinator.NumberOfShards() + } + + return sp.shardCoordinator.ComputeId(convAdr) +} diff --git a/data/errors.go b/data/errors.go index 8e98b3529d9..08f31c779e8 100644 --- a/data/errors.go +++ b/data/errors.go @@ -48,3 +48,15 @@ var ErrMiniBlockEmpty = errors.New("mini block is empty") // ErrWrongTypeAssertion signals that wrong type was provided var ErrWrongTypeAssertion = errors.New("wrong type assertion") + +// ErrNilElrondAddress signals that nil elrond address was provided +var ErrNilElrondAddress = errors.New("nil elrond address") + +// ErrNilOwnAddress signals that nil own address was provided +var ErrNilOwnAddress = errors.New("nil own address") + +// ErrNilAddressConverter signals that nil address converter was provided +var ErrNilAddressConverter = errors.New("nil address converter") + +// ErrNilShardCoordinator signals that nil shard coordinator was provided +var ErrNilShardCoordinator = errors.New("nil shard coordinator") diff --git a/process/interface.go b/process/interface.go index 35d01ee3ee7..f5d260a537a 100644 --- a/process/interface.go +++ b/process/interface.go @@ -82,8 +82,8 @@ type UnsignedTxHandler interface { // SpecialAddressHandler responds with needed special addresses type SpecialAddressHandler interface { - GetElrondCommunityAddress() []byte - GetLeaderAddress() []byte + ElrondCommunityAddress() []byte + OwnAddress() []byte ShardIdForAddress([]byte) uint32 } diff --git a/process/mock/specialAddressHandlerMock.go b/process/mock/specialAddressHandlerMock.go index 7e6cc1ff027..ae7ef1a75f8 100644 --- a/process/mock/specialAddressHandlerMock.go +++ b/process/mock/specialAddressHandlerMock.go @@ -1,25 +1,25 @@ package mock type SpecialAddressHandlerMock struct { - GetElrondCommunityAddressCalled func() []byte - GetLeaderAddressCalled func() []byte - ShardIdForAddressCalled func([]byte) uint32 + ElrondCommunityAddressCalled func() []byte + OwnAddressCalled func() []byte + ShardIdForAddressCalled func([]byte) uint32 } -func (sh *SpecialAddressHandlerMock) GetElrondCommunityAddress() []byte { - if sh.GetElrondCommunityAddressCalled == nil { +func (sh *SpecialAddressHandlerMock) ElrondCommunityAddress() []byte { + if sh.ElrondCommunityAddressCalled == nil { return []byte("elrond") } - return 
sh.GetElrondCommunityAddressCalled() + return sh.ElrondCommunityAddressCalled() } -func (sh *SpecialAddressHandlerMock) GetLeaderAddress() []byte { - if sh.GetLeaderAddressCalled == nil { +func (sh *SpecialAddressHandlerMock) OwnAddress() []byte { + if sh.OwnAddressCalled == nil { return []byte("leader") } - return sh.GetLeaderAddressCalled() + return sh.OwnAddressCalled() } func (sh *SpecialAddressHandlerMock) ShardIdForAddress(addr []byte) uint32 { diff --git a/process/unsigned/feeTxHandler.go b/process/unsigned/feeTxHandler.go index 8eafde84d90..c40d4c76ea8 100644 --- a/process/unsigned/feeTxHandler.go +++ b/process/unsigned/feeTxHandler.go @@ -139,7 +139,7 @@ func (ftxh *feeTxHandler) createLeaderTx(totalGathered *big.Int) *feeTx.FeeTx { currTx := &feeTx.FeeTx{} currTx.Value = getPercentageOfValue(totalGathered, leaderPercentage) - currTx.RcvAddr = ftxh.address.GetLeaderAddress() + currTx.RcvAddr = ftxh.address.OwnAddress() return currTx } @@ -148,7 +148,7 @@ func (ftxh *feeTxHandler) createCommunityTx(totalGathered *big.Int) *feeTx.FeeTx currTx := &feeTx.FeeTx{} currTx.Value = getPercentageOfValue(totalGathered, communityPercentage) - currTx.RcvAddr = ftxh.address.GetElrondCommunityAddress() + currTx.RcvAddr = ftxh.address.ElrondCommunityAddress() return currTx } From 84bdee3c58eef6a6148ab052f53d905a1653c9c6 Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Wed, 10 Jul 2019 09:23:23 +0300 Subject: [PATCH 014/234] process, node, integrationTests: adapt interceptors to validate header signatures on receive Adapt unit tests and integration tests --- cmd/node/factory/structs.go | 12 +- .../frontend/wallet/testInitializer.go | 2 + .../frontend/wallet/txInterception_test.go | 4 +- integrationTests/mock/hasherSpongeMock.go | 33 +++++ integrationTests/mock/multiSigMock.go | 14 ++- .../block/executingMiniblocks_test.go | 3 +- .../multiShard/block/testInitializer.go | 85 +++++++++++-- .../metablock/blocksDissemination_test.go | 7 +- .../multiShard/metablock/testInitializer.go | 52 +++++++- .../multiShard/transaction/testInitializer.go | 4 + .../multisig/belnevMultisig_test.go | 44 +++++++ .../block/interceptedRequestHdr_test.go | 57 ++++++++- .../interceptedRequestTxBlockBody_test.go | 7 +- .../singleShard/block/testInitializer.go | 15 ++- .../transaction/interceptedBulkTx_test.go | 6 +- .../transaction/interceptedResolvedTx_test.go | 9 +- .../transaction/testInitializer.go | 2 + node/node.go | 36 ++++-- process/block/interceptedBlockHeader.go | 51 +++++++- process/block/interceptedBlockHeader_test.go | 7 +- .../block/interceptors/headerInterceptor.go | 2 + .../interceptors/headerInterceptorBase.go | 13 +- .../headerInterceptorBase_test.go | 70 ++++++++++- .../interceptors/headerInterceptor_test.go | 67 ++++++++-- process/errors.go | 6 + .../metachain/interceptorsContainerFactory.go | 7 ++ .../interceptorsContainerFactory_test.go | 39 ++++++ .../shard/interceptorsContainerFactory.go | 7 ++ .../interceptorsContainerFactory_test.go | 51 ++++++++ process/metablock/shardHeaderInterceptor.go | 2 + .../metablock/shardHeaderInterceptor_test.go | 26 +++- process/mock/multiSigMock.go | 68 +++++++--- process/mock/nodesCoordinatorMock.go | 119 ++++++++++++++++++ sharding/indexHashedNodesCoordinator.go | 4 +- sharding/indexHashedNodesCoordinator_test.go | 30 ++--- 35 files changed, 857 insertions(+), 104 deletions(-) create mode 100644 integrationTests/mock/hasherSpongeMock.go create mode 100644 process/mock/nodesCoordinatorMock.go diff --git a/cmd/node/factory/structs.go b/cmd/node/factory/structs.go index 
e07b169d74b..076696196f9 100644 --- a/cmd/node/factory/structs.go +++ b/cmd/node/factory/structs.go @@ -390,6 +390,7 @@ type processComponentsFactoryArgs struct { nodesConfig *sharding.NodesSetup syncer ntp.SyncTimer shardCoordinator sharding.Coordinator + nodesCoordinator sharding.NodesCoordinator data *Data core *Core crypto *Crypto @@ -428,7 +429,7 @@ func NewProcessComponentsFactoryArgs( // ProcessComponentsFactory creates the process components func ProcessComponentsFactory(args *processComponentsFactoryArgs) (*Process, error) { interceptorContainerFactory, resolversContainerFactory, err := newInterceptorAndResolverContainerFactory( - args.shardCoordinator, args.data, args.core, args.crypto, args.state, args.network) + args.shardCoordinator, args.nodesCoordinator, args.data, args.core, args.crypto, args.state, args.network) if err != nil { return nil, err } @@ -1044,6 +1045,7 @@ func createNetMessenger( func newInterceptorAndResolverContainerFactory( shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, data *Data, core *Core, crypto *Crypto, @@ -1051,10 +1053,10 @@ func newInterceptorAndResolverContainerFactory( network *Network, ) (process.InterceptorsContainerFactory, dataRetriever.ResolversContainerFactory, error) { if shardCoordinator.SelfId() < shardCoordinator.NumberOfShards() { - return newShardInterceptorAndResolverContainerFactory(shardCoordinator, data, core, crypto, state, network) + return newShardInterceptorAndResolverContainerFactory(shardCoordinator, nodesCoordinator, data, core, crypto, state, network) } if shardCoordinator.SelfId() == sharding.MetachainShardId { - return newMetaInterceptorAndResolverContainerFactory(shardCoordinator, data, core, crypto, network) + return newMetaInterceptorAndResolverContainerFactory(shardCoordinator, nodesCoordinator, data, core, crypto, network) } return nil, nil, errors.New("could not create interceptor and resolver container factory") @@ -1062,6 +1064,7 @@ func newInterceptorAndResolverContainerFactory( func newShardInterceptorAndResolverContainerFactory( shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, data *Data, core *Core, crypto *Crypto, @@ -1071,6 +1074,7 @@ func newShardInterceptorAndResolverContainerFactory( //TODO add a real chronology validator and remove null chronology validator interceptorContainerFactory, err := shard.NewInterceptorsContainerFactory( shardCoordinator, + nodesCoordinator, network.NetMessenger, data.Store, core.Marshalizer, @@ -1109,6 +1113,7 @@ func newShardInterceptorAndResolverContainerFactory( func newMetaInterceptorAndResolverContainerFactory( shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, data *Data, core *Core, crypto *Crypto, @@ -1117,6 +1122,7 @@ func newMetaInterceptorAndResolverContainerFactory( //TODO add a real chronology validator and remove null chronology validator interceptorContainerFactory, err := metachain.NewInterceptorsContainerFactory( shardCoordinator, + nodesCoordinator, network.NetMessenger, data.Store, core.Marshalizer, diff --git a/integrationTests/frontend/wallet/testInitializer.go b/integrationTests/frontend/wallet/testInitializer.go index ffdaa753076..44910cc06a0 100644 --- a/integrationTests/frontend/wallet/testInitializer.go +++ b/integrationTests/frontend/wallet/testInitializer.go @@ -127,6 +127,7 @@ func createNetNode( dPool dataRetriever.PoolsHolder, accntAdapter state.AccountsAdapter, shardCoordinator sharding.Coordinator, + nodesCoordinator 
sharding.NodesCoordinator, ) ( *node.Node, p2p.Messenger, @@ -152,6 +153,7 @@ func createNetNode( interceptorContainerFactory, _ := shard.NewInterceptorsContainerFactory( shardCoordinator, + nodesCoordinator, messenger, store, marshalizer, diff --git a/integrationTests/frontend/wallet/txInterception_test.go b/integrationTests/frontend/wallet/txInterception_test.go index 63e57c56535..1016cbe1dc2 100644 --- a/integrationTests/frontend/wallet/txInterception_test.go +++ b/integrationTests/frontend/wallet/txInterception_test.go @@ -11,6 +11,7 @@ import ( "github.com/ElrondNetwork/elrond-go/data/transaction" "github.com/ElrondNetwork/elrond-go/sharding" "github.com/stretchr/testify/assert" + "github.com/ElrondNetwork/elrond-go/core/mock" ) func TestInterceptedTxFromFrontendGeneratedParamsWithoutData(t *testing.T) { @@ -59,8 +60,9 @@ func testInterceptedTxFromFrontendGeneratedParams( accntAdapter := createAccountsDB() shardCoordinator := &sharding.OneShardCoordinator{} + nodesCoordinator, _ := sharding.NewIndexHashedNodesCoordinator(1, mock.HasherMock{}, 0, 1) - n, _, sk, _ := createNetNode(dPool, accntAdapter, shardCoordinator) + n, _, sk, _ := createNetNode(dPool, accntAdapter, shardCoordinator, nodesCoordinator) //set the account's nonce to startingNonce nodePubKeyBytes, _ := sk.GeneratePublic().ToByteArray() diff --git a/integrationTests/mock/hasherSpongeMock.go b/integrationTests/mock/hasherSpongeMock.go new file mode 100644 index 00000000000..2a1c66b9318 --- /dev/null +++ b/integrationTests/mock/hasherSpongeMock.go @@ -0,0 +1,33 @@ +package mock + +import ( + "golang.org/x/crypto/blake2b" +) + +var hasherSpongeEmptyHash []byte + +const hashSize = 16 + +// HasherSpongeMock that will be used for testing +type HasherSpongeMock struct { +} + +// Compute will output the SHA's equivalent of the input string +func (sha HasherSpongeMock) Compute(s string) []byte { + h, _ := blake2b.New(hashSize, nil) + h.Write([]byte(s)) + return h.Sum(nil) +} + +// EmptyHash will return the equivalent of empty string SHA's +func (sha HasherSpongeMock) EmptyHash() []byte { + if len(hasherSpongeEmptyHash) == 0 { + hasherSpongeEmptyHash = sha.Compute("") + } + return hasherSpongeEmptyHash +} + +// Size returns the required size in bytes +func (HasherSpongeMock) Size() int { + return hashSize +} diff --git a/integrationTests/mock/multiSigMock.go b/integrationTests/mock/multiSigMock.go index d09ff62e568..9d83ea8035a 100644 --- a/integrationTests/mock/multiSigMock.go +++ b/integrationTests/mock/multiSigMock.go @@ -40,10 +40,10 @@ func NewMultiSigner(nrConsens uint32) *BelNevMock { multisigner.sigs = make([][]byte, nrConsens) multisigner.pubkeys = make([]string, nrConsens) - multisigner.aggCom = []byte("commitment") - multisigner.commHash = []byte("commitment") - multisigner.commSecret = []byte("commitment") - multisigner.aggSig = []byte("commitment") + multisigner.aggCom = []byte("agg commitment") + multisigner.commHash = []byte("commitment hash") + multisigner.commSecret = []byte("commitment secret") + multisigner.aggSig = []byte("aggregated signature") return multisigner } @@ -92,7 +92,11 @@ func (bnm *BelNevMock) SetAggregatedSig(aggSig []byte) error { // Verify returns nil if the aggregateed signature is verified for the given public keys func (bnm *BelNevMock) Verify(msg []byte, bitmap []byte) error { - return bnm.VerifyMock(msg, bitmap) + if bnm.VerifyMock != nil { + return bnm.VerifyMock(msg, bitmap) + } + + return nil } // CreateCommitment creates a secret commitment and the corresponding public commitment point 
diff --git a/integrationTests/multiShard/block/executingMiniblocks_test.go b/integrationTests/multiShard/block/executingMiniblocks_test.go index 1f91d61a53b..634bf30639e 100644 --- a/integrationTests/multiShard/block/executingMiniblocks_test.go +++ b/integrationTests/multiShard/block/executingMiniblocks_test.go @@ -114,6 +114,7 @@ func TestShouldProcessBlocksInMultiShardArchitecture(t *testing.T) { for _, n := range nodes { isNodeInSenderShardAndNotProposer := n.shardId == senderShard && n != proposerNode if isNodeInSenderShardAndNotProposer { + assert.NotNil(t, n.headers, "no headers received") n.blkc.SetGenesisHeaderHash(n.headers[0].GetPrevHash()) err := n.blkProcessor.ProcessBlock( n.blkc, @@ -466,7 +467,7 @@ func proposeBlock(t *testing.T, proposer *testNode, round uint32) (data.BodyHand blockHeader.SetRound(round) blockHeader.SetNonce(uint64(round)) - blockHeader.SetPubKeysBitmap(make([]byte, 0)) + blockHeader.SetPubKeysBitmap([]byte{1, 0, 0}) sig, _ := testMultiSig.AggregateSigs(nil) blockHeader.SetSignature(sig) currHdr := proposer.blkc.GetCurrentBlockHeader() diff --git a/integrationTests/multiShard/block/testInitializer.go b/integrationTests/multiShard/block/testInitializer.go index b64658c300a..40cc32e698d 100644 --- a/integrationTests/multiShard/block/testInitializer.go +++ b/integrationTests/multiShard/block/testInitializer.go @@ -55,6 +55,7 @@ import ( "github.com/ElrondNetwork/elrond-go/storage/storageUnit" "github.com/btcsuite/btcd/btcec" libp2pCrypto "github.com/libp2p/go-libp2p-core/crypto" + "math/big" ) var r *rand.Rand @@ -182,12 +183,14 @@ func createNetNode( dPool dataRetriever.PoolsHolder, accntAdapter state.AccountsAdapter, shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, targetShardId uint32, initialAddr string, ) ( *node.Node, p2p.Messenger, crypto.PrivateKey, + crypto.PublicKey, dataRetriever.ResolversFinder, process.BlockProcessor, data.ChainHandler) { @@ -217,6 +220,7 @@ func createNetNode( interceptorContainerFactory, _ := shard.NewInterceptorsContainerFactory( shardCoordinator, + nodesCoordinator, messenger, store, testMarshalizer, @@ -318,7 +322,7 @@ func createNetNode( fmt.Println(err.Error()) } - return n, messenger, sk, resolversFinder, blockProcessor, blkc + return n, messenger, sk, pk, resolversFinder, blockProcessor, blkc } func createMessengerWithKadDht(ctx context.Context, initialAddr string) p2p.Messenger { @@ -388,6 +392,21 @@ func displayAndStartNodes(nodes []*testNode) { } } +func genValidatorsFromPubKeys(pubKeysMap map[uint32][]string) map[uint32][]sharding.Validator { + validatorsMap := make(map[uint32][]sharding.Validator) + + for shardId, shardNodesPks := range pubKeysMap { + shardValidators := make([]sharding.Validator, 0) + for i := 0; i < len(shardNodesPks); i++ { + v, _ := consensus.NewValidator(big.NewInt(0), 1, []byte(shardNodesPks[i])) + shardValidators = append(shardValidators, v) + } + validatorsMap[shardId] = shardValidators + } + + return validatorsMap +} + func createNodes( numOfShards int, nodesPerShard int, @@ -397,24 +416,39 @@ func createNodes( //first node generated will have is pk belonging to firstSkShardId numMetaChainNodes := 1 nodes := make([]*testNode, int(numOfShards)*nodesPerShard+numMetaChainNodes) + nodesCoordinators := make(map[uint32][]sharding.NodesCoordinator) + nodesPublicKeys := make(map[uint32][]string) idx := 0 for shardId := 0; shardId < numOfShards; shardId++ { + shardNodesCoordinators := make([]sharding.NodesCoordinator, 0) + shardPubKeys := make([]string, 0) + for j 
:= 0; j < nodesPerShard; j++ { testNode := &testNode{ dPool: createTestShardDataPool(), shardId: uint32(shardId), } - shardCoordinator, _ := sharding.NewMultiShardCoordinator(uint32(numOfShards), uint32(shardId)) + nodesCoordinator, _ := sharding.NewIndexHashedNodesCoordinator( + 1, + testHasher, + uint32(shardId), + uint32(numOfShards), + ) + shardNodesCoordinators = append(shardNodesCoordinators, nodesCoordinator) + accntAdapter := createAccountsDB() - n, mes, sk, resFinder, blkProcessor, blkc := createNetNode( + n, mes, sk, pk, resFinder, blkProcessor, blkc := createNetNode( testNode.dPool, accntAdapter, shardCoordinator, + nodesCoordinator, testNode.shardId, serviceID, ) + pubKeyBytes, _ := pk.ToByteArray() + shardPubKeys = append(shardPubKeys, string(pubKeyBytes)) _ = n.CreateShardedStores() testNode.node = n @@ -459,20 +493,49 @@ func createNodes( nodes[idx] = testNode idx++ } + + nodesCoordinators[uint32(shardId)] = shardNodesCoordinators + nodesPublicKeys[uint32(shardId)] = shardPubKeys } - shardCoordinatorMeta, _ := sharding.NewMultiShardCoordinator(uint32(numOfShards), sharding.MetachainShardId) - tn := createMetaNetNode( - createTestMetaDataPool(), - createAccountsDB(), - shardCoordinatorMeta, - serviceID, - ) + metaNodesCoordinators := make([]sharding.NodesCoordinator, 0) + metaNodesPubKeys := make([]string, 0) + for i := 0; i < numMetaChainNodes; i++ { + shardCoordinatorMeta, _ := sharding.NewMultiShardCoordinator(uint32(numOfShards), sharding.MetachainShardId) + nodesCoordinator, _ := sharding.NewIndexHashedNodesCoordinator( + 1, + testHasher, + sharding.MetachainShardId, + uint32(numOfShards), + ) + + tn := createMetaNetNode( + createTestMetaDataPool(), + createAccountsDB(), + shardCoordinatorMeta, + nodesCoordinator, + serviceID, + ) + + metaNodesCoordinators = append(metaNodesCoordinators, nodesCoordinator) + pkBytes, _ := tn.pk.ToByteArray() + metaNodesPubKeys = append(metaNodesPubKeys, string(pkBytes)) + idx := i + int(numOfShards)*nodesPerShard nodes[idx] = tn } + nodesCoordinators[sharding.MetachainShardId] = metaNodesCoordinators + nodesPublicKeys[sharding.MetachainShardId] = metaNodesPubKeys + mapValidators := genValidatorsFromPubKeys(nodesPublicKeys) + + for _, shardCoord := range nodesCoordinators { + for j := 0; j < len(shardCoord); j++ { + shardCoord[j].LoadNodesPerShards(mapValidators) + } + } + return nodes } @@ -608,6 +671,7 @@ func createMetaNetNode( dPool dataRetriever.MetaPoolsHolder, accntAdapter state.AccountsAdapter, shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, initialAddr string, ) *testNode { @@ -628,6 +692,7 @@ func createMetaNetNode( interceptorContainerFactory, _ := metaProcess.NewInterceptorsContainerFactory( shardCoordinator, + nodesCoordinator, tn.messenger, store, testMarshalizer, diff --git a/integrationTests/multiShard/metablock/blocksDissemination_test.go b/integrationTests/multiShard/metablock/blocksDissemination_test.go index 9a3ffdf15c9..3e1d7aa039f 100644 --- a/integrationTests/multiShard/metablock/blocksDissemination_test.go +++ b/integrationTests/multiShard/metablock/blocksDissemination_test.go @@ -36,6 +36,7 @@ func TestHeadersAreReceivedByMetachainAndShard(t *testing.T) { numMetaNodes, senderShard, getConnectableAddress(advertiser), + testHasher, ) displayAndStartNodes(nodes) @@ -101,6 +102,7 @@ func TestHeadersAreResolvedByMetachainAndShard(t *testing.T) { numMetaNodes, senderShard, getConnectableAddress(advertiser), + testHasher, ) displayAndStartNodes(nodes) @@ -189,13 +191,12 @@ func 
TestHeadersAreResolvedByMetachainAndShard(t *testing.T) { for _, n := range nodes { assert.Equal(t, int32(2), atomic.LoadInt32(&n.metachainHdrRecv)) } - } func generateHeaderAndBody(senderShard uint32, recvShards ...uint32) (data.BodyHandler, data.HeaderHandler) { hdr := block.Header{ Nonce: 0, - PubKeysBitmap: []byte{255, 0}, + PubKeysBitmap: []byte{1, 0, 0}, Signature: []byte("signature"), PrevHash: []byte("prev hash"), TimeStamp: uint64(time.Now().Unix()), @@ -238,7 +239,7 @@ func generateHeaderAndBody(senderShard uint32, recvShards ...uint32) (data.BodyH func generateMetaHeader() data.HeaderHandler { hdr := block.MetaBlock{ Nonce: 0, - PubKeysBitmap: []byte{255, 0}, + PubKeysBitmap: []byte{1, 0, 0}, Signature: []byte("signature"), PrevHash: []byte("prev hash"), TimeStamp: uint64(time.Now().Unix()), diff --git a/integrationTests/multiShard/metablock/testInitializer.go b/integrationTests/multiShard/metablock/testInitializer.go index 6540e918163..32f12487eda 100644 --- a/integrationTests/multiShard/metablock/testInitializer.go +++ b/integrationTests/multiShard/metablock/testInitializer.go @@ -32,6 +32,7 @@ import ( "github.com/ElrondNetwork/elrond-go/dataRetriever/requestHandlers" "github.com/ElrondNetwork/elrond-go/dataRetriever/shardedData" "github.com/ElrondNetwork/elrond-go/display" + "github.com/ElrondNetwork/elrond-go/hashing" "github.com/ElrondNetwork/elrond-go/hashing/sha256" "github.com/ElrondNetwork/elrond-go/integrationTests/mock" "github.com/ElrondNetwork/elrond-go/marshal" @@ -52,6 +53,7 @@ import ( "github.com/ElrondNetwork/elrond-go/storage/storageUnit" "github.com/btcsuite/btcd/btcec" libp2pCrypto "github.com/libp2p/go-libp2p-core/crypto" + "math/big" ) var r *rand.Rand @@ -81,6 +83,21 @@ type testNode struct { //------- Common +func genValidatorsFromPubKeys(pubKeysMap map[uint32][]string) map[uint32][]sharding.Validator { + validatorsMap := make(map[uint32][]sharding.Validator) + + for shardId, shardNodesPks := range pubKeysMap { + shardValidators := make([]sharding.Validator, 0) + for i := 0; i < len(shardNodesPks); i++ { + v, _ := consensus.NewValidator(big.NewInt(0), 1, []byte(shardNodesPks[i])) + shardValidators = append(shardValidators, v) + } + validatorsMap[shardId] = shardValidators + } + + return validatorsMap +} + func createMemUnit() storage.Storer { cache, _ := storageUnit.NewCache(storageUnit.LRUCache, 10, 1) persist, _ := memorydb.New() @@ -171,26 +188,56 @@ func createNodes( nodesInMetachain int, senderShard uint32, initialAddr string, + hasher hashing.Hasher, ) []*testNode { + nodesCoordMap := make(map[uint32][]sharding.NodesCoordinator) + pkMap := make(map[uint32][]string) + nodes := make([]*testNode, nodesInMetachain+1) //first node is a shard node shardCoordinator, _ := sharding.NewMultiShardCoordinator(1, senderShard) + nodesCoordinator, _ := sharding.NewIndexHashedNodesCoordinator(1, hasher, senderShard, 1) + nodes[0] = createShardNetNode( createTestShardDataPool(), createAccountsDB(), shardCoordinator, + nodesCoordinator, initialAddr, ) + pk, _ := nodes[0].pk.ToByteArray() + shard0NodesCoord := make([]sharding.NodesCoordinator, 1) + shard0NodesCoord[0] = nodesCoordinator + nodesCoordMap[0] = shard0NodesCoord + pkMap[0] = []string{string(pk)} + + metaNodesCoordinators := make([]sharding.NodesCoordinator, 0) + metaPubKeys := make([]string, 0) for i := 0; i < nodesInMetachain; i++ { shardCoordinator, _ = sharding.NewMultiShardCoordinator(1, sharding.MetachainShardId) + nodesCoordinator, _ := sharding.NewIndexHashedNodesCoordinator(1, hasher, 
sharding.MetachainShardId, 1) nodes[i+1] = createMetaNetNode( createTestMetaDataPool(), createAccountsDB(), shardCoordinator, + nodesCoordinator, initialAddr, ) + metaNodesCoordinators = append(metaNodesCoordinators, nodesCoordinator) + pk, _ := nodes[i+1].pk.ToByteArray() + metaPubKeys = append(metaPubKeys, string(pk)) + } + + nodesCoordMap[sharding.MetachainShardId] = metaNodesCoordinators + pkMap[sharding.MetachainShardId] = metaPubKeys + valMap := genValidatorsFromPubKeys(pkMap) + + for _, nodeCoordList := range nodesCoordMap { + for _, nodeCoord := range nodeCoordList { + nodeCoord.LoadNodesPerShards(valMap) + } } return nodes @@ -261,11 +308,11 @@ func createShardNetNode( dPool dataRetriever.PoolsHolder, accntAdapter state.AccountsAdapter, shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, initialAddr string, ) *testNode { tn := testNode{} - tn.messenger = createMessengerWithKadDht(context.Background(), initialAddr) suite := kyber.NewBlakeSHA256Ed25519() singleSigner := &singlesig.SchnorrSigner{} @@ -283,6 +330,7 @@ func createShardNetNode( interceptorContainerFactory, _ := shard.NewInterceptorsContainerFactory( shardCoordinator, + nodesCoordinator, tn.messenger, store, testMarshalizer, @@ -442,6 +490,7 @@ func createMetaNetNode( dPool dataRetriever.MetaPoolsHolder, accntAdapter state.AccountsAdapter, shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, initialAddr string, ) *testNode { @@ -462,6 +511,7 @@ func createMetaNetNode( interceptorContainerFactory, _ := metaProcess.NewInterceptorsContainerFactory( shardCoordinator, + nodesCoordinator, tn.messenger, store, testMarshalizer, diff --git a/integrationTests/multiShard/transaction/testInitializer.go b/integrationTests/multiShard/transaction/testInitializer.go index 3709f27a5d8..75602d1fc15 100644 --- a/integrationTests/multiShard/transaction/testInitializer.go +++ b/integrationTests/multiShard/transaction/testInitializer.go @@ -215,6 +215,7 @@ func createNetNode( dPool dataRetriever.PoolsHolder, accntAdapter state.AccountsAdapter, shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, targetShardId uint32, initialAddr string, ) ( @@ -237,6 +238,7 @@ func createNetNode( interceptorContainerFactory, _ := shard.NewInterceptorsContainerFactory( shardCoordinator, + nodesCoordinator, messenger, store, marshalizer, @@ -369,6 +371,7 @@ func createNode( } shardCoordinator, _ := sharding.NewMultiShardCoordinator(uint32(numOfShards), uint32(shardId)) + nodesCoordinator, _ := sharding.NewIndexHashedNodesCoordinator(3, hasher, uint32(shardId), uint32(numOfShards)) accntAdapter := createAccountsDB() var n *node.Node var mes p2p.Messenger @@ -379,6 +382,7 @@ func createNode( testNode.dPool, accntAdapter, shardCoordinator, + nodesCoordinator, skShardId, serviceID, ) diff --git a/integrationTests/multisig/belnevMultisig_test.go b/integrationTests/multisig/belnevMultisig_test.go index 2d2fa0de35c..30f97aa7e01 100644 --- a/integrationTests/multisig/belnevMultisig_test.go +++ b/integrationTests/multisig/belnevMultisig_test.go @@ -228,3 +228,47 @@ func TestBelnev_MultiSigningMultipleSignersOK(t *testing.T) { err = verifySigAllSigners(multiSigners, message, aggSig, pubKeysStr, bitmap, consensusGroupSize) assert.Nil(t, err) } + +func TestBelnev_MultiSigningMultipleSignersVerifyWithNodesCoordinatorWrongGroup(t *testing.T) { + consensusGroupSize := uint16(21) + suite := kyber.NewBlakeSHA256Ed25519() + kg := signing.NewKeyGenerator(suite) + + privKeys, 
pubKeysStr := generateKeyPairs(kg, consensusGroupSize) + hasher := sha256.Sha256{} + + multiSigners, err := createMultiSigners(kg, hasher, privKeys, pubKeysStr) + assert.Nil(t, err) + + err = createAndSetCommitmentsAllSigners(multiSigners) + assert.Nil(t, err) + + bitmapSize := consensusGroupSize/8 + 1 + // set bitmap to select all 21 members + bitmap := make([]byte, bitmapSize) + byteMask := 0xFF + + for i := uint16(0); i < bitmapSize; i++ { + bitmap[i] = byte((((1 << consensusGroupSize) - 1) >> i) & byteMask) + } + + err = aggregateCommitmentsForAllSigners(multiSigners, bitmap, consensusGroupSize) + assert.Nil(t, err) + + message := []byte("message to be signed") + assert.Nil(t, err) + + err = createAndSetSignatureSharesAllSigners(multiSigners, message, bitmap) + assert.Nil(t, err) + + aggSig, err := aggregateSignatureSharesAllSigners(multiSigners, bitmap, consensusGroupSize) + assert.Nil(t, err) + + err = verifySigAllSigners(multiSigners, message, aggSig, pubKeysStr, bitmap, consensusGroupSize) + assert.Nil(t, err) +} + +func TestBelnev_MultiSigningMultipleSignersVerifyWithNodesCoordinatorOK(t *testing.T) { + +} + diff --git a/integrationTests/singleShard/block/interceptedRequestHdr_test.go b/integrationTests/singleShard/block/interceptedRequestHdr_test.go index eb0127ee7f1..93f05ad2cfa 100644 --- a/integrationTests/singleShard/block/interceptedRequestHdr_test.go +++ b/integrationTests/singleShard/block/interceptedRequestHdr_test.go @@ -2,10 +2,12 @@ package block import ( "fmt" + "math/big" "reflect" "testing" "time" + "github.com/ElrondNetwork/elrond-go/consensus" "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/dataRetriever/resolvers" "github.com/ElrondNetwork/elrond-go/hashing/sha256" @@ -15,6 +17,21 @@ import ( "github.com/stretchr/testify/assert" ) +func genValidatorsFromPubKeys(pubKeysMap map[uint32][]string) map[uint32][]sharding.Validator { + validatorsMap := make(map[uint32][]sharding.Validator) + + for shardId, shardNodesPks := range pubKeysMap { + shardValidators := make([]sharding.Validator, 0) + for i := 0; i < len(shardNodesPks); i++ { + v, _ := consensus.NewValidator(big.NewInt(0), 1, []byte(shardNodesPks[i])) + shardValidators = append(shardValidators, v) + } + validatorsMap[shardId] = shardValidators + } + + return validatorsMap +} + func TestNode_GenerateSendInterceptHeaderByNonceWithMemMessenger(t *testing.T) { if testing.Short() { t.Skip("this is not a short test") @@ -27,18 +44,37 @@ func TestNode_GenerateSendInterceptHeaderByNonceWithMemMessenger(t *testing.T) { dPoolResolver := createTestDataPool() shardCoordinator := &sharding.OneShardCoordinator{} + nodesCoordinator1, _ := sharding.NewIndexHashedNodesCoordinator(1, hasher, 0, 1) + nodesCoordinator2, _ := sharding.NewIndexHashedNodesCoordinator(1, hasher, 0, 1) fmt.Println("Requestor:") - nRequestor, mesRequestor, _, resolversFinder := createNetNode( + nRequestor, mesRequestor, _, pk1, multiSigner, resolversFinder := createNetNode( dPoolRequestor, createAccountsDB(), - shardCoordinator) + shardCoordinator, + nodesCoordinator1, + ) fmt.Println("Resolver:") - nResolver, mesResolver, _, _ := createNetNode( + nResolver, mesResolver, _, pk2, _, _ := createNetNode( dPoolResolver, createAccountsDB(), - shardCoordinator) + shardCoordinator, + nodesCoordinator2, + ) + + pubKeyMap := make(map[uint32][]string) + shard0PubKeys := make([]string, 2) + pk1Bytes, _ := pk1.ToByteArray() + pk2Bytes, _ := pk2.ToByteArray() + + shard0PubKeys[0] = string(pk1Bytes) + shard0PubKeys[1] = 
string(pk2Bytes) + + pubKeyMap[0] = shard0PubKeys + validatorsMap := genValidatorsFromPubKeys(pubKeyMap) + nodesCoordinator1.LoadNodesPerShards(validatorsMap) + nodesCoordinator2.LoadNodesPerShards(validatorsMap) nRequestor.Start() nResolver.Start() @@ -57,8 +93,8 @@ func TestNode_GenerateSendInterceptHeaderByNonceWithMemMessenger(t *testing.T) { //Step 1. Generate a header hdr := block.Header{ Nonce: 0, - PubKeysBitmap: []byte{255, 0}, - Signature: []byte("signature"), + PubKeysBitmap: nil, + Signature: nil, PrevHash: []byte("prev hash"), TimeStamp: uint64(time.Now().Unix()), Round: 1, @@ -72,6 +108,15 @@ func TestNode_GenerateSendInterceptHeaderByNonceWithMemMessenger(t *testing.T) { } hdrBuff, _ := marshalizer.Marshal(&hdr) + msig, _ := multiSigner.Create(shard0PubKeys, 0) + bitmap := []byte{1, 0, 0} + _, _ = msig.CreateSignatureShare(hdrBuff, bitmap) + aggSig, _ := msig.AggregateSigs(bitmap) + + hdr.PubKeysBitmap = bitmap + hdr.Signature = aggSig + + hdrBuff, _ = marshalizer.Marshal(&hdr) hdrHash := hasher.Compute(string(hdrBuff)) //Step 2. resolver has the header diff --git a/integrationTests/singleShard/block/interceptedRequestTxBlockBody_test.go b/integrationTests/singleShard/block/interceptedRequestTxBlockBody_test.go index 10bba376a20..d49ce454864 100644 --- a/integrationTests/singleShard/block/interceptedRequestTxBlockBody_test.go +++ b/integrationTests/singleShard/block/interceptedRequestTxBlockBody_test.go @@ -28,19 +28,22 @@ func TestNode_GenerateSendInterceptTxBlockBodyWithNetMessenger(t *testing.T) { dPoolResolver := createTestDataPool() shardCoordinator := &sharding.OneShardCoordinator{} + nodesCoordinator, _ := sharding.NewIndexHashedNodesCoordinator(1, hasher, 0, 1) fmt.Println("Requestor: ") - nRequestor, mesRequestor, _, resolversFinder := createNetNode( + nRequestor, mesRequestor, _, _, _, resolversFinder := createNetNode( dPoolRequestor, createAccountsDB(), shardCoordinator, + nodesCoordinator, ) fmt.Println("Resolver:") - nResolver, mesResolver, _, _ := createNetNode( + nResolver, mesResolver, _, _, _, _ := createNetNode( dPoolResolver, createAccountsDB(), shardCoordinator, + nodesCoordinator, ) nRequestor.Start() diff --git a/integrationTests/singleShard/block/testInitializer.go b/integrationTests/singleShard/block/testInitializer.go index f84853f75df..3ef42369763 100644 --- a/integrationTests/singleShard/block/testInitializer.go +++ b/integrationTests/singleShard/block/testInitializer.go @@ -13,6 +13,7 @@ import ( "github.com/ElrondNetwork/elrond-go/crypto" "github.com/ElrondNetwork/elrond-go/crypto/signing" "github.com/ElrondNetwork/elrond-go/crypto/signing/kyber" + llsig "github.com/ElrondNetwork/elrond-go/crypto/signing/kyber/multisig" "github.com/ElrondNetwork/elrond-go/crypto/signing/kyber/singlesig" "github.com/ElrondNetwork/elrond-go/crypto/signing/multisig" "github.com/ElrondNetwork/elrond-go/data" @@ -128,7 +129,8 @@ func createMultiSigner( publicKeys := make([]string, 1) pubKey, _ := publicKey.ToByteArray() publicKeys[0] = string(pubKey) - multiSigner, err := multisig.NewBelNevMultisig(hasher, publicKeys, privateKey, keyGen, 0) + llsigner := &llsig.KyberMultiSignerBLS{} + multiSigner, err := multisig.NewBLSMultisig(llsigner, hasher, publicKeys, privateKey, keyGen, 0) return multiSigner, err } @@ -152,24 +154,28 @@ func createNetNode( dPool dataRetriever.PoolsHolder, accntAdapter state.AccountsAdapter, shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, ) ( *node.Node, p2p.Messenger, crypto.PrivateKey, + crypto.PublicKey, + 
crypto.MultiSigner, dataRetriever.ResolversFinder) { hasher := sha256.Sha256{} + hasherSigning := &mock.HasherSpongeMock{} marshalizer := &marshal.JsonMarshalizer{} messenger := createMessenger(context.Background()) addrConverter, _ := addressConverters.NewPlainAddressConverter(32, "0x") - suite := kyber.NewBlakeSHA256Ed25519() + suite := kyber.NewSuitePairingBn256() singleSigner := &singlesig.SchnorrSigner{} keyGen := signing.NewKeyGenerator(suite) sk, pk := keyGen.GeneratePair() - multiSigner, _ := createMultiSigner(sk, pk, keyGen, hasher) + multiSigner, _ := createMultiSigner(sk, pk, keyGen, hasherSigning) blkc := createTestBlockChain() store := createTestStore() uint64Converter := uint64ByteSlice.NewBigEndianConverter() @@ -177,6 +183,7 @@ func createNetNode( interceptorContainerFactory, _ := shard.NewInterceptorsContainerFactory( shardCoordinator, + nodesCoordinator, messenger, store, marshalizer, @@ -222,7 +229,7 @@ func createNetNode( node.WithDataStore(store), ) - return n, messenger, sk, resolversFinder + return n, messenger, sk, pk, multiSigner, resolversFinder } func createMessenger(ctx context.Context) p2p.Messenger { diff --git a/integrationTests/singleShard/transaction/interceptedBulkTx_test.go b/integrationTests/singleShard/transaction/interceptedBulkTx_test.go index 22d9d66563e..05c985c16cd 100644 --- a/integrationTests/singleShard/transaction/interceptedBulkTx_test.go +++ b/integrationTests/singleShard/transaction/interceptedBulkTx_test.go @@ -14,6 +14,7 @@ import ( "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/sharding" "github.com/stretchr/testify/assert" + "github.com/ElrondNetwork/elrond-go/hashing/sha256" ) func TestNode_GenerateSendInterceptBulkTransactionsWithMessenger(t *testing.T) { @@ -21,6 +22,7 @@ func TestNode_GenerateSendInterceptBulkTransactionsWithMessenger(t *testing.T) { t.Skip("this is not a short test") } + hasher := sha256.Sha256{} dPool := createTestDataPool() startingNonce := uint64(6) @@ -29,8 +31,8 @@ func TestNode_GenerateSendInterceptBulkTransactionsWithMessenger(t *testing.T) { accntAdapter := createAccountsDB() shardCoordinator := &sharding.OneShardCoordinator{} - - n, _, sk, _ := createNetNode(dPool, accntAdapter, shardCoordinator) + nodesCoordinator, _ := sharding.NewIndexHashedNodesCoordinator(1, hasher, 0, 1) + n, _, sk, _ := createNetNode(dPool, accntAdapter, shardCoordinator, nodesCoordinator) n.Start() defer n.Stop() diff --git a/integrationTests/singleShard/transaction/interceptedResolvedTx_test.go b/integrationTests/singleShard/transaction/interceptedResolvedTx_test.go index fe529002bf9..865161d1dd1 100644 --- a/integrationTests/singleShard/transaction/interceptedResolvedTx_test.go +++ b/integrationTests/singleShard/transaction/interceptedResolvedTx_test.go @@ -29,18 +29,23 @@ func TestNode_RequestInterceptTransactionWithMessenger(t *testing.T) { dPoolResolver := createTestDataPool() shardCoordinator := &sharding.OneShardCoordinator{} + nodesCoordinator, _ := sharding.NewIndexHashedNodesCoordinator(1, hasher, 0, 1) fmt.Println("Requestor:") nRequestor, mesRequestor, sk1, resolversFinder := createNetNode( dPoolRequestor, createAccountsDB(), - shardCoordinator) + shardCoordinator, + nodesCoordinator, + ) fmt.Println("Resolver:") nResolver, mesResolver, _, _ := createNetNode( dPoolResolver, createAccountsDB(), - shardCoordinator) + shardCoordinator, + nodesCoordinator, + ) nRequestor.Start() nResolver.Start() diff --git a/integrationTests/singleShard/transaction/testInitializer.go 
b/integrationTests/singleShard/transaction/testInitializer.go index 928056d6e7f..fb991c8e27d 100644 --- a/integrationTests/singleShard/transaction/testInitializer.go +++ b/integrationTests/singleShard/transaction/testInitializer.go @@ -165,6 +165,7 @@ func createNetNode( dPool dataRetriever.PoolsHolder, accntAdapter state.AccountsAdapter, shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, ) ( *node.Node, p2p.Messenger, @@ -190,6 +191,7 @@ func createNetNode( interceptorContainerFactory, _ := shard.NewInterceptorsContainerFactory( shardCoordinator, + nodesCoordinator, messenger, store, marshalizer, diff --git a/node/node.go b/node/node.go index 6bcbeb943d2..edb0fd72c51 100644 --- a/node/node.go +++ b/node/node.go @@ -484,7 +484,7 @@ func (n *Node) createConsensusState() (*spos.ConsensusState, error) { // createNodesCoordinator creates a index hashed group selector object func (n *Node) createNodesCoordinator() (sharding.NodesCoordinator, error) { - nCoordinator, err := sharding.NewIndexHashedGroupSelector( + nCoordinator, err := sharding.NewIndexHashedNodesCoordinator( n.consensusGroupSize, n.hasher, n.shardCoordinator.SelfId(), @@ -498,19 +498,17 @@ func (n *Node) createNodesCoordinator() (sharding.NodesCoordinator, error) { nbShards := n.shardCoordinator.NumberOfShards() for sh := uint32(0); sh < nbShards; sh++ { - nodesInShard := len(n.initialNodesPubkeys[sh]) - nodesMap[sh] = make([]sharding.Validator, nodesInShard) - - for i := 0; i < nodesInShard; i++ { - validator, err := consensus.NewValidator(big.NewInt(0), 0, []byte(n.initialNodesPubkeys[sh][i])) - if err != nil { - return nil, err - } - - nodesMap[sh][i] = validator + err = n.createValidatorsForShard(nodesMap, sh) + if err != nil { + return nil, err } } + err = n.createValidatorsForShard(nodesMap, sharding.MetachainShardId) + if err != nil { + return nil, err + } + err = nCoordinator.LoadNodesPerShards(nodesMap) if err != nil { return nil, err @@ -519,6 +517,22 @@ func (n *Node) createNodesCoordinator() (sharding.NodesCoordinator, error) { return nCoordinator, nil } +func (n *Node) createValidatorsForShard(nodesMap map[uint32][]sharding.Validator, shId uint32) (err error) { + nodesInShard := len(n.initialNodesPubkeys[shId]) + nodesMap[shId] = make([]sharding.Validator, nodesInShard) + + for i := 0; i < nodesInShard; i++ { + validator, err := consensus.NewValidator(big.NewInt(0), 0, []byte(n.initialNodesPubkeys[shId][i])) + if err != nil { + return err + } + + nodesMap[shId][i] = validator + } + + return nil +} + // createConsensusTopic creates a consensus topic for node func (n *Node) createConsensusTopic(messageProcessor p2p.MessageProcessor, shardCoordinator sharding.Coordinator) error { if shardCoordinator == nil { diff --git a/process/block/interceptedBlockHeader.go b/process/block/interceptedBlockHeader.go index 8875ab7e96a..a4aae581f79 100644 --- a/process/block/interceptedBlockHeader.go +++ b/process/block/interceptedBlockHeader.go @@ -5,6 +5,7 @@ import ( "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/marshal" ) // InterceptedHeader represents the wrapper over HeaderWrapper struct. 
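A minimal aside on the bitmap convention the reworked VerifySig below relies on (the tests in this patch set PubKeysBitmap to []byte{1}): bit i of the bitmap marks whether consensus member i contributed to the aggregated signature, and the block proposer is expected at bit 0 of byte 0, which is why VerifySig rejects headers where bitmap[0]&1 == 0. The helper below is only an illustrative sketch of that convention; isSignerSet does not exist in the repository.

package main

import "fmt"

// isSignerSet reports whether the validator at position idx is marked as a signer
// in the pub keys bitmap (least significant bit of byte 0 is position 0).
func isSignerSet(bitmap []byte, idx int) bool {
	byteIdx := idx / 8
	if byteIdx >= len(bitmap) {
		return false
	}
	mask := byte(1) << uint(idx%8)
	return bitmap[byteIdx]&mask != 0
}

func main() {
	bitmap := []byte{1, 0, 0} // only the proposer (position 0) signed

	fmt.Println(isSignerSet(bitmap, 0)) // true: proposer signature is present
	fmt.Println(isSignerSet(bitmap, 1)) // false: member 1 did not sign
}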
@@ -14,18 +15,24 @@ type InterceptedHeader struct { multiSigVerifier crypto.MultiSigVerifier chronologyValidator process.ChronologyValidator hash []byte + nodesCoordinator sharding.NodesCoordinator + marshalizer marshal.Marshalizer } // NewInterceptedHeader creates a new instance of InterceptedHeader struct func NewInterceptedHeader( multiSigVerifier crypto.MultiSigVerifier, chronologyValidator process.ChronologyValidator, + nodesCoordinator sharding.NodesCoordinator, + marshalizer marshal.Marshalizer, ) *InterceptedHeader { return &InterceptedHeader{ Header: &block.Header{}, multiSigVerifier: multiSigVerifier, chronologyValidator: chronologyValidator, + nodesCoordinator: nodesCoordinator, + marshalizer: marshalizer, } } @@ -121,10 +128,46 @@ func (inHdr *InterceptedHeader) validityCheck() error { // VerifySig verifies a signature func (inHdr *InterceptedHeader) VerifySig() error { - // TODO: Check block signature after multisig will be implemented - // TODO: the interceptors do not have access yet to consensus group selection to validate multisigs - // TODO: verify that the block proposer is among the signers and in the bitmap - return nil + randSeed := inHdr.GetPrevRandSeed() + bitmap := inHdr.GetPubKeysBitmap() + + if len(bitmap) == 0 { + return process.ErrNilPubKeysBitmap + } + + if bitmap[0]&1 == 0 { + return process.ErrBlockProposerSignatureMissing + + } + consensusPubKeys, err := inHdr.nodesCoordinator.GetValidatorsPublicKeys(randSeed) + if err != nil { + return err + } + + verifier, err := inHdr.multiSigVerifier.Create(consensusPubKeys, 0) + if err != nil { + return err + } + + err = verifier.SetAggregatedSig(inHdr.Signature) + if err != nil { + return err + } + + // get marshalled block header without signature and bitmap + // as this is the message that was signed + headerCopy := *inHdr.Header + headerCopy.Signature = nil + headerCopy.PubKeysBitmap = nil + + headerBytes, err := inHdr.marshalizer.Marshal(headerCopy) + if err != nil { + return err + } + + err = verifier.Verify(headerBytes, bitmap) + + return err } func (inHdr *InterceptedHeader) validatePeerBlock() error { diff --git a/process/block/interceptedBlockHeader_test.go b/process/block/interceptedBlockHeader_test.go index ead332a440a..aa8fea0d37f 100644 --- a/process/block/interceptedBlockHeader_test.go +++ b/process/block/interceptedBlockHeader_test.go @@ -11,6 +11,7 @@ import ( ) func createTestInterceptedHeader() *block.InterceptedHeader { + return block.NewInterceptedHeader( mock.NewMultiSigner(), &mock.ChronologyValidatorStub{ @@ -18,6 +19,8 @@ func createTestInterceptedHeader() *block.InterceptedHeader { return nil }, }, + &mock.NodesCoordinatorMock{}, + &mock.MarshalizerMock{Fail: false}, ) } @@ -251,6 +254,8 @@ func TestInterceptedHeader_IntegrityAndValidityNilChronologyValidatorShouldErr(t hdr := block.NewInterceptedHeader( mock.NewMultiSigner(), nil, + &mock.NodesCoordinatorMock{}, + &mock.MarshalizerMock{Fail: false}, ) hdr.PrevHash = make([]byte, 0) hdr.PubKeysBitmap = make([]byte, 0) @@ -283,7 +288,7 @@ func TestInterceptedHeader_VerifySigOkValsShouldWork(t *testing.T) { hdr := createTestInterceptedHeader() hdr.PrevHash = make([]byte, 0) - hdr.PubKeysBitmap = make([]byte, 0) + hdr.PubKeysBitmap = []byte{1} hdr.BlockBodyType = block2.PeerBlock hdr.Signature = make([]byte, 0) hdr.RootHash = make([]byte, 0) diff --git a/process/block/interceptors/headerInterceptor.go b/process/block/interceptors/headerInterceptor.go index addd6b20a5e..7abf703fd56 100644 --- a/process/block/interceptors/headerInterceptor.go +++ 
b/process/block/interceptors/headerInterceptor.go @@ -30,6 +30,7 @@ func NewHeaderInterceptor( multiSigVerifier crypto.MultiSigVerifier, hasher hashing.Hasher, shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, chronologyValidator process.ChronologyValidator, ) (*HeaderInterceptor, error) { @@ -45,6 +46,7 @@ func NewHeaderInterceptor( multiSigVerifier, hasher, shardCoordinator, + nodesCoordinator, chronologyValidator, ) if err != nil { diff --git a/process/block/interceptors/headerInterceptorBase.go b/process/block/interceptors/headerInterceptorBase.go index b10892f8f02..4fbc2ba49fc 100644 --- a/process/block/interceptors/headerInterceptorBase.go +++ b/process/block/interceptors/headerInterceptorBase.go @@ -18,6 +18,7 @@ type HeaderInterceptorBase struct { multiSigVerifier crypto.MultiSigVerifier hasher hashing.Hasher shardCoordinator sharding.Coordinator + nodesCoordinator sharding.NodesCoordinator chronologyValidator process.ChronologyValidator } @@ -28,6 +29,7 @@ func NewHeaderInterceptorBase( multiSigVerifier crypto.MultiSigVerifier, hasher hashing.Hasher, shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, chronologyValidator process.ChronologyValidator, ) (*HeaderInterceptorBase, error) { if marshalizer == nil { @@ -45,6 +47,9 @@ func NewHeaderInterceptorBase( if shardCoordinator == nil { return nil, process.ErrNilShardCoordinator } + if nodesCoordinator == nil { + return nil, process.ErrNilNodesCoordinator + } if chronologyValidator == nil { return nil, process.ErrNilChronologyValidator } @@ -55,6 +60,7 @@ func NewHeaderInterceptorBase( multiSigVerifier: multiSigVerifier, hasher: hasher, shardCoordinator: shardCoordinator, + nodesCoordinator: nodesCoordinator, chronologyValidator: chronologyValidator, } @@ -71,7 +77,12 @@ func (hib *HeaderInterceptorBase) ParseReceivedMessage(message p2p.MessageP2P) ( return nil, process.ErrNilDataToProcess } - hdrIntercepted := block.NewInterceptedHeader(hib.multiSigVerifier, hib.chronologyValidator) + hdrIntercepted := block.NewInterceptedHeader( + hib.multiSigVerifier, + hib.chronologyValidator, + hib.nodesCoordinator, + hib.marshalizer, + ) err := hib.marshalizer.Unmarshal(hdrIntercepted, message.Data()) if err != nil { return nil, err diff --git a/process/block/interceptors/headerInterceptorBase_test.go b/process/block/interceptors/headerInterceptorBase_test.go index 4760786ad5d..4a9841ae8cc 100644 --- a/process/block/interceptors/headerInterceptorBase_test.go +++ b/process/block/interceptors/headerInterceptorBase_test.go @@ -10,6 +10,10 @@ import ( "github.com/ElrondNetwork/elrond-go/process/block/interceptors" "github.com/ElrondNetwork/elrond-go/process/mock" "github.com/stretchr/testify/assert" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/consensus" + "math/big" + "fmt" ) //------- NewHeaderInterceptorBase @@ -24,6 +28,7 @@ func TestNewHeaderInterceptorBase_NilMarshalizerShouldErr(t *testing.T) { mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), &mock.ChronologyValidatorStub{}, ) @@ -40,6 +45,7 @@ func TestNewHeaderInterceptorBase_NilStorerShouldErr(t *testing.T) { mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), &mock.ChronologyValidatorStub{}, ) @@ -57,6 +63,7 @@ func TestNewHeaderInterceptorBase_NilMultiSignerShouldErr(t *testing.T) { nil, mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), + 
mock.NewNodesCoordinatorMock(), &mock.ChronologyValidatorStub{}, ) @@ -74,6 +81,7 @@ func TestNewHeaderInterceptorBase_NilHasherShouldErr(t *testing.T) { mock.NewMultiSigner(), nil, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), &mock.ChronologyValidatorStub{}, ) @@ -91,6 +99,7 @@ func TestNewHeaderInterceptorBase_NilShardCoordinatorShouldErr(t *testing.T) { mock.NewMultiSigner(), mock.HasherMock{}, nil, + mock.NewNodesCoordinatorMock(), &mock.ChronologyValidatorStub{}, ) @@ -98,6 +107,24 @@ func TestNewHeaderInterceptorBase_NilShardCoordinatorShouldErr(t *testing.T) { assert.Nil(t, hi) } +func TestNewHeaderInterceptorBase_NilNodesCoordinatorShouldErr(t *testing.T) { + t.Parallel() + + storer := &mock.StorerStub{} + hi, err := interceptors.NewHeaderInterceptorBase( + &mock.MarshalizerMock{}, + storer, + mock.NewMultiSigner(), + mock.HasherMock{}, + mock.NewOneShardCoordinatorMock(), + nil, + &mock.ChronologyValidatorStub{}, + ) + + assert.Equal(t, process.ErrNilNodesCoordinator, err) + assert.Nil(t, hi) +} + func TestNewHeaderInterceptorBase_OkValsShouldWork(t *testing.T) { t.Parallel() @@ -108,6 +135,7 @@ func TestNewHeaderInterceptorBase_OkValsShouldWork(t *testing.T) { mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), &mock.ChronologyValidatorStub{}, ) @@ -127,6 +155,7 @@ func TestHeaderInterceptorBase_ParseReceivedMessageNilMessageShouldErr(t *testin mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), &mock.ChronologyValidatorStub{}, ) @@ -146,6 +175,7 @@ func TestHeaderInterceptorBase_ParseReceivedMessageNilDataToProcessShouldErr(t * mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), &mock.ChronologyValidatorStub{}, ) @@ -172,6 +202,7 @@ func TestHeaderInterceptorBase_ParseReceivedMessageMarshalizerErrorsAtUnmarshali mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), &mock.ChronologyValidatorStub{}, ) @@ -195,16 +226,19 @@ func TestHeaderInterceptorBase_ParseReceivedMessageSanityCheckFailedShouldErr(t return nil }, } + + nodesCoordinator := mock.NewNodesCoordinatorMock() hib, _ := interceptors.NewHeaderInterceptorBase( marshalizer, storer, multisigner, mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), + nodesCoordinator, chronologyValidator, ) - hdr := block.NewInterceptedHeader(multisigner, chronologyValidator) + hdr := block.NewInterceptedHeader(multisigner, chronologyValidator, nodesCoordinator, marshalizer) buff, _ := marshalizer.Marshal(hdr) msg := &mock.P2PMessageMock{ DataField: buff, @@ -215,6 +249,32 @@ func TestHeaderInterceptorBase_ParseReceivedMessageSanityCheckFailedShouldErr(t assert.Equal(t, process.ErrNilPubKeysBitmap, err) } +func createNodesCoordinator() sharding.NodesCoordinator { + validators := make(map[uint32][]sharding.Validator, 0) + + //shard0 + shardValidators := make([]sharding.Validator, 0) + for i := 0; i < 16; i++ { + pubKeyStr := fmt.Sprintf("pk_shard0_%d", i) + v, _ := consensus.NewValidator(big.NewInt(0), 1, []byte(pubKeyStr)) + shardValidators = append(shardValidators, v) + } + + //metachain + metachainValidators := make([]sharding.Validator, 0) + pubKeyBytes := []byte("pk_meta") + v, _ := consensus.NewValidator(big.NewInt(0), 1, pubKeyBytes) + metachainValidators = append(metachainValidators, v) + + validators[0] = shardValidators + validators[sharding.MetachainShardId] = 
metachainValidators + + nodesCoordinator := mock.NewNodesCoordinatorMock() + nodesCoordinator.LoadNodesPerShards(validators) + + return nodesCoordinator +} + func TestHeaderInterceptorBase_ParseReceivedMessageValsOkShouldWork(t *testing.T) { t.Parallel() @@ -230,20 +290,24 @@ func TestHeaderInterceptorBase_ParseReceivedMessageValsOkShouldWork(t *testing.T storer.HasCalled = func(key []byte) error { return errors.New("Key not found") } + + nodesCoordinator := createNodesCoordinator() + hib, _ := interceptors.NewHeaderInterceptorBase( marshalizer, storer, multisigner, mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), + nodesCoordinator, chronologyValidator, ) - hdr := block.NewInterceptedHeader(multisigner, chronologyValidator) + hdr := block.NewInterceptedHeader(multisigner, chronologyValidator, nodesCoordinator, marshalizer) hdr.Nonce = testedNonce hdr.ShardId = 0 hdr.PrevHash = make([]byte, 0) - hdr.PubKeysBitmap = make([]byte, 0) + hdr.PubKeysBitmap = []byte{1} hdr.BlockBodyType = dataBlock.TxBlock hdr.Signature = make([]byte, 0) hdr.SetHash([]byte("aaa")) diff --git a/process/block/interceptors/headerInterceptor_test.go b/process/block/interceptors/headerInterceptor_test.go index b7d2f15bec2..1bea9e9dfeb 100644 --- a/process/block/interceptors/headerInterceptor_test.go +++ b/process/block/interceptors/headerInterceptor_test.go @@ -13,10 +13,39 @@ import ( "github.com/ElrondNetwork/elrond-go/process/block/interceptors" "github.com/ElrondNetwork/elrond-go/process/mock" "github.com/stretchr/testify/assert" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/consensus" + "math/big" + "fmt" ) var durTimeout = time.Duration(time.Second) +func generateValidatorsMap(shardSize, metachainSize, nbShards uint32) map[uint32][]sharding.Validator { + nodes := make(map[uint32][]sharding.Validator) + + // shards + for shard := uint32(0); shard < nbShards; shard++ { + shardNodes := make([]sharding.Validator, 0) + for valIdx := uint32(0); valIdx < shardSize; valIdx++ { + pk := fmt.Sprintf("pubKey_sh%d_node%d", shard, valIdx) + v, _ := consensus.NewValidator(big.NewInt(0), 1, []byte(pk)) + shardNodes = append(shardNodes, v) + } + nodes[shard] = shardNodes + } + + metaNodes := make([]sharding.Validator, 0) + for mValIdx := uint32(0); mValIdx < metachainSize; mValIdx++ { + pk := fmt.Sprintf("pubKey_meta_node%d", mValIdx) + v, _ := consensus.NewValidator(big.NewInt(0), 1, []byte(pk)) + metaNodes = append(metaNodes, v) + } + nodes[sharding.MetachainShardId] = metaNodes + + return nodes +} + //------- NewHeaderInterceptor func TestNewHeaderInterceptor_NilMarshalizerShouldErr(t *testing.T) { @@ -34,6 +63,7 @@ func TestNewHeaderInterceptor_NilMarshalizerShouldErr(t *testing.T) { mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), &mock.ChronologyValidatorStub{}, ) @@ -55,6 +85,7 @@ func TestNewHeaderInterceptor_NilHeadersShouldErr(t *testing.T) { mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), &mock.ChronologyValidatorStub{}, ) @@ -76,6 +107,7 @@ func TestNewHeaderInterceptor_NilHeadersNoncesShouldErr(t *testing.T) { mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), &mock.ChronologyValidatorStub{}, ) @@ -98,6 +130,7 @@ func TestNewHeaderInterceptor_OkValsShouldWork(t *testing.T) { mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), 
&mock.ChronologyValidatorStub{}, ) @@ -122,6 +155,7 @@ func TestHeaderInterceptor_ProcessReceivedMessageNilMessageShouldErr(t *testing. mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), &mock.ChronologyValidatorStub{}, ) @@ -157,6 +191,10 @@ func TestHeaderInterceptor_ProcessReceivedMessageValsOkShouldWork(t *testing.T) return errors.New("Key not found") } + nodesCoordinator := mock.NewNodesCoordinatorMock() + nodes := generateValidatorsMap(3, 3, 1) + nodesCoordinator.LoadNodesPerShards(nodes) + hi, _ := interceptors.NewHeaderInterceptor( marshalizer, headers, @@ -165,14 +203,15 @@ func TestHeaderInterceptor_ProcessReceivedMessageValsOkShouldWork(t *testing.T) multisigner, mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), + nodesCoordinator, chronologyValidator, ) - hdr := block.NewInterceptedHeader(multisigner, chronologyValidator) + hdr := block.NewInterceptedHeader(multisigner, chronologyValidator, nodesCoordinator, marshalizer) hdr.Nonce = testedNonce hdr.ShardId = 0 hdr.PrevHash = make([]byte, 0) - hdr.PubKeysBitmap = make([]byte, 0) + hdr.PubKeysBitmap = []byte{1} hdr.BlockBodyType = dataBlock.TxBlock hdr.Signature = make([]byte, 0) hdr.SetHash([]byte("aaa")) @@ -235,6 +274,10 @@ func TestHeaderInterceptor_ProcessReceivedMessageIsInStorageShouldNotAdd(t *test return nil } + nodesCoordinator := mock.NewNodesCoordinatorMock() + nodes := generateValidatorsMap(3, 3, 1) + nodesCoordinator.LoadNodesPerShards(nodes) + hi, _ := interceptors.NewHeaderInterceptor( marshalizer, headers, @@ -243,14 +286,15 @@ func TestHeaderInterceptor_ProcessReceivedMessageIsInStorageShouldNotAdd(t *test multisigner, mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), + nodesCoordinator, chronologyValidator, ) - hdr := block.NewInterceptedHeader(multisigner, chronologyValidator) + hdr := block.NewInterceptedHeader(multisigner, chronologyValidator, nodesCoordinator, marshalizer) hdr.Nonce = testedNonce hdr.ShardId = 0 hdr.PrevHash = make([]byte, 0) - hdr.PubKeysBitmap = make([]byte, 0) + hdr.PubKeysBitmap = []byte{1} hdr.BlockBodyType = dataBlock.TxBlock hdr.Signature = make([]byte, 0) hdr.RootHash = make([]byte, 0) @@ -310,6 +354,15 @@ func TestHeaderInterceptor_ProcessReceivedMessageNotForCurrentShardShouldNotAdd( shardCoordinator.CurrentShard = 2 shardCoordinator.SetNoShards(5) + nodesCoordinator := &mock.NodesCoordinatorMock{ + NbShards: 5, + ConsensusSize: 1, + ShardId: 2, + } + + nodes := generateValidatorsMap(3, 3, 5) + nodesCoordinator.LoadNodesPerShards(nodes) + hi, _ := interceptors.NewHeaderInterceptor( marshalizer, headers, @@ -318,14 +371,15 @@ func TestHeaderInterceptor_ProcessReceivedMessageNotForCurrentShardShouldNotAdd( multisigner, mock.HasherMock{}, shardCoordinator, + nodesCoordinator, chronologyValidator, ) - hdr := block.NewInterceptedHeader(multisigner, chronologyValidator) + hdr := block.NewInterceptedHeader(multisigner, chronologyValidator, nodesCoordinator, marshalizer) hdr.Nonce = testedNonce hdr.ShardId = 0 hdr.PrevHash = make([]byte, 0) - hdr.PubKeysBitmap = make([]byte, 0) + hdr.PubKeysBitmap = []byte{1} hdr.BlockBodyType = dataBlock.TxBlock hdr.Signature = make([]byte, 0) hdr.RootHash = make([]byte, 0) @@ -348,5 +402,4 @@ func TestHeaderInterceptor_ProcessReceivedMessageNotForCurrentShardShouldNotAdd( } assert.Nil(t, hi.ProcessReceivedMessage(msg)) - } diff --git a/process/errors.go b/process/errors.go index 56d3bc01f77..202024e01d6 100644 --- a/process/errors.go +++ b/process/errors.go @@ -163,6 +163,9 @@ var 
ErrNegativeValue = errors.New("negative value") // ErrNilShardCoordinator signals that an operation has been attempted to or with a nil shard coordinator var ErrNilShardCoordinator = errors.New("nil shard coordinator") +// ErrNilNodesCoordinator signals that an operation has been attempted to or with a nil nodes coordinator +var ErrNilNodesCoordinator = errors.New("nil nodes coordinator") + // ErrInvalidRcvAddr signals that an operation has been attempted to or with an invalid receiver address var ErrInvalidRcvAddr = errors.New("invalid receiver address") @@ -175,6 +178,9 @@ var ErrNilKeyGen = errors.New("nil key generator") // ErrNilSingleSigner signals that a nil single signer is used var ErrNilSingleSigner = errors.New("nil single signer") +// ErrBlockProposerSignatureMissing signals that block proposer signature is missing from the block aggregated sig +var ErrBlockProposerSignatureMissing = errors.New("block proposer signature is missing") + // ErrNilMultiSigVerifier signals that a nil multi-signature verifier is used var ErrNilMultiSigVerifier = errors.New("nil multi-signature verifier") diff --git a/process/factory/metachain/interceptorsContainerFactory.go b/process/factory/metachain/interceptorsContainerFactory.go index 68cc37cc35d..b64c3c91f37 100644 --- a/process/factory/metachain/interceptorsContainerFactory.go +++ b/process/factory/metachain/interceptorsContainerFactory.go @@ -20,6 +20,7 @@ type interceptorsContainerFactory struct { store dataRetriever.StorageService dataPool dataRetriever.MetaPoolsHolder shardCoordinator sharding.Coordinator + nodesCoordinator sharding.NodesCoordinator messenger process.TopicHandler multiSigner crypto.MultiSigner chronologyValidator process.ChronologyValidator @@ -29,6 +30,7 @@ type interceptorsContainerFactory struct { // NewInterceptorsContainerFactory is responsible for creating a new interceptors factory object func NewInterceptorsContainerFactory( shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, messenger process.TopicHandler, store dataRetriever.StorageService, marshalizer marshal.Marshalizer, @@ -41,6 +43,9 @@ func NewInterceptorsContainerFactory( if shardCoordinator == nil { return nil, process.ErrNilShardCoordinator } + if nodesCoordinator == nil { + return nil, process.ErrNilNodesCoordinator + } if messenger == nil { return nil, process.ErrNilMessenger } @@ -65,6 +70,7 @@ func NewInterceptorsContainerFactory( return &interceptorsContainerFactory{ shardCoordinator: shardCoordinator, + nodesCoordinator: nodesCoordinator, messenger: messenger, store: store, marshalizer: marshalizer, @@ -175,6 +181,7 @@ func (icf *interceptorsContainerFactory) createOneShardHeaderInterceptor(identif icf.multiSigner, icf.hasher, icf.shardCoordinator, + icf.nodesCoordinator, icf.chronologyValidator, ) if err != nil { diff --git a/process/factory/metachain/interceptorsContainerFactory_test.go b/process/factory/metachain/interceptorsContainerFactory_test.go index 34c6d3ebd72..442ae071b69 100644 --- a/process/factory/metachain/interceptorsContainerFactory_test.go +++ b/process/factory/metachain/interceptorsContainerFactory_test.go @@ -81,6 +81,7 @@ func TestNewInterceptorsContainerFactory_NilShardCoordinatorShouldErr(t *testing icf, err := metachain.NewInterceptorsContainerFactory( nil, + mock.NewNodesCoordinatorMock(), &mock.TopicHandlerStub{}, createStore(), &mock.MarshalizerMock{}, @@ -94,11 +95,31 @@ func TestNewInterceptorsContainerFactory_NilShardCoordinatorShouldErr(t *testing assert.Equal(t, 
process.ErrNilShardCoordinator, err) } +func TestNewInterceptorsContainerFactory_NilNodesCoordinatorShouldErr(t *testing.T) { + t.Parallel() + + icf, err := metachain.NewInterceptorsContainerFactory( + mock.NewOneShardCoordinatorMock(), + nil, + &mock.TopicHandlerStub{}, + createStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + mock.NewMultiSigner(), + createDataPools(), + &mock.ChronologyValidatorStub{}, + ) + + assert.Nil(t, icf) + assert.Equal(t, process.ErrNilNodesCoordinator, err) +} + func TestNewInterceptorsContainerFactory_NilTopicHandlerShouldErr(t *testing.T) { t.Parallel() icf, err := metachain.NewInterceptorsContainerFactory( mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), nil, createStore(), &mock.MarshalizerMock{}, @@ -117,6 +138,7 @@ func TestNewInterceptorsContainerFactory_NilBlockchainShouldErr(t *testing.T) { icf, err := metachain.NewInterceptorsContainerFactory( mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), &mock.TopicHandlerStub{}, nil, &mock.MarshalizerMock{}, @@ -135,6 +157,7 @@ func TestNewInterceptorsContainerFactory_NilMarshalizerShouldErr(t *testing.T) { icf, err := metachain.NewInterceptorsContainerFactory( mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), &mock.TopicHandlerStub{}, createStore(), nil, @@ -153,6 +176,7 @@ func TestNewInterceptorsContainerFactory_NilHasherShouldErr(t *testing.T) { icf, err := metachain.NewInterceptorsContainerFactory( mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), &mock.TopicHandlerStub{}, createStore(), &mock.MarshalizerMock{}, @@ -171,6 +195,7 @@ func TestNewInterceptorsContainerFactory_NilMultiSignerShouldErr(t *testing.T) { icf, err := metachain.NewInterceptorsContainerFactory( mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), &mock.TopicHandlerStub{}, createStore(), &mock.MarshalizerMock{}, @@ -189,6 +214,7 @@ func TestNewInterceptorsContainerFactory_NilDataPoolShouldErr(t *testing.T) { icf, err := metachain.NewInterceptorsContainerFactory( mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), &mock.TopicHandlerStub{}, createStore(), &mock.MarshalizerMock{}, @@ -207,6 +233,7 @@ func TestNewInterceptorsContainerFactory_ShouldWork(t *testing.T) { icf, err := metachain.NewInterceptorsContainerFactory( mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), &mock.TopicHandlerStub{}, createStore(), &mock.MarshalizerMock{}, @@ -227,6 +254,7 @@ func TestInterceptorsContainerFactory_CreateTopicMetablocksFailsShouldErr(t *tes icf, _ := metachain.NewInterceptorsContainerFactory( mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), createStubTopicHandler(factory.MetachainBlocksTopic, ""), createStore(), &mock.MarshalizerMock{}, @@ -247,6 +275,7 @@ func TestInterceptorsContainerFactory_CreateTopicShardHeadersForMetachainFailsSh icf, _ := metachain.NewInterceptorsContainerFactory( mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), createStubTopicHandler(factory.ShardHeadersForMetachainTopic, ""), createStore(), &mock.MarshalizerMock{}, @@ -267,6 +296,7 @@ func TestInterceptorsContainerFactory_CreateRegisterForMetablocksFailsShouldErr( icf, _ := metachain.NewInterceptorsContainerFactory( mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), createStubTopicHandler("", factory.MetachainBlocksTopic), createStore(), &mock.MarshalizerMock{}, @@ -287,6 +317,7 @@ func TestInterceptorsContainerFactory_CreateRegisterShardHeadersForMetachainFail icf, _ := 
metachain.NewInterceptorsContainerFactory( mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), createStubTopicHandler("", factory.ShardHeadersForMetachainTopic), createStore(), &mock.MarshalizerMock{}, @@ -307,6 +338,7 @@ func TestInterceptorsContainerFactory_CreateShouldWork(t *testing.T) { icf, _ := metachain.NewInterceptorsContainerFactory( mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), &mock.TopicHandlerStub{ CreateTopicCalled: func(name string, createChannelForTopic bool) error { return nil @@ -338,8 +370,15 @@ func TestInterceptorsContainerFactory_With4ShardsShouldWork(t *testing.T) { shardCoordinator.SetNoShards(uint32(noOfShards)) shardCoordinator.CurrentShard = 1 + nodesCoordinator := &mock.NodesCoordinatorMock{ + ConsensusSize: 1, + NbShards: uint32(noOfShards), + ShardId: 1, + } + icf, _ := metachain.NewInterceptorsContainerFactory( shardCoordinator, + nodesCoordinator, &mock.TopicHandlerStub{ CreateTopicCalled: func(name string, createChannelForTopic bool) error { return nil diff --git a/process/factory/shard/interceptorsContainerFactory.go b/process/factory/shard/interceptorsContainerFactory.go index a047c63fdd8..8777fab1a55 100644 --- a/process/factory/shard/interceptorsContainerFactory.go +++ b/process/factory/shard/interceptorsContainerFactory.go @@ -26,12 +26,14 @@ type interceptorsContainerFactory struct { multiSigner crypto.MultiSigner dataPool dataRetriever.PoolsHolder addrConverter state.AddressConverter + nodesCoordinator sharding.NodesCoordinator chronologyValidator process.ChronologyValidator } // NewInterceptorsContainerFactory is responsible for creating a new interceptors factory object func NewInterceptorsContainerFactory( shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, messenger process.TopicHandler, store dataRetriever.StorageService, marshalizer marshal.Marshalizer, @@ -74,12 +76,16 @@ func NewInterceptorsContainerFactory( if addrConverter == nil { return nil, process.ErrNilAddressConverter } + if nodesCoordinator == nil { + return nil, process.ErrNilNodesCoordinator + } if chronologyValidator == nil { return nil, process.ErrNilChronologyValidator } return &interceptorsContainerFactory{ shardCoordinator: shardCoordinator, + nodesCoordinator: nodesCoordinator, messenger: messenger, store: store, marshalizer: marshalizer, @@ -297,6 +303,7 @@ func (icf *interceptorsContainerFactory) generateHdrInterceptor() ([]string, []p icf.multiSigner, icf.hasher, icf.shardCoordinator, + icf.nodesCoordinator, icf.chronologyValidator, ) if err != nil { diff --git a/process/factory/shard/interceptorsContainerFactory_test.go b/process/factory/shard/interceptorsContainerFactory_test.go index 45d95c80358..78a7cf98294 100644 --- a/process/factory/shard/interceptorsContainerFactory_test.go +++ b/process/factory/shard/interceptorsContainerFactory_test.go @@ -86,6 +86,7 @@ func TestNewInterceptorsContainerFactory_NilShardCoordinatorShouldErr(t *testing icf, err := shard.NewInterceptorsContainerFactory( nil, + mock.NewNodesCoordinatorMock(), &mock.TopicHandlerStub{}, createStore(), &mock.MarshalizerMock{}, @@ -102,11 +103,34 @@ func TestNewInterceptorsContainerFactory_NilShardCoordinatorShouldErr(t *testing assert.Equal(t, process.ErrNilShardCoordinator, err) } +func TestNewInterceptorsContainerFactory_NilNodesCoordinatorShouldErr(t *testing.T) { + t.Parallel() + + icf, err := shard.NewInterceptorsContainerFactory( + mock.NewOneShardCoordinatorMock(), + nil, + &mock.TopicHandlerStub{}, + createStore(), + 
&mock.MarshalizerMock{}, + &mock.HasherMock{}, + &mock.SingleSignKeyGenMock{}, + &mock.SignerMock{}, + mock.NewMultiSigner(), + createDataPools(), + &mock.AddressConverterMock{}, + &mock.ChronologyValidatorStub{}, + ) + + assert.Nil(t, icf) + assert.Equal(t, process.ErrNilNodesCoordinator, err) +} + func TestNewInterceptorsContainerFactory_NilTopicHandlerShouldErr(t *testing.T) { t.Parallel() icf, err := shard.NewInterceptorsContainerFactory( mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), nil, createStore(), &mock.MarshalizerMock{}, @@ -128,6 +152,7 @@ func TestNewInterceptorsContainerFactory_NilBlockchainShouldErr(t *testing.T) { icf, err := shard.NewInterceptorsContainerFactory( mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), &mock.TopicHandlerStub{}, nil, &mock.MarshalizerMock{}, @@ -149,6 +174,7 @@ func TestNewInterceptorsContainerFactory_NilMarshalizerShouldErr(t *testing.T) { icf, err := shard.NewInterceptorsContainerFactory( mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), &mock.TopicHandlerStub{}, createStore(), nil, @@ -170,6 +196,7 @@ func TestNewInterceptorsContainerFactory_NilHasherShouldErr(t *testing.T) { icf, err := shard.NewInterceptorsContainerFactory( mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), &mock.TopicHandlerStub{}, createStore(), &mock.MarshalizerMock{}, @@ -191,6 +218,7 @@ func TestNewInterceptorsContainerFactory_NilKeyGenShouldErr(t *testing.T) { icf, err := shard.NewInterceptorsContainerFactory( mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), &mock.TopicHandlerStub{}, createStore(), &mock.MarshalizerMock{}, @@ -212,6 +240,7 @@ func TestNewInterceptorsContainerFactory_NilSingleSignerShouldErr(t *testing.T) icf, err := shard.NewInterceptorsContainerFactory( mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), &mock.TopicHandlerStub{}, createStore(), &mock.MarshalizerMock{}, @@ -233,6 +262,7 @@ func TestNewInterceptorsContainerFactory_NilMultiSignerShouldErr(t *testing.T) { icf, err := shard.NewInterceptorsContainerFactory( mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), &mock.TopicHandlerStub{}, createStore(), &mock.MarshalizerMock{}, @@ -254,6 +284,7 @@ func TestNewInterceptorsContainerFactory_NilDataPoolShouldErr(t *testing.T) { icf, err := shard.NewInterceptorsContainerFactory( mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), &mock.TopicHandlerStub{}, createStore(), &mock.MarshalizerMock{}, @@ -275,6 +306,7 @@ func TestNewInterceptorsContainerFactory_NilAddrConverterShouldErr(t *testing.T) icf, err := shard.NewInterceptorsContainerFactory( mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), &mock.TopicHandlerStub{}, createStore(), &mock.MarshalizerMock{}, @@ -296,6 +328,7 @@ func TestNewInterceptorsContainerFactory_ShouldWork(t *testing.T) { icf, err := shard.NewInterceptorsContainerFactory( mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), &mock.TopicHandlerStub{}, createStore(), &mock.MarshalizerMock{}, @@ -319,6 +352,7 @@ func TestInterceptorsContainerFactory_CreateTopicCreationTxFailsShouldErr(t *tes icf, _ := shard.NewInterceptorsContainerFactory( mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), createStubTopicHandler(factory.TransactionTopic, ""), createStore(), &mock.MarshalizerMock{}, @@ -342,6 +376,7 @@ func TestInterceptorsContainerFactory_CreateTopicCreationHdrFailsShouldErr(t *te icf, _ := shard.NewInterceptorsContainerFactory( 
mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), createStubTopicHandler(factory.HeadersTopic, ""), createStore(), &mock.MarshalizerMock{}, @@ -365,6 +400,7 @@ func TestInterceptorsContainerFactory_CreateTopicCreationMiniBlocksFailsShouldEr icf, _ := shard.NewInterceptorsContainerFactory( mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), createStubTopicHandler(factory.MiniBlocksTopic, ""), createStore(), &mock.MarshalizerMock{}, @@ -388,6 +424,7 @@ func TestInterceptorsContainerFactory_CreateTopicCreationPeerChBlocksFailsShould icf, _ := shard.NewInterceptorsContainerFactory( mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), createStubTopicHandler(factory.PeerChBodyTopic, ""), createStore(), &mock.MarshalizerMock{}, @@ -411,6 +448,7 @@ func TestInterceptorsContainerFactory_CreateTopicCreationMetachainHeadersFailsSh icf, _ := shard.NewInterceptorsContainerFactory( mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), createStubTopicHandler(factory.MetachainBlocksTopic, ""), createStore(), &mock.MarshalizerMock{}, @@ -434,6 +472,7 @@ func TestInterceptorsContainerFactory_CreateRegisterTxFailsShouldErr(t *testing. icf, _ := shard.NewInterceptorsContainerFactory( mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), createStubTopicHandler("", factory.TransactionTopic), createStore(), &mock.MarshalizerMock{}, @@ -457,6 +496,7 @@ func TestInterceptorsContainerFactory_CreateRegisterHdrFailsShouldErr(t *testing icf, _ := shard.NewInterceptorsContainerFactory( mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), createStubTopicHandler("", factory.HeadersTopic), createStore(), &mock.MarshalizerMock{}, @@ -480,6 +520,7 @@ func TestInterceptorsContainerFactory_CreateRegisterMiniBlocksFailsShouldErr(t * icf, _ := shard.NewInterceptorsContainerFactory( mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), createStubTopicHandler("", factory.MiniBlocksTopic), createStore(), &mock.MarshalizerMock{}, @@ -503,6 +544,7 @@ func TestInterceptorsContainerFactory_CreateRegisterPeerChBlocksFailsShouldErr(t icf, _ := shard.NewInterceptorsContainerFactory( mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), createStubTopicHandler("", factory.PeerChBodyTopic), createStore(), &mock.MarshalizerMock{}, @@ -526,6 +568,7 @@ func TestInterceptorsContainerFactory_CreateRegisterMetachainHeadersShouldErr(t icf, _ := shard.NewInterceptorsContainerFactory( mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), createStubTopicHandler("", factory.MetachainBlocksTopic), createStore(), &mock.MarshalizerMock{}, @@ -549,6 +592,7 @@ func TestInterceptorsContainerFactory_CreateShouldWork(t *testing.T) { icf, _ := shard.NewInterceptorsContainerFactory( mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), &mock.TopicHandlerStub{ CreateTopicCalled: func(name string, createChannelForTopic bool) error { return nil @@ -583,8 +627,15 @@ func TestInterceptorsContainerFactory_With4ShardsShouldWork(t *testing.T) { shardCoordinator.SetNoShards(uint32(noOfShards)) shardCoordinator.CurrentShard = 1 + nodesCoordinator := &mock.NodesCoordinatorMock{ + ShardId: 1, + ConsensusSize: 1, + NbShards: uint32(noOfShards), + } + icf, _ := shard.NewInterceptorsContainerFactory( shardCoordinator, + nodesCoordinator, &mock.TopicHandlerStub{ CreateTopicCalled: func(name string, createChannelForTopic bool) error { return nil diff --git a/process/metablock/shardHeaderInterceptor.go 
b/process/metablock/shardHeaderInterceptor.go index 740b457d13e..bba8aef1e0f 100644 --- a/process/metablock/shardHeaderInterceptor.go +++ b/process/metablock/shardHeaderInterceptor.go @@ -36,6 +36,7 @@ func NewShardHeaderInterceptor( multiSigVerifier crypto.MultiSigVerifier, hasher hashing.Hasher, shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, chronologyValidator process.ChronologyValidator, ) (*ShardHeaderInterceptor, error) { @@ -52,6 +53,7 @@ func NewShardHeaderInterceptor( multiSigVerifier, hasher, shardCoordinator, + nodesCoordinator, chronologyValidator, ) if err != nil { diff --git a/process/metablock/shardHeaderInterceptor_test.go b/process/metablock/shardHeaderInterceptor_test.go index b23be7bb337..c812eab2dee 100644 --- a/process/metablock/shardHeaderInterceptor_test.go +++ b/process/metablock/shardHeaderInterceptor_test.go @@ -31,6 +31,7 @@ func TestNewShardHeaderInterceptor_NilMarshalizerShouldErr(t *testing.T) { mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), + &mock.NodesCoordinatorMock{}, &mock.ChronologyValidatorStub{}, ) @@ -50,6 +51,7 @@ func TestNewShardHeaderInterceptor_NilHeadersShouldErr(t *testing.T) { mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), + &mock.NodesCoordinatorMock{}, &mock.ChronologyValidatorStub{}, ) @@ -70,6 +72,7 @@ func TestNewShardHeaderInterceptor_OkValsShouldWork(t *testing.T) { mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), + &mock.NodesCoordinatorMock{}, &mock.ChronologyValidatorStub{}, ) @@ -92,6 +95,7 @@ func TestShardHeaderInterceptor_ProcessReceivedMessageNilMessageShouldErr(t *tes mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), + &mock.NodesCoordinatorMock{}, &mock.ChronologyValidatorStub{}, ) @@ -123,6 +127,8 @@ func TestShardHeaderInterceptor_ProcessReceivedMessageValsOkShouldWork(t *testin return true } + nodesCoordinator := &mock.NodesCoordinatorMock{} + hi, _ := metablock.NewShardHeaderInterceptor( marshalizer, headers, @@ -131,14 +137,15 @@ func TestShardHeaderInterceptor_ProcessReceivedMessageValsOkShouldWork(t *testin multisigner, mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), + nodesCoordinator, chronologyValidator, ) - hdr := block.NewInterceptedHeader(multisigner, chronologyValidator) + hdr := block.NewInterceptedHeader(multisigner, chronologyValidator, nodesCoordinator, marshalizer) hdr.Nonce = testedNonce hdr.ShardId = 0 hdr.PrevHash = make([]byte, 0) - hdr.PubKeysBitmap = make([]byte, 0) + hdr.PubKeysBitmap = []byte{1} hdr.BlockBodyType = dataBlock.TxBlock hdr.Signature = make([]byte, 0) hdr.SetHash([]byte("aaa")) @@ -186,6 +193,7 @@ func TestShardHeaderInterceptor_ProcessReceivedMessageTestHdrNonces(t *testing.T return errors.New("Key not found") } hdrsNonces := &mock.Uint64CacherStub{} + nodesCoordinator := &mock.NodesCoordinatorMock{} hi, _ := metablock.NewShardHeaderInterceptor( marshalizer, @@ -195,14 +203,15 @@ func TestShardHeaderInterceptor_ProcessReceivedMessageTestHdrNonces(t *testing.T multisigner, mock.HasherMock{}, mock.NewMultiShardsCoordinatorMock(2), + nodesCoordinator, chronologyValidator, ) - hdr := block.NewInterceptedHeader(multisigner, chronologyValidator) + hdr := block.NewInterceptedHeader(multisigner, chronologyValidator, nodesCoordinator, marshalizer) hdr.Nonce = testedNonce hdr.ShardId = 0 hdr.PrevHash = make([]byte, 0) - hdr.PubKeysBitmap = make([]byte, 0) + hdr.PubKeysBitmap = []byte{1} hdr.BlockBodyType = dataBlock.TxBlock hdr.Signature = 
make([]byte, 0) hdr.SetHash([]byte("aaa")) @@ -255,6 +264,9 @@ func TestShardHeaderInterceptor_ProcessReceivedMessageIsInStorageShouldNotAdd(t storer.HasCalled = func(key []byte) error { return nil } + + nodesCoordinator := &mock.NodesCoordinatorMock{} + hi, _ := metablock.NewShardHeaderInterceptor( marshalizer, headers, @@ -263,14 +275,16 @@ func TestShardHeaderInterceptor_ProcessReceivedMessageIsInStorageShouldNotAdd(t multisigner, mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), + nodesCoordinator, chronologyValidator, ) - hdr := block.NewInterceptedHeader(multisigner, chronologyValidator) + hdr := block.NewInterceptedHeader(multisigner, chronologyValidator, nodesCoordinator, marshalizer) hdr.Nonce = testedNonce hdr.ShardId = 0 hdr.PrevHash = make([]byte, 0) - hdr.PubKeysBitmap = make([]byte, 0) + hdr.PubKeysBitmap = make([]byte, 2) + hdr.PubKeysBitmap[0] = 1 hdr.BlockBodyType = dataBlock.TxBlock hdr.Signature = make([]byte, 0) hdr.RootHash = make([]byte, 0) diff --git a/process/mock/multiSigMock.go b/process/mock/multiSigMock.go index 5023a44ab56..6e12a9b2388 100644 --- a/process/mock/multiSigMock.go +++ b/process/mock/multiSigMock.go @@ -3,6 +3,7 @@ package mock import ( "github.com/ElrondNetwork/elrond-go/crypto" "github.com/ElrondNetwork/elrond-go/hashing" + "bytes" ) // BelNevMock is used to mock belare neven multisignature scheme @@ -77,13 +78,28 @@ func (bnm *BelNevMock) SetAggregatedSig(aggSig []byte) error { // Verify returns nil if the aggregateed signature is verified for the given public keys func (bnm *BelNevMock) Verify(msg []byte, bitmap []byte) error { - return bnm.VerifyMock(msg, bitmap) + if bnm.VerifyMock != nil { + return bnm.VerifyMock(msg, bitmap) + } + + if msg == nil { + return crypto.ErrNilMessage + } + + if bitmap == nil { + return crypto.ErrNilBitmap + } + + return nil } // CreateCommitment creates a secret commitment and the corresponding public commitment point func (bnm *BelNevMock) CreateCommitment() (commSecret []byte, commitment []byte) { + if bnm.CreateCommitmentMock != nil { + return bnm.CreateCommitmentMock() + } - return bnm.CreateCommitmentMock() + return []byte("commitment secret"), []byte("commitment") } // StoreCommitmentHash adds a commitment hash to the list on the specified position @@ -92,18 +108,18 @@ func (bnm *BelNevMock) StoreCommitmentHash(index uint16, commHash []byte) error bnm.commHash = commHash return nil - } else { - return bnm.StoreCommitmentHashMock(index, commHash) } + + return bnm.StoreCommitmentHashMock(index, commHash) } // CommitmentHash returns the commitment hash from the list on the specified position func (bnm *BelNevMock) CommitmentHash(index uint16) ([]byte, error) { if bnm.CommitmentHashMock == nil { return bnm.commHash, nil - } else { - return bnm.CommitmentHashMock(index) } + + return bnm.CommitmentHashMock(index) } // StoreCommitment adds a commitment to the list on the specified position @@ -116,9 +132,9 @@ func (bnm *BelNevMock) StoreCommitment(index uint16, value []byte) error { bnm.commitments[index] = value return nil - } else { - return bnm.StoreCommitmentMock(index, value) } + + return bnm.StoreCommitmentMock(index, value) } // Commitment returns the commitment from the list with the specified position @@ -129,19 +145,27 @@ func (bnm *BelNevMock) Commitment(index uint16) ([]byte, error) { } return bnm.commitments[index], nil - } else { - return bnm.CommitmentMock(index) } + + return bnm.CommitmentMock(index) } // AggregateCommitments aggregates the list of commitments func (bnm *BelNevMock) 
AggregateCommitments(bitmap []byte) error { - return bnm.AggregateCommitmentsMock(bitmap) + if bnm.AggregateCommitmentsMock != nil { + return bnm.AggregateCommitmentsMock(bitmap) + } + + return nil } // CreateSignatureShare creates a partial signature func (bnm *BelNevMock) CreateSignatureShare(msg []byte, bitmap []byte) ([]byte, error) { - return bnm.CreateSignatureShareMock(msg, bitmap) + if bnm.CreateSignatureShareMock != nil { + return bnm.CreateSignatureShareMock(msg, bitmap) + } + + return []byte("signature share"), nil } // StoreSignatureShare adds the partial signature of the signer with specified position @@ -156,12 +180,28 @@ func (bnm *BelNevMock) StoreSignatureShare(index uint16, sig []byte) error { // VerifySignatureShare verifies the partial signature of the signer with specified position func (bnm *BelNevMock) VerifySignatureShare(index uint16, sig []byte, msg []byte, bitmap []byte) error { - return bnm.VerifySignatureShareMock(index, sig, msg, bitmap) + if bnm.VerifySignatureShareMock(index, sig, msg, bitmap) != nil { + return bnm.VerifySignatureShareMock(index, sig, msg, bitmap) + } + + if bytes.Equal([]byte("signature share"), sig) { + return nil + } + + return crypto.ErrSigNotValid } // AggregateSigs aggregates all collected partial signatures func (bnm *BelNevMock) AggregateSigs(bitmap []byte) ([]byte, error) { - return bnm.AggregateSigsMock(bitmap) + if bnm.AggregateSigsMock != nil { + return bnm.AggregateSigsMock(bitmap) + } + + if bitmap == nil { + return nil, crypto.ErrNilBitmap + } + + return []byte("aggregated signature"), nil } // SignatureShare diff --git a/process/mock/nodesCoordinatorMock.go b/process/mock/nodesCoordinatorMock.go new file mode 100644 index 00000000000..c49aeb99563 --- /dev/null +++ b/process/mock/nodesCoordinatorMock.go @@ -0,0 +1,119 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go/sharding" +) + +// NodesCoordinator defines the behaviour of a struct able to do validator group selection +type NodesCoordinatorMock struct { + Validators map[uint32][]sharding.Validator + ConsensusSize uint32 + ShardId uint32 + NbShards uint32 + GetSelectedPublicKeysCalled func(selection []byte) (publicKeys []string, err error) + GetValidatorsPublicKeysCalled func(randomness []byte) ([]string, error) + LoadNodesPerShardsCalled func(nodes map[uint32][]sharding.Validator) error + ComputeValidatorsGroupCalled func(randomness []byte) (validatorsGroup []sharding.Validator, err error) + ConsensusGroupSizeCalled func() int + SetConsensusGroupSizeCalled func(int) error +} + +func NewNodesCoordinatorMock() *NodesCoordinatorMock { + return &NodesCoordinatorMock{ + ConsensusSize: 1, + ShardId: 0, + NbShards: 1, + Validators: make(map[uint32][]sharding.Validator), + } +} + +func (ncm *NodesCoordinatorMock) GetSelectedPublicKeys(selection []byte) (publicKeys []string, err error) { + if ncm.GetSelectedPublicKeysCalled != nil { + return ncm.GetSelectedPublicKeysCalled(selection) + } + + pubKeys := make([]string, 0) + + if len(ncm.Validators) == 0 { + return nil, sharding.ErrNilInputNodesMap + } + + if len(ncm.Validators[ncm.ShardId]) < int(ncm.ConsensusSize) { + return nil, sharding.ErrSmallEligibleListSize + } + + for _, v := range ncm.Validators[ncm.ShardId] { + pubKeys = append(pubKeys, string(v.PubKey())) + } + + return pubKeys, nil +} + +func (ncm *NodesCoordinatorMock) GetValidatorsPublicKeys(randomness []byte) ([]string, error) { + if ncm.GetValidatorsPublicKeysCalled != nil { + return ncm.GetValidatorsPublicKeysCalled(randomness) + } + + validators, 
err := ncm.ComputeValidatorsGroup(randomness) + if err != nil { + return nil, err + } + + valGrStr := make([]string, 0) + + for _, v := range validators { + valGrStr = append(valGrStr, string(v.PubKey())) + } + + return valGrStr, nil +} + +func (ncm *NodesCoordinatorMock) LoadNodesPerShards(nodes map[uint32][]sharding.Validator) error { + if ncm.LoadNodesPerShardsCalled != nil { + return ncm.LoadNodesPerShardsCalled(nodes) + } + + if nodes == nil { + return sharding.ErrNilInputNodesMap + } + + ncm.Validators = nodes + + return nil +} + +func (ncm *NodesCoordinatorMock) ComputeValidatorsGroup(randomess []byte) ([]sharding.Validator, error) { + if ncm.ComputeValidatorsGroupCalled != nil { + return ncm.ComputeValidatorsGroupCalled(randomess) + } + + if len(ncm.Validators[ncm.ShardId]) < int(ncm.ConsensusSize) { + return nil, sharding.ErrSmallEligibleListSize + } + + if randomess == nil { + return nil, sharding.ErrNilRandomness + } + + validatorsGroup := make([]sharding.Validator, 0) + + for i := uint32(0); i < ncm.ConsensusSize; i++ { + validatorsGroup = append(validatorsGroup, ncm.Validators[ncm.ShardId][i]) + } + + return validatorsGroup, nil +} + +func (ncm *NodesCoordinatorMock) ConsensusGroupSize() int { + if ncm.ConsensusGroupSizeCalled != nil { + return ncm.ConsensusGroupSizeCalled() + } + + return int(ncm.ConsensusSize) +} + +func (ncm *NodesCoordinatorMock) SetConsensusGroupSize(size int) error { + ncm.ConsensusSize = uint32(size) + + return nil +} diff --git a/sharding/indexHashedNodesCoordinator.go b/sharding/indexHashedNodesCoordinator.go index c2289893ca0..8ddd2cb84bc 100644 --- a/sharding/indexHashedNodesCoordinator.go +++ b/sharding/indexHashedNodesCoordinator.go @@ -17,8 +17,8 @@ type indexHashedNodesCoordinator struct { consensusGroupSize int } -// NewIndexHashedGroupSelector creates a new index hashed group selector -func NewIndexHashedGroupSelector( +// NewIndexHashedNodesCoordinator creates a new index hashed group selector +func NewIndexHashedNodesCoordinator( consensusGroupSize int, hasher hashing.Hasher, shardId uint32, diff --git a/sharding/indexHashedNodesCoordinator_test.go b/sharding/indexHashedNodesCoordinator_test.go index 7610b7f5032..e2fe103453c 100644 --- a/sharding/indexHashedNodesCoordinator_test.go +++ b/sharding/indexHashedNodesCoordinator_test.go @@ -22,12 +22,12 @@ func uint64ToBytes(value uint64) []byte { return buff } -//------- NewIndexHashedGroupSelector +//------- NewIndexHashedNodesCoordinator func TestNewIndexHashedGroupSelector_NilHasherShouldErr(t *testing.T) { t.Parallel() - ihgs, err := sharding.NewIndexHashedGroupSelector(1, nil, 0, 1) + ihgs, err := sharding.NewIndexHashedNodesCoordinator(1, nil, 0, 1) assert.Nil(t, ihgs) assert.Equal(t, sharding.ErrNilHasher, err) @@ -36,7 +36,7 @@ func TestNewIndexHashedGroupSelector_NilHasherShouldErr(t *testing.T) { func TestNewIndexHashedGroupSelector_InvalidConsensusGroupSizeShouldErr(t *testing.T) { t.Parallel() - ihgs, err := sharding.NewIndexHashedGroupSelector(0, mock.HasherMock{}, 0, 1) + ihgs, err := sharding.NewIndexHashedNodesCoordinator(0, mock.HasherMock{}, 0, 1) assert.Nil(t, ihgs) assert.Equal(t, sharding.ErrInvalidConsensusGroupSize, err) @@ -45,7 +45,7 @@ func TestNewIndexHashedGroupSelector_InvalidConsensusGroupSizeShouldErr(t *testi func TestNewIndexHashedGroupSelector_OkValsShouldWork(t *testing.T) { t.Parallel() - ihgs, err := sharding.NewIndexHashedGroupSelector(1, mock.HasherMock{}, 0, 1) + ihgs, err := sharding.NewIndexHashedNodesCoordinator(1, mock.HasherMock{}, 0, 1) 
assert.NotNil(t, ihgs) assert.Nil(t, err) @@ -56,7 +56,7 @@ func TestNewIndexHashedGroupSelector_OkValsShouldWork(t *testing.T) { func TestIndexHashedGroupSelector_LoadEligibleListNilListShouldErr(t *testing.T) { t.Parallel() - ihgs, _ := sharding.NewIndexHashedGroupSelector(10, mock.HasherMock{}, 0, 1) + ihgs, _ := sharding.NewIndexHashedNodesCoordinator(10, mock.HasherMock{}, 0, 1) assert.Equal(t, sharding.ErrNilInputNodesMap, ihgs.LoadNodesPerShards(nil)) } @@ -64,7 +64,7 @@ func TestIndexHashedGroupSelector_LoadEligibleListNilListShouldErr(t *testing.T) func TestIndexHashedGroupSelector_OkValShouldWork(t *testing.T) { t.Parallel() - ihgs, _ := sharding.NewIndexHashedGroupSelector(10, mock.HasherMock{}, 0, 1) + ihgs, _ := sharding.NewIndexHashedNodesCoordinator(10, mock.HasherMock{}, 0, 1) list := []sharding.Validator{ mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0")), @@ -84,7 +84,7 @@ func TestIndexHashedGroupSelector_OkValShouldWork(t *testing.T) { func TestIndexHashedGroupSelector_ComputeValidatorsGroup0SizeShouldErr(t *testing.T) { t.Parallel() - ihgs, _ := sharding.NewIndexHashedGroupSelector(1, mock.HasherMock{}, 0, 1) + ihgs, _ := sharding.NewIndexHashedNodesCoordinator(1, mock.HasherMock{}, 0, 1) list := make([]sharding.Validator, 0) @@ -97,7 +97,7 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroup0SizeShouldErr(t *testin func TestIndexHashedGroupSelector_ComputeValidatorsGroupWrongSizeShouldErr(t *testing.T) { t.Parallel() - ihgs, _ := sharding.NewIndexHashedGroupSelector(10, mock.HasherMock{}, 0, 1) + ihgs, _ := sharding.NewIndexHashedNodesCoordinator(10, mock.HasherMock{}, 0, 1) list := []sharding.Validator{ mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0")), @@ -117,7 +117,7 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroupWrongSizeShouldErr(t *te func TestIndexHashedGroupSelector_ComputeValidatorsGroupNilRandomnessShouldErr(t *testing.T) { t.Parallel() - ihgs, _ := sharding.NewIndexHashedGroupSelector(2, mock.HasherMock{}, 0, 1) + ihgs, _ := sharding.NewIndexHashedNodesCoordinator(2, mock.HasherMock{}, 0, 1) list := []sharding.Validator{ mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0")), @@ -139,7 +139,7 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroupNilRandomnessShouldErr(t func TestIndexHashedGroupSelector_ComputeValidatorsGroup1ValidatorShouldReturnSame(t *testing.T) { t.Parallel() - ihgs, _ := sharding.NewIndexHashedGroupSelector(1, mock.HasherMock{}, 0, 1) + ihgs, _ := sharding.NewIndexHashedNodesCoordinator(1, mock.HasherMock{}, 0, 1) list := []sharding.Validator{ mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0")), @@ -177,7 +177,7 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest2Validators(t *testi return nil } - ihgs, _ := sharding.NewIndexHashedGroupSelector(2, hasher, 0, 1) + ihgs, _ := sharding.NewIndexHashedNodesCoordinator(2, hasher, 0, 1) list := []sharding.Validator{ mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0")), @@ -216,7 +216,7 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest2ValidatorsRevertOrd return nil } - ihgs, _ := sharding.NewIndexHashedGroupSelector(2, hasher, 0, 1) + ihgs, _ := sharding.NewIndexHashedNodesCoordinator(2, hasher, 0, 1) validator0 := mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0")) validator1 := mock.NewValidatorMock(big.NewInt(2), 3, []byte("pk1")) @@ -259,7 +259,7 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest2ValidatorsSameIndex return nil } - ihgs, _ := sharding.NewIndexHashedGroupSelector(2, hasher, 0, 1) + ihgs, _ := 
sharding.NewIndexHashedNodesCoordinator(2, hasher, 0, 1)
 
 	list := []sharding.Validator{
 		mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0")),
@@ -310,7 +310,7 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest6From10ValidatorsSho
 		return convertBigIntToBytes(val)
 	}
 
-	ihgs, _ := sharding.NewIndexHashedGroupSelector(6, hasher, 0, 1)
+	ihgs, _ := sharding.NewIndexHashedNodesCoordinator(6, hasher, 0, 1)
 
 	validator0 := mock.NewValidatorMock(big.NewInt(1), 1, []byte("pk0"))
 	validator1 := mock.NewValidatorMock(big.NewInt(2), 2, []byte("pk1"))
@@ -357,7 +357,7 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest6From10ValidatorsSho
 func BenchmarkIndexHashedGroupSelector_ComputeValidatorsGroup21of400(b *testing.B) {
 	consensusGroupSize := 21
-	ihgs, _ := sharding.NewIndexHashedGroupSelector(consensusGroupSize, mock.HasherMock{}, 0, 1)
+	ihgs, _ := sharding.NewIndexHashedNodesCoordinator(consensusGroupSize, mock.HasherMock{}, 0, 1)
 
 	list := make([]sharding.Validator, 0)
 
From fc279bd6f168a0fc08930e657907cbff6d3b8e91 Mon Sep 17 00:00:00 2001
From: Robert Sasu
Date: Wed, 10 Jul 2019 10:15:12 +0300
Subject: [PATCH 015/234] pushing unit tests.

---
 data/address/specialAddresses.go            |  2 +-
 process/coordinator/transactionType_test.go | 71 +++++++++++++++++++++
 process/interface.go                        |  2 +-
 process/mock/specialAddressHandlerMock.go   |  2 +-
 process/unsigned/feeTxHandler.go            |  2 +-
 5 files changed, 75 insertions(+), 4 deletions(-)

diff --git a/data/address/specialAddresses.go b/data/address/specialAddresses.go
index 19143ce1ebb..d8b109262b2 100644
--- a/data/address/specialAddresses.go
+++ b/data/address/specialAddresses.go
@@ -49,7 +49,7 @@ func (sp *specialAddresses) ElrondCommunityAddress() []byte {
 }
 
 // OwnAddress provides own address
-func (sp *specialAddresses) OwnAddress() []byte {
+func (sp *specialAddresses) LeaderAddress() []byte {
 	return sp.ownAddress
 }
 
diff --git a/process/coordinator/transactionType_test.go b/process/coordinator/transactionType_test.go
index 2636fe66b9e..07a4d9bdf7b 100644
--- a/process/coordinator/transactionType_test.go
+++ b/process/coordinator/transactionType_test.go
@@ -2,6 +2,7 @@ package coordinator
 
 import (
 	"crypto/rand"
+	"github.com/ElrondNetwork/elrond-go/data/feeTx"
 	"github.com/ElrondNetwork/elrond-go/data/state"
 	"github.com/ElrondNetwork/elrond-go/data/transaction"
 	"github.com/ElrondNetwork/elrond-go/process"
@@ -11,6 +12,58 @@ import (
 	"testing"
 )
 
+func TestNewTxTypeHandler_NilAddrConv(t *testing.T) {
+	t.Parallel()
+
+	tth, err := NewTxTypeHandler(
+		nil,
+		mock.NewMultiShardsCoordinatorMock(3),
+		&mock.AccountsStub{},
+	)
+
+	assert.Nil(t, tth)
+	assert.Equal(t, process.ErrNilAddressConverter, err)
+}
+
+func TestNewTxTypeHandler_NilShardCoord(t *testing.T) {
+	t.Parallel()
+
+	tth, err := NewTxTypeHandler(
+		&mock.AddressConverterMock{},
+		nil,
+		&mock.AccountsStub{},
+	)
+
+	assert.Nil(t, tth)
+	assert.Equal(t, process.ErrNilShardCoordinator, err)
+}
+
+func TestNewTxTypeHandler_NilAccounts(t *testing.T) {
+	t.Parallel()
+
+	tth, err := NewTxTypeHandler(
+		&mock.AddressConverterMock{},
+		mock.NewMultiShardsCoordinatorMock(3),
+		nil,
+	)
+
+	assert.Nil(t, tth)
+	assert.Equal(t, process.ErrNilAccountsAdapter, err)
+}
+
+func TestNewTxTypeHandler_ValsOk(t *testing.T) {
+	t.Parallel()
+
+	tth, err := NewTxTypeHandler(
+		&mock.AddressConverterMock{},
+		mock.NewMultiShardsCoordinatorMock(3),
+		&mock.AccountsStub{},
+	)
+
+	assert.NotNil(t, tth)
+	assert.Nil(t, err)
+}
+
 func generateRandomByteSlice(size int) []byte {
 	buff := make([]byte, size)
 	_, _ = 
rand.Reader.Read(buff) @@ -188,3 +241,21 @@ func TestTxTypeHandler_ComputeTransactionTypeMoveBalance(t *testing.T) { assert.Nil(t, err) assert.Equal(t, process.MoveBalance, txType) } + +func TestTxTypeHandler_ComputeTransactionTypeTxFee(t *testing.T) { + t.Parallel() + + tth, err := NewTxTypeHandler( + &mock.AddressConverterMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + ) + + assert.NotNil(t, tth) + assert.Nil(t, err) + + tx := &feeTx.FeeTx{} + txType, err := tth.ComputeTransactionType(tx) + assert.Nil(t, err) + assert.Equal(t, process.TxFee, txType) +} diff --git a/process/interface.go b/process/interface.go index f5d260a537a..6ffe91dfcd4 100644 --- a/process/interface.go +++ b/process/interface.go @@ -83,7 +83,7 @@ type UnsignedTxHandler interface { // SpecialAddressHandler responds with needed special addresses type SpecialAddressHandler interface { ElrondCommunityAddress() []byte - OwnAddress() []byte + LeaderAddress() []byte ShardIdForAddress([]byte) uint32 } diff --git a/process/mock/specialAddressHandlerMock.go b/process/mock/specialAddressHandlerMock.go index ae7ef1a75f8..734df59496d 100644 --- a/process/mock/specialAddressHandlerMock.go +++ b/process/mock/specialAddressHandlerMock.go @@ -14,7 +14,7 @@ func (sh *SpecialAddressHandlerMock) ElrondCommunityAddress() []byte { return sh.ElrondCommunityAddressCalled() } -func (sh *SpecialAddressHandlerMock) OwnAddress() []byte { +func (sh *SpecialAddressHandlerMock) LeaderAddress() []byte { if sh.OwnAddressCalled == nil { return []byte("leader") } diff --git a/process/unsigned/feeTxHandler.go b/process/unsigned/feeTxHandler.go index c40d4c76ea8..9341f469a6b 100644 --- a/process/unsigned/feeTxHandler.go +++ b/process/unsigned/feeTxHandler.go @@ -139,7 +139,7 @@ func (ftxh *feeTxHandler) createLeaderTx(totalGathered *big.Int) *feeTx.FeeTx { currTx := &feeTx.FeeTx{} currTx.Value = getPercentageOfValue(totalGathered, leaderPercentage) - currTx.RcvAddr = ftxh.address.OwnAddress() + currTx.RcvAddr = ftxh.address.LeaderAddress() return currTx } From 5b5f441a39057a82aeba358241f0243857dd753e Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Wed, 10 Jul 2019 10:29:11 +0300 Subject: [PATCH 016/234] tests: printfs should end in newline for tests --- .../singleShard/block/interceptedRequestHdr_test.go | 2 +- process/sync/shardblock_test.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/integrationTests/singleShard/block/interceptedRequestHdr_test.go b/integrationTests/singleShard/block/interceptedRequestHdr_test.go index e262bdd12b3..f89bf5f4d71 100644 --- a/integrationTests/singleShard/block/interceptedRequestHdr_test.go +++ b/integrationTests/singleShard/block/interceptedRequestHdr_test.go @@ -131,7 +131,7 @@ func TestNode_GenerateSendInterceptHeaderByNonceWithNetMessenger(t *testing.T) { MiniBlockHeaders: make([]block.MiniBlockHeader, 0), } - hdrBuff, _ := marshalizer.Marshal(&hdr) + hdrBuff, _ := marshalizer.Marshal(&hdr1) msig, _ := multiSigner.Create(shard0PubKeys, 0) bitmap := []byte{1, 0, 0} _, _ = msig.CreateSignatureShare(hdrBuff, bitmap) diff --git a/process/sync/shardblock_test.go b/process/sync/shardblock_test.go index 939487717b5..129f7c07d3c 100644 --- a/process/sync/shardblock_test.go +++ b/process/sync/shardblock_test.go @@ -4767,7 +4767,7 @@ func getBlockBody(header data.HeaderHandler) (data.BodyHandler, error) { } func applyNotarisedBlock(nonce uint64, unitType dataRetriever.UnitType) error { - fmt.Printf("apply block with nonce %d in unit type %d", nonce, unitType) + 
fmt.Printf("apply block with nonce %d in unit type %d\n", nonce, unitType) return nil } @@ -4776,7 +4776,7 @@ func removeBlockBody( hdrNonceHashDataUnit dataRetriever.UnitType, blockUnit dataRetriever.UnitType, ) error { - fmt.Printf("remove block body with nonce %d with hdr nonce hash data unit type %d and block unit type %d", + fmt.Printf("remove block body with nonce %d with hdr nonce hash data unit type %d and block unit type %d\n", nonce, hdrNonceHashDataUnit, blockUnit) From 9a67d3cd4911cc3d65943bc4f8501d2de8632df3 Mon Sep 17 00:00:00 2001 From: Robert Sasu Date: Wed, 10 Jul 2019 11:31:47 +0300 Subject: [PATCH 017/234] pushing unit tests. --- process/unsigned/feeTxHandler.go | 7 +- process/unsigned/feeTxHandler_test.go | 288 ++++++++++++++++++++++++++ 2 files changed, 294 insertions(+), 1 deletion(-) diff --git a/process/unsigned/feeTxHandler.go b/process/unsigned/feeTxHandler.go index 9341f469a6b..5876ab9de4a 100644 --- a/process/unsigned/feeTxHandler.go +++ b/process/unsigned/feeTxHandler.go @@ -104,6 +104,7 @@ func (ftxh *feeTxHandler) AddTxFeeFromBlock(tx data.TransactionHandler) { currFeeTx, ok := tx.(*feeTx.FeeTx) if !ok { log.Debug(process.ErrWrongTypeAssertion.Error()) + return } ftxh.mutTxs.Lock() @@ -116,6 +117,7 @@ func (ftxh *feeTxHandler) AddProcessedUTx(tx data.TransactionHandler) { currFeeTx, ok := tx.(*feeTx.FeeTx) if !ok { log.Debug(process.ErrWrongTypeAssertion.Error()) + return } ftxh.mutTxs.Lock() @@ -197,7 +199,10 @@ func (ftxh *feeTxHandler) VerifyCreatedUTxs() error { for _, value := range calculatedFeeTxs { totalCalculatedFees = totalCalculatedFees.Add(totalCalculatedFees, value.GetValue()) - commTxFromBlock := ftxh.feeTxsFromBlock[string(value.GetRecvAddress())] + commTxFromBlock, ok := ftxh.feeTxsFromBlock[string(value.GetRecvAddress())] + if !ok { + return process.ErrTxsFeesDoesNotMatch + } if commTxFromBlock.Value.Cmp(value.GetValue()) != 0 { return process.ErrTxsFeesDoesNotMatch } diff --git a/process/unsigned/feeTxHandler_test.go b/process/unsigned/feeTxHandler_test.go index 02e9d600f77..4f522855590 100644 --- a/process/unsigned/feeTxHandler_test.go +++ b/process/unsigned/feeTxHandler_test.go @@ -1 +1,289 @@ package unsigned + +import ( + "github.com/ElrondNetwork/elrond-go/data/feeTx" + "github.com/ElrondNetwork/elrond-go/data/transaction" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/mock" + "github.com/stretchr/testify/assert" + "math/big" + "testing" +) + +func TestNewFeeTxHandler_NilSpecialAddress(t *testing.T) { + t.Parallel() + + th, err := NewFeeTxHandler( + nil, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + ) + + assert.Nil(t, th) + assert.Equal(t, process.ErrNilSpecialAddressHandler, err) +} + +func TestNewFeeTxHandler_NilHasher(t *testing.T) { + t.Parallel() + + th, err := NewFeeTxHandler( + &mock.SpecialAddressHandlerMock{}, + nil, + &mock.MarshalizerMock{}, + ) + + assert.Nil(t, th) + assert.Equal(t, process.ErrNilHasher, err) +} + +func TestNewFeeTxHandler_NilMarshalizer(t *testing.T) { + t.Parallel() + + th, err := NewFeeTxHandler( + &mock.SpecialAddressHandlerMock{}, + &mock.HasherMock{}, + nil, + ) + + assert.Nil(t, th) + assert.Equal(t, process.ErrNilMarshalizer, err) +} + +func TestNewFeeTxHandler_ValsOk(t *testing.T) { + t.Parallel() + + th, err := NewFeeTxHandler( + &mock.SpecialAddressHandlerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + ) + + assert.Nil(t, err) + assert.NotNil(t, th) +} + +func TestFeeTxHandler_AddIntermediateTransactions(t *testing.T) { + t.Parallel() 
+ + th, err := NewFeeTxHandler( + &mock.SpecialAddressHandlerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + ) + + assert.Nil(t, err) + assert.NotNil(t, th) + + err = th.AddIntermediateTransactions(nil) + assert.Nil(t, err) +} + +func TestFeeTxHandler_AddProcessedUTx(t *testing.T) { + t.Parallel() + + th, err := NewFeeTxHandler( + &mock.SpecialAddressHandlerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + ) + + assert.Nil(t, err) + assert.NotNil(t, th) + + th.AddProcessedUTx(nil) + assert.Equal(t, 0, len(th.feeTxs)) + + th.AddProcessedUTx(&transaction.Transaction{}) + assert.Equal(t, 0, len(th.feeTxs)) + + th.AddProcessedUTx(&feeTx.FeeTx{}) + assert.Equal(t, 1, len(th.feeTxs)) +} + +func TestFeeTxHandler_AddTxFeeFromBlock(t *testing.T) { + t.Parallel() + + th, err := NewFeeTxHandler( + &mock.SpecialAddressHandlerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + ) + + assert.Nil(t, err) + assert.NotNil(t, th) + + th.AddTxFeeFromBlock(nil) + assert.Equal(t, 0, len(th.feeTxsFromBlock)) + + th.AddTxFeeFromBlock(&transaction.Transaction{}) + assert.Equal(t, 0, len(th.feeTxsFromBlock)) + + th.AddTxFeeFromBlock(&feeTx.FeeTx{}) + assert.Equal(t, 1, len(th.feeTxsFromBlock)) +} + +func TestFeeTxHandler_CleanProcessedUTxs(t *testing.T) { + t.Parallel() + + th, err := NewFeeTxHandler( + &mock.SpecialAddressHandlerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + ) + + assert.Nil(t, err) + assert.NotNil(t, th) + + th.AddProcessedUTx(&feeTx.FeeTx{}) + th.AddTxFeeFromBlock(&feeTx.FeeTx{}) + assert.Equal(t, 1, len(th.feeTxs)) + assert.Equal(t, 1, len(th.feeTxsFromBlock)) + + th.CleanProcessedUTxs() + assert.Equal(t, 0, len(th.feeTxs)) + assert.Equal(t, 0, len(th.feeTxsFromBlock)) +} + +func TestFeeTxHandler_CreateAllUTxs(t *testing.T) { + t.Parallel() + + th, err := NewFeeTxHandler( + &mock.SpecialAddressHandlerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + ) + + assert.Nil(t, err) + assert.NotNil(t, th) + + txs := th.CreateAllUTxs() + assert.Equal(t, 0, len(txs)) + + currTxFee := big.NewInt(50) + th.AddProcessedUTx(&feeTx.FeeTx{Value: currTxFee}) + + txs = th.CreateAllUTxs() + assert.Equal(t, 2, len(txs)) + + totalSum := txs[0].GetValue().Uint64() + totalSum += txs[1].GetValue().Uint64() + + assert.Equal(t, currTxFee.Uint64()/2, totalSum) +} + +func TestFeeTxHandler_VerifyCreatedUTxs(t *testing.T) { + t.Parallel() + + addr := &mock.SpecialAddressHandlerMock{} + th, err := NewFeeTxHandler( + addr, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + ) + + assert.Nil(t, err) + assert.NotNil(t, th) + + err = th.VerifyCreatedUTxs() + assert.Nil(t, err) + + currTxFee := big.NewInt(50) + th.AddProcessedUTx(&feeTx.FeeTx{Value: currTxFee}) + + err = th.VerifyCreatedUTxs() + assert.Equal(t, process.ErrTxsFeesDoesNotMatch, err) + + badValue := big.NewInt(100) + th.AddTxFeeFromBlock(&feeTx.FeeTx{Value: badValue}) + + err = th.VerifyCreatedUTxs() + assert.Equal(t, process.ErrTxsFeesDoesNotMatch, err) + + th.CleanProcessedUTxs() + + currTxFee = big.NewInt(50) + halfCurrTxFee := big.NewInt(25) + th.AddProcessedUTx(&feeTx.FeeTx{Value: currTxFee}) + th.AddTxFeeFromBlock(&feeTx.FeeTx{Value: halfCurrTxFee}) + + err = th.VerifyCreatedUTxs() + assert.Equal(t, process.ErrTxsFeesDoesNotMatch, err) + + th.CleanProcessedUTxs() + + currTxFee = big.NewInt(50) + th.AddProcessedUTx(&feeTx.FeeTx{Value: currTxFee}) + th.AddTxFeeFromBlock(&feeTx.FeeTx{Value: big.NewInt(5), RcvAddr: addr.ElrondCommunityAddress()}) + th.AddTxFeeFromBlock(&feeTx.FeeTx{Value: big.NewInt(20), RcvAddr: 
addr.LeaderAddress()}) + + err = th.VerifyCreatedUTxs() + assert.Nil(t, err) +} + +func TestFeeTxHandler_CreateAllInterMiniBlocks(t *testing.T) { + t.Parallel() + + th, err := NewFeeTxHandler( + &mock.SpecialAddressHandlerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + ) + + assert.Nil(t, err) + assert.NotNil(t, th) + + mbs := th.CreateAllInterMiniBlocks() + assert.Equal(t, 0, len(mbs)) + + currTxFee := big.NewInt(50) + th.AddProcessedUTx(&feeTx.FeeTx{Value: currTxFee}) + + mbs = th.CreateAllInterMiniBlocks() + assert.Equal(t, 1, len(mbs)) +} + +func TestFeeTxHandler_VerifyInterMiniBlocks(t *testing.T) { + t.Parallel() + + addr := &mock.SpecialAddressHandlerMock{} + th, err := NewFeeTxHandler( + addr, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + ) + + assert.Nil(t, err) + assert.NotNil(t, th) + + err = th.VerifyInterMiniBlocks(nil) + assert.Nil(t, err) + + currTxFee := big.NewInt(50) + th.AddProcessedUTx(&feeTx.FeeTx{Value: currTxFee}) + + err = th.VerifyInterMiniBlocks(nil) + assert.Equal(t, process.ErrTxsFeesDoesNotMatch, err) + + badValue := big.NewInt(100) + th.AddTxFeeFromBlock(&feeTx.FeeTx{Value: badValue}) + + err = th.VerifyInterMiniBlocks(nil) + assert.Equal(t, process.ErrTxsFeesDoesNotMatch, err) + + th.CleanProcessedUTxs() + + currTxFee = big.NewInt(50) + halfCurrTxFee := big.NewInt(25) + th.AddProcessedUTx(&feeTx.FeeTx{Value: currTxFee}) + th.AddTxFeeFromBlock(&feeTx.FeeTx{Value: halfCurrTxFee}) + + err = th.VerifyInterMiniBlocks(nil) + assert.Equal(t, process.ErrTxsFeesDoesNotMatch, err) + + th.CleanProcessedUTxs() + + currTxFee = big.NewInt(50) + th.AddProcessedUTx(&feeTx.FeeTx{Value: currTxFee}) + th.AddTxFeeFromBlock(&feeTx.FeeTx{Value: big.NewInt(5), RcvAddr: addr.ElrondCommunityAddress()}) + th.AddTxFeeFromBlock(&feeTx.FeeTx{Value: big.NewInt(20), RcvAddr: addr.LeaderAddress()}) +} From c821e625e5f09945860478f6380105ba1063864e Mon Sep 17 00:00:00 2001 From: Robert Sasu Date: Wed, 10 Jul 2019 11:45:50 +0300 Subject: [PATCH 018/234] pushing unit tests. 
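
Add a unit test for processing a plain move balance transaction when fees
are charged. As asserted in the test below, the fee deducted from the
sender is GasPrice * GasLimit = 2 * 2 = 4, so for a transfer of 61 out of
a starting balance of 90 the expected balances are:

    sender:   90 - 61 - 4 = 25
    receiver: 10 + 61     = 71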
--- process/transaction/process_test.go | 52 +++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) diff --git a/process/transaction/process_test.go b/process/transaction/process_test.go index 4028a2f2038..b67c8a83f57 100644 --- a/process/transaction/process_test.go +++ b/process/transaction/process_test.go @@ -981,6 +981,58 @@ func TestTxProcessor_ProcessOkValsShouldWork(t *testing.T) { assert.Equal(t, 4, saveAccountCalled) } +func TestTxProcessor_MoveBalanceWithFeesShouldWork(t *testing.T) { + journalizeCalled := 0 + saveAccountCalled := 0 + tracker := &mock.AccountTrackerStub{ + JournalizeCalled: func(entry state.JournalEntry) { + journalizeCalled++ + }, + SaveAccountCalled: func(accountHandler state.AccountHandler) error { + saveAccountCalled++ + return nil + }, + } + + tx := transaction.Transaction{} + tx.Nonce = 4 + tx.SndAddr = []byte("SRC") + tx.RcvAddr = []byte("DST") + tx.Value = big.NewInt(61) + tx.GasPrice = 2 + tx.GasLimit = 2 + + acntSrc, err := state.NewAccount(mock.NewAddressMock(tx.SndAddr), tracker) + assert.Nil(t, err) + acntDst, err := state.NewAccount(mock.NewAddressMock(tx.RcvAddr), tracker) + assert.Nil(t, err) + + acntSrc.Nonce = 4 + acntSrc.Balance = big.NewInt(90) + acntDst.Balance = big.NewInt(10) + + accounts := createAccountStub(tx.SndAddr, tx.RcvAddr, acntSrc, acntDst) + + execTx, _ := txproc.NewTxProcessor( + accounts, + mock.HasherMock{}, + &mock.AddressConverterMock{}, + &mock.MarshalizerMock{}, + mock.NewOneShardCoordinatorMock(), + &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, + ) + + err = execTx.ProcessTransaction(&tx, 4) + assert.Nil(t, err) + assert.Equal(t, uint64(5), acntSrc.Nonce) + assert.Equal(t, big.NewInt(25), acntSrc.Balance) + assert.Equal(t, big.NewInt(71), acntDst.Balance) + assert.Equal(t, 4, journalizeCalled) + assert.Equal(t, 4, saveAccountCalled) +} + func TestTxProcessor_ProcessTransactionScTxShouldWork(t *testing.T) { journalizeCalled := 0 saveAccountCalled := 0 From be4f50c41075701c578fd8eedeae461a4ee7f461 Mon Sep 17 00:00:00 2001 From: Robert Sasu Date: Wed, 10 Jul 2019 12:20:50 +0300 Subject: [PATCH 019/234] pushing unit tests. 
--- cmd/node/factory/structs.go | 1 + 1 file changed, 1 insertion(+) diff --git a/cmd/node/factory/structs.go b/cmd/node/factory/structs.go index b254826e68e..4aca20a7c22 100644 --- a/cmd/node/factory/structs.go +++ b/cmd/node/factory/structs.go @@ -1265,6 +1265,7 @@ func newShardBlockProcessorAndTracker( return nil, nil, err } + // TODO: construct this correctly on the PR specialAddressHolder, err := address.NewSpecialAddressHolder( []byte("elrond"), []byte("own"), From c51305cab1dc7f92a70325efa7bc67abaa613e66 Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Wed, 10 Jul 2019 12:26:36 +0300 Subject: [PATCH 020/234] process, integrationTests: remove obsolete chronology validator --- cmd/node/factory/structs.go | 29 +++++++++++---- .../frontend/wallet/testInitializer.go | 1 - .../mock/chronologyValidatorMock.go | 8 ----- .../multiShard/block/testInitializer.go | 2 -- .../multiShard/metablock/testInitializer.go | 2 -- .../multiShard/transaction/testInitializer.go | 1 - .../singleShard/block/testInitializer.go | 1 - .../transaction/testInitializer.go | 1 - process/block/interceptedBlockHeader.go | 18 +--------- process/block/interceptedBlockHeader_test.go | 25 ------------- process/block/interceptedMetaBlockHeader.go | 18 +--------- .../block/interceptedMetaBlockHeader_test.go | 22 ------------ .../block/interceptors/headerInterceptor.go | 2 -- .../interceptors/headerInterceptorBase.go | 7 ---- .../headerInterceptorBase_test.go | 26 ++------------ .../interceptors/headerInterceptor_test.go | 29 ++------------- .../metachainHeaderInterceptor.go | 8 +---- .../metachainHeaderInterceptor_test.go | 35 ++----------------- .../metachain/interceptorsContainerFactory.go | 8 ----- .../interceptorsContainerFactory_test.go | 15 -------- .../shard/interceptorsContainerFactory.go | 8 ----- .../interceptorsContainerFactory_test.go | 24 ------------- process/interface.go | 6 ---- process/metablock/shardHeaderInterceptor.go | 2 -- .../metablock/shardHeaderInterceptor_test.go | 28 ++------------- process/mock/chronologyValidatorStub.go | 9 ----- 26 files changed, 37 insertions(+), 298 deletions(-) delete mode 100644 integrationTests/mock/chronologyValidatorMock.go delete mode 100644 process/mock/chronologyValidatorStub.go diff --git a/cmd/node/factory/structs.go b/cmd/node/factory/structs.go index ae2aea102bc..32964d48660 100644 --- a/cmd/node/factory/structs.go +++ b/cmd/node/factory/structs.go @@ -196,7 +196,11 @@ func NewStateComponentsFactoryArgs( // StateComponentsFactory creates the state components func StateComponentsFactory(args *stateComponentsFactoryArgs) (*State, error) { - addressConverter, err := addressConverters.NewPlainAddressConverter(args.config.Address.Length, args.config.Address.Prefix) + addressConverter, err := addressConverters.NewPlainAddressConverter( + args.config.Address.Length, + args.config.Address.Prefix, + ) + if err != nil { return nil, errors.New("could not create address converter: " + err.Error()) } @@ -231,7 +235,12 @@ type dataComponentsFactoryArgs struct { } // NewDataComponentsFactoryArgs initializes the arguments necessary for creating the data components -func NewDataComponentsFactoryArgs(config *config.Config, shardCoordinator sharding.Coordinator, core *Core, uniqueID string) *dataComponentsFactoryArgs { +func NewDataComponentsFactoryArgs( + config *config.Config, + shardCoordinator sharding.Coordinator, + core *Core, + uniqueID string, +) *dataComponentsFactoryArgs { return &dataComponentsFactoryArgs{ config: config, shardCoordinator: shardCoordinator, @@ -476,8 
+485,18 @@ func ProcessComponentsFactory(args *processComponentsFactoryArgs) (*Process, err return nil, err } - blockProcessor, blockTracker, err := newBlockProcessorAndTracker(resolversFinder, args.shardCoordinator, - args.data, args.core, args.state, forkDetector, shardsGenesisBlocks, args.nodesConfig, args.coreServiceContainer) + blockProcessor, blockTracker, err := newBlockProcessorAndTracker( + resolversFinder, + args.shardCoordinator, + args.data, + args.core, + args.state, + forkDetector, + shardsGenesisBlocks, + args.nodesConfig, + args.coreServiceContainer, + ) + if err != nil { return nil, err } @@ -1084,7 +1103,6 @@ func newShardInterceptorAndResolverContainerFactory( crypto.MultiSigner, data.Datapool, state.AddressConverter, - &nullChronologyValidator{}, ) if err != nil { return nil, nil, err @@ -1129,7 +1147,6 @@ func newMetaInterceptorAndResolverContainerFactory( core.Hasher, crypto.MultiSigner, data.MetaDatapool, - &nullChronologyValidator{}, ) if err != nil { return nil, nil, err diff --git a/integrationTests/frontend/wallet/testInitializer.go b/integrationTests/frontend/wallet/testInitializer.go index 44910cc06a0..c00160b4e63 100644 --- a/integrationTests/frontend/wallet/testInitializer.go +++ b/integrationTests/frontend/wallet/testInitializer.go @@ -163,7 +163,6 @@ func createNetNode( multiSigner, dPool, addrConverter, - &mock.ChronologyValidatorMock{}, ) interceptorsContainer, _ := interceptorContainerFactory.Create() diff --git a/integrationTests/mock/chronologyValidatorMock.go b/integrationTests/mock/chronologyValidatorMock.go deleted file mode 100644 index 6b9e34b315c..00000000000 --- a/integrationTests/mock/chronologyValidatorMock.go +++ /dev/null @@ -1,8 +0,0 @@ -package mock - -type ChronologyValidatorMock struct { -} - -func (cvm *ChronologyValidatorMock) ValidateReceivedBlock(shardID uint32, epoch uint32, nonce uint64, round uint32) error { - return nil -} diff --git a/integrationTests/multiShard/block/testInitializer.go b/integrationTests/multiShard/block/testInitializer.go index abf3fae1e11..5163cf0d9ac 100644 --- a/integrationTests/multiShard/block/testInitializer.go +++ b/integrationTests/multiShard/block/testInitializer.go @@ -231,7 +231,6 @@ func createNetNode( testMultiSig, dPool, testAddressConverter, - &mock.ChronologyValidatorMock{}, ) interceptorsContainer, err := interceptorContainerFactory.Create() if err != nil { @@ -703,7 +702,6 @@ func createMetaNetNode( testHasher, testMultiSig, dPool, - &mock.ChronologyValidatorMock{}, ) interceptorsContainer, err := interceptorContainerFactory.Create() if err != nil { diff --git a/integrationTests/multiShard/metablock/testInitializer.go b/integrationTests/multiShard/metablock/testInitializer.go index 5ad66aabe37..143317921b4 100644 --- a/integrationTests/multiShard/metablock/testInitializer.go +++ b/integrationTests/multiShard/metablock/testInitializer.go @@ -343,7 +343,6 @@ func createShardNetNode( testMultiSig, dPool, addConverter, - &mock.ChronologyValidatorMock{}, ) interceptorsContainer, err := interceptorContainerFactory.Create() if err != nil { @@ -525,7 +524,6 @@ func createMetaNetNode( testHasher, testMultiSig, dPool, - &mock.ChronologyValidatorMock{}, ) interceptorsContainer, err := interceptorContainerFactory.Create() if err != nil { diff --git a/integrationTests/multiShard/transaction/testInitializer.go b/integrationTests/multiShard/transaction/testInitializer.go index f41e7800c8b..47d221ef87c 100644 --- a/integrationTests/multiShard/transaction/testInitializer.go +++ 
b/integrationTests/multiShard/transaction/testInitializer.go @@ -250,7 +250,6 @@ func createNetNode( multiSigner, dPool, addrConverter, - &mock.ChronologyValidatorMock{}, ) interceptorsContainer, _ := interceptorContainerFactory.Create() diff --git a/integrationTests/singleShard/block/testInitializer.go b/integrationTests/singleShard/block/testInitializer.go index d2d834a094b..223ec7210a7 100644 --- a/integrationTests/singleShard/block/testInitializer.go +++ b/integrationTests/singleShard/block/testInitializer.go @@ -195,7 +195,6 @@ func createNetNode( multiSigner, dPool, addrConverter, - &mock.ChronologyValidatorMock{}, ) interceptorsContainer, _ := interceptorContainerFactory.Create() diff --git a/integrationTests/singleShard/transaction/testInitializer.go b/integrationTests/singleShard/transaction/testInitializer.go index 94d6f8dc11c..05b454b3ca5 100644 --- a/integrationTests/singleShard/transaction/testInitializer.go +++ b/integrationTests/singleShard/transaction/testInitializer.go @@ -203,7 +203,6 @@ func createNetNode( multiSigner, dPool, addrConverter, - &mock.ChronologyValidatorMock{}, ) interceptorsContainer, _ := interceptorContainerFactory.Create() diff --git a/process/block/interceptedBlockHeader.go b/process/block/interceptedBlockHeader.go index a4aae581f79..b2461eb39f2 100644 --- a/process/block/interceptedBlockHeader.go +++ b/process/block/interceptedBlockHeader.go @@ -13,7 +13,6 @@ import ( type InterceptedHeader struct { *block.Header multiSigVerifier crypto.MultiSigVerifier - chronologyValidator process.ChronologyValidator hash []byte nodesCoordinator sharding.NodesCoordinator marshalizer marshal.Marshalizer @@ -22,7 +21,6 @@ type InterceptedHeader struct { // NewInterceptedHeader creates a new instance of InterceptedHeader struct func NewInterceptedHeader( multiSigVerifier crypto.MultiSigVerifier, - chronologyValidator process.ChronologyValidator, nodesCoordinator sharding.NodesCoordinator, marshalizer marshal.Marshalizer, ) *InterceptedHeader { @@ -30,7 +28,6 @@ func NewInterceptedHeader( return &InterceptedHeader{ Header: &block.Header{}, multiSigVerifier: multiSigVerifier, - chronologyValidator: chronologyValidator, nodesCoordinator: nodesCoordinator, marshalizer: marshalizer, } @@ -68,7 +65,7 @@ func (inHdr *InterceptedHeader) IntegrityAndValidity(coordinator sharding.Coordi return err } - return inHdr.validityCheck() + return nil } // Integrity checks the integrity of the state block wrapper @@ -113,19 +110,6 @@ func (inHdr *InterceptedHeader) Integrity(coordinator sharding.Coordinator) erro } } -func (inHdr *InterceptedHeader) validityCheck() error { - if inHdr.chronologyValidator == nil { - return process.ErrNilChronologyValidator - } - - return inHdr.chronologyValidator.ValidateReceivedBlock( - inHdr.ShardId, - inHdr.Epoch, - inHdr.Nonce, - inHdr.Round, - ) -} - // VerifySig verifies a signature func (inHdr *InterceptedHeader) VerifySig() error { randSeed := inHdr.GetPrevRandSeed() diff --git a/process/block/interceptedBlockHeader_test.go b/process/block/interceptedBlockHeader_test.go index aa8fea0d37f..be738d67779 100644 --- a/process/block/interceptedBlockHeader_test.go +++ b/process/block/interceptedBlockHeader_test.go @@ -14,11 +14,6 @@ func createTestInterceptedHeader() *block.InterceptedHeader { return block.NewInterceptedHeader( mock.NewMultiSigner(), - &mock.ChronologyValidatorStub{ - ValidateReceivedBlockCalled: func(shardID uint32, epoch uint32, nonce uint64, round uint32) error { - return nil - }, - }, &mock.NodesCoordinatorMock{}, 
&mock.MarshalizerMock{Fail: false}, ) @@ -248,26 +243,6 @@ func TestInterceptedHeader_IntegrityAndValidityIntegrityDoesNotPassShouldErr(t * assert.Equal(t, process.ErrNilPubKeysBitmap, hdr.IntegrityAndValidity(mock.NewOneShardCoordinatorMock())) } -func TestInterceptedHeader_IntegrityAndValidityNilChronologyValidatorShouldErr(t *testing.T) { - t.Parallel() - - hdr := block.NewInterceptedHeader( - mock.NewMultiSigner(), - nil, - &mock.NodesCoordinatorMock{}, - &mock.MarshalizerMock{Fail: false}, - ) - hdr.PrevHash = make([]byte, 0) - hdr.PubKeysBitmap = make([]byte, 0) - hdr.BlockBodyType = block2.PeerBlock - hdr.Signature = make([]byte, 0) - hdr.RootHash = make([]byte, 0) - hdr.PrevRandSeed = make([]byte, 0) - hdr.RandSeed = make([]byte, 0) - - assert.Equal(t, process.ErrNilChronologyValidator, hdr.IntegrityAndValidity(mock.NewOneShardCoordinatorMock())) -} - func TestInterceptedHeader_IntegrityAndValidityOkValsShouldWork(t *testing.T) { t.Parallel() diff --git a/process/block/interceptedMetaBlockHeader.go b/process/block/interceptedMetaBlockHeader.go index bd4e8c9e3c0..5b2f7e3aa08 100644 --- a/process/block/interceptedMetaBlockHeader.go +++ b/process/block/interceptedMetaBlockHeader.go @@ -12,20 +12,17 @@ import ( type InterceptedMetaHeader struct { *block.MetaBlock multiSigVerifier crypto.MultiSigVerifier - chronologyValidator process.ChronologyValidator hash []byte } // NewInterceptedHeader creates a new instance of InterceptedHeader struct func NewInterceptedMetaHeader( multiSigVerifier crypto.MultiSigVerifier, - chronologyValidator process.ChronologyValidator, ) *InterceptedMetaHeader { return &InterceptedMetaHeader{ MetaBlock: &block.MetaBlock{}, multiSigVerifier: multiSigVerifier, - chronologyValidator: chronologyValidator, } } @@ -51,7 +48,7 @@ func (imh *InterceptedMetaHeader) IntegrityAndValidity(coordinator sharding.Coor return err } - return imh.validityCheck() + return nil } // Integrity checks the integrity of the state block wrapper @@ -98,19 +95,6 @@ func (imh *InterceptedMetaHeader) Integrity(coordinator sharding.Coordinator) er return nil } -func (imh *InterceptedMetaHeader) validityCheck() error { - if imh.chronologyValidator == nil { - return process.ErrNilChronologyValidator - } - - return imh.chronologyValidator.ValidateReceivedBlock( - sharding.MetachainShardId, - imh.Epoch, - imh.Nonce, - imh.Round, - ) -} - // VerifySig verifies a signature func (imh *InterceptedMetaHeader) VerifySig() error { // TODO: Check block signature after multisig will be implemented diff --git a/process/block/interceptedMetaBlockHeader_test.go b/process/block/interceptedMetaBlockHeader_test.go index a347338f704..2b989b86e50 100644 --- a/process/block/interceptedMetaBlockHeader_test.go +++ b/process/block/interceptedMetaBlockHeader_test.go @@ -13,11 +13,6 @@ import ( func createTestInterceptedMetaHeader() *block.InterceptedMetaHeader { return block.NewInterceptedMetaHeader( mock.NewMultiSigner(), - &mock.ChronologyValidatorStub{ - ValidateReceivedBlockCalled: func(shardID uint32, epoch uint32, nonce uint64, round uint32) error { - return nil - }, - }, ) } @@ -283,23 +278,6 @@ func TestInterceptedMetaHeader_IntegrityAndValidityIntegrityDoesNotPassShouldErr assert.Equal(t, process.ErrNilPubKeysBitmap, hdr.IntegrityAndValidity(mock.NewOneShardCoordinatorMock())) } -func TestInterceptedMetaHeader_IntegrityAndValidityNilChronologyValidatorShouldErr(t *testing.T) { - t.Parallel() - - hdr := block.NewInterceptedMetaHeader( - mock.NewMultiSigner(), - nil, - ) - hdr.PrevHash = make([]byte, 0) - 
hdr.PubKeysBitmap = make([]byte, 0) - hdr.Signature = make([]byte, 0) - hdr.RootHash = make([]byte, 0) - hdr.PrevRandSeed = make([]byte, 0) - hdr.RandSeed = make([]byte, 0) - - assert.Equal(t, process.ErrNilChronologyValidator, hdr.IntegrityAndValidity(mock.NewOneShardCoordinatorMock())) -} - func TestInterceptedMetaHeader_IntegrityAndValidityOkValsShouldWork(t *testing.T) { t.Parallel() diff --git a/process/block/interceptors/headerInterceptor.go b/process/block/interceptors/headerInterceptor.go index 7abf703fd56..bad02ade911 100644 --- a/process/block/interceptors/headerInterceptor.go +++ b/process/block/interceptors/headerInterceptor.go @@ -31,7 +31,6 @@ func NewHeaderInterceptor( hasher hashing.Hasher, shardCoordinator sharding.Coordinator, nodesCoordinator sharding.NodesCoordinator, - chronologyValidator process.ChronologyValidator, ) (*HeaderInterceptor, error) { if headersNonces == nil { @@ -47,7 +46,6 @@ func NewHeaderInterceptor( hasher, shardCoordinator, nodesCoordinator, - chronologyValidator, ) if err != nil { return nil, err diff --git a/process/block/interceptors/headerInterceptorBase.go b/process/block/interceptors/headerInterceptorBase.go index 4fbc2ba49fc..50fd40571f8 100644 --- a/process/block/interceptors/headerInterceptorBase.go +++ b/process/block/interceptors/headerInterceptorBase.go @@ -19,7 +19,6 @@ type HeaderInterceptorBase struct { hasher hashing.Hasher shardCoordinator sharding.Coordinator nodesCoordinator sharding.NodesCoordinator - chronologyValidator process.ChronologyValidator } // NewHeaderInterceptorBase creates a new HeaderIncterceptorBase instance @@ -30,7 +29,6 @@ func NewHeaderInterceptorBase( hasher hashing.Hasher, shardCoordinator sharding.Coordinator, nodesCoordinator sharding.NodesCoordinator, - chronologyValidator process.ChronologyValidator, ) (*HeaderInterceptorBase, error) { if marshalizer == nil { return nil, process.ErrNilMarshalizer @@ -50,9 +48,6 @@ func NewHeaderInterceptorBase( if nodesCoordinator == nil { return nil, process.ErrNilNodesCoordinator } - if chronologyValidator == nil { - return nil, process.ErrNilChronologyValidator - } hdrIntercept := &HeaderInterceptorBase{ marshalizer: marshalizer, @@ -61,7 +56,6 @@ func NewHeaderInterceptorBase( hasher: hasher, shardCoordinator: shardCoordinator, nodesCoordinator: nodesCoordinator, - chronologyValidator: chronologyValidator, } return hdrIntercept, nil @@ -79,7 +73,6 @@ func (hib *HeaderInterceptorBase) ParseReceivedMessage(message p2p.MessageP2P) ( hdrIntercepted := block.NewInterceptedHeader( hib.multiSigVerifier, - hib.chronologyValidator, hib.nodesCoordinator, hib.marshalizer, ) diff --git a/process/block/interceptors/headerInterceptorBase_test.go b/process/block/interceptors/headerInterceptorBase_test.go index 4a9841ae8cc..3c0ebf197fc 100644 --- a/process/block/interceptors/headerInterceptorBase_test.go +++ b/process/block/interceptors/headerInterceptorBase_test.go @@ -29,7 +29,6 @@ func TestNewHeaderInterceptorBase_NilMarshalizerShouldErr(t *testing.T) { mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), mock.NewNodesCoordinatorMock(), - &mock.ChronologyValidatorStub{}, ) assert.Equal(t, process.ErrNilMarshalizer, err) @@ -46,7 +45,6 @@ func TestNewHeaderInterceptorBase_NilStorerShouldErr(t *testing.T) { mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), mock.NewNodesCoordinatorMock(), - &mock.ChronologyValidatorStub{}, ) assert.Equal(t, process.ErrNilHeadersStorage, err) @@ -64,7 +62,6 @@ func TestNewHeaderInterceptorBase_NilMultiSignerShouldErr(t *testing.T) { 
mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), mock.NewNodesCoordinatorMock(), - &mock.ChronologyValidatorStub{}, ) assert.Nil(t, hi) @@ -82,7 +79,6 @@ func TestNewHeaderInterceptorBase_NilHasherShouldErr(t *testing.T) { nil, mock.NewOneShardCoordinatorMock(), mock.NewNodesCoordinatorMock(), - &mock.ChronologyValidatorStub{}, ) assert.Equal(t, process.ErrNilHasher, err) @@ -100,7 +96,6 @@ func TestNewHeaderInterceptorBase_NilShardCoordinatorShouldErr(t *testing.T) { mock.HasherMock{}, nil, mock.NewNodesCoordinatorMock(), - &mock.ChronologyValidatorStub{}, ) assert.Equal(t, process.ErrNilShardCoordinator, err) @@ -118,7 +113,6 @@ func TestNewHeaderInterceptorBase_NilNodesCoordinatorShouldErr(t *testing.T) { mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), nil, - &mock.ChronologyValidatorStub{}, ) assert.Equal(t, process.ErrNilNodesCoordinator, err) @@ -136,7 +130,6 @@ func TestNewHeaderInterceptorBase_OkValsShouldWork(t *testing.T) { mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), mock.NewNodesCoordinatorMock(), - &mock.ChronologyValidatorStub{}, ) assert.Nil(t, err) @@ -156,7 +149,6 @@ func TestHeaderInterceptorBase_ParseReceivedMessageNilMessageShouldErr(t *testin mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), mock.NewNodesCoordinatorMock(), - &mock.ChronologyValidatorStub{}, ) hdr, err := hib.ParseReceivedMessage(nil) @@ -176,7 +168,6 @@ func TestHeaderInterceptorBase_ParseReceivedMessageNilDataToProcessShouldErr(t * mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), mock.NewNodesCoordinatorMock(), - &mock.ChronologyValidatorStub{}, ) msg := &mock.P2PMessageMock{} @@ -203,7 +194,6 @@ func TestHeaderInterceptorBase_ParseReceivedMessageMarshalizerErrorsAtUnmarshali mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), mock.NewNodesCoordinatorMock(), - &mock.ChronologyValidatorStub{}, ) msg := &mock.P2PMessageMock{ @@ -221,11 +211,6 @@ func TestHeaderInterceptorBase_ParseReceivedMessageSanityCheckFailedShouldErr(t storer := &mock.StorerStub{} marshalizer := &mock.MarshalizerMock{} multisigner := mock.NewMultiSigner() - chronologyValidator := &mock.ChronologyValidatorStub{ - ValidateReceivedBlockCalled: func(shardID uint32, epoch uint32, nonce uint64, round uint32) error { - return nil - }, - } nodesCoordinator := mock.NewNodesCoordinatorMock() hib, _ := interceptors.NewHeaderInterceptorBase( @@ -235,10 +220,9 @@ func TestHeaderInterceptorBase_ParseReceivedMessageSanityCheckFailedShouldErr(t mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), nodesCoordinator, - chronologyValidator, ) - hdr := block.NewInterceptedHeader(multisigner, chronologyValidator, nodesCoordinator, marshalizer) + hdr := block.NewInterceptedHeader(multisigner, nodesCoordinator, marshalizer) buff, _ := marshalizer.Marshal(hdr) msg := &mock.P2PMessageMock{ DataField: buff, @@ -281,11 +265,6 @@ func TestHeaderInterceptorBase_ParseReceivedMessageValsOkShouldWork(t *testing.T marshalizer := &mock.MarshalizerMock{} testedNonce := uint64(67) multisigner := mock.NewMultiSigner() - chronologyValidator := &mock.ChronologyValidatorStub{ - ValidateReceivedBlockCalled: func(shardID uint32, epoch uint32, nonce uint64, round uint32) error { - return nil - }, - } storer := &mock.StorerStub{} storer.HasCalled = func(key []byte) error { return errors.New("Key not found") @@ -300,10 +279,9 @@ func TestHeaderInterceptorBase_ParseReceivedMessageValsOkShouldWork(t *testing.T mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), nodesCoordinator, - chronologyValidator, ) - hdr := 
block.NewInterceptedHeader(multisigner, chronologyValidator, nodesCoordinator, marshalizer) + hdr := block.NewInterceptedHeader(multisigner, nodesCoordinator, marshalizer) hdr.Nonce = testedNonce hdr.ShardId = 0 hdr.PrevHash = make([]byte, 0) diff --git a/process/block/interceptors/headerInterceptor_test.go b/process/block/interceptors/headerInterceptor_test.go index 1bea9e9dfeb..8e0ab222405 100644 --- a/process/block/interceptors/headerInterceptor_test.go +++ b/process/block/interceptors/headerInterceptor_test.go @@ -64,7 +64,6 @@ func TestNewHeaderInterceptor_NilMarshalizerShouldErr(t *testing.T) { mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), mock.NewNodesCoordinatorMock(), - &mock.ChronologyValidatorStub{}, ) assert.Equal(t, process.ErrNilMarshalizer, err) @@ -86,7 +85,6 @@ func TestNewHeaderInterceptor_NilHeadersShouldErr(t *testing.T) { mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), mock.NewNodesCoordinatorMock(), - &mock.ChronologyValidatorStub{}, ) assert.Equal(t, process.ErrNilHeadersDataPool, err) @@ -108,7 +106,6 @@ func TestNewHeaderInterceptor_NilHeadersNoncesShouldErr(t *testing.T) { mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), mock.NewNodesCoordinatorMock(), - &mock.ChronologyValidatorStub{}, ) assert.Equal(t, process.ErrNilHeadersNoncesDataPool, err) @@ -131,7 +128,6 @@ func TestNewHeaderInterceptor_OkValsShouldWork(t *testing.T) { mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), mock.NewNodesCoordinatorMock(), - &mock.ChronologyValidatorStub{}, ) assert.Nil(t, err) @@ -156,7 +152,6 @@ func TestHeaderInterceptor_ProcessReceivedMessageNilMessageShouldErr(t *testing. mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), mock.NewNodesCoordinatorMock(), - &mock.ChronologyValidatorStub{}, ) assert.Equal(t, process.ErrNilMessage, hi.ProcessReceivedMessage(nil)) @@ -173,11 +168,6 @@ func TestHeaderInterceptor_ProcessReceivedMessageValsOkShouldWork(t *testing.T) headers := &mock.CacherStub{} multisigner := mock.NewMultiSigner() - chronologyValidator := &mock.ChronologyValidatorStub{ - ValidateReceivedBlockCalled: func(shardID uint32, epoch uint32, nonce uint64, round uint32) error { - return nil - }, - } headersNonces := &mock.Uint64CacherStub{} headersNonces.HasOrAddCalled = func(u uint64, i interface{}) (b bool, b2 bool) { if u == testedNonce { @@ -204,10 +194,9 @@ func TestHeaderInterceptor_ProcessReceivedMessageValsOkShouldWork(t *testing.T) mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), nodesCoordinator, - chronologyValidator, ) - hdr := block.NewInterceptedHeader(multisigner, chronologyValidator, nodesCoordinator, marshalizer) + hdr := block.NewInterceptedHeader(multisigner, nodesCoordinator, marshalizer) hdr.Nonce = testedNonce hdr.ShardId = 0 hdr.PrevHash = make([]byte, 0) @@ -255,11 +244,6 @@ func TestHeaderInterceptor_ProcessReceivedMessageIsInStorageShouldNotAdd(t *test headers := &mock.CacherStub{} multisigner := mock.NewMultiSigner() - chronologyValidator := &mock.ChronologyValidatorStub{ - ValidateReceivedBlockCalled: func(shardID uint32, epoch uint32, nonce uint64, round uint32) error { - return nil - }, - } headersNonces := &mock.Uint64CacherStub{} headersNonces.HasOrAddCalled = func(u uint64, i interface{}) (b bool, b2 bool) { if u == testedNonce { @@ -287,10 +271,9 @@ func TestHeaderInterceptor_ProcessReceivedMessageIsInStorageShouldNotAdd(t *test mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), nodesCoordinator, - chronologyValidator, ) - hdr := block.NewInterceptedHeader(multisigner, chronologyValidator, nodesCoordinator, 
marshalizer) + hdr := block.NewInterceptedHeader(multisigner, nodesCoordinator, marshalizer) hdr.Nonce = testedNonce hdr.ShardId = 0 hdr.PrevHash = make([]byte, 0) @@ -333,11 +316,6 @@ func TestHeaderInterceptor_ProcessReceivedMessageNotForCurrentShardShouldNotAdd( headers := &mock.CacherStub{} multisigner := mock.NewMultiSigner() - chronologyValidator := &mock.ChronologyValidatorStub{ - ValidateReceivedBlockCalled: func(shardID uint32, epoch uint32, nonce uint64, round uint32) error { - return nil - }, - } headersNonces := &mock.Uint64CacherStub{} headersNonces.HasOrAddCalled = func(u uint64, i interface{}) (b bool, b2 bool) { if u == testedNonce { @@ -372,10 +350,9 @@ func TestHeaderInterceptor_ProcessReceivedMessageNotForCurrentShardShouldNotAdd( mock.HasherMock{}, shardCoordinator, nodesCoordinator, - chronologyValidator, ) - hdr := block.NewInterceptedHeader(multisigner, chronologyValidator, nodesCoordinator, marshalizer) + hdr := block.NewInterceptedHeader(multisigner, nodesCoordinator, marshalizer) hdr.Nonce = testedNonce hdr.ShardId = 0 hdr.PrevHash = make([]byte, 0) diff --git a/process/block/interceptors/metachainHeaderInterceptor.go b/process/block/interceptors/metachainHeaderInterceptor.go index b7f930143c5..3b0ed1c8bd3 100644 --- a/process/block/interceptors/metachainHeaderInterceptor.go +++ b/process/block/interceptors/metachainHeaderInterceptor.go @@ -22,7 +22,6 @@ type MetachainHeaderInterceptor struct { multiSigVerifier crypto.MultiSigVerifier hasher hashing.Hasher shardCoordinator sharding.Coordinator - chronologyValidator process.ChronologyValidator } // NewMetachainHeaderInterceptor hooks a new interceptor for metachain block headers @@ -35,7 +34,6 @@ func NewMetachainHeaderInterceptor( multiSigVerifier crypto.MultiSigVerifier, hasher hashing.Hasher, shardCoordinator sharding.Coordinator, - chronologyValidator process.ChronologyValidator, ) (*MetachainHeaderInterceptor, error) { if marshalizer == nil { @@ -59,9 +57,6 @@ func NewMetachainHeaderInterceptor( if shardCoordinator == nil { return nil, process.ErrNilShardCoordinator } - if chronologyValidator == nil { - return nil, process.ErrNilChronologyValidator - } return &MetachainHeaderInterceptor{ messageChecker: &messageChecker{}, @@ -71,7 +66,6 @@ func NewMetachainHeaderInterceptor( multiSigVerifier: multiSigVerifier, hasher: hasher, shardCoordinator: shardCoordinator, - chronologyValidator: chronologyValidator, metachainHeadersNonces: metachainHeadersNonces, }, nil } @@ -84,7 +78,7 @@ func (mhi *MetachainHeaderInterceptor) ProcessReceivedMessage(message p2p.Messag return err } - metaHdrIntercepted := block.NewInterceptedMetaHeader(mhi.multiSigVerifier, mhi.chronologyValidator) + metaHdrIntercepted := block.NewInterceptedMetaHeader(mhi.multiSigVerifier) err = mhi.marshalizer.Unmarshal(metaHdrIntercepted, message.Data()) if err != nil { return err diff --git a/process/block/interceptors/metachainHeaderInterceptor_test.go b/process/block/interceptors/metachainHeaderInterceptor_test.go index 20c25ea4dde..98ab5bd9d21 100644 --- a/process/block/interceptors/metachainHeaderInterceptor_test.go +++ b/process/block/interceptors/metachainHeaderInterceptor_test.go @@ -30,7 +30,6 @@ func TestNewMetachainHeaderInterceptor_NilMarshalizerShouldErr(t *testing.T) { mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), - &mock.ChronologyValidatorStub{}, ) assert.Equal(t, process.ErrNilMarshalizer, err) @@ -50,7 +49,6 @@ func TestNewMetachainHeaderInterceptor_NilMetachainHeadersShouldErr(t *testing.T 
mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), - &mock.ChronologyValidatorStub{}, ) assert.Equal(t, process.ErrNilMetaHeadersDataPool, err) @@ -70,7 +68,6 @@ func TestNewMetachainHeaderInterceptor_NilMetachainHeadersNoncesShouldErr(t *tes mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), - &mock.ChronologyValidatorStub{}, ) assert.Equal(t, process.ErrNilMetaHeadersNoncesDataPool, err) @@ -90,7 +87,6 @@ func TestNewMetachainHeaderInterceptor_NilMetachainStorerShouldErr(t *testing.T) mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), - &mock.ChronologyValidatorStub{}, ) assert.Equal(t, process.ErrNilMetaHeadersStorage, err) @@ -111,7 +107,6 @@ func TestNewMetachainHeaderInterceptor_NilMultiSignerShouldErr(t *testing.T) { nil, mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), - &mock.ChronologyValidatorStub{}, ) assert.Nil(t, mhi) @@ -132,7 +127,6 @@ func TestNewMetachainHeaderInterceptor_NilHasherShouldErr(t *testing.T) { mock.NewMultiSigner(), nil, mock.NewOneShardCoordinatorMock(), - &mock.ChronologyValidatorStub{}, ) assert.Equal(t, process.ErrNilHasher, err) @@ -153,7 +147,6 @@ func TestNewMetachainHeaderInterceptor_NilShardCoordinatorShouldErr(t *testing.T mock.NewMultiSigner(), mock.HasherMock{}, nil, - &mock.ChronologyValidatorStub{}, ) assert.Equal(t, process.ErrNilShardCoordinator, err) @@ -174,7 +167,6 @@ func TestNewMetachainHeaderInterceptor_OkValsShouldWork(t *testing.T) { mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), - &mock.ChronologyValidatorStub{}, ) assert.Nil(t, err) @@ -197,7 +189,6 @@ func TestMetachainHeaderInterceptor_ProcessReceivedMessageNilMessageShouldErr(t mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), - &mock.ChronologyValidatorStub{}, ) assert.Equal(t, process.ErrNilMessage, mhi.ProcessReceivedMessage(nil)) @@ -217,7 +208,6 @@ func TestMetachainHeaderInterceptor_ProcessReceivedMessageNilDataToProcessShould mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), - &mock.ChronologyValidatorStub{}, ) msg := &mock.P2PMessageMock{} @@ -244,7 +234,6 @@ func TestMetachainHeaderInterceptor_ProcessReceivedMessageMarshalizerErrorsAtUnm mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), - &mock.ChronologyValidatorStub{}, ) msg := &mock.P2PMessageMock{ @@ -261,11 +250,6 @@ func TestMetachainHeaderInterceptor_ProcessReceivedMessageSanityCheckFailedShoul metachainStorer := &mock.StorerStub{} marshalizer := &mock.MarshalizerMock{} multisigner := mock.NewMultiSigner() - chronologyValidator := &mock.ChronologyValidatorStub{ - ValidateReceivedBlockCalled: func(shardID uint32, epoch uint32, nonce uint64, round uint32) error { - return nil - }, - } mhi, _ := interceptors.NewMetachainHeaderInterceptor( marshalizer, metachainHeaders, @@ -274,10 +258,9 @@ func TestMetachainHeaderInterceptor_ProcessReceivedMessageSanityCheckFailedShoul multisigner, mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), - chronologyValidator, ) - hdr := block.NewInterceptedMetaHeader(multisigner, chronologyValidator) + hdr := block.NewInterceptedMetaHeader(multisigner) buff, _ := marshalizer.Marshal(hdr) msg := &mock.P2PMessageMock{ DataField: buff, @@ -300,11 +283,6 @@ func TestMetachainHeaderInterceptor_ProcessReceivedMessageValsOkShouldWork(t *te }, } multisigner := mock.NewMultiSigner() - chronologyValidator := &mock.ChronologyValidatorStub{ - ValidateReceivedBlockCalled: func(shardID uint32, epoch uint32, 
nonce uint64, round uint32) error { - return nil - }, - } mhi, _ := interceptors.NewMetachainHeaderInterceptor( marshalizer, metachainHeaders, @@ -313,10 +291,9 @@ func TestMetachainHeaderInterceptor_ProcessReceivedMessageValsOkShouldWork(t *te multisigner, mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), - chronologyValidator, ) - hdr := block.NewInterceptedMetaHeader(multisigner, chronologyValidator) + hdr := block.NewInterceptedMetaHeader(multisigner) hdr.Nonce = testedNonce hdr.PrevHash = make([]byte, 0) hdr.PubKeysBitmap = make([]byte, 0) @@ -370,11 +347,6 @@ func TestMetachainHeaderInterceptor_ProcessReceivedMessageIsInStorageShouldNotAd chanDone := make(chan struct{}, 1) testedNonce := uint64(67) multisigner := mock.NewMultiSigner() - chronologyValidator := &mock.ChronologyValidatorStub{ - ValidateReceivedBlockCalled: func(shardID uint32, epoch uint32, nonce uint64, round uint32) error { - return nil - }, - } metachainHeaders := &mock.CacherStub{} metachainHeadersNonces := &mock.Uint64CacherStub{} metachainStorer := &mock.StorerStub{ @@ -390,10 +362,9 @@ func TestMetachainHeaderInterceptor_ProcessReceivedMessageIsInStorageShouldNotAd multisigner, mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), - chronologyValidator, ) - hdr := block.NewInterceptedMetaHeader(multisigner, chronologyValidator) + hdr := block.NewInterceptedMetaHeader(multisigner) hdr.Nonce = testedNonce hdr.PrevHash = make([]byte, 0) hdr.PubKeysBitmap = make([]byte, 0) diff --git a/process/factory/metachain/interceptorsContainerFactory.go b/process/factory/metachain/interceptorsContainerFactory.go index b64c3c91f37..2733f4094e7 100644 --- a/process/factory/metachain/interceptorsContainerFactory.go +++ b/process/factory/metachain/interceptorsContainerFactory.go @@ -23,7 +23,6 @@ type interceptorsContainerFactory struct { nodesCoordinator sharding.NodesCoordinator messenger process.TopicHandler multiSigner crypto.MultiSigner - chronologyValidator process.ChronologyValidator tpsBenchmark *statistics.TpsBenchmark } @@ -37,7 +36,6 @@ func NewInterceptorsContainerFactory( hasher hashing.Hasher, multiSigner crypto.MultiSigner, dataPool dataRetriever.MetaPoolsHolder, - chronologyValidator process.ChronologyValidator, ) (*interceptorsContainerFactory, error) { if shardCoordinator == nil { @@ -64,9 +62,6 @@ func NewInterceptorsContainerFactory( if dataPool == nil { return nil, process.ErrNilDataPoolHolder } - if chronologyValidator == nil { - return nil, process.ErrNilChronologyValidator - } return &interceptorsContainerFactory{ shardCoordinator: shardCoordinator, @@ -77,7 +72,6 @@ func NewInterceptorsContainerFactory( hasher: hasher, multiSigner: multiSigner, dataPool: dataPool, - chronologyValidator: chronologyValidator, }, nil } @@ -134,7 +128,6 @@ func (icf *interceptorsContainerFactory) generateMetablockInterceptor() ([]strin icf.multiSigner, icf.hasher, icf.shardCoordinator, - icf.chronologyValidator, ) if err != nil { return nil, nil, err @@ -182,7 +175,6 @@ func (icf *interceptorsContainerFactory) createOneShardHeaderInterceptor(identif icf.hasher, icf.shardCoordinator, icf.nodesCoordinator, - icf.chronologyValidator, ) if err != nil { return nil, err diff --git a/process/factory/metachain/interceptorsContainerFactory_test.go b/process/factory/metachain/interceptorsContainerFactory_test.go index 442ae071b69..15469b42ef3 100644 --- a/process/factory/metachain/interceptorsContainerFactory_test.go +++ b/process/factory/metachain/interceptorsContainerFactory_test.go @@ -88,7 +88,6 @@ func 
TestNewInterceptorsContainerFactory_NilShardCoordinatorShouldErr(t *testing &mock.HasherMock{}, mock.NewMultiSigner(), createDataPools(), - &mock.ChronologyValidatorStub{}, ) assert.Nil(t, icf) @@ -107,7 +106,6 @@ func TestNewInterceptorsContainerFactory_NilNodesCoordinatorShouldErr(t *testing &mock.HasherMock{}, mock.NewMultiSigner(), createDataPools(), - &mock.ChronologyValidatorStub{}, ) assert.Nil(t, icf) @@ -126,7 +124,6 @@ func TestNewInterceptorsContainerFactory_NilTopicHandlerShouldErr(t *testing.T) &mock.HasherMock{}, mock.NewMultiSigner(), createDataPools(), - &mock.ChronologyValidatorStub{}, ) assert.Nil(t, icf) @@ -145,7 +142,6 @@ func TestNewInterceptorsContainerFactory_NilBlockchainShouldErr(t *testing.T) { &mock.HasherMock{}, mock.NewMultiSigner(), createDataPools(), - &mock.ChronologyValidatorStub{}, ) assert.Nil(t, icf) @@ -164,7 +160,6 @@ func TestNewInterceptorsContainerFactory_NilMarshalizerShouldErr(t *testing.T) { &mock.HasherMock{}, mock.NewMultiSigner(), createDataPools(), - &mock.ChronologyValidatorStub{}, ) assert.Nil(t, icf) @@ -183,7 +178,6 @@ func TestNewInterceptorsContainerFactory_NilHasherShouldErr(t *testing.T) { nil, mock.NewMultiSigner(), createDataPools(), - &mock.ChronologyValidatorStub{}, ) assert.Nil(t, icf) @@ -202,7 +196,6 @@ func TestNewInterceptorsContainerFactory_NilMultiSignerShouldErr(t *testing.T) { &mock.HasherMock{}, nil, createDataPools(), - &mock.ChronologyValidatorStub{}, ) assert.Nil(t, icf) @@ -221,7 +214,6 @@ func TestNewInterceptorsContainerFactory_NilDataPoolShouldErr(t *testing.T) { &mock.HasherMock{}, mock.NewMultiSigner(), nil, - &mock.ChronologyValidatorStub{}, ) assert.Nil(t, icf) @@ -240,7 +232,6 @@ func TestNewInterceptorsContainerFactory_ShouldWork(t *testing.T) { &mock.HasherMock{}, mock.NewMultiSigner(), createDataPools(), - &mock.ChronologyValidatorStub{}, ) assert.NotNil(t, icf) @@ -261,7 +252,6 @@ func TestInterceptorsContainerFactory_CreateTopicMetablocksFailsShouldErr(t *tes &mock.HasherMock{}, mock.NewMultiSigner(), createDataPools(), - &mock.ChronologyValidatorStub{}, ) container, err := icf.Create() @@ -282,7 +272,6 @@ func TestInterceptorsContainerFactory_CreateTopicShardHeadersForMetachainFailsSh &mock.HasherMock{}, mock.NewMultiSigner(), createDataPools(), - &mock.ChronologyValidatorStub{}, ) container, err := icf.Create() @@ -303,7 +292,6 @@ func TestInterceptorsContainerFactory_CreateRegisterForMetablocksFailsShouldErr( &mock.HasherMock{}, mock.NewMultiSigner(), createDataPools(), - &mock.ChronologyValidatorStub{}, ) container, err := icf.Create() @@ -324,7 +312,6 @@ func TestInterceptorsContainerFactory_CreateRegisterShardHeadersForMetachainFail &mock.HasherMock{}, mock.NewMultiSigner(), createDataPools(), - &mock.ChronologyValidatorStub{}, ) container, err := icf.Create() @@ -352,7 +339,6 @@ func TestInterceptorsContainerFactory_CreateShouldWork(t *testing.T) { &mock.HasherMock{}, mock.NewMultiSigner(), createDataPools(), - &mock.ChronologyValidatorStub{}, ) container, err := icf.Create() @@ -392,7 +378,6 @@ func TestInterceptorsContainerFactory_With4ShardsShouldWork(t *testing.T) { &mock.HasherMock{}, mock.NewMultiSigner(), createDataPools(), - &mock.ChronologyValidatorStub{}, ) container, _ := icf.Create() diff --git a/process/factory/shard/interceptorsContainerFactory.go b/process/factory/shard/interceptorsContainerFactory.go index 8777fab1a55..67732b7a3b9 100644 --- a/process/factory/shard/interceptorsContainerFactory.go +++ b/process/factory/shard/interceptorsContainerFactory.go @@ -27,7 +27,6 @@ type 
interceptorsContainerFactory struct { dataPool dataRetriever.PoolsHolder addrConverter state.AddressConverter nodesCoordinator sharding.NodesCoordinator - chronologyValidator process.ChronologyValidator } // NewInterceptorsContainerFactory is responsible for creating a new interceptors factory object @@ -43,7 +42,6 @@ func NewInterceptorsContainerFactory( multiSigner crypto.MultiSigner, dataPool dataRetriever.PoolsHolder, addrConverter state.AddressConverter, - chronologyValidator process.ChronologyValidator, ) (*interceptorsContainerFactory, error) { if shardCoordinator == nil { @@ -79,9 +77,6 @@ func NewInterceptorsContainerFactory( if nodesCoordinator == nil { return nil, process.ErrNilNodesCoordinator } - if chronologyValidator == nil { - return nil, process.ErrNilChronologyValidator - } return &interceptorsContainerFactory{ shardCoordinator: shardCoordinator, @@ -95,7 +90,6 @@ func NewInterceptorsContainerFactory( multiSigner: multiSigner, dataPool: dataPool, addrConverter: addrConverter, - chronologyValidator: chronologyValidator, }, nil } @@ -304,7 +298,6 @@ func (icf *interceptorsContainerFactory) generateHdrInterceptor() ([]string, []p icf.hasher, icf.shardCoordinator, icf.nodesCoordinator, - icf.chronologyValidator, ) if err != nil { return nil, nil, err @@ -399,7 +392,6 @@ func (icf *interceptorsContainerFactory) generateMetachainHeaderInterceptor() ([ icf.multiSigner, icf.hasher, icf.shardCoordinator, - icf.chronologyValidator, ) if err != nil { return nil, nil, err diff --git a/process/factory/shard/interceptorsContainerFactory_test.go b/process/factory/shard/interceptorsContainerFactory_test.go index 78a7cf98294..9dff335ca7e 100644 --- a/process/factory/shard/interceptorsContainerFactory_test.go +++ b/process/factory/shard/interceptorsContainerFactory_test.go @@ -96,7 +96,6 @@ func TestNewInterceptorsContainerFactory_NilShardCoordinatorShouldErr(t *testing mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, ) assert.Nil(t, icf) @@ -118,7 +117,6 @@ func TestNewInterceptorsContainerFactory_NilNodesCoordinatorShouldErr(t *testing mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, ) assert.Nil(t, icf) @@ -140,7 +138,6 @@ func TestNewInterceptorsContainerFactory_NilTopicHandlerShouldErr(t *testing.T) mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, ) assert.Nil(t, icf) @@ -162,7 +159,6 @@ func TestNewInterceptorsContainerFactory_NilBlockchainShouldErr(t *testing.T) { mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, ) assert.Nil(t, icf) @@ -184,7 +180,6 @@ func TestNewInterceptorsContainerFactory_NilMarshalizerShouldErr(t *testing.T) { mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, ) assert.Nil(t, icf) @@ -206,7 +201,6 @@ func TestNewInterceptorsContainerFactory_NilHasherShouldErr(t *testing.T) { mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, ) assert.Nil(t, icf) @@ -228,7 +222,6 @@ func TestNewInterceptorsContainerFactory_NilKeyGenShouldErr(t *testing.T) { mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, ) assert.Nil(t, icf) @@ -250,7 +243,6 @@ func TestNewInterceptorsContainerFactory_NilSingleSignerShouldErr(t *testing.T) mock.NewMultiSigner(), createDataPools(), 
&mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, ) assert.Nil(t, icf) @@ -272,7 +264,6 @@ func TestNewInterceptorsContainerFactory_NilMultiSignerShouldErr(t *testing.T) { nil, createDataPools(), &mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, ) assert.Nil(t, icf) @@ -294,7 +285,6 @@ func TestNewInterceptorsContainerFactory_NilDataPoolShouldErr(t *testing.T) { mock.NewMultiSigner(), nil, &mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, ) assert.Nil(t, icf) @@ -316,7 +306,6 @@ func TestNewInterceptorsContainerFactory_NilAddrConverterShouldErr(t *testing.T) mock.NewMultiSigner(), createDataPools(), nil, - &mock.ChronologyValidatorStub{}, ) assert.Nil(t, icf) @@ -338,7 +327,6 @@ func TestNewInterceptorsContainerFactory_ShouldWork(t *testing.T) { mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, ) assert.NotNil(t, icf) @@ -362,7 +350,6 @@ func TestInterceptorsContainerFactory_CreateTopicCreationTxFailsShouldErr(t *tes mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, ) container, err := icf.Create() @@ -386,7 +373,6 @@ func TestInterceptorsContainerFactory_CreateTopicCreationHdrFailsShouldErr(t *te mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, ) container, err := icf.Create() @@ -410,7 +396,6 @@ func TestInterceptorsContainerFactory_CreateTopicCreationMiniBlocksFailsShouldEr mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, ) container, err := icf.Create() @@ -434,7 +419,6 @@ func TestInterceptorsContainerFactory_CreateTopicCreationPeerChBlocksFailsShould mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, ) container, err := icf.Create() @@ -458,7 +442,6 @@ func TestInterceptorsContainerFactory_CreateTopicCreationMetachainHeadersFailsSh mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, ) container, err := icf.Create() @@ -482,7 +465,6 @@ func TestInterceptorsContainerFactory_CreateRegisterTxFailsShouldErr(t *testing. 
mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, ) container, err := icf.Create() @@ -506,7 +488,6 @@ func TestInterceptorsContainerFactory_CreateRegisterHdrFailsShouldErr(t *testing mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, ) container, err := icf.Create() @@ -530,7 +511,6 @@ func TestInterceptorsContainerFactory_CreateRegisterMiniBlocksFailsShouldErr(t * mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, ) container, err := icf.Create() @@ -554,7 +534,6 @@ func TestInterceptorsContainerFactory_CreateRegisterPeerChBlocksFailsShouldErr(t mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, ) container, err := icf.Create() @@ -578,7 +557,6 @@ func TestInterceptorsContainerFactory_CreateRegisterMetachainHeadersShouldErr(t mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, ) container, err := icf.Create() @@ -609,7 +587,6 @@ func TestInterceptorsContainerFactory_CreateShouldWork(t *testing.T) { mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, ) container, err := icf.Create() @@ -652,7 +629,6 @@ func TestInterceptorsContainerFactory_With4ShardsShouldWork(t *testing.T) { mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, ) container, _ := icf.Create() diff --git a/process/interface.go b/process/interface.go index ad65706dc25..d3b647c96a1 100644 --- a/process/interface.go +++ b/process/interface.go @@ -156,12 +156,6 @@ type TopicMessageHandler interface { TopicHandler } -// ChronologyValidator defines the functionality needed to validate a received header block (shard or metachain) -// from chronology point of view -type ChronologyValidator interface { - ValidateReceivedBlock(shardID uint32, epoch uint32, nonce uint64, round uint32) error -} - // DataPacker can split a large slice of byte slices in smaller packets type DataPacker interface { PackDataInChunks(data [][]byte, limit int) ([][]byte, error) diff --git a/process/metablock/shardHeaderInterceptor.go b/process/metablock/shardHeaderInterceptor.go index bba8aef1e0f..9a53939c570 100644 --- a/process/metablock/shardHeaderInterceptor.go +++ b/process/metablock/shardHeaderInterceptor.go @@ -37,7 +37,6 @@ func NewShardHeaderInterceptor( hasher hashing.Hasher, shardCoordinator sharding.Coordinator, nodesCoordinator sharding.NodesCoordinator, - chronologyValidator process.ChronologyValidator, ) (*ShardHeaderInterceptor, error) { if headers == nil { @@ -54,7 +53,6 @@ func NewShardHeaderInterceptor( hasher, shardCoordinator, nodesCoordinator, - chronologyValidator, ) if err != nil { return nil, err diff --git a/process/metablock/shardHeaderInterceptor_test.go b/process/metablock/shardHeaderInterceptor_test.go index c812eab2dee..641ef867763 100644 --- a/process/metablock/shardHeaderInterceptor_test.go +++ b/process/metablock/shardHeaderInterceptor_test.go @@ -32,7 +32,6 @@ func TestNewShardHeaderInterceptor_NilMarshalizerShouldErr(t *testing.T) { mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), &mock.NodesCoordinatorMock{}, - &mock.ChronologyValidatorStub{}, ) assert.Equal(t, process.ErrNilMarshalizer, err) @@ -52,7 +51,6 @@ func TestNewShardHeaderInterceptor_NilHeadersShouldErr(t *testing.T) { mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), 
&mock.NodesCoordinatorMock{}, - &mock.ChronologyValidatorStub{}, ) assert.Equal(t, process.ErrNilHeadersDataPool, err) @@ -73,7 +71,6 @@ func TestNewShardHeaderInterceptor_OkValsShouldWork(t *testing.T) { mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), &mock.NodesCoordinatorMock{}, - &mock.ChronologyValidatorStub{}, ) assert.Nil(t, err) @@ -96,7 +93,6 @@ func TestShardHeaderInterceptor_ProcessReceivedMessageNilMessageShouldErr(t *tes mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), &mock.NodesCoordinatorMock{}, - &mock.ChronologyValidatorStub{}, ) assert.Equal(t, process.ErrNilMessage, hi.ProcessReceivedMessage(nil)) @@ -110,11 +106,6 @@ func TestShardHeaderInterceptor_ProcessReceivedMessageValsOkShouldWork(t *testin testedNonce := uint64(67) headers := &mock.CacherStub{} multisigner := mock.NewMultiSigner() - chronologyValidator := &mock.ChronologyValidatorStub{ - ValidateReceivedBlockCalled: func(shardID uint32, epoch uint32, nonce uint64, round uint32) error { - return nil - }, - } storer := &mock.StorerStub{} storer.HasCalled = func(key []byte) error { return errors.New("Key not found") @@ -138,10 +129,9 @@ func TestShardHeaderInterceptor_ProcessReceivedMessageValsOkShouldWork(t *testin mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), nodesCoordinator, - chronologyValidator, ) - hdr := block.NewInterceptedHeader(multisigner, chronologyValidator, nodesCoordinator, marshalizer) + hdr := block.NewInterceptedHeader(multisigner, nodesCoordinator, marshalizer) hdr.Nonce = testedNonce hdr.ShardId = 0 hdr.PrevHash = make([]byte, 0) @@ -183,11 +173,6 @@ func TestShardHeaderInterceptor_ProcessReceivedMessageTestHdrNonces(t *testing.T testedNonce := uint64(67) headers := &mock.CacherStub{} multisigner := mock.NewMultiSigner() - chronologyValidator := &mock.ChronologyValidatorStub{ - ValidateReceivedBlockCalled: func(shardID uint32, epoch uint32, nonce uint64, round uint32) error { - return nil - }, - } storer := &mock.StorerStub{} storer.HasCalled = func(key []byte) error { return errors.New("Key not found") @@ -204,10 +189,9 @@ func TestShardHeaderInterceptor_ProcessReceivedMessageTestHdrNonces(t *testing.T mock.HasherMock{}, mock.NewMultiShardsCoordinatorMock(2), nodesCoordinator, - chronologyValidator, ) - hdr := block.NewInterceptedHeader(multisigner, chronologyValidator, nodesCoordinator, marshalizer) + hdr := block.NewInterceptedHeader(multisigner, nodesCoordinator, marshalizer) hdr.Nonce = testedNonce hdr.ShardId = 0 hdr.PrevHash = make([]byte, 0) @@ -255,11 +239,6 @@ func TestShardHeaderInterceptor_ProcessReceivedMessageIsInStorageShouldNotAdd(t testedNonce := uint64(67) headers := &mock.CacherStub{} multisigner := mock.NewMultiSigner() - chronologyValidator := &mock.ChronologyValidatorStub{ - ValidateReceivedBlockCalled: func(shardID uint32, epoch uint32, nonce uint64, round uint32) error { - return nil - }, - } storer := &mock.StorerStub{} storer.HasCalled = func(key []byte) error { return nil @@ -276,10 +255,9 @@ func TestShardHeaderInterceptor_ProcessReceivedMessageIsInStorageShouldNotAdd(t mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), nodesCoordinator, - chronologyValidator, ) - hdr := block.NewInterceptedHeader(multisigner, chronologyValidator, nodesCoordinator, marshalizer) + hdr := block.NewInterceptedHeader(multisigner, nodesCoordinator, marshalizer) hdr.Nonce = testedNonce hdr.ShardId = 0 hdr.PrevHash = make([]byte, 0) diff --git a/process/mock/chronologyValidatorStub.go b/process/mock/chronologyValidatorStub.go deleted file mode 100644 index 
1d64671e584..00000000000 --- a/process/mock/chronologyValidatorStub.go +++ /dev/null @@ -1,9 +0,0 @@ -package mock - -type ChronologyValidatorStub struct { - ValidateReceivedBlockCalled func(shardID uint32, epoch uint32, nonce uint64, round uint32) error -} - -func (cvs *ChronologyValidatorStub) ValidateReceivedBlock(shardID uint32, epoch uint32, nonce uint64, round uint32) error { - return cvs.ValidateReceivedBlockCalled(shardID, epoch, nonce, round) -} From 0e35e222460e2d4659d046729b63768d8791f12f Mon Sep 17 00:00:00 2001 From: Robert Sasu Date: Wed, 10 Jul 2019 16:29:49 +0300 Subject: [PATCH 021/234] pushing unit tests. --- data/feeTx/feeTx.go | 6 +++--- .../block/executingMiniblocks_test.go | 4 ++-- integrationTests/vm/testInitializer.go | 1 - process/coordinator/process_test.go | 8 ++++---- process/coordinator/transactionType.go | 6 ++++++ process/coordinator/transactionType_test.go | 18 ++++++++++++------ process/smartContract/process.go | 1 + process/transaction/process_test.go | 1 - 8 files changed, 28 insertions(+), 17 deletions(-) diff --git a/data/feeTx/feeTx.go b/data/feeTx/feeTx.go index 1482357cee6..0bac83d16ed 100644 --- a/data/feeTx/feeTx.go +++ b/data/feeTx/feeTx.go @@ -81,8 +81,8 @@ func (scr *FeeTx) GetValue() *big.Int { } // GetData returns the data of the fee transaction -func (scr *FeeTx) GetData() []byte { - return nil +func (scr *FeeTx) GetData() string { + return "" } // GetRecvAddress returns the receiver address from the fee transaction @@ -101,7 +101,7 @@ func (scr *FeeTx) SetValue(value *big.Int) { } // SetData sets the data of the fee transaction -func (scr *FeeTx) SetData(data []byte) { +func (scr *FeeTx) SetData(data string) { } // SetRecvAddress sets the receiver address of the fee transaction diff --git a/integrationTests/multiShard/block/executingMiniblocks_test.go b/integrationTests/multiShard/block/executingMiniblocks_test.go index f8a1048cd4d..52de0834624 100644 --- a/integrationTests/multiShard/block/executingMiniblocks_test.go +++ b/integrationTests/multiShard/block/executingMiniblocks_test.go @@ -396,7 +396,7 @@ func generateAndDisseminateTxs( for _, recvPrivateKeys := range receiversPrivateKeys { receiverKey := recvPrivateKeys[i] tx := generateTransferTx(incrementalNonce, senderKey, receiverKey, valToTransfer, gasPrice, gasLimit) - n.SendTransaction( + _, _ = n.SendTransaction( tx.Nonce, hex.EncodeToString(tx.SndAddr), hex.EncodeToString(tx.RcvAddr), @@ -425,7 +425,7 @@ func generateTransferTx( Value: valToTransfer, RcvAddr: skToPk(receiver), SndAddr: skToPk(sender), - Data: make([]byte, 0), + Data: "", GasLimit: gasLimit, GasPrice: gasPrice, } diff --git a/integrationTests/vm/testInitializer.go b/integrationTests/vm/testInitializer.go index d5fc99571a7..12dea2b9940 100644 --- a/integrationTests/vm/testInitializer.go +++ b/integrationTests/vm/testInitializer.go @@ -1,7 +1,6 @@ package vm import ( - "fmt" "encoding/hex" "math/big" "testing" diff --git a/process/coordinator/process_test.go b/process/coordinator/process_test.go index 2f1b6953cd0..c4ff6ccb67f 100644 --- a/process/coordinator/process_test.go +++ b/process/coordinator/process_test.go @@ -1340,13 +1340,13 @@ func TestShardProcessor_ProcessMiniBlockCompleteWithOkTxsShouldExecuteThemAndNot ProcessTransactionCalled: func(trans data.TransactionHandler, round uint32) error { //execution, in this context, means moving the tx nonce to itx corresponding execution result variable tx, _ := trans.(*transaction.Transaction) - if bytes.Equal(tx.Data, txHash1) { + if tx.Data == string(txHash1) { 
tx1ExecutionResult = tx.Nonce } - if bytes.Equal(tx.Data, txHash2) { + if tx.Data == string(txHash2) { tx2ExecutionResult = tx.Nonce } - if bytes.Equal(tx.Data, txHash3) { + if tx.Data == string(txHash3) { tx3ExecutionResult = tx.Nonce } @@ -1451,7 +1451,7 @@ func TestShardProcessor_ProcessMiniBlockCompleteWithErrorWhileProcessShouldCallR &mock.RequestHandlerMock{}, &mock.TxProcessorMock{ ProcessTransactionCalled: func(transaction data.TransactionHandler, round uint32) error { - if bytes.Equal(transaction.GetData(), txHash2) { + if transaction.GetData() == string(txHash2) { return process.ErrHigherNonceInTransaction } return nil diff --git a/process/coordinator/transactionType.go b/process/coordinator/transactionType.go index d26f1def618..c163ce862cd 100644 --- a/process/coordinator/transactionType.go +++ b/process/coordinator/transactionType.go @@ -3,6 +3,7 @@ package coordinator import ( "bytes" "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/feeTx" "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/sharding" @@ -46,6 +47,11 @@ func (tc *txTypeHandler) ComputeTransactionType(tx data.TransactionHandler) (pro return process.InvalidTransaction, err } + _, isTxfee := tx.(*feeTx.FeeTx) + if isTxfee { + return process.TxFee, nil + } + isEmptyAddress := tc.isDestAddressEmpty(tx) if isEmptyAddress { if len(tx.GetData()) > 0 { diff --git a/process/coordinator/transactionType_test.go b/process/coordinator/transactionType_test.go index 07a4d9bdf7b..9a8feced409 100644 --- a/process/coordinator/transactionType_test.go +++ b/process/coordinator/transactionType_test.go @@ -16,8 +16,8 @@ func TestNewTxTypeHandler_NilAddrConv(t *testing.T) { t.Parallel() tth, err := NewTxTypeHandler( - &mock.AddressConverterMock{}, nil, + mock.NewMultiShardsCoordinatorMock(3), &mock.AccountsStub{}, ) @@ -173,7 +173,7 @@ func TestTxTypeHandler_ComputeTransactionTypeScDeployment(t *testing.T) { tx.Nonce = 0 tx.SndAddr = []byte("SRC") tx.RcvAddr = make([]byte, addressConverter.AddressLen()) - tx.Data = []byte("data") + tx.Data = "data" tx.Value = big.NewInt(45) txType, err := tth.ComputeTransactionType(tx) @@ -189,7 +189,7 @@ func TestTxTypeHandler_ComputeTransactionTypeScInvoking(t *testing.T) { tx.Nonce = 0 tx.SndAddr = []byte("SRC") tx.RcvAddr = generateRandomByteSlice(addrConverter.AddressLen()) - tx.Data = []byte("data") + tx.Data = "data" tx.Value = big.NewInt(45) _, acntDst := createAccounts(tx) @@ -220,7 +220,7 @@ func TestTxTypeHandler_ComputeTransactionTypeMoveBalance(t *testing.T) { tx.Nonce = 0 tx.SndAddr = []byte("SRC") tx.RcvAddr = generateRandomByteSlice(addrConverter.AddressLen()) - tx.Data = []byte("data") + tx.Data = "data" tx.Value = big.NewInt(45) _, acntDst := createAccounts(tx) @@ -245,8 +245,9 @@ func TestTxTypeHandler_ComputeTransactionTypeMoveBalance(t *testing.T) { func TestTxTypeHandler_ComputeTransactionTypeTxFee(t *testing.T) { t.Parallel() + addrConv := &mock.AddressConverterMock{} tth, err := NewTxTypeHandler( - &mock.AddressConverterMock{}, + addrConv, mock.NewMultiShardsCoordinatorMock(3), &mock.AccountsStub{}, ) @@ -254,8 +255,13 @@ func TestTxTypeHandler_ComputeTransactionTypeTxFee(t *testing.T) { assert.NotNil(t, tth) assert.Nil(t, err) - tx := &feeTx.FeeTx{} + tx := &feeTx.FeeTx{RcvAddr: []byte("leader")} txType, err := tth.ComputeTransactionType(tx) + assert.Equal(t, process.ErrWrongTransaction, err) + assert.Equal(t, process.InvalidTransaction, txType) + + tx = 
&feeTx.FeeTx{RcvAddr: generateRandomByteSlice(addrConv.AddressLen())} + txType, err = tth.ComputeTransactionType(tx) assert.Nil(t, err) assert.Equal(t, process.TxFee, txType) } diff --git a/process/smartContract/process.go b/process/smartContract/process.go index cb810ae7ce7..51f70e65895 100644 --- a/process/smartContract/process.go +++ b/process/smartContract/process.go @@ -9,6 +9,7 @@ import ( "github.com/ElrondNetwork/elrond-go/core/logger" "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/feeTx" "github.com/ElrondNetwork/elrond-go/data/smartContractResult" "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/data/transaction" diff --git a/process/transaction/process_test.go b/process/transaction/process_test.go index 00da48224ab..82036c55034 100644 --- a/process/transaction/process_test.go +++ b/process/transaction/process_test.go @@ -13,7 +13,6 @@ import ( "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/coordinator" "github.com/ElrondNetwork/elrond-go/process/mock" - "github.com/ElrondNetwork/elrond-go/process/smartContract" txproc "github.com/ElrondNetwork/elrond-go/process/transaction" "github.com/stretchr/testify/assert" ) From c59753642671475fc3eac3fc72d8f1fb77133804 Mon Sep 17 00:00:00 2001 From: Robert Sasu Date: Thu, 11 Jul 2019 10:38:04 +0300 Subject: [PATCH 022/234] pushing unit tests. --- data/feeTx/capnp/schema.capnp | 2 +- process/coordinator/process.go | 10 ++++------ 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/data/feeTx/capnp/schema.capnp b/data/feeTx/capnp/schema.capnp index a21ed154286..6282c429d29 100644 --- a/data/feeTx/capnp/schema.capnp +++ b/data/feeTx/capnp/schema.capnp @@ -8,7 +8,7 @@ struct FeeTxCapn { nonce @0: UInt64; value @1: Data; rcvAddr @2: Data; - txHash @3: Data; + shardId @3: UInt32; } ##compile with: diff --git a/process/coordinator/process.go b/process/coordinator/process.go index a0a8d9a3ac9..12515558cd3 100644 --- a/process/coordinator/process.go +++ b/process/coordinator/process.go @@ -484,12 +484,10 @@ func (tc *transactionCoordinator) processAddedInterimTransactions() block.MiniBl // this has to be processed last continue } - go func() { - currMbs := interimProc.CreateAllInterMiniBlocks() - for _, value := range currMbs { - miniBlocks = append(miniBlocks, value) - } - }() + currMbs := interimProc.CreateAllInterMiniBlocks() + for _, value := range currMbs { + miniBlocks = append(miniBlocks, value) + } } tc.mutInterimProcessors.Unlock() From 8d56235dba916c44a5640ccd5e03a4c8609f5c2c Mon Sep 17 00:00:00 2001 From: iulianpascalau Date: Thu, 11 Jul 2019 10:49:03 +0300 Subject: [PATCH 023/234] modified capnproto schema for feeTx --- data/feeTx/capnp/schema.capnp.go | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/data/feeTx/capnp/schema.capnp.go b/data/feeTx/capnp/schema.capnp.go index dc5edc6f192..185949c0f7c 100644 --- a/data/feeTx/capnp/schema.capnp.go +++ b/data/feeTx/capnp/schema.capnp.go @@ -12,9 +12,9 @@ import ( type FeeTxCapn C.Struct -func NewFeeTxCapn(s *C.Segment) FeeTxCapn { return FeeTxCapn(s.NewStruct(8, 3)) } -func NewRootFeeTxCapn(s *C.Segment) FeeTxCapn { return FeeTxCapn(s.NewRootStruct(8, 3)) } -func AutoNewFeeTxCapn(s *C.Segment) FeeTxCapn { return FeeTxCapn(s.NewStructAR(8, 3)) } +func NewFeeTxCapn(s *C.Segment) FeeTxCapn { return FeeTxCapn(s.NewStruct(16, 2)) } +func NewRootFeeTxCapn(s *C.Segment) FeeTxCapn { return FeeTxCapn(s.NewRootStruct(16, 2)) } +func 
AutoNewFeeTxCapn(s *C.Segment) FeeTxCapn { return FeeTxCapn(s.NewStructAR(16, 2)) } func ReadRootFeeTxCapn(s *C.Segment) FeeTxCapn { return FeeTxCapn(s.Root(0).ToStruct()) } func (s FeeTxCapn) Nonce() uint64 { return C.Struct(s).Get64(0) } func (s FeeTxCapn) SetNonce(v uint64) { C.Struct(s).Set64(0, v) } @@ -22,8 +22,8 @@ func (s FeeTxCapn) Value() []byte { return C.Struct(s).GetObject(0) func (s FeeTxCapn) SetValue(v []byte) { C.Struct(s).SetObject(0, s.Segment.NewData(v)) } func (s FeeTxCapn) RcvAddr() []byte { return C.Struct(s).GetObject(1).ToData() } func (s FeeTxCapn) SetRcvAddr(v []byte) { C.Struct(s).SetObject(1, s.Segment.NewData(v)) } -func (s FeeTxCapn) TxHash() []byte { return C.Struct(s).GetObject(2).ToData() } -func (s FeeTxCapn) SetTxHash(v []byte) { C.Struct(s).SetObject(2, s.Segment.NewData(v)) } +func (s FeeTxCapn) ShardId() uint32 { return C.Struct(s).Get32(8) } +func (s FeeTxCapn) SetShardId(v uint32) { C.Struct(s).Set32(8, v) } func (s FeeTxCapn) WriteJSON(w io.Writer) error { b := bufio.NewWriter(w) var err error @@ -90,12 +90,12 @@ func (s FeeTxCapn) WriteJSON(w io.Writer) error { if err != nil { return err } - _, err = b.WriteString("\"txHash\":") + _, err = b.WriteString("\"shardId\":") if err != nil { return err } { - s := s.TxHash() + s := s.ShardId() buf, err = json.Marshal(s) if err != nil { return err @@ -183,12 +183,12 @@ func (s FeeTxCapn) WriteCapLit(w io.Writer) error { if err != nil { return err } - _, err = b.WriteString("txHash = ") + _, err = b.WriteString("shardId = ") if err != nil { return err } { - s := s.TxHash() + s := s.ShardId() buf, err = json.Marshal(s) if err != nil { return err @@ -214,7 +214,7 @@ func (s FeeTxCapn) MarshalCapLit() ([]byte, error) { type FeeTxCapn_List C.PointerList func NewFeeTxCapnList(s *C.Segment, sz int) FeeTxCapn_List { - return FeeTxCapn_List(s.NewCompositeList(8, 3, sz)) + return FeeTxCapn_List(s.NewCompositeList(16, 2, sz)) } func (s FeeTxCapn_List) Len() int { return C.PointerList(s).Len() } func (s FeeTxCapn_List) At(i int) FeeTxCapn { return FeeTxCapn(C.PointerList(s).At(i).ToStruct()) } From 62ea880c05e6aa26ef845ef255330b98e2553e28 Mon Sep 17 00:00:00 2001 From: Robert Sasu Date: Thu, 11 Jul 2019 11:01:33 +0300 Subject: [PATCH 024/234] fixing capnproto. 
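
The Go <-> Capn'Proto conversion for FeeTx now carries the new ShardId field in both
directions, and the SaveLoad test covers it. As a rough illustration of the invariant
this change restores (not part of the patch itself), a round trip through
FeeTxGoToCapn/FeeTxCapnToGo should preserve ShardId; the capn.NewBuffer segment
constructor, the external test package name and the import paths below are assumptions
based on the surrounding code rather than code taken from this repository.

    package feeTx_test

    import (
        "math/big"
        "testing"

        capn "github.com/glycerine/go-capnproto" // assumed capnp runtime used by the generated schema

        "github.com/ElrondNetwork/elrond-go/data/feeTx"
        "github.com/stretchr/testify/assert"
    )

    // Sketch: converting a FeeTx to its capnp form and back keeps ShardId intact.
    func TestFeeTx_CapnRoundTripKeepsShardId(t *testing.T) {
        src := &feeTx.FeeTx{
            Nonce:   1,
            Value:   big.NewInt(1),
            RcvAddr: []byte("receiver_address"),
            ShardId: 10,
        }

        seg := capn.NewBuffer(nil)               // assumed way to obtain a fresh capnp segment
        capnMsg := feeTx.FeeTxGoToCapn(seg, src) // SetShardId added by this commit

        dest := &feeTx.FeeTx{}
        feeTx.FeeTxCapnToGo(capnMsg, dest) // dest.ShardId = src.ShardId() added by this commit

        assert.Equal(t, src.ShardId, dest.ShardId)
        assert.Equal(t, src.Nonce, dest.Nonce)
        assert.Equal(t, src.RcvAddr, dest.RcvAddr)
    }
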
--- data/feeTx/feeTx.go | 2 ++ data/feeTx/feeTx_test.go | 1 + 2 files changed, 3 insertions(+) diff --git a/data/feeTx/feeTx.go b/data/feeTx/feeTx.go index 0bac83d16ed..1ef6f311eba 100644 --- a/data/feeTx/feeTx.go +++ b/data/feeTx/feeTx.go @@ -54,6 +54,7 @@ func FeeTxCapnToGo(src capnp.FeeTxCapn, dest *FeeTx) *FeeTx { } dest.RcvAddr = src.RcvAddr() + dest.ShardId = src.ShardId() return dest } @@ -66,6 +67,7 @@ func FeeTxGoToCapn(seg *capn.Segment, src *FeeTx) capnp.FeeTxCapn { dest.SetNonce(src.Nonce) dest.SetValue(value) dest.SetRcvAddr(src.RcvAddr) + dest.SetShardId(src.ShardId) return dest } diff --git a/data/feeTx/feeTx_test.go b/data/feeTx/feeTx_test.go index 74013540c97..25e6a2d6010 100644 --- a/data/feeTx/feeTx_test.go +++ b/data/feeTx/feeTx_test.go @@ -14,6 +14,7 @@ func TestFeeTx_SaveLoad(t *testing.T) { Nonce: uint64(1), Value: big.NewInt(1), RcvAddr: []byte("receiver_address"), + ShardId: 10, } var b bytes.Buffer From e729d431a20836402ef2a70bbe2c3000c168ed06 Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Thu, 11 Jul 2019 17:06:40 +0300 Subject: [PATCH 025/234] consensus, process: add signature verification on metachain headers interceptors --- cmd/node/factory/structs.go | 114 ++++++++++++++---- consensus/interface.go | 8 -- consensus/mock/consensusDataContainerMock.go | 2 +- consensus/mock/consensusStateMock.go | 27 +++-- consensus/mock/mockTestInitializer.go | 2 +- consensus/mock/validatorGroupSelectorMock.go | 27 +++-- consensus/spos/bls/export_test.go | 4 +- consensus/spos/bn/export_test.go | 4 +- .../spos/commonSubround/subroundStartRound.go | 2 +- .../commonSubround/subroundStartRound_test.go | 6 +- consensus/spos/consensusCore.go | 2 +- consensus/spos/consensusCoreValidator.go | 2 +- consensus/spos/consensusCoreValidator_test.go | 2 +- consensus/spos/consensusCore_test.go | 30 ++--- consensus/spos/consensusState_test.go | 4 +- consensus/spos/interface.go | 2 +- .../block/executingMiniblocks_test.go | 102 +++++++--------- .../block/interceptedBlocks_test.go | 58 +++++---- .../multiShard/block/testInitializer.go | 87 +++++++------ .../multiShard/metablock/testInitializer.go | 5 +- .../multisig/belnevMultisig_test.go | 44 ------- .../block/interceptedRequestHdr_test.go | 8 +- node/node.go | 4 +- process/block/interceptedBlockHeader.go | 18 +-- process/block/interceptedMetaBlockHeader.go | 59 +++++++-- .../block/interceptedMetaBlockHeader_test.go | 4 +- .../headerInterceptorBase_test.go | 12 +- .../metachainHeaderInterceptor.go | 8 +- .../metachainHeaderInterceptor_test.go | 52 +++++++- process/block/shardblock.go | 1 - .../metachain/interceptorsContainerFactory.go | 35 +++--- .../shard/interceptorsContainerFactory.go | 45 +++---- ...electorMock.go => NodesCoordinatorMock.go} | 24 ++-- sharding/nodesSetup_test.go | 2 +- 34 files changed, 454 insertions(+), 352 deletions(-) rename sharding/mock/{validatorGroupSelectorMock.go => NodesCoordinatorMock.go} (53%) diff --git a/cmd/node/factory/structs.go b/cmd/node/factory/structs.go index 32964d48660..e9edb2c25bc 100644 --- a/cmd/node/factory/structs.go +++ b/cmd/node/factory/structs.go @@ -545,17 +545,6 @@ func (srr *seedRandReader) Read(p []byte) (n int, err error) { return len(p), nil } -type nullChronologyValidator struct { -} - -// ValidateReceivedBlock should validate if parameters to be checked are valid -// In this implementation it just returns nil -func (*nullChronologyValidator) ValidateReceivedBlock(shardID uint32, epoch uint32, nonce uint64, round uint32) error { - //TODO when implementing a workable variant take into 
account to receive headers "from future" (nonce or round > current round) - // as this might happen when clocks are slightly de-synchronized - return nil -} - func getHasherFromConfig(cfg *config.Config) (hashing.Hasher, error) { switch cfg.Hasher.Type { case "sha256": @@ -576,7 +565,13 @@ func getMarshalizerFromConfig(cfg *config.Config) (marshal.Marshalizer, error) { return nil, errors.New("no marshalizer provided in config file") } -func getTrie(cfg config.StorageConfig, marshalizer marshal.Marshalizer, hasher hashing.Hasher, uniqueID string) (data.Trie, error) { +func getTrie( + cfg config.StorageConfig, + marshalizer marshal.Marshalizer, + hasher hashing.Hasher, + uniqueID string, +) (data.Trie, error) { + accountsTrieStorage, err := storageUnit.NewStorageUnitFromConf( getCacherFromConfig(cfg.Cache), getDBFromConfig(cfg.DB, uniqueID), @@ -619,7 +614,12 @@ func createBlockChainFromConfig(config *config.Config, coordinator sharding.Coor return nil, errors.New("can not create blockchain") } -func createDataStoreFromConfig(config *config.Config, shardCoordinator sharding.Coordinator, uniqueID string) (dataRetriever.StorageService, error) { +func createDataStoreFromConfig( + config *config.Config, + shardCoordinator sharding.Coordinator, + uniqueID string, +) (dataRetriever.StorageService, error) { + if shardCoordinator.SelfId() < shardCoordinator.NumberOfShards() { return createShardDataStoreFromConfig(config, shardCoordinator, uniqueID) } @@ -629,8 +629,20 @@ func createDataStoreFromConfig(config *config.Config, shardCoordinator sharding. return nil, errors.New("can not create data store") } -func createShardDataStoreFromConfig(config *config.Config, shardCoordinator sharding.Coordinator, uniqueID string) (dataRetriever.StorageService, error) { - var headerUnit, peerBlockUnit, miniBlockUnit, txUnit, metachainHeaderUnit, scrUnit, metaHdrHashNonceUnit, shardHdrHashNonceUnit *storageUnit.Unit +func createShardDataStoreFromConfig( + config *config.Config, + shardCoordinator sharding.Coordinator, + uniqueID string, +) (dataRetriever.StorageService, error) { + + var headerUnit *storageUnit.Unit + var peerBlockUnit *storageUnit.Unit + var miniBlockUnit *storageUnit.Unit + var txUnit *storageUnit.Unit + var metachainHeaderUnit *storageUnit.Unit + var scrUnit *storageUnit.Unit + var metaHdrHashNonceUnit *storageUnit.Unit + var shardHdrHashNonceUnit *storageUnit.Unit var err error defer func() { @@ -744,7 +756,12 @@ func createShardDataStoreFromConfig(config *config.Config, shardCoordinator shar return store, err } -func createMetaChainDataStoreFromConfig(config *config.Config, shardCoordinator sharding.Coordinator, uniqueID string) (dataRetriever.StorageService, error) { +func createMetaChainDataStoreFromConfig( + config *config.Config, + shardCoordinator sharding.Coordinator, + uniqueID string, +) (dataRetriever.StorageService, error) { + var peerDataUnit, shardDataUnit, metaBlockUnit, headerUnit, metaHdrHashNonceUnit *storageUnit.Unit var shardHdrHashNonceUnits []*storageUnit.Unit var err error @@ -1072,10 +1089,25 @@ func newInterceptorAndResolverContainerFactory( network *Network, ) (process.InterceptorsContainerFactory, dataRetriever.ResolversContainerFactory, error) { if shardCoordinator.SelfId() < shardCoordinator.NumberOfShards() { - return newShardInterceptorAndResolverContainerFactory(shardCoordinator, nodesCoordinator, data, core, crypto, state, network) + return newShardInterceptorAndResolverContainerFactory( + shardCoordinator, + nodesCoordinator, + data, + core, + crypto, + state, + 
network, + ) } if shardCoordinator.SelfId() == sharding.MetachainShardId { - return newMetaInterceptorAndResolverContainerFactory(shardCoordinator, nodesCoordinator, data, core, crypto, network) + return newMetaInterceptorAndResolverContainerFactory( + shardCoordinator, + nodesCoordinator, + data, + core, + crypto, + network, + ) } return nil, nil, errors.New("could not create interceptor and resolver container factory") @@ -1220,7 +1252,11 @@ func generateGenesisHeadersForInit( } if nodesSetup.IsMetaChainActive() { - genesisBlock, err := genesis.CreateMetaGenesisBlock(uint64(nodesSetup.StartTime), nodesSetup.InitialNodesPubKeys()) + genesisBlock, err := genesis.CreateMetaGenesisBlock( + uint64(nodesSetup.StartTime), + nodesSetup.InitialNodesPubKeys(), + ) + if err != nil { return nil, err } @@ -1242,11 +1278,31 @@ func newBlockProcessorAndTracker( nodesConfig *sharding.NodesSetup, coreServiceContainer serviceContainer.Core, ) (process.BlockProcessor, process.BlocksTracker, error) { + if shardCoordinator.SelfId() < shardCoordinator.NumberOfShards() { - return newShardBlockProcessorAndTracker(resolversFinder, shardCoordinator, data, core, state, forkDetector, shardsGenesisBlocks, nodesConfig, coreServiceContainer) + return newShardBlockProcessorAndTracker( + resolversFinder, + shardCoordinator, + data, + core, + state, + forkDetector, + shardsGenesisBlocks, + nodesConfig, + coreServiceContainer, + ) } if shardCoordinator.SelfId() == sharding.MetachainShardId { - return newMetaBlockProcessorAndTracker(resolversFinder, shardCoordinator, data, core, state, forkDetector, shardsGenesisBlocks, coreServiceContainer) + return newMetaBlockProcessorAndTracker( + resolversFinder, + shardCoordinator, + data, + core, + state, + forkDetector, + shardsGenesisBlocks, + coreServiceContainer, + ) } return nil, nil, errors.New("could not create block processor and tracker") @@ -1329,7 +1385,12 @@ func newMetaBlockProcessorAndTracker( shardsGenesisBlocks map[uint32]data.HeaderHandler, coreServiceContainer serviceContainer.Core, ) (process.BlockProcessor, process.BlocksTracker, error) { - requestHandler, err := requestHandlers.NewMetaResolverRequestHandler(resolversFinder, factory.ShardHeadersForMetachainTopic) + + requestHandler, err := requestHandlers.NewMetaResolverRequestHandler( + resolversFinder, + factory.ShardHeadersForMetachainTopic, + ) + if err != nil { return nil, nil, err } @@ -1453,7 +1514,14 @@ func decodeAddress(address string) ([]byte, error) { return hex.DecodeString(address) } -func getSk(ctx *cli.Context, log *logger.Logger, skName string, skIndexName string, skPemFileName string) ([]byte, error) { +func getSk( + ctx *cli.Context, + log *logger.Logger, + skName string, + skIndexName string, + skPemFileName string, +) ([]byte, error) { + //if flag is defined, it shall overwrite what was read from pem file if ctx.GlobalIsSet(skName) { encodedSk := []byte(ctx.GlobalString(skName)) diff --git a/consensus/interface.go b/consensus/interface.go index 8988133187f..90063b01192 100644 --- a/consensus/interface.go +++ b/consensus/interface.go @@ -4,7 +4,6 @@ import ( "time" "github.com/ElrondNetwork/elrond-go/data" - "math/big" ) // Rounder defines the actions which should be handled by a round implementation @@ -48,13 +47,6 @@ type SposFactory interface { GenerateSubrounds() } -// Validator defines what a consensus validator implementation should do. 
-type Validator interface { - Stake() *big.Int - Rating() int32 - PubKey() []byte -} - // BroadcastMessenger defines the behaviour of the broadcast messages by the consensus group type BroadcastMessenger interface { BroadcastBlock(data.BodyHandler, data.HeaderHandler) error diff --git a/consensus/mock/consensusDataContainerMock.go b/consensus/mock/consensusDataContainerMock.go index d16ab18cab3..4f176aa5cad 100644 --- a/consensus/mock/consensusDataContainerMock.go +++ b/consensus/mock/consensusDataContainerMock.go @@ -77,7 +77,7 @@ func (cdc *ConsensusCoreMock) SyncTimer() ntp.SyncTimer { return cdc.syncTimer } -func (cdc *ConsensusCoreMock) ValidatorGroupSelector() sharding.NodesCoordinator { +func (cdc *ConsensusCoreMock) NodesCoordinator() sharding.NodesCoordinator { return cdc.validatorGroupSelector } diff --git a/consensus/mock/consensusStateMock.go b/consensus/mock/consensusStateMock.go index 6ba2b9cd610..ae0a2562ed9 100644 --- a/consensus/mock/consensusStateMock.go +++ b/consensus/mock/consensusStateMock.go @@ -20,13 +20,12 @@ type ConsensusStateMock struct { IsBlockBodyAlreadyReceivedCalled func() bool IsHeaderAlreadyReceivedCalled func() bool CanDoSubroundJobCalled func(currentSubroundId int) bool - CanProcessReceivedMessageCalled func(cnsDta consensus.Message, currentRoundIndex int32, - currentSubroundId int) bool - GenerateBitmapCalled func(subroundId int) []byte - ProcessingBlockCalled func() bool - SetProcessingBlockCalled func(processingBlock bool) - ConsensusGroupSizeCalled func() int - SetThresholdCalled func(subroundId int, threshold int) + CanProcessReceivedMessageCalled func(cnsDta consensus.Message, currentRoundIndex int32, currentSubroundId int) bool + GenerateBitmapCalled func(subroundId int) []byte + ProcessingBlockCalled func() bool + SetProcessingBlockCalled func(processingBlock bool) + ConsensusGroupSizeCalled func() int + SetThresholdCalled func(subroundId int, threshold int) } func (cnsm *ConsensusStateMock) ResetConsensusState() { @@ -45,9 +44,10 @@ func (cnsm *ConsensusStateMock) GetLeader() (string, error) { return cnsm.GetLeaderCalled() } -func (cnsm *ConsensusStateMock) GetNextConsensusGroup(randomSource string, - vgs sharding.NodesCoordinator) ([]string, - error) { +func (cnsm *ConsensusStateMock) GetNextConsensusGroup( + randomSource string, + vgs sharding.NodesCoordinator, +) ([]string, error) { return cnsm.GetNextConsensusGroupCalled(randomSource, vgs) } @@ -87,8 +87,11 @@ func (cnsm *ConsensusStateMock) CanDoSubroundJob(currentSubroundId int) bool { return cnsm.CanDoSubroundJobCalled(currentSubroundId) } -func (cnsm *ConsensusStateMock) CanProcessReceivedMessage(cnsDta consensus.Message, currentRoundIndex int32, - currentSubroundId int) bool { +func (cnsm *ConsensusStateMock) CanProcessReceivedMessage( + cnsDta consensus.Message, + currentRoundIndex int32, + currentSubroundId int, +) bool { return cnsm.CanProcessReceivedMessageCalled(cnsDta, currentRoundIndex, currentSubroundId) } diff --git a/consensus/mock/mockTestInitializer.go b/consensus/mock/mockTestInitializer.go index ca6271a03e0..c28e1253a58 100644 --- a/consensus/mock/mockTestInitializer.go +++ b/consensus/mock/mockTestInitializer.go @@ -122,7 +122,7 @@ func InitConsensusCore() *ConsensusCoreMock { rounderMock := &RounderMock{} shardCoordinatorMock := ShardCoordinatorMock{} syncTimerMock := &SyncTimerMock{} - validatorGroupSelector := ValidatorGroupSelectorMock{} + validatorGroupSelector := NodesCoordinatorMock{} container := &ConsensusCoreMock{ blockChain, diff --git 
a/consensus/mock/validatorGroupSelectorMock.go b/consensus/mock/validatorGroupSelectorMock.go index f3e5e05aa73..b662c91f48d 100644 --- a/consensus/mock/validatorGroupSelectorMock.go +++ b/consensus/mock/validatorGroupSelectorMock.go @@ -1,21 +1,22 @@ package mock import ( - "github.com/ElrondNetwork/elrond-go/sharding" "math/big" + + "github.com/ElrondNetwork/elrond-go/sharding" ) -type ValidatorGroupSelectorMock struct { +type NodesCoordinatorMock struct { ComputeValidatorsGroupCalled func([]byte) ([]sharding.Validator, error) GetValidatorsPublicKeysCalled func(randomness []byte) ([]string, error) } -func (vgsm ValidatorGroupSelectorMock) ComputeValidatorsGroup( +func (ncm NodesCoordinatorMock) ComputeValidatorsGroup( randomness []byte, ) (validatorsGroup []sharding.Validator, err error) { - if vgsm.ComputeValidatorsGroupCalled != nil { - return vgsm.ComputeValidatorsGroupCalled(randomness) + if ncm.ComputeValidatorsGroupCalled != nil { + return ncm.ComputeValidatorsGroupCalled(randomness) } list := []sharding.Validator{ @@ -33,12 +34,12 @@ func (vgsm ValidatorGroupSelectorMock) ComputeValidatorsGroup( return list, nil } -func (vgsm ValidatorGroupSelectorMock) GetValidatorsPublicKeys(randomness []byte) ([]string, error) { - if vgsm.GetValidatorsPublicKeysCalled != nil { - return vgsm.GetValidatorsPublicKeysCalled(randomness) +func (ncm NodesCoordinatorMock) GetValidatorsPublicKeys(randomness []byte) ([]string, error) { + if ncm.GetValidatorsPublicKeysCalled != nil { + return ncm.GetValidatorsPublicKeysCalled(randomness) } - validators, err := vgsm.ComputeValidatorsGroup(randomness) + validators, err := ncm.ComputeValidatorsGroup(randomness) if err != nil { return nil, err } @@ -52,18 +53,18 @@ func (vgsm ValidatorGroupSelectorMock) GetValidatorsPublicKeys(randomness []byte return pubKeys, nil } -func (vgsm ValidatorGroupSelectorMock) ConsensusGroupSize() int { +func (ncm NodesCoordinatorMock) ConsensusGroupSize() int { panic("implement me") } -func (vgsm ValidatorGroupSelectorMock) LoadNodesPerShards(map[uint32][]sharding.Validator) error { +func (ncm NodesCoordinatorMock) LoadNodesPerShards(map[uint32][]sharding.Validator) error { return nil } -func (vgsm ValidatorGroupSelectorMock) SetConsensusGroupSize(int) error { +func (ncm NodesCoordinatorMock) SetConsensusGroupSize(int) error { panic("implement me") } -func (vgsm ValidatorGroupSelectorMock) GetSelectedPublicKeys(selection []byte) (publicKeys []string, err error) { +func (ncm NodesCoordinatorMock) GetSelectedPublicKeys(selection []byte) (publicKeys []string, err error) { panic("implement me") } diff --git a/consensus/spos/bls/export_test.go b/consensus/spos/bls/export_test.go index 30c56804ce1..fed87851273 100644 --- a/consensus/spos/bls/export_test.go +++ b/consensus/spos/bls/export_test.go @@ -60,8 +60,8 @@ func (fct *factory) SyncTimer() ntp.SyncTimer { return fct.consensusCore.SyncTimer() } -func (fct *factory) ValidatorGroupSelector() sharding.NodesCoordinator { - return fct.consensusCore.ValidatorGroupSelector() +func (fct *factory) NodesCoordinator() sharding.NodesCoordinator { + return fct.consensusCore.NodesCoordinator() } func (fct *factory) Worker() spos.WorkerHandler { diff --git a/consensus/spos/bn/export_test.go b/consensus/spos/bn/export_test.go index eccda486ab1..446eb8fde41 100644 --- a/consensus/spos/bn/export_test.go +++ b/consensus/spos/bn/export_test.go @@ -60,8 +60,8 @@ func (fct *factory) SyncTimer() ntp.SyncTimer { return fct.consensusCore.SyncTimer() } -func (fct *factory) ValidatorGroupSelector() 
sharding.NodesCoordinator { - return fct.consensusCore.ValidatorGroupSelector() +func (fct *factory) NodesCoordinator() sharding.NodesCoordinator { + return fct.consensusCore.NodesCoordinator() } func (fct *factory) Worker() spos.WorkerHandler { diff --git a/consensus/spos/commonSubround/subroundStartRound.go b/consensus/spos/commonSubround/subroundStartRound.go index c77736b8875..cbed76cbdeb 100644 --- a/consensus/spos/commonSubround/subroundStartRound.go +++ b/consensus/spos/commonSubround/subroundStartRound.go @@ -185,7 +185,7 @@ func (sr *SubroundStartRound) generateNextConsensusGroup(roundIndex int32) error log.Info(fmt.Sprintf("random source used to determine the next consensus group is: %s\n", randomSource)) - nextConsensusGroup, err := sr.GetNextConsensusGroup(randomSource, sr.ValidatorGroupSelector()) + nextConsensusGroup, err := sr.GetNextConsensusGroup(randomSource, sr.NodesCoordinator()) if err != nil { return err } diff --git a/consensus/spos/commonSubround/subroundStartRound_test.go b/consensus/spos/commonSubround/subroundStartRound_test.go index fe2aaf73cb9..0f254c8c0e0 100644 --- a/consensus/spos/commonSubround/subroundStartRound_test.go +++ b/consensus/spos/commonSubround/subroundStartRound_test.go @@ -322,7 +322,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldReturnFalseWhenShouldSyncRetur func TestSubroundStartRound_InitCurrentRoundShouldReturnFalseWhenGenerateNextConsensusGroupErr(t *testing.T) { t.Parallel() - validatorGroupSelector := mock.ValidatorGroupSelectorMock{} + validatorGroupSelector := mock.NodesCoordinatorMock{} err := errors.New("error") validatorGroupSelector.ComputeValidatorsGroupCalled = func(bytes []byte) ([]sharding.Validator, error) { return nil, err @@ -339,7 +339,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldReturnFalseWhenGenerateNextCon func TestSubroundStartRound_InitCurrentRoundShouldReturnFalseWhenGetLeaderErr(t *testing.T) { t.Parallel() - validatorGroupSelector := mock.ValidatorGroupSelectorMock{} + validatorGroupSelector := mock.NodesCoordinatorMock{} validatorGroupSelector.ComputeValidatorsGroupCalled = func(bytes []byte) ([]sharding.Validator, error) { return make([]sharding.Validator, 0), nil } @@ -426,7 +426,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldReturnTrue(t *testing.T) { func TestSubroundStartRound_GenerateNextConsensusGroupShouldReturnErr(t *testing.T) { t.Parallel() - validatorGroupSelector := mock.ValidatorGroupSelectorMock{} + validatorGroupSelector := mock.NodesCoordinatorMock{} err := errors.New("error") validatorGroupSelector.ComputeValidatorsGroupCalled = func(bytes []byte) ([]sharding.Validator, error) { diff --git a/consensus/spos/consensusCore.go b/consensus/spos/consensusCore.go index 5acab2254f9..323f6502b44 100644 --- a/consensus/spos/consensusCore.go +++ b/consensus/spos/consensusCore.go @@ -136,7 +136,7 @@ func (cc *ConsensusCore) SyncTimer() ntp.SyncTimer { } // NodesCoordinator gets the NodesCoordinator stored in the ConsensusCore -func (cc *ConsensusCore) ValidatorGroupSelector() sharding.NodesCoordinator { +func (cc *ConsensusCore) NodesCoordinator() sharding.NodesCoordinator { return cc.validatorGroupSelector } diff --git a/consensus/spos/consensusCoreValidator.go b/consensus/spos/consensusCoreValidator.go index 6deb07e982c..9acf6c94f9b 100644 --- a/consensus/spos/consensusCoreValidator.go +++ b/consensus/spos/consensusCoreValidator.go @@ -41,7 +41,7 @@ func ValidateConsensusCore(container ConsensusCoreHandler) error { if container.SyncTimer() == nil { return ErrNilSyncTimer } - if 
container.ValidatorGroupSelector() == nil { + if container.NodesCoordinator() == nil { return ErrNilValidatorGroupSelector } if container.RandomnessPrivateKey() == nil { diff --git a/consensus/spos/consensusCoreValidator_test.go b/consensus/spos/consensusCoreValidator_test.go index ef0750be01b..2be39215281 100644 --- a/consensus/spos/consensusCoreValidator_test.go +++ b/consensus/spos/consensusCoreValidator_test.go @@ -22,7 +22,7 @@ func initConsensusDataContainer() *ConsensusCore { rounderMock := &mock.RounderMock{} shardCoordinatorMock := mock.ShardCoordinatorMock{} syncTimerMock := &mock.SyncTimerMock{} - validatorGroupSelector := mock.ValidatorGroupSelectorMock{} + validatorGroupSelector := mock.NodesCoordinatorMock{} return &ConsensusCore{ blockChain: blockChain, diff --git a/consensus/spos/consensusCore_test.go b/consensus/spos/consensusCore_test.go index 6b373daafa7..1ee15063a55 100644 --- a/consensus/spos/consensusCore_test.go +++ b/consensus/spos/consensusCore_test.go @@ -28,7 +28,7 @@ func TestConsensusCore_WithNilBlockchainShouldFail(t *testing.T) { consensusCoreMock.Rounder(), consensusCoreMock.ShardCoordinator(), consensusCoreMock.SyncTimer(), - consensusCoreMock.ValidatorGroupSelector()) + consensusCoreMock.NodesCoordinator()) assert.Nil(t, consensusCore) assert.Equal(t, spos.ErrNilBlockChain, err) @@ -54,7 +54,7 @@ func TestConsensusCore_WithNilBlockProcessorShouldFail(t *testing.T) { consensusCoreMock.Rounder(), consensusCoreMock.ShardCoordinator(), consensusCoreMock.SyncTimer(), - consensusCoreMock.ValidatorGroupSelector()) + consensusCoreMock.NodesCoordinator()) assert.Nil(t, consensusCore) assert.Equal(t, spos.ErrNilBlockProcessor, err) @@ -80,7 +80,7 @@ func TestConsensusCore_WithNilBlocksTrackerShouldFail(t *testing.T) { consensusCoreMock.Rounder(), consensusCoreMock.ShardCoordinator(), consensusCoreMock.SyncTimer(), - consensusCoreMock.ValidatorGroupSelector()) + consensusCoreMock.NodesCoordinator()) assert.Nil(t, consensusCore) assert.Equal(t, spos.ErrNilBlocksTracker, err) @@ -106,7 +106,7 @@ func TestConsensusCore_WithNilBootstrapperShouldFail(t *testing.T) { consensusCoreMock.Rounder(), consensusCoreMock.ShardCoordinator(), consensusCoreMock.SyncTimer(), - consensusCoreMock.ValidatorGroupSelector()) + consensusCoreMock.NodesCoordinator()) assert.Nil(t, consensusCore) assert.Equal(t, spos.ErrNilBlootstraper, err) @@ -132,7 +132,7 @@ func TestConsensusCore_WithNilBroadcastMessengerShouldFail(t *testing.T) { consensusCoreMock.Rounder(), consensusCoreMock.ShardCoordinator(), consensusCoreMock.SyncTimer(), - consensusCoreMock.ValidatorGroupSelector()) + consensusCoreMock.NodesCoordinator()) assert.Nil(t, consensusCore) assert.Equal(t, spos.ErrNilBroadcastMessenger, err) @@ -158,7 +158,7 @@ func TestConsensusCore_WithNilChronologyShouldFail(t *testing.T) { consensusCoreMock.Rounder(), consensusCoreMock.ShardCoordinator(), consensusCoreMock.SyncTimer(), - consensusCoreMock.ValidatorGroupSelector()) + consensusCoreMock.NodesCoordinator()) assert.Nil(t, consensusCore) assert.Equal(t, spos.ErrNilChronologyHandler, err) @@ -184,7 +184,7 @@ func TestConsensusCore_WithNilHasherShouldFail(t *testing.T) { consensusCoreMock.Rounder(), consensusCoreMock.ShardCoordinator(), consensusCoreMock.SyncTimer(), - consensusCoreMock.ValidatorGroupSelector()) + consensusCoreMock.NodesCoordinator()) assert.Nil(t, consensusCore) assert.Equal(t, spos.ErrNilHasher, err) @@ -210,7 +210,7 @@ func TestConsensusCore_WithNilMarshalizerShouldFail(t *testing.T) { consensusCoreMock.Rounder(), 
consensusCoreMock.ShardCoordinator(), consensusCoreMock.SyncTimer(), - consensusCoreMock.ValidatorGroupSelector()) + consensusCoreMock.NodesCoordinator()) assert.Nil(t, consensusCore) assert.Equal(t, spos.ErrNilMarshalizer, err) @@ -236,7 +236,7 @@ func TestConsensusCore_WithNilBlsPrivateKeyShouldFail(t *testing.T) { consensusCoreMock.Rounder(), consensusCoreMock.ShardCoordinator(), consensusCoreMock.SyncTimer(), - consensusCoreMock.ValidatorGroupSelector()) + consensusCoreMock.NodesCoordinator()) assert.Nil(t, consensusCore) assert.Equal(t, spos.ErrNilBlsPrivateKey, err) @@ -262,7 +262,7 @@ func TestConsensusCore_WithNilBlsSingleSignerShouldFail(t *testing.T) { consensusCoreMock.Rounder(), consensusCoreMock.ShardCoordinator(), consensusCoreMock.SyncTimer(), - consensusCoreMock.ValidatorGroupSelector()) + consensusCoreMock.NodesCoordinator()) assert.Nil(t, consensusCore) assert.Equal(t, spos.ErrNilBlsSingleSigner, err) @@ -288,7 +288,7 @@ func TestConsensusCore_WithNilMultiSignerShouldFail(t *testing.T) { consensusCoreMock.Rounder(), consensusCoreMock.ShardCoordinator(), consensusCoreMock.SyncTimer(), - consensusCoreMock.ValidatorGroupSelector()) + consensusCoreMock.NodesCoordinator()) assert.Nil(t, consensusCore) assert.Equal(t, spos.ErrNilMultiSigner, err) @@ -314,7 +314,7 @@ func TestConsensusCore_WithNilRounderShouldFail(t *testing.T) { nil, consensusCoreMock.ShardCoordinator(), consensusCoreMock.SyncTimer(), - consensusCoreMock.ValidatorGroupSelector()) + consensusCoreMock.NodesCoordinator()) assert.Nil(t, consensusCore) assert.Equal(t, spos.ErrNilRounder, err) @@ -340,7 +340,7 @@ func TestConsensusCore_WithNilShardCoordinatorShouldFail(t *testing.T) { consensusCoreMock.Rounder(), nil, consensusCoreMock.SyncTimer(), - consensusCoreMock.ValidatorGroupSelector()) + consensusCoreMock.NodesCoordinator()) assert.Nil(t, consensusCore) assert.Equal(t, spos.ErrNilShardCoordinator, err) @@ -366,7 +366,7 @@ func TestConsensusCore_WithNilSyncTimerShouldFail(t *testing.T) { consensusCoreMock.Rounder(), consensusCoreMock.ShardCoordinator(), nil, - consensusCoreMock.ValidatorGroupSelector()) + consensusCoreMock.NodesCoordinator()) assert.Nil(t, consensusCore) assert.Equal(t, spos.ErrNilSyncTimer, err) @@ -418,7 +418,7 @@ func TestConsensusCore_CreateConsensusCoreShouldWork(t *testing.T) { consensusCoreMock.Rounder(), consensusCoreMock.ShardCoordinator(), consensusCoreMock.SyncTimer(), - consensusCoreMock.ValidatorGroupSelector()) + consensusCoreMock.NodesCoordinator()) assert.NotNil(t, consensusCore) assert.Nil(t, err) diff --git a/consensus/spos/consensusState_test.go b/consensus/spos/consensusState_test.go index e7cbe595463..d9f5a8c0613 100644 --- a/consensus/spos/consensusState_test.go +++ b/consensus/spos/consensusState_test.go @@ -138,7 +138,7 @@ func TestConsensusState_GetNextConsensusGroupShouldFailWhenComputeValidatorsGrou cns := internalInitConsensusState() - vgs := mock.ValidatorGroupSelectorMock{} + vgs := mock.NodesCoordinatorMock{} err := errors.New("error") vgs.ComputeValidatorsGroupCalled = func(randomness []byte) ([]sharding.Validator, error) { return nil, err @@ -153,7 +153,7 @@ func TestConsensusState_GetNextConsensusGroupShouldWork(t *testing.T) { cns := internalInitConsensusState() - vgs := mock.ValidatorGroupSelectorMock{} + vgs := mock.NodesCoordinatorMock{} nextConsensusGroup, err := cns.GetNextConsensusGroup("", vgs) assert.Nil(t, err) diff --git a/consensus/spos/interface.go b/consensus/spos/interface.go index 71737728ff8..fc1a1292186 100644 --- 
a/consensus/spos/interface.go +++ b/consensus/spos/interface.go @@ -39,7 +39,7 @@ type ConsensusCoreHandler interface { // SyncTimer gets the SyncTimer stored in the ConsensusCore SyncTimer() ntp.SyncTimer // NodesCoordinator gets the NodesCoordinator stored in the ConsensusCore - ValidatorGroupSelector() sharding.NodesCoordinator + NodesCoordinator() sharding.NodesCoordinator // RandomnessPrivateKey returns the private key stored in the ConsensusStore used for randomness generation RandomnessPrivateKey() crypto.PrivateKey // RandomnessSingleSigner returns the single signer stored in the ConsensusStore used for randomness generation diff --git a/integrationTests/multiShard/block/executingMiniblocks_test.go b/integrationTests/multiShard/block/executingMiniblocks_test.go index f3b5dff8f40..9fd56e85c08 100644 --- a/integrationTests/multiShard/block/executingMiniblocks_test.go +++ b/integrationTests/multiShard/block/executingMiniblocks_test.go @@ -47,8 +47,10 @@ func TestShouldProcessBlocksInMultiShardArchitecture(t *testing.T) { defer func() { advertiser.Close() - for _, n := range nodes { - n.node.Stop() + for _, nodeList := range nodes { + for _, n := range nodeList { + n.node.Stop() + } } }() @@ -60,7 +62,7 @@ func TestShouldProcessBlocksInMultiShardArchitecture(t *testing.T) { generateCoordinator, _ := sharding.NewMultiShardCoordinator(uint32(numOfShards), 0) txToGenerateInEachMiniBlock := 3 - proposerNode := nodes[0] + proposerNode := nodes[0][0] //sender shard keys, receivers keys sendersPrivateKeys := make([]crypto.PrivateKey, 3) @@ -111,10 +113,9 @@ func TestShouldProcessBlocksInMultiShardArchitecture(t *testing.T) { fmt.Println("Step 7. Nodes from proposer's shard will have to successfully process the block sent by the proposer...") fmt.Println(makeDisplayTable(nodes)) - for _, n := range nodes { - isNodeInSenderShardAndNotProposer := n.shardId == senderShard && n != proposerNode - if isNodeInSenderShardAndNotProposer { - assert.NotNil(t, n.headers, "no headers received") + for _, n := range nodes[senderShard] { + isNotProposer := n != proposerNode + if isNotProposer { n.blkc.SetGenesisHeaderHash(n.headers[0].GetPrevHash()) err := n.blkProcessor.ProcessBlock( n.blkc, @@ -131,7 +132,9 @@ func TestShouldProcessBlocksInMultiShardArchitecture(t *testing.T) { } fmt.Println("Step 7. Metachain processes the received header...") - metaNode := nodes[len(nodes)-1] + metaNodeList := nodes[sharding.MetachainShardId] + + metaNode := metaNodeList[len(metaNodeList)-1] _, metaHeader := proposeMetaBlock(t, metaNode, uint32(1)) metaNode.broadcastMessenger.BroadcastBlock(nil, metaHeader) metaNode.blkProcessor.CommitBlock(metaNode.blkc, metaHeader, &block.MetaBlockBody{}) @@ -161,7 +164,7 @@ func TestShouldProcessBlocksInMultiShardArchitecture(t *testing.T) { fmt.Println(makeDisplayTable(nodes)) fmt.Println("Step 8. 
Test nodes from proposer shard to have the correct balances...") - for _, n := range nodes { + for _, n := range nodes[senderShard] { isNodeInSenderShard := n.shardId == senderShard if !isNodeInSenderShard { continue @@ -183,7 +186,7 @@ func TestShouldProcessBlocksInMultiShardArchitecture(t *testing.T) { firstReceiverNodes := make([]*testNode, 0) //get first nodes from receiver shards for _, shardId := range recvShards { - receiverProposer := nodes[int(shardId)*nodesPerShard] + receiverProposer := nodes[shardId][0] firstReceiverNodes = append(firstReceiverNodes, receiverProposer) body, header := proposeBlock(t, receiverProposer, uint32(1)) @@ -194,12 +197,13 @@ func TestShouldProcessBlocksInMultiShardArchitecture(t *testing.T) { receiverProposer.broadcastMessenger.BroadcastTransactions(transactions) receiverProposer.blkProcessor.CommitBlock(receiverProposer.blkc, header, body) } + fmt.Println("Delaying for disseminating miniblocks and headers...") time.Sleep(time.Second * 5) fmt.Println(makeDisplayTable(nodes)) for _, shardId := range recvShards { - receiverProposer := nodes[int(shardId)*nodesPerShard] + receiverProposer := nodes[shardId][0] body, header := proposeBlock(t, receiverProposer, uint32(2)) receiverProposer.broadcastMessenger.BroadcastBlock(body, header) @@ -214,7 +218,7 @@ func TestShouldProcessBlocksInMultiShardArchitecture(t *testing.T) { fmt.Println(makeDisplayTable(nodes)) for _, shardId := range recvShards { - receiverProposer := nodes[int(shardId)*nodesPerShard] + receiverProposer := nodes[shardId][0] body, header := proposeBlock(t, receiverProposer, uint32(3)) receiverProposer.broadcastMessenger.BroadcastBlock(body, header) @@ -229,7 +233,7 @@ func TestShouldProcessBlocksInMultiShardArchitecture(t *testing.T) { fmt.Println(makeDisplayTable(nodes)) for _, shardId := range recvShards { - receiverProposer := nodes[int(shardId)*nodesPerShard] + receiverProposer := nodes[shardId][0] body, header := proposeBlock(t, receiverProposer, uint32(4)) receiverProposer.broadcastMessenger.BroadcastBlock(body, header) @@ -245,25 +249,9 @@ func TestShouldProcessBlocksInMultiShardArchitecture(t *testing.T) { fmt.Println("Step 10. NodesSetup from receivers shards will have to successfully process the block sent by their proposer...") fmt.Println(makeDisplayTable(nodes)) - for _, n := range nodes { - if n.shardId == sharding.MetachainShardId { - continue - } - - isNodeInReceiverShardAndNotProposer := false - for _, shardId := range recvShards { - if n.shardId == shardId { - isNodeInReceiverShardAndNotProposer = true - break - } - } - for _, proposerReceiver := range firstReceiverNodes { - if proposerReceiver == n { - isNodeInReceiverShardAndNotProposer = false - } - } + for _, recvShardId := range recvShards { + for _, n := range nodes[recvShardId][1:] { - if isNodeInReceiverShardAndNotProposer { if len(n.headers) > 0 { n.blkc.SetGenesisHeaderHash(n.headers[0].GetPrevHash()) err := n.blkProcessor.ProcessBlock( @@ -350,28 +338,20 @@ func TestShouldProcessBlocksInMultiShardArchitecture(t *testing.T) { return } } + } } fmt.Println("Step 11. 
Test nodes from receiver shards to have the correct balances...") - for _, n := range nodes { - isNodeInReceiverShardAndNotProposer := false - for _, shardId := range recvShards { - if n.shardId == shardId { - isNodeInReceiverShardAndNotProposer = true - break - } - } - if !isNodeInReceiverShardAndNotProposer { - continue - } + for _, recvShardId := range recvShards { + for _, n := range nodes[recvShardId][1:] { - //test receiver balances from same shard - for _, sk := range receiversPrivateKeys[n.shardId] { - testPrivateKeyHasBalance(t, n, sk, valToTransferPerTx) + //test receiver balances from same shard + for _, sk := range receiversPrivateKeys[n.shardId] { + testPrivateKeyHasBalance(t, n, sk, valToTransferPerTx) + } } } - } func generateAndDisseminateTxs( @@ -428,26 +408,28 @@ func skToPk(sk crypto.PrivateKey) []byte { } func createMintingForSenders( - nodes []*testNode, + nodes map[uint32][]*testNode, senderShard uint32, sendersPrivateKeys []crypto.PrivateKey, value *big.Int, ) { - for _, n := range nodes { - //only sender shard nodes will be minted - if n.shardId != senderShard { - continue - } + for _, nodeList := range nodes { + for _, n := range nodeList { + //only sender shard nodes will be minted + if n.shardId != senderShard { + continue + } - for _, sk := range sendersPrivateKeys { - pkBuff, _ := sk.GeneratePublic().ToByteArray() - adr, _ := testAddressConverter.CreateAddressFromPublicKeyBytes(pkBuff) - account, _ := n.accntState.GetAccountWithJournal(adr) - account.(*state.Account).SetBalanceWithJournal(value) - } + for _, sk := range sendersPrivateKeys { + pkBuff, _ := sk.GeneratePublic().ToByteArray() + adr, _ := testAddressConverter.CreateAddressFromPublicKeyBytes(pkBuff) + account, _ := n.accntState.GetAccountWithJournal(adr) + account.(*state.Account).SetBalanceWithJournal(value) + } - n.accntState.Commit() + n.accntState.Commit() + } } } @@ -490,7 +472,7 @@ func proposeMetaBlock(t *testing.T, proposer *testNode, round uint32) (data.Body metaHeader.SetNonce(uint64(round)) metaHeader.SetRound(round) - metaHeader.SetPubKeysBitmap(make([]byte, 0)) + metaHeader.SetPubKeysBitmap([]byte{1, 0, 0}) sig, _ := testMultiSig.AggregateSigs(nil) metaHeader.SetSignature(sig) currHdr := proposer.blkc.GetCurrentBlockHeader() diff --git a/integrationTests/multiShard/block/interceptedBlocks_test.go b/integrationTests/multiShard/block/interceptedBlocks_test.go index 728621a7092..6f2256e811b 100644 --- a/integrationTests/multiShard/block/interceptedBlocks_test.go +++ b/integrationTests/multiShard/block/interceptedBlocks_test.go @@ -43,8 +43,10 @@ func TestHeaderAndMiniBlocksAreRoutedCorrectly(t *testing.T) { defer func() { advertiser.Close() - for _, n := range nodes { - n.node.Stop() + for _, nodeList := range nodes { + for _, n := range nodeList { + n.node.Stop() + } } }() @@ -52,44 +54,48 @@ func TestHeaderAndMiniBlocksAreRoutedCorrectly(t *testing.T) { fmt.Println("Delaying for node bootstrap and topic announcement...") time.Sleep(time.Second * 5) + proposerNode := nodes[0][0] + fmt.Println("Generating header and block body...") body, hdr := generateHeaderAndBody(senderShard, recvShards...) 
- err := nodes[0].broadcastMessenger.BroadcastBlock(body, hdr) + err := proposerNode.broadcastMessenger.BroadcastBlock(body, hdr) assert.Nil(t, err) - miniBlocks, _, _ := nodes[0].blkProcessor.MarshalizedDataToBroadcast(hdr, body) - err = nodes[0].broadcastMessenger.BroadcastMiniBlocks(miniBlocks) + miniBlocks, _, _ := proposerNode.blkProcessor.MarshalizedDataToBroadcast(hdr, body) + err = proposerNode.broadcastMessenger.BroadcastMiniBlocks(miniBlocks) assert.Nil(t, err) time.Sleep(time.Second * 10) - for _, n := range nodes { - isSenderShard := n.shardId == senderShard - isRecvShard := uint32InSlice(n.shardId, recvShards) - isRecvMetachain := n.shardId == sharding.MetachainShardId + for shardId, nodeList := range nodes { + for _, n := range nodeList { + isSenderShard := shardId == senderShard + isRecvShard := uint32InSlice(n.shardId, recvShards) + isRecvMetachain := n.shardId == sharding.MetachainShardId - assert.Equal(t, int32(0), atomic.LoadInt32(&n.metachainHdrRecv)) + assert.Equal(t, int32(0), atomic.LoadInt32(&n.metachainHdrRecv)) - if isSenderShard { - assert.Equal(t, int32(1), atomic.LoadInt32(&n.headersRecv)) + if isSenderShard { + assert.Equal(t, int32(1), atomic.LoadInt32(&n.headersRecv)) - shards := []uint32{senderShard} - shards = append(shards, recvShards...) + shards := []uint32{senderShard} + shards = append(shards, recvShards...) - expectedMiniblocks := getMiniBlocksHashesFromShardIds(body.(block.Body), shards...) + expectedMiniblocks := getMiniBlocksHashesFromShardIds(body.(block.Body), shards...) - assert.True(t, equalSlices(expectedMiniblocks, n.miniblocksHashes)) - } + assert.True(t, equalSlices(expectedMiniblocks, n.miniblocksHashes)) + } - if isRecvShard && !isSenderShard { - assert.Equal(t, int32(0), atomic.LoadInt32(&n.headersRecv)) - expectedMiniblocks := getMiniBlocksHashesFromShardIds(body.(block.Body), n.shardId) - assert.True(t, equalSlices(expectedMiniblocks, n.miniblocksHashes)) - } + if isRecvShard && !isSenderShard { + assert.Equal(t, int32(0), atomic.LoadInt32(&n.headersRecv)) + expectedMiniblocks := getMiniBlocksHashesFromShardIds(body.(block.Body), n.shardId) + assert.True(t, equalSlices(expectedMiniblocks, n.miniblocksHashes)) + } - if !isSenderShard && !isRecvShard && !isRecvMetachain { - //other nodes should have not received neither the header nor the miniblocks - assert.Equal(t, int32(0), atomic.LoadInt32(&n.headersRecv)) - assert.Equal(t, int32(0), atomic.LoadInt32(&n.miniblocksRecv)) + if !isSenderShard && !isRecvShard && !isRecvMetachain { + //other nodes should have not received neither the header nor the miniblocks + assert.Equal(t, int32(0), atomic.LoadInt32(&n.headersRecv)) + assert.Equal(t, int32(0), atomic.LoadInt32(&n.miniblocksRecv)) + } } } diff --git a/integrationTests/multiShard/block/testInitializer.go b/integrationTests/multiShard/block/testInitializer.go index 5163cf0d9ac..e8d854e4095 100644 --- a/integrationTests/multiShard/block/testInitializer.go +++ b/integrationTests/multiShard/block/testInitializer.go @@ -7,6 +7,7 @@ import ( "encoding/base64" "encoding/hex" "fmt" + "math/big" "math/rand" "strings" "sync" @@ -55,7 +56,6 @@ import ( "github.com/ElrondNetwork/elrond-go/storage/storageUnit" "github.com/btcsuite/btcd/btcec" libp2pCrypto "github.com/libp2p/go-libp2p-core/crypto" - "math/big" ) var r *rand.Rand @@ -353,42 +353,50 @@ func getConnectableAddress(mes p2p.Messenger) string { return "" } -func makeDisplayTable(nodes []*testNode) string { +func makeDisplayTable(nodes map[uint32][]*testNode) string { header := 
[]string{"pk", "shard ID", "txs", "miniblocks", "headers", "metachain headers", "connections"} - dataLines := make([]*display.LineData, len(nodes)) - for idx, n := range nodes { - buffPk, _ := n.pk.ToByteArray() - - dataLines[idx] = display.NewLineData( - false, - []string{ - hex.EncodeToString(buffPk), - fmt.Sprintf("%d", n.shardId), - fmt.Sprintf("%d", atomic.LoadInt32(&n.txsRecv)), - fmt.Sprintf("%d", atomic.LoadInt32(&n.miniblocksRecv)), - fmt.Sprintf("%d", atomic.LoadInt32(&n.headersRecv)), - fmt.Sprintf("%d", atomic.LoadInt32(&n.metachainHdrRecv)), - fmt.Sprintf("%d / %d", len(n.messenger.ConnectedPeersOnTopic(factory.TransactionTopic+"_"+ - fmt.Sprintf("%d", n.shardId))), len(n.messenger.ConnectedPeers())), - }, - ) + dataLines := make([]*display.LineData, 0) + + for _, nList := range nodes { + for _, n := range nList { + buffPk, _ := n.pk.ToByteArray() + + dataLine := display.NewLineData( + false, + []string{ + hex.EncodeToString(buffPk), + fmt.Sprintf("%d", n.shardId), + fmt.Sprintf("%d", atomic.LoadInt32(&n.txsRecv)), + fmt.Sprintf("%d", atomic.LoadInt32(&n.miniblocksRecv)), + fmt.Sprintf("%d", atomic.LoadInt32(&n.headersRecv)), + fmt.Sprintf("%d", atomic.LoadInt32(&n.metachainHdrRecv)), + fmt.Sprintf("%d / %d", len(n.messenger.ConnectedPeersOnTopic(factory.TransactionTopic+"_"+ + fmt.Sprintf("%d", n.shardId))), len(n.messenger.ConnectedPeers())), + }, + ) + + dataLines = append(dataLines, dataLine) + } } table, _ := display.CreateTableString(header, dataLines) return table } -func displayAndStartNodes(nodes []*testNode) { - for _, n := range nodes { - skBuff, _ := n.sk.ToByteArray() - pkBuff, _ := n.pk.ToByteArray() +func displayAndStartNodes(nodes map[uint32][]*testNode) { + for _, nodeList := range nodes { - fmt.Printf("Shard ID: %v, sk: %s, pk: %s\n", - n.shardId, - hex.EncodeToString(skBuff), - hex.EncodeToString(pkBuff), - ) - _ = n.node.Start() - _ = n.node.P2PBootstrap() + for _, n := range nodeList { + skBuff, _ := n.sk.ToByteArray() + pkBuff, _ := n.pk.ToByteArray() + + fmt.Printf("Shard ID: %v, sk: %s, pk: %s\n", + n.shardId, + hex.EncodeToString(skBuff), + hex.EncodeToString(pkBuff), + ) + _ = n.node.Start() + _ = n.node.P2PBootstrap() + } } } @@ -411,18 +419,18 @@ func createNodes( numOfShards int, nodesPerShard int, serviceID string, -) []*testNode { +) map[uint32][]*testNode { //first node generated will have is pk belonging to firstSkShardId numMetaChainNodes := 1 - nodes := make([]*testNode, int(numOfShards)*nodesPerShard+numMetaChainNodes) + nodes := make(map[uint32][]*testNode) nodesCoordinators := make(map[uint32][]sharding.NodesCoordinator) nodesPublicKeys := make(map[uint32][]string) - idx := 0 for shardId := 0; shardId < numOfShards; shardId++ { shardNodesCoordinators := make([]sharding.NodesCoordinator, 0) shardPubKeys := make([]string, 0) + shardNodes := make([]*testNode, nodesPerShard) for j := 0; j < nodesPerShard; j++ { testNode := &testNode{ @@ -490,10 +498,10 @@ func createNodes( &singlesig.SchnorrSigner{}, ) - nodes[idx] = testNode - idx++ + shardNodes[j] = testNode } + nodes[uint32(shardId)] = shardNodes nodesCoordinators[uint32(shardId)] = shardNodesCoordinators nodesPublicKeys[uint32(shardId)] = shardPubKeys } @@ -501,6 +509,7 @@ func createNodes( metaNodesCoordinators := make([]sharding.NodesCoordinator, 0) metaNodesPubKeys := make([]string, 0) + metaNodes := make([]*testNode, numMetaChainNodes) for i := 0; i < numMetaChainNodes; i++ { shardCoordinatorMeta, _ := sharding.NewMultiShardCoordinator(uint32(numOfShards), sharding.MetachainShardId) 
nodesCoordinator, _ := sharding.NewIndexHashedNodesCoordinator( @@ -510,7 +519,7 @@ func createNodes( uint32(numOfShards), ) - tn := createMetaNetNode( + metaNodes[i] = createMetaNetNode( createTestMetaDataPool(), createAccountsDB(), shardCoordinatorMeta, @@ -519,13 +528,11 @@ func createNodes( ) metaNodesCoordinators = append(metaNodesCoordinators, nodesCoordinator) - pkBytes, _ := tn.pk.ToByteArray() + pkBytes, _ := metaNodes[i].pk.ToByteArray() metaNodesPubKeys = append(metaNodesPubKeys, string(pkBytes)) - - idx := i + int(numOfShards)*nodesPerShard - nodes[idx] = tn } + nodes[sharding.MetachainShardId] = metaNodes nodesCoordinators[sharding.MetachainShardId] = metaNodesCoordinators nodesPublicKeys[sharding.MetachainShardId] = metaNodesPubKeys mapValidators := genValidatorsFromPubKeys(nodesPublicKeys) diff --git a/integrationTests/multiShard/metablock/testInitializer.go b/integrationTests/multiShard/metablock/testInitializer.go index 143317921b4..c21a4fb880d 100644 --- a/integrationTests/multiShard/metablock/testInitializer.go +++ b/integrationTests/multiShard/metablock/testInitializer.go @@ -193,7 +193,6 @@ func createNodes( nodesCoordMap := make(map[uint32][]sharding.NodesCoordinator) pkMap := make(map[uint32][]string) - nodes := make([]*testNode, nodesInMetachain+1) //first node is a shard node shardCoordinator, _ := sharding.NewMultiShardCoordinator(1, senderShard) @@ -208,9 +207,7 @@ func createNodes( ) pk, _ := nodes[0].pk.ToByteArray() - shard0NodesCoord := make([]sharding.NodesCoordinator, 1) - shard0NodesCoord[0] = nodesCoordinator - nodesCoordMap[0] = shard0NodesCoord + nodesCoordMap[0] = []sharding.NodesCoordinator{nodesCoordinator} pkMap[0] = []string{string(pk)} metaNodesCoordinators := make([]sharding.NodesCoordinator, 0) diff --git a/integrationTests/multisig/belnevMultisig_test.go b/integrationTests/multisig/belnevMultisig_test.go index 30f97aa7e01..2d2fa0de35c 100644 --- a/integrationTests/multisig/belnevMultisig_test.go +++ b/integrationTests/multisig/belnevMultisig_test.go @@ -228,47 +228,3 @@ func TestBelnev_MultiSigningMultipleSignersOK(t *testing.T) { err = verifySigAllSigners(multiSigners, message, aggSig, pubKeysStr, bitmap, consensusGroupSize) assert.Nil(t, err) } - -func TestBelnev_MultiSigningMultipleSignersVerifyWithNodesCoordinatorWrongGroup(t *testing.T) { - consensusGroupSize := uint16(21) - suite := kyber.NewBlakeSHA256Ed25519() - kg := signing.NewKeyGenerator(suite) - - privKeys, pubKeysStr := generateKeyPairs(kg, consensusGroupSize) - hasher := sha256.Sha256{} - - multiSigners, err := createMultiSigners(kg, hasher, privKeys, pubKeysStr) - assert.Nil(t, err) - - err = createAndSetCommitmentsAllSigners(multiSigners) - assert.Nil(t, err) - - bitmapSize := consensusGroupSize/8 + 1 - // set bitmap to select all 21 members - bitmap := make([]byte, bitmapSize) - byteMask := 0xFF - - for i := uint16(0); i < bitmapSize; i++ { - bitmap[i] = byte((((1 << consensusGroupSize) - 1) >> i) & byteMask) - } - - err = aggregateCommitmentsForAllSigners(multiSigners, bitmap, consensusGroupSize) - assert.Nil(t, err) - - message := []byte("message to be signed") - assert.Nil(t, err) - - err = createAndSetSignatureSharesAllSigners(multiSigners, message, bitmap) - assert.Nil(t, err) - - aggSig, err := aggregateSignatureSharesAllSigners(multiSigners, bitmap, consensusGroupSize) - assert.Nil(t, err) - - err = verifySigAllSigners(multiSigners, message, aggSig, pubKeysStr, bitmap, consensusGroupSize) - assert.Nil(t, err) -} - -func 
TestBelnev_MultiSigningMultipleSignersVerifyWithNodesCoordinatorOK(t *testing.T) { - -} - diff --git a/integrationTests/singleShard/block/interceptedRequestHdr_test.go b/integrationTests/singleShard/block/interceptedRequestHdr_test.go index f89bf5f4d71..d5d05412b0c 100644 --- a/integrationTests/singleShard/block/interceptedRequestHdr_test.go +++ b/integrationTests/singleShard/block/interceptedRequestHdr_test.go @@ -72,14 +72,10 @@ func TestNode_GenerateSendInterceptHeaderByNonceWithNetMessenger(t *testing.T) { ) pubKeyMap := make(map[uint32][]string) - shard0PubKeys := make([]string, 2) pk1Bytes, _ := pk1.ToByteArray() pk2Bytes, _ := pk2.ToByteArray() - shard0PubKeys[0] = string(pk1Bytes) - shard0PubKeys[1] = string(pk2Bytes) - - pubKeyMap[0] = shard0PubKeys + pubKeyMap[0] = []string{string(pk1Bytes), string(pk2Bytes)} validatorsMap := genValidatorsFromPubKeys(pubKeyMap) nodesCoordinator1.LoadNodesPerShards(validatorsMap) nodesCoordinator2.LoadNodesPerShards(validatorsMap) @@ -132,7 +128,7 @@ func TestNode_GenerateSendInterceptHeaderByNonceWithNetMessenger(t *testing.T) { } hdrBuff, _ := marshalizer.Marshal(&hdr1) - msig, _ := multiSigner.Create(shard0PubKeys, 0) + msig, _ := multiSigner.Create(pubKeyMap[0], 0) bitmap := []byte{1, 0, 0} _, _ = msig.CreateSignatureShare(hdrBuff, bitmap) aggSig, _ := msig.AggregateSigs(bitmap) diff --git a/node/node.go b/node/node.go index 33364f6e1dd..787fce7b7b7 100644 --- a/node/node.go +++ b/node/node.go @@ -287,7 +287,9 @@ func (n *Node) StartConsensus() error { n.rounder, n.shardCoordinator, n.syncTimer, - nCoordinator) + nCoordinator, + ) + if err != nil { return err } diff --git a/process/block/interceptedBlockHeader.go b/process/block/interceptedBlockHeader.go index b2461eb39f2..d1b727466a4 100644 --- a/process/block/interceptedBlockHeader.go +++ b/process/block/interceptedBlockHeader.go @@ -3,19 +3,19 @@ package block import ( "github.com/ElrondNetwork/elrond-go/crypto" "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/marshal" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/sharding" - "github.com/ElrondNetwork/elrond-go/marshal" ) // InterceptedHeader represents the wrapper over HeaderWrapper struct. 
// It implements Newer and Hashed interfaces type InterceptedHeader struct { *block.Header - multiSigVerifier crypto.MultiSigVerifier - hash []byte - nodesCoordinator sharding.NodesCoordinator - marshalizer marshal.Marshalizer + multiSigVerifier crypto.MultiSigVerifier + hash []byte + nodesCoordinator sharding.NodesCoordinator + marshalizer marshal.Marshalizer } // NewInterceptedHeader creates a new instance of InterceptedHeader struct @@ -26,10 +26,10 @@ func NewInterceptedHeader( ) *InterceptedHeader { return &InterceptedHeader{ - Header: &block.Header{}, - multiSigVerifier: multiSigVerifier, - nodesCoordinator: nodesCoordinator, - marshalizer: marshalizer, + Header: &block.Header{}, + multiSigVerifier: multiSigVerifier, + nodesCoordinator: nodesCoordinator, + marshalizer: marshalizer, } } diff --git a/process/block/interceptedMetaBlockHeader.go b/process/block/interceptedMetaBlockHeader.go index 5b2f7e3aa08..227fd9a3334 100644 --- a/process/block/interceptedMetaBlockHeader.go +++ b/process/block/interceptedMetaBlockHeader.go @@ -5,24 +5,31 @@ import ( "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/marshal" ) // InterceptedHeader represents the wrapper over HeaderWrapper struct. // It implements Newer and Hashed interfaces type InterceptedMetaHeader struct { *block.MetaBlock - multiSigVerifier crypto.MultiSigVerifier - hash []byte + multiSigVerifier crypto.MultiSigVerifier + hash []byte + nodesCoordinator sharding.NodesCoordinator + marshalizer marshal.Marshalizer } // NewInterceptedHeader creates a new instance of InterceptedHeader struct func NewInterceptedMetaHeader( multiSigVerifier crypto.MultiSigVerifier, + nodesCoordinator sharding.NodesCoordinator, + marshalizer marshal.Marshalizer, ) *InterceptedMetaHeader { return &InterceptedMetaHeader{ - MetaBlock: &block.MetaBlock{}, - multiSigVerifier: multiSigVerifier, + MetaBlock: &block.MetaBlock{}, + multiSigVerifier: multiSigVerifier, + nodesCoordinator: nodesCoordinator, + marshalizer: marshalizer, } } @@ -97,10 +104,46 @@ func (imh *InterceptedMetaHeader) Integrity(coordinator sharding.Coordinator) er // VerifySig verifies a signature func (imh *InterceptedMetaHeader) VerifySig() error { - // TODO: Check block signature after multisig will be implemented - // TODO: the interceptors do not have access yet to consensus group selection to validate multisigs - // TODO: verify that the block proposer is among the signers - return nil + randSeed := imh.GetPrevRandSeed() + bitmap := imh.GetPubKeysBitmap() + + if len(bitmap) == 0 { + return process.ErrNilPubKeysBitmap + } + + if bitmap[0]&1 == 0 { + return process.ErrBlockProposerSignatureMissing + + } + consensusPubKeys, err := imh.nodesCoordinator.GetValidatorsPublicKeys(randSeed) + if err != nil { + return err + } + + verifier, err := imh.multiSigVerifier.Create(consensusPubKeys, 0) + if err != nil { + return err + } + + err = verifier.SetAggregatedSig(imh.Signature) + if err != nil { + return err + } + + // get marshalled block header without signature and bitmap + // as this is the message that was signed + headerCopy := *imh + headerCopy.Signature = nil + headerCopy.PubKeysBitmap = nil + + headerBytes, err := imh.marshalizer.Marshal(headerCopy) + if err != nil { + return err + } + + err = verifier.Verify(headerBytes, bitmap) + + return err } // IsInterfaceNil return if there is no value under the interface diff --git 
a/process/block/interceptedMetaBlockHeader_test.go b/process/block/interceptedMetaBlockHeader_test.go index 2b989b86e50..11a3b91fed0 100644 --- a/process/block/interceptedMetaBlockHeader_test.go +++ b/process/block/interceptedMetaBlockHeader_test.go @@ -13,6 +13,8 @@ import ( func createTestInterceptedMetaHeader() *block.InterceptedMetaHeader { return block.NewInterceptedMetaHeader( mock.NewMultiSigner(), + &mock.NodesCoordinatorMock{}, + &mock.MarshalizerMock{Fail: false}, ) } @@ -297,7 +299,7 @@ func TestInterceptedMetaHeader_VerifySigOkValsShouldWork(t *testing.T) { hdr := createTestInterceptedMetaHeader() hdr.PrevHash = make([]byte, 0) - hdr.PubKeysBitmap = make([]byte, 0) + hdr.PubKeysBitmap = []byte{1, 0, 0} hdr.Signature = make([]byte, 0) hdr.RootHash = make([]byte, 0) hdr.PrevRandSeed = make([]byte, 0) diff --git a/process/block/interceptors/headerInterceptorBase_test.go b/process/block/interceptors/headerInterceptorBase_test.go index 3c0ebf197fc..7f55c503500 100644 --- a/process/block/interceptors/headerInterceptorBase_test.go +++ b/process/block/interceptors/headerInterceptorBase_test.go @@ -2,18 +2,18 @@ package interceptors_test import ( "errors" + "fmt" + "math/big" "testing" + "github.com/ElrondNetwork/elrond-go/consensus" dataBlock "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/block" "github.com/ElrondNetwork/elrond-go/process/block/interceptors" "github.com/ElrondNetwork/elrond-go/process/mock" - "github.com/stretchr/testify/assert" "github.com/ElrondNetwork/elrond-go/sharding" - "github.com/ElrondNetwork/elrond-go/consensus" - "math/big" - "fmt" + "github.com/stretchr/testify/assert" ) //------- NewHeaderInterceptorBase @@ -245,13 +245,11 @@ func createNodesCoordinator() sharding.NodesCoordinator { } //metachain - metachainValidators := make([]sharding.Validator, 0) pubKeyBytes := []byte("pk_meta") v, _ := consensus.NewValidator(big.NewInt(0), 1, pubKeyBytes) - metachainValidators = append(metachainValidators, v) validators[0] = shardValidators - validators[sharding.MetachainShardId] = metachainValidators + validators[sharding.MetachainShardId] = []sharding.Validator{v} nodesCoordinator := mock.NewNodesCoordinatorMock() nodesCoordinator.LoadNodesPerShards(validators) diff --git a/process/block/interceptors/metachainHeaderInterceptor.go b/process/block/interceptors/metachainHeaderInterceptor.go index 3b0ed1c8bd3..8ede6602996 100644 --- a/process/block/interceptors/metachainHeaderInterceptor.go +++ b/process/block/interceptors/metachainHeaderInterceptor.go @@ -22,6 +22,7 @@ type MetachainHeaderInterceptor struct { multiSigVerifier crypto.MultiSigVerifier hasher hashing.Hasher shardCoordinator sharding.Coordinator + nodesCoordinator sharding.NodesCoordinator } // NewMetachainHeaderInterceptor hooks a new interceptor for metachain block headers @@ -34,6 +35,7 @@ func NewMetachainHeaderInterceptor( multiSigVerifier crypto.MultiSigVerifier, hasher hashing.Hasher, shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, ) (*MetachainHeaderInterceptor, error) { if marshalizer == nil { @@ -57,6 +59,9 @@ func NewMetachainHeaderInterceptor( if shardCoordinator == nil { return nil, process.ErrNilShardCoordinator } + if nodesCoordinator == nil { + return nil, process.ErrNilNodesCoordinator + } return &MetachainHeaderInterceptor{ messageChecker: &messageChecker{}, @@ -66,6 +71,7 @@ func NewMetachainHeaderInterceptor( multiSigVerifier: multiSigVerifier, hasher: hasher, 
shardCoordinator: shardCoordinator, + nodesCoordinator: nodesCoordinator, metachainHeadersNonces: metachainHeadersNonces, }, nil } @@ -78,7 +84,7 @@ func (mhi *MetachainHeaderInterceptor) ProcessReceivedMessage(message p2p.Messag return err } - metaHdrIntercepted := block.NewInterceptedMetaHeader(mhi.multiSigVerifier) + metaHdrIntercepted := block.NewInterceptedMetaHeader(mhi.multiSigVerifier, mhi.nodesCoordinator, mhi.marshalizer) err = mhi.marshalizer.Unmarshal(metaHdrIntercepted, message.Data()) if err != nil { return err diff --git a/process/block/interceptors/metachainHeaderInterceptor_test.go b/process/block/interceptors/metachainHeaderInterceptor_test.go index 98ab5bd9d21..7da93b5c1d9 100644 --- a/process/block/interceptors/metachainHeaderInterceptor_test.go +++ b/process/block/interceptors/metachainHeaderInterceptor_test.go @@ -30,6 +30,7 @@ func TestNewMetachainHeaderInterceptor_NilMarshalizerShouldErr(t *testing.T) { mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), ) assert.Equal(t, process.ErrNilMarshalizer, err) @@ -49,6 +50,7 @@ func TestNewMetachainHeaderInterceptor_NilMetachainHeadersShouldErr(t *testing.T mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), ) assert.Equal(t, process.ErrNilMetaHeadersDataPool, err) @@ -68,6 +70,7 @@ func TestNewMetachainHeaderInterceptor_NilMetachainHeadersNoncesShouldErr(t *tes mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), ) assert.Equal(t, process.ErrNilMetaHeadersNoncesDataPool, err) @@ -87,6 +90,7 @@ func TestNewMetachainHeaderInterceptor_NilMetachainStorerShouldErr(t *testing.T) mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), ) assert.Equal(t, process.ErrNilMetaHeadersStorage, err) @@ -107,6 +111,7 @@ func TestNewMetachainHeaderInterceptor_NilMultiSignerShouldErr(t *testing.T) { nil, mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), ) assert.Nil(t, mhi) @@ -127,6 +132,7 @@ func TestNewMetachainHeaderInterceptor_NilHasherShouldErr(t *testing.T) { mock.NewMultiSigner(), nil, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), ) assert.Equal(t, process.ErrNilHasher, err) @@ -147,12 +153,34 @@ func TestNewMetachainHeaderInterceptor_NilShardCoordinatorShouldErr(t *testing.T mock.NewMultiSigner(), mock.HasherMock{}, nil, + mock.NewNodesCoordinatorMock(), ) assert.Equal(t, process.ErrNilShardCoordinator, err) assert.Nil(t, mhi) } +func TestNewMetachainHeaderInterceptor_NilNodesCoordinatorShouldErr(t *testing.T) { + t.Parallel() + + metachainHeaders := &mock.CacherStub{} + metachainStorer := &mock.StorerStub{} + + mhi, err := interceptors.NewMetachainHeaderInterceptor( + &mock.MarshalizerMock{}, + metachainHeaders, + &mock.Uint64CacherStub{}, + metachainStorer, + mock.NewMultiSigner(), + mock.HasherMock{}, + mock.NewOneShardCoordinatorMock(), + nil, + ) + + assert.Equal(t, process.ErrNilNodesCoordinator, err) + assert.Nil(t, mhi) +} + func TestNewMetachainHeaderInterceptor_OkValsShouldWork(t *testing.T) { t.Parallel() @@ -167,6 +195,7 @@ func TestNewMetachainHeaderInterceptor_OkValsShouldWork(t *testing.T) { mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), ) assert.Nil(t, err) @@ -189,6 +218,7 @@ func TestMetachainHeaderInterceptor_ProcessReceivedMessageNilMessageShouldErr(t 
mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), ) assert.Equal(t, process.ErrNilMessage, mhi.ProcessReceivedMessage(nil)) @@ -208,6 +238,7 @@ func TestMetachainHeaderInterceptor_ProcessReceivedMessageNilDataToProcessShould mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), ) msg := &mock.P2PMessageMock{} @@ -234,6 +265,7 @@ func TestMetachainHeaderInterceptor_ProcessReceivedMessageMarshalizerErrorsAtUnm mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), ) msg := &mock.P2PMessageMock{ @@ -250,6 +282,8 @@ func TestMetachainHeaderInterceptor_ProcessReceivedMessageSanityCheckFailedShoul metachainStorer := &mock.StorerStub{} marshalizer := &mock.MarshalizerMock{} multisigner := mock.NewMultiSigner() + nodesCoordinator := mock.NewNodesCoordinatorMock() + mhi, _ := interceptors.NewMetachainHeaderInterceptor( marshalizer, metachainHeaders, @@ -258,9 +292,10 @@ func TestMetachainHeaderInterceptor_ProcessReceivedMessageSanityCheckFailedShoul multisigner, mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), + nodesCoordinator, ) - hdr := block.NewInterceptedMetaHeader(multisigner) + hdr := block.NewInterceptedMetaHeader(multisigner, nodesCoordinator, marshalizer) buff, _ := marshalizer.Marshal(hdr) msg := &mock.P2PMessageMock{ DataField: buff, @@ -283,6 +318,8 @@ func TestMetachainHeaderInterceptor_ProcessReceivedMessageValsOkShouldWork(t *te }, } multisigner := mock.NewMultiSigner() + nodesCoordinator := createNodesCoordinator() + mhi, _ := interceptors.NewMetachainHeaderInterceptor( marshalizer, metachainHeaders, @@ -291,12 +328,13 @@ func TestMetachainHeaderInterceptor_ProcessReceivedMessageValsOkShouldWork(t *te multisigner, mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), + nodesCoordinator, ) - hdr := block.NewInterceptedMetaHeader(multisigner) + hdr := block.NewInterceptedMetaHeader(multisigner, nodesCoordinator, marshalizer) hdr.Nonce = testedNonce hdr.PrevHash = make([]byte, 0) - hdr.PubKeysBitmap = make([]byte, 0) + hdr.PubKeysBitmap = []byte{1, 0, 0} hdr.Signature = make([]byte, 0) hdr.SetHash([]byte("aaa")) hdr.RootHash = make([]byte, 0) @@ -354,6 +392,9 @@ func TestMetachainHeaderInterceptor_ProcessReceivedMessageIsInStorageShouldNotAd return nil }, } + + nodesCoordinator := createNodesCoordinator() + mhi, _ := interceptors.NewMetachainHeaderInterceptor( marshalizer, metachainHeaders, @@ -362,12 +403,13 @@ func TestMetachainHeaderInterceptor_ProcessReceivedMessageIsInStorageShouldNotAd multisigner, mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), + nodesCoordinator, ) - hdr := block.NewInterceptedMetaHeader(multisigner) + hdr := block.NewInterceptedMetaHeader(multisigner, nodesCoordinator, marshalizer) hdr.Nonce = testedNonce hdr.PrevHash = make([]byte, 0) - hdr.PubKeysBitmap = make([]byte, 0) + hdr.PubKeysBitmap = []byte{1, 0, 0} hdr.Signature = make([]byte, 0) hdr.RootHash = make([]byte, 0) hdr.SetHash([]byte("aaa")) diff --git a/process/block/shardblock.go b/process/block/shardblock.go index 98f66a54f64..2fea3a15d44 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -1313,7 +1313,6 @@ func (sp *shardProcessor) createMiniBlocks( // CreateBlockHeader creates a miniblock header list given a block body func (sp *shardProcessor) CreateBlockHeader(bodyHandler data.BodyHandler, round uint32, haveTime func() bool) (data.HeaderHandler, error) { - // TODO: add PrevRandSeed and 
RandSeed when BLS signing is completed header := &block.Header{ MiniBlockHeaders: make([]block.MiniBlockHeader, 0), RootHash: sp.getRootHash(), diff --git a/process/factory/metachain/interceptorsContainerFactory.go b/process/factory/metachain/interceptorsContainerFactory.go index 2733f4094e7..629e74f4111 100644 --- a/process/factory/metachain/interceptorsContainerFactory.go +++ b/process/factory/metachain/interceptorsContainerFactory.go @@ -15,15 +15,15 @@ import ( ) type interceptorsContainerFactory struct { - marshalizer marshal.Marshalizer - hasher hashing.Hasher - store dataRetriever.StorageService - dataPool dataRetriever.MetaPoolsHolder - shardCoordinator sharding.Coordinator - nodesCoordinator sharding.NodesCoordinator - messenger process.TopicHandler - multiSigner crypto.MultiSigner - tpsBenchmark *statistics.TpsBenchmark + marshalizer marshal.Marshalizer + hasher hashing.Hasher + store dataRetriever.StorageService + dataPool dataRetriever.MetaPoolsHolder + shardCoordinator sharding.Coordinator + nodesCoordinator sharding.NodesCoordinator + messenger process.TopicHandler + multiSigner crypto.MultiSigner + tpsBenchmark *statistics.TpsBenchmark } // NewInterceptorsContainerFactory is responsible for creating a new interceptors factory object @@ -64,14 +64,14 @@ func NewInterceptorsContainerFactory( } return &interceptorsContainerFactory{ - shardCoordinator: shardCoordinator, - nodesCoordinator: nodesCoordinator, - messenger: messenger, - store: store, - marshalizer: marshalizer, - hasher: hasher, - multiSigner: multiSigner, - dataPool: dataPool, + shardCoordinator: shardCoordinator, + nodesCoordinator: nodesCoordinator, + messenger: messenger, + store: store, + marshalizer: marshalizer, + hasher: hasher, + multiSigner: multiSigner, + dataPool: dataPool, }, nil } @@ -128,6 +128,7 @@ func (icf *interceptorsContainerFactory) generateMetablockInterceptor() ([]strin icf.multiSigner, icf.hasher, icf.shardCoordinator, + icf.nodesCoordinator, ) if err != nil { return nil, nil, err diff --git a/process/factory/shard/interceptorsContainerFactory.go b/process/factory/shard/interceptorsContainerFactory.go index 67732b7a3b9..1c2d5d19184 100644 --- a/process/factory/shard/interceptorsContainerFactory.go +++ b/process/factory/shard/interceptorsContainerFactory.go @@ -16,17 +16,17 @@ import ( ) type interceptorsContainerFactory struct { - shardCoordinator sharding.Coordinator - messenger process.TopicHandler - store dataRetriever.StorageService - marshalizer marshal.Marshalizer - hasher hashing.Hasher - keyGen crypto.KeyGenerator - singleSigner crypto.SingleSigner - multiSigner crypto.MultiSigner - dataPool dataRetriever.PoolsHolder - addrConverter state.AddressConverter - nodesCoordinator sharding.NodesCoordinator + shardCoordinator sharding.Coordinator + messenger process.TopicHandler + store dataRetriever.StorageService + marshalizer marshal.Marshalizer + hasher hashing.Hasher + keyGen crypto.KeyGenerator + singleSigner crypto.SingleSigner + multiSigner crypto.MultiSigner + dataPool dataRetriever.PoolsHolder + addrConverter state.AddressConverter + nodesCoordinator sharding.NodesCoordinator } // NewInterceptorsContainerFactory is responsible for creating a new interceptors factory object @@ -79,17 +79,17 @@ func NewInterceptorsContainerFactory( } return &interceptorsContainerFactory{ - shardCoordinator: shardCoordinator, - nodesCoordinator: nodesCoordinator, - messenger: messenger, - store: store, - marshalizer: marshalizer, - hasher: hasher, - keyGen: keyGen, - singleSigner: singleSigner, - 
multiSigner: multiSigner, - dataPool: dataPool, - addrConverter: addrConverter, + shardCoordinator: shardCoordinator, + nodesCoordinator: nodesCoordinator, + messenger: messenger, + store: store, + marshalizer: marshalizer, + hasher: hasher, + keyGen: keyGen, + singleSigner: singleSigner, + multiSigner: multiSigner, + dataPool: dataPool, + addrConverter: addrConverter, }, nil } @@ -392,6 +392,7 @@ func (icf *interceptorsContainerFactory) generateMetachainHeaderInterceptor() ([ icf.multiSigner, icf.hasher, icf.shardCoordinator, + icf.nodesCoordinator, ) if err != nil { return nil, nil, err diff --git a/sharding/mock/validatorGroupSelectorMock.go b/sharding/mock/NodesCoordinatorMock.go similarity index 53% rename from sharding/mock/validatorGroupSelectorMock.go rename to sharding/mock/NodesCoordinatorMock.go index eca63085408..b5f0ba1e03c 100644 --- a/sharding/mock/validatorGroupSelectorMock.go +++ b/sharding/mock/NodesCoordinatorMock.go @@ -6,14 +6,14 @@ import ( "github.com/ElrondNetwork/elrond-go/sharding" ) -type ValidatorGroupSelectorMock struct { +type NodesCoordinatorMock struct { ComputeValidatorsGroupCalled func([]byte) ([]sharding.Validator, error) GetValidatorsPublicKeysCalled func(randomness []byte) ([]string, error) } -func (vgsm ValidatorGroupSelectorMock) ComputeValidatorsGroup(randomness []byte) (validatorsGroup []sharding.Validator, err error) { - if vgsm.ComputeValidatorsGroupCalled != nil { - return vgsm.ComputeValidatorsGroupCalled(randomness) +func (ncm NodesCoordinatorMock) ComputeValidatorsGroup(randomness []byte) (validatorsGroup []sharding.Validator, err error) { + if ncm.ComputeValidatorsGroupCalled != nil { + return ncm.ComputeValidatorsGroupCalled(randomness) } list := []sharding.Validator{ @@ -31,12 +31,12 @@ func (vgsm ValidatorGroupSelectorMock) ComputeValidatorsGroup(randomness []byte) return list, nil } -func (vgsm ValidatorGroupSelectorMock) GetValidatorsPublicKeys(randomness []byte) ([]string, error) { - if vgsm.GetValidatorsPublicKeysCalled != nil { - return vgsm.GetValidatorsPublicKeysCalled(randomness) +func (ncm NodesCoordinatorMock) GetValidatorsPublicKeys(randomness []byte) ([]string, error) { + if ncm.GetValidatorsPublicKeysCalled != nil { + return ncm.GetValidatorsPublicKeysCalled(randomness) } - validators, err := vgsm.ComputeValidatorsGroup(randomness) + validators, err := ncm.ComputeValidatorsGroup(randomness) if err != nil { return nil, err } @@ -50,18 +50,18 @@ func (vgsm ValidatorGroupSelectorMock) GetValidatorsPublicKeys(randomness []byte return pubKeys, nil } -func (vgsm ValidatorGroupSelectorMock) ConsensusGroupSize() int { +func (ncm NodesCoordinatorMock) ConsensusGroupSize() int { panic("implement me") } -func (vgsm ValidatorGroupSelectorMock) LoadNodesPerShards(map[uint32][]sharding.Validator) error { +func (ncm NodesCoordinatorMock) LoadNodesPerShards(map[uint32][]sharding.Validator) error { return nil } -func (vgsm ValidatorGroupSelectorMock) SetConsensusGroupSize(int) error { +func (ncm NodesCoordinatorMock) SetConsensusGroupSize(int) error { panic("implement me") } -func (vgsm ValidatorGroupSelectorMock) GetSelectedPublicKeys(selection []byte) (publicKeys []string, err error) { +func (ncm NodesCoordinatorMock) GetSelectedPublicKeys(selection []byte) (publicKeys []string, err error) { panic("implement me") } diff --git a/sharding/nodesSetup_test.go b/sharding/nodesSetup_test.go index 15f7a1da3ec..4c327c4194b 100644 --- a/sharding/nodesSetup_test.go +++ b/sharding/nodesSetup_test.go @@ -4,8 +4,8 @@ import ( "encoding/hex" "testing" - 
"github.com/stretchr/testify/assert" "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/stretchr/testify/assert" ) func createNodesSetupOneShardOneNode() *sharding.NodesSetup { From 37e39fcff61f563177cd01907e286be6faadc43e Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Fri, 12 Jul 2019 10:14:41 +0300 Subject: [PATCH 026/234] process: fix meta block header interceptor - copy the header not intercepted object fix race on isSynchronized --- integrationTests/multiShard/block/executingMiniblocks_test.go | 2 +- ntp/syncTime.go | 2 ++ process/block/interceptedMetaBlockHeader.go | 2 +- process/sync/baseSync.go | 4 ++++ 4 files changed, 8 insertions(+), 2 deletions(-) diff --git a/integrationTests/multiShard/block/executingMiniblocks_test.go b/integrationTests/multiShard/block/executingMiniblocks_test.go index 9fd56e85c08..7f73bf59bdd 100644 --- a/integrationTests/multiShard/block/executingMiniblocks_test.go +++ b/integrationTests/multiShard/block/executingMiniblocks_test.go @@ -156,7 +156,7 @@ func TestShouldProcessBlocksInMultiShardArchitecture(t *testing.T) { time.Sleep(time.Second * 5) fmt.Println(makeDisplayTable(nodes)) - _, metaHeader = proposeMetaBlock(t, metaNode, uint32(3)) + _, metaHeader = proposeMetaBlock(t, metaNode, uint32(4)) metaNode.broadcastMessenger.BroadcastBlock(nil, metaHeader) metaNode.blkProcessor.CommitBlock(metaNode.blkc, metaHeader, &block.MetaBlockBody{}) fmt.Println("Delaying for disseminating meta header...") diff --git a/ntp/syncTime.go b/ntp/syncTime.go index 2fe9390431f..76ac6403079 100644 --- a/ntp/syncTime.go +++ b/ntp/syncTime.go @@ -150,5 +150,7 @@ func (s *syncTime) formatTime(time time.Time) string { // CurrentTime method gets the current time on which is added the current offset func (s *syncTime) CurrentTime() time.Time { + s.mut.RLock() + defer s.mut.RUnlock() return time.Now().Add(s.clockOffset) } diff --git a/process/block/interceptedMetaBlockHeader.go b/process/block/interceptedMetaBlockHeader.go index 227fd9a3334..bf0c94a89c1 100644 --- a/process/block/interceptedMetaBlockHeader.go +++ b/process/block/interceptedMetaBlockHeader.go @@ -132,7 +132,7 @@ func (imh *InterceptedMetaHeader) VerifySig() error { // get marshalled block header without signature and bitmap // as this is the message that was signed - headerCopy := *imh + headerCopy := *imh.MetaBlock headerCopy.Signature = nil headerCopy.PubKeysBitmap = nil diff --git a/process/sync/baseSync.go b/process/sync/baseSync.go index d0a949267f6..9ee617a4c7c 100644 --- a/process/sync/baseSync.go +++ b/process/sync/baseSync.go @@ -53,6 +53,7 @@ type baseBootstrap struct { chStopSync chan bool waitTime time.Duration + mutNodeSynched sync.RWMutex isNodeSynchronized bool hasLastBlock bool roundIndex int32 @@ -397,6 +398,9 @@ func (boot *baseBootstrap) waitForHeaderNonce() error { // is not synchronized yet and it has to continue the bootstrapping mechanism, otherwise the node is already // synched and it can participate to the consensus, if it is in the jobDone group of this rounder func (boot *baseBootstrap) ShouldSync() bool { + boot.mutNodeSynched.Lock() + defer boot.mutNodeSynched.Unlock() + isNodeSynchronizedInCurrentRound := boot.roundIndex == boot.rounder.Index() && boot.isNodeSynchronized if isNodeSynchronizedInCurrentRound { return false From c8f3be5646f5ca84694ef8f1e05ac8b53c641c05 Mon Sep 17 00:00:00 2001 From: Robert Sasu Date: Mon, 15 Jul 2019 11:00:43 +0300 Subject: [PATCH 027/234] merge with master and fix tests. 
--- cmd/node/factory/structs.go | 1 + .../mock/specialAddressHandlerMock.go | 31 ++++++++++++++++ .../smartContract/executingSCCalls_test.go | 37 +++++++++---------- .../smartContract/testInitilalizer.go | 8 ++++ ...rmediateProcessorsContainerFactory_test.go | 1 + process/unsigned/feeTxHandler.go | 8 ++++ 6 files changed, 66 insertions(+), 20 deletions(-) create mode 100644 integrationTests/mock/specialAddressHandlerMock.go diff --git a/cmd/node/factory/structs.go b/cmd/node/factory/structs.go index af44148f10d..7e89e56df5c 100644 --- a/cmd/node/factory/structs.go +++ b/cmd/node/factory/structs.go @@ -1282,6 +1282,7 @@ func newShardBlockProcessorAndTracker( core.Hasher, state.AddressConverter, specialAddressHolder, + data.Store, ) if err != nil { return nil, nil, err diff --git a/integrationTests/mock/specialAddressHandlerMock.go b/integrationTests/mock/specialAddressHandlerMock.go new file mode 100644 index 00000000000..734df59496d --- /dev/null +++ b/integrationTests/mock/specialAddressHandlerMock.go @@ -0,0 +1,31 @@ +package mock + +type SpecialAddressHandlerMock struct { + ElrondCommunityAddressCalled func() []byte + OwnAddressCalled func() []byte + ShardIdForAddressCalled func([]byte) uint32 +} + +func (sh *SpecialAddressHandlerMock) ElrondCommunityAddress() []byte { + if sh.ElrondCommunityAddressCalled == nil { + return []byte("elrond") + } + + return sh.ElrondCommunityAddressCalled() +} + +func (sh *SpecialAddressHandlerMock) LeaderAddress() []byte { + if sh.OwnAddressCalled == nil { + return []byte("leader") + } + + return sh.OwnAddressCalled() +} + +func (sh *SpecialAddressHandlerMock) ShardIdForAddress(addr []byte) uint32 { + if sh.ShardIdForAddressCalled == nil { + return 0 + } + + return sh.ShardIdForAddressCalled(addr) +} diff --git a/integrationTests/multiShard/smartContract/executingSCCalls_test.go b/integrationTests/multiShard/smartContract/executingSCCalls_test.go index 9223781f19d..086a3a1be05 100644 --- a/integrationTests/multiShard/smartContract/executingSCCalls_test.go +++ b/integrationTests/multiShard/smartContract/executingSCCalls_test.go @@ -23,7 +23,7 @@ var initialValueForInternalVariable = uint64(45) func createScCallsNodes() (p2p.Messenger, []*testNode) { advertiser := createMessengerWithKadDht(context.Background(), "") - advertiser.Bootstrap() + _ = advertiser.Bootstrap() nodes := createNodes( 2, @@ -102,9 +102,9 @@ func TestProcessSCCallsInMultiShardArchitecture_FirstShard(t *testing.T) { advertiser, nodes := createScCallsNodes() defer func() { - advertiser.Close() + _ = advertiser.Close() for _, n := range nodes { - n.node.Stop() + _ = n.node.Stop() } }() @@ -183,9 +183,9 @@ func TestProcessSCCallsInMultiShardArchitecture_FirstShardReceivesCallFromSecond advertiser, nodes := createScCallsNodes() defer func() { - advertiser.Close() + _ = advertiser.Close() for _, n := range nodes { - n.node.Stop() + _ = n.node.Stop() } }() @@ -239,9 +239,9 @@ func TestProcessSCCallsInMultiShardArchitecture_FirstShardReceivesCallFromSecond // Test again that the gas for calling the smart contract was substracted from the sender's account acc, _ = proposerNodeShard2.node.GetAccount(hex.EncodeToString(secondShardAddressBytes)) - // TODO: Afrer fees are implemented, from mintingValue we should substract gasLimit + fees until the other shard executes - // the smart contract and a refund can be made with the remaining value the following rounds - assert.Equal(t, mintingValue, acc.Balance) + + afterFee := big.NewInt(0).Sub(mintingValue, 
big.NewInt(0).SetUint64(contractCallTx.GasLimit*contractCallTx.GasPrice)) + assert.Equal(t, afterFee, acc.Balance) assert.Equal(t, receiverNonce, acc.Nonce) receiverNonce++ @@ -257,7 +257,7 @@ func TestProcessSCCallsInMultiShardArchitecture_FirstShardReceivesCallFromSecond storedVal, _ := scAccount.DataTrieTracker().RetrieveValue([]byte("a")) storedValBI := big.NewInt(0).SetBytes(storedVal) - assert.Equal(t, big.NewInt(int64(initialValueForInternalVariable + addValue)), storedValBI) + assert.Equal(t, big.NewInt(int64(initialValueForInternalVariable+addValue)), storedValBI) } // Test within a network of two shards the following situation @@ -280,9 +280,9 @@ func TestProcessSCCallsInMultiShardArchitecture_FirstShardReceivesCallFromSecond advertiser, nodes := createScCallsNodes() defer func() { - advertiser.Close() + _ = advertiser.Close() for _, n := range nodes { - n.node.Stop() + _ = n.node.Stop() } }() @@ -385,14 +385,13 @@ func processAndTestSmartContractCallInSender( // Test again that the gas for calling the smart contract was substracted from the sender's account acc, _ := proposerNodeShardAccount.node.GetAccount(hex.EncodeToString(accountShardAddressBytes)) - // TODO: Afrer fees are implemented, from mintingValue we should substract gasLimit + fees until the other shard executes - // the smart contract and a refund can be made with the remaining value the following rounds - assert.Equal(t, mintingValue, acc.Balance) + afterFee := big.NewInt(0).Sub(mintingValue, big.NewInt(0).SetUint64(contractCallTx.GasLimit*contractCallTx.GasPrice)) + assert.Equal(t, afterFee, acc.Balance) assert.Equal(t, scNonce, acc.Nonce) } func processAndTestSmartContractCallInDestination(t *testing.T, contractCallTx *transaction.Transaction, - proposerNodeShardSC *testNode, scDeploymentAdddress []byte, scShard, accShard uint32, scNonce uint64, ) { + proposerNodeShardSC *testNode, scDeploymentAdddress []byte, scShard, accShard uint32, scNonce uint64) { txBytes, _ := testMarshalizer.Marshal(contractCallTx) txHash := testHasher.Compute(string(txBytes)) blockBody := block.Body{ @@ -400,7 +399,7 @@ func processAndTestSmartContractCallInDestination(t *testing.T, contractCallTx * TxHashes: [][]byte{txHash}, ReceiverShardID: scShard, SenderShardID: accShard, - Type: block.TxBlock, + Type: block.TxBlock, }, } // Before processing make sure to add the tx into the pool of the scShard @@ -436,7 +435,7 @@ func processAndTestIntermediateResults(t *testing.T, proposerNodeShardSC *testNo _ = testMarshalizer.Unmarshal(tx, txBytes) // Now execute transaction back into the account shard - proposerNodeShardAccount.txProcessor.ProcessTransaction(tx, generalRoundNumber) + _ = proposerNodeShardAccount.txProcessor.ProcessTransaction(tx, generalRoundNumber) generalRoundNumber++ } _, err := proposerNodeShardAccount.accntState.Commit() @@ -444,9 +443,7 @@ func processAndTestIntermediateResults(t *testing.T, proposerNodeShardSC *testNo // After execution, the first account that started the interaction with the smart contract should have: // - Initial balance + withdraw value - fees - // TODO: Fees and gas should be taken into consideration when the fees are implemented - now we have extra money - // from the gas returned since the gas was not substracted in the first place - finalValue := big.NewInt(0).Add(mintingValue, big.NewInt(int64(withdrawValue + uint64(gasLimit - 1*gasPrice)))) + finalValue := big.NewInt(0).Add(mintingValue, big.NewInt(int64(withdrawValue-1))) acc, _ := 
proposerNodeShardAccount.node.GetAccount(hex.EncodeToString(accountShardAddressBytes)) assert.Equal(t, finalValue, acc.Balance) } diff --git a/integrationTests/multiShard/smartContract/testInitilalizer.go b/integrationTests/multiShard/smartContract/testInitilalizer.go index cdedb32e624..c385858b23e 100644 --- a/integrationTests/multiShard/smartContract/testInitilalizer.go +++ b/integrationTests/multiShard/smartContract/testInitilalizer.go @@ -269,10 +269,13 @@ func createNetNode( testMarshalizer, testHasher, testAddressConverter, + &mock.SpecialAddressHandlerMock{}, store, ) interimProcContainer, _ := interimProcFactory.Create() scForwarder, _ := interimProcContainer.Get(dataBlock.SmartContractResultBlock) + txFeeInter, _ := interimProcContainer.Get(dataBlock.TxFeeBlock) + txFeeHandler, _ := txFeeInter.(process.UnsignedTxHandler) vm, blockChainHook := createVMAndBlockchainHook(accntAdapter) argsParser, _ := smartContract.NewAtArgumentParser() @@ -286,8 +289,11 @@ func createNetNode( addrConv, shardCoordinator, scForwarder, + txFeeHandler, ) + txTypeHandler, _ := coordinator.NewTxTypeHandler(addrConv, shardCoordinator, accntAdapter) + txProcessor, _ := transaction.NewTxProcessor( accntAdapter, testHasher, @@ -295,6 +301,8 @@ func createNetNode( testMarshalizer, shardCoordinator, scProcessor, + txFeeHandler, + txTypeHandler, ) fact, _ := shard.NewPreProcessorsContainerFactory( diff --git a/process/factory/shard/intermediateProcessorsContainerFactory_test.go b/process/factory/shard/intermediateProcessorsContainerFactory_test.go index a3b1c4a7388..f002f83dfe5 100644 --- a/process/factory/shard/intermediateProcessorsContainerFactory_test.go +++ b/process/factory/shard/intermediateProcessorsContainerFactory_test.go @@ -79,6 +79,7 @@ func TestNewIntermediateProcessorsContainerFactory_NilStorer(t *testing.T) { &mock.MarshalizerMock{}, &mock.HasherMock{}, &mock.AddressConverterMock{}, + &mock.SpecialAddressHandlerMock{}, nil, ) diff --git a/process/unsigned/feeTxHandler.go b/process/unsigned/feeTxHandler.go index 5876ab9de4a..26f22a0bf43 100644 --- a/process/unsigned/feeTxHandler.go +++ b/process/unsigned/feeTxHandler.go @@ -52,10 +52,18 @@ func NewFeeTxHandler( return ftxh, nil } +// SaveCurrentIntermediateTxToStorage saves current cached data into storage - already saaved for txs +func (ftxh *feeTxHandler) SaveCurrentIntermediateTxToStorage() error { + //TODO implement me - save only created feeTxs + return nil +} + +// AddIntermediateTransactions adds intermediate transactions to local cache func (ftxh *feeTxHandler) AddIntermediateTransactions(txs []data.TransactionHandler) error { return nil } +// CreateAllInterMiniBlocks creates miniblocks from process transactions func (ftxh *feeTxHandler) CreateAllInterMiniBlocks() map[uint32]*block.MiniBlock { calculatedFeeTxs := ftxh.CreateAllUTxs() From ce45b23642a74f2a8c489fa5d5b2ed6779c67c88 Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Thu, 18 Jul 2019 09:48:33 +0900 Subject: [PATCH 028/234] cmd, node, process: Enable interceptor signature verification --- cmd/node/factory/structs.go | 2 + cmd/node/main.go | 125 ++++++++++++++++-- consensus/spos/bls/subroundEndRound.go | 1 + go.sum | 3 +- .../block/interceptedRequestHdr_test.go | 3 +- node/node.go | 1 + process/block/interceptedBlockHeader.go | 11 +- process/block/interceptedBlockHeader_test.go | 1 + .../interceptors/headerInterceptorBase.go | 25 ++-- .../headerInterceptorBase_test.go | 5 +- .../interceptors/headerInterceptor_test.go | 9 +- .../metablock/shardHeaderInterceptor_test.go | 6 +- 12 files 
changed, 155 insertions(+), 37 deletions(-) diff --git a/cmd/node/factory/structs.go b/cmd/node/factory/structs.go index e9edb2c25bc..40e0e225e52 100644 --- a/cmd/node/factory/structs.go +++ b/cmd/node/factory/structs.go @@ -414,6 +414,7 @@ func NewProcessComponentsFactoryArgs( nodesConfig *sharding.NodesSetup, syncer ntp.SyncTimer, shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, data *Data, core *Core, crypto *Crypto, @@ -426,6 +427,7 @@ func NewProcessComponentsFactoryArgs( nodesConfig: nodesConfig, syncer: syncer, shardCoordinator: shardCoordinator, + nodesCoordinator: nodesCoordinator, data: data, core: core, crypto: crypto, diff --git a/cmd/node/main.go b/cmd/node/main.go index c8a0709a991..71ae838b781 100644 --- a/cmd/node/main.go +++ b/cmd/node/main.go @@ -6,6 +6,7 @@ import ( "fmt" "io/ioutil" "math" + "math/big" "net/http" "os" "os/signal" @@ -19,6 +20,7 @@ import ( "github.com/ElrondNetwork/elrond-go/cmd/node/factory" "github.com/ElrondNetwork/elrond-go/config" + "github.com/ElrondNetwork/elrond-go/consensus" "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/core/indexer" "github.com/ElrondNetwork/elrond-go/core/logger" @@ -425,6 +427,15 @@ func startNode(ctx *cli.Context, log *logger.Logger, version string) error { return err } + nodesCoordinator, err := createNodesCoordinator( + nodesConfig, + generalConfig.GeneralSettings, + pubKey, + coreComponents.Hasher) + if err != nil { + return err + } + stateArgs := factory.NewStateComponentsFactoryArgs(generalConfig, genesisConfig, shardCoordinator, coreComponents) stateComponents, err := factory.StateComponentsFactory(stateArgs) if err != nil { @@ -442,8 +453,18 @@ func startNode(ctx *cli.Context, log *logger.Logger, version string) error { return err } - cryptoArgs := factory.NewCryptoComponentsFactoryArgs(ctx, generalConfig, nodesConfig, shardCoordinator, keyGen, - privKey, log, initialBalancesSkPemFile.Name, txSignSk.Name, txSignSkIndex.Name) + cryptoArgs := factory.NewCryptoComponentsFactoryArgs( + ctx, + generalConfig, + nodesConfig, + shardCoordinator, + keyGen, + privKey, + log, + initialBalancesSkPemFile.Name, + txSignSk.Name, + txSignSkIndex.Name, + ) cryptoComponents, err := factory.CryptoComponentsFactory(cryptoArgs) if err != nil { return err @@ -478,8 +499,19 @@ func startNode(ctx *cli.Context, log *logger.Logger, version string) error { } } - processArgs := factory.NewProcessComponentsFactoryArgs(genesisConfig, nodesConfig, syncer, shardCoordinator, - dataComponents, coreComponents, cryptoComponents, stateComponents, networkComponents, coreServiceContainer) + processArgs := factory.NewProcessComponentsFactoryArgs( + genesisConfig, + nodesConfig, + syncer, + shardCoordinator, + nodesCoordinator, + dataComponents, + coreComponents, + cryptoComponents, + stateComponents, + networkComponents, + coreServiceContainer, + ) processComponents, err := factory.ProcessComponentsFactory(processArgs) if err != nil { return err @@ -604,23 +636,31 @@ func loadMainConfig(filepath string, log *logger.Logger) (*config.Config, error) return cfg, nil } -func createShardCoordinator( - nodesConfig *sharding.NodesSetup, - pubKey crypto.PublicKey, - settingsConfig config.GeneralSettingsConfig, - log *logger.Logger, -) (shardCoordinator sharding.Coordinator, - err error) { +func getShardIdFromNodePubKey(pubKey crypto.PublicKey, nodesConfig *sharding.NodesSetup) (uint32, error) { if pubKey == nil { - return nil, errors.New("nil public key, could not create shard coordinator") + 
return 0, errors.New("nil public key") } publicKey, err := pubKey.ToByteArray() if err != nil { - return nil, err + return 0, err } selfShardId, err := nodesConfig.GetShardIDForPubKey(publicKey) + if err != nil { + return 0, err + } + + return selfShardId, err +} + +func createShardCoordinator( + nodesConfig *sharding.NodesSetup, + pubKey crypto.PublicKey, + settingsConfig config.GeneralSettingsConfig, + log *logger.Logger, +) (shardCoordinator sharding.Coordinator, err error) { + selfShardId, err := getShardIdFromNodePubKey(pubKey, nodesConfig) if err == sharding.ErrNoValidPublicKey { log.Info("Starting as observer node...") selfShardId, err = processDestinationShardAsObserver(settingsConfig) @@ -645,6 +685,65 @@ func createShardCoordinator( return shardCoordinator, nil } +func createNodesCoordinator( + nodesConfig *sharding.NodesSetup, + settingsConfig config.GeneralSettingsConfig, + pubKey crypto.PublicKey, + hasher hashing.Hasher, +) (sharding.NodesCoordinator, error) { + + shardId, err := getShardIdFromNodePubKey(pubKey, nodesConfig) + if err == sharding.ErrNoValidPublicKey { + shardId, err = processDestinationShardAsObserver(settingsConfig) + } + if err != nil { + return nil, err + } + + var consensusGroupSize int + nbShards := nodesConfig.NumberOfShards() + + if shardId == sharding.MetachainShardId { + consensusGroupSize = int(nodesConfig.MetaChainConsensusGroupSize) + } else { + consensusGroupSize = int(nodesConfig.ConsensusGroupSize) + } + + nodesCoordinator, err := sharding.NewIndexHashedNodesCoordinator( + consensusGroupSize, + hasher, + shardId, + nbShards, + ) + if err != nil { + return nil, err + } + + initNodesPubKeys := nodesConfig.InitialNodesPubKeys() + initValidators := make(map[uint32][]sharding.Validator) + + for shardId, pubKeyList := range initNodesPubKeys { + validators := make([]sharding.Validator, 0) + for _, pubKey := range pubKeyList { + // TODO: the stake needs to be associated to the staking account + validator, err := consensus.NewValidator(big.NewInt(0), 0, []byte(pubKey)) + if err != nil { + return nil, err + } + + validators = append(validators, validator) + } + initValidators[shardId] = validators + } + + err = nodesCoordinator.LoadNodesPerShards(initValidators) + if err != nil { + return nil, err + } + + return nodesCoordinator, nil +} + func processDestinationShardAsObserver(settingsConfig config.GeneralSettingsConfig) (uint32, error) { destShard := strings.ToLower(settingsConfig.DestinationShardAsObserver) if len(destShard) == 0 { diff --git a/consensus/spos/bls/subroundEndRound.go b/consensus/spos/bls/subroundEndRound.go index cfc59ac2098..6de10e4ed93 100644 --- a/consensus/spos/bls/subroundEndRound.go +++ b/consensus/spos/bls/subroundEndRound.go @@ -5,6 +5,7 @@ import ( "time" "github.com/ElrondNetwork/elrond-go/consensus/spos" + "github.com/ElrondNetwork/elrond-go/core" ) type subroundEndRound struct { diff --git a/go.sum b/go.sum index 6de883bf7a4..ff57dfb87a4 100644 --- a/go.sum +++ b/go.sum @@ -77,7 +77,6 @@ github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zV github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.0 h1:kbxbvI4Un1LUWKxufD+BiE6AEExYYgkQLQmLFqA1LFk= github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0= 
github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -135,8 +134,8 @@ github.com/jbenet/goprocess v0.1.3 h1:YKyIEECS/XvcfHtBzxtjBBbWK+MbvA6dG8ASiqwvr1 github.com/jbenet/goprocess v0.1.3/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= -github.com/json-iterator/go v1.1.5 h1:gL2yXlmiIo4+t+y32d4WGwOjKGYcGOuyrg46vadswDE= github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= diff --git a/integrationTests/singleShard/block/interceptedRequestHdr_test.go b/integrationTests/singleShard/block/interceptedRequestHdr_test.go index d5d05412b0c..2531834a33e 100644 --- a/integrationTests/singleShard/block/interceptedRequestHdr_test.go +++ b/integrationTests/singleShard/block/interceptedRequestHdr_test.go @@ -128,9 +128,10 @@ func TestNode_GenerateSendInterceptHeaderByNonceWithNetMessenger(t *testing.T) { } hdrBuff, _ := marshalizer.Marshal(&hdr1) + hdrHash := hasher.Compute(string(hdrBuff)) msig, _ := multiSigner.Create(pubKeyMap[0], 0) bitmap := []byte{1, 0, 0} - _, _ = msig.CreateSignatureShare(hdrBuff, bitmap) + _, _ = msig.CreateSignatureShare(hdrHash, bitmap) aggSig, _ := msig.AggregateSigs(bitmap) hdr1.PubKeysBitmap = bitmap diff --git a/node/node.go b/node/node.go index 0e8d2735aa5..507068a2ce4 100644 --- a/node/node.go +++ b/node/node.go @@ -88,6 +88,7 @@ type Node struct { metaDataPool dataRetriever.MetaPoolsHolder store dataRetriever.StorageService shardCoordinator sharding.Coordinator + nodesCoordinator sharding.NodesCoordinator consensusTopic string consensusType string diff --git a/process/block/interceptedBlockHeader.go b/process/block/interceptedBlockHeader.go index d1b727466a4..0244089d5b6 100644 --- a/process/block/interceptedBlockHeader.go +++ b/process/block/interceptedBlockHeader.go @@ -3,6 +3,7 @@ package block import ( "github.com/ElrondNetwork/elrond-go/crypto" "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/hashing" "github.com/ElrondNetwork/elrond-go/marshal" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/sharding" @@ -16,6 +17,7 @@ type InterceptedHeader struct { hash []byte nodesCoordinator sharding.NodesCoordinator marshalizer marshal.Marshalizer + hasher hashing.Hasher } // NewInterceptedHeader creates a new instance of InterceptedHeader struct @@ -23,6 +25,7 @@ func NewInterceptedHeader( multiSigVerifier crypto.MultiSigVerifier, nodesCoordinator sharding.NodesCoordinator, marshalizer marshal.Marshalizer, + hasher hashing.Hasher, ) *InterceptedHeader { return &InterceptedHeader{ @@ -30,6 +33,7 @@ func NewInterceptedHeader( multiSigVerifier: multiSigVerifier, nodesCoordinator: nodesCoordinator, marshalizer: marshalizer, + hasher: hasher, } } @@ -128,6 +132,10 @@ func (inHdr *InterceptedHeader) VerifySig() error { return err } + for i, pubKey := range consensusPubKeys { + 
log.Info(fmt.Sprintf("[%d]: %s\n", i, core.ToHex([]byte(pubKey)))) + } + verifier, err := inHdr.multiSigVerifier.Create(consensusPubKeys, 0) if err != nil { return err @@ -149,7 +157,8 @@ func (inHdr *InterceptedHeader) VerifySig() error { return err } - err = verifier.Verify(headerBytes, bitmap) + hash := inHdr.hasher.Compute(string(headerBytes)) + err = verifier.Verify(hash, bitmap) return err } diff --git a/process/block/interceptedBlockHeader_test.go b/process/block/interceptedBlockHeader_test.go index be738d67779..dc99d3b8ea5 100644 --- a/process/block/interceptedBlockHeader_test.go +++ b/process/block/interceptedBlockHeader_test.go @@ -16,6 +16,7 @@ func createTestInterceptedHeader() *block.InterceptedHeader { mock.NewMultiSigner(), &mock.NodesCoordinatorMock{}, &mock.MarshalizerMock{Fail: false}, + mock.HasherMock{}, ) } diff --git a/process/block/interceptors/headerInterceptorBase.go b/process/block/interceptors/headerInterceptorBase.go index 50fd40571f8..b3888709c13 100644 --- a/process/block/interceptors/headerInterceptorBase.go +++ b/process/block/interceptors/headerInterceptorBase.go @@ -13,12 +13,12 @@ import ( // HeaderInterceptorBase is the "abstract class" extended in HeaderInterceptor and ShardHeaderInterceptor type HeaderInterceptorBase struct { - marshalizer marshal.Marshalizer - storer storage.Storer - multiSigVerifier crypto.MultiSigVerifier - hasher hashing.Hasher - shardCoordinator sharding.Coordinator - nodesCoordinator sharding.NodesCoordinator + marshalizer marshal.Marshalizer + storer storage.Storer + multiSigVerifier crypto.MultiSigVerifier + hasher hashing.Hasher + shardCoordinator sharding.Coordinator + nodesCoordinator sharding.NodesCoordinator } // NewHeaderInterceptorBase creates a new HeaderIncterceptorBase instance @@ -50,12 +50,12 @@ func NewHeaderInterceptorBase( } hdrIntercept := &HeaderInterceptorBase{ - marshalizer: marshalizer, - storer: storer, - multiSigVerifier: multiSigVerifier, - hasher: hasher, - shardCoordinator: shardCoordinator, - nodesCoordinator: nodesCoordinator, + marshalizer: marshalizer, + storer: storer, + multiSigVerifier: multiSigVerifier, + hasher: hasher, + shardCoordinator: shardCoordinator, + nodesCoordinator: nodesCoordinator, } return hdrIntercept, nil @@ -75,6 +75,7 @@ func (hib *HeaderInterceptorBase) ParseReceivedMessage(message p2p.MessageP2P) ( hib.multiSigVerifier, hib.nodesCoordinator, hib.marshalizer, + hib.hasher, ) err := hib.marshalizer.Unmarshal(hdrIntercepted, message.Data()) if err != nil { diff --git a/process/block/interceptors/headerInterceptorBase_test.go b/process/block/interceptors/headerInterceptorBase_test.go index 7f55c503500..abb2e5a0813 100644 --- a/process/block/interceptors/headerInterceptorBase_test.go +++ b/process/block/interceptors/headerInterceptorBase_test.go @@ -209,6 +209,7 @@ func TestHeaderInterceptorBase_ParseReceivedMessageSanityCheckFailedShouldErr(t t.Parallel() storer := &mock.StorerStub{} + hasher := mock.HasherMock{} marshalizer := &mock.MarshalizerMock{} multisigner := mock.NewMultiSigner() @@ -222,7 +223,7 @@ func TestHeaderInterceptorBase_ParseReceivedMessageSanityCheckFailedShouldErr(t nodesCoordinator, ) - hdr := block.NewInterceptedHeader(multisigner, nodesCoordinator, marshalizer) + hdr := block.NewInterceptedHeader(multisigner, nodesCoordinator, marshalizer, hasher) buff, _ := marshalizer.Marshal(hdr) msg := &mock.P2PMessageMock{ DataField: buff, @@ -279,7 +280,7 @@ func TestHeaderInterceptorBase_ParseReceivedMessageValsOkShouldWork(t *testing.T nodesCoordinator, ) - hdr := 
block.NewInterceptedHeader(multisigner, nodesCoordinator, marshalizer) + hdr := block.NewInterceptedHeader(multisigner, nodesCoordinator, marshalizer, mock.HasherMock{}) hdr.Nonce = testedNonce hdr.ShardId = 0 hdr.PrevHash = make([]byte, 0) diff --git a/process/block/interceptors/headerInterceptor_test.go b/process/block/interceptors/headerInterceptor_test.go index 8e0ab222405..b975771aa42 100644 --- a/process/block/interceptors/headerInterceptor_test.go +++ b/process/block/interceptors/headerInterceptor_test.go @@ -167,6 +167,7 @@ func TestHeaderInterceptor_ProcessReceivedMessageValsOkShouldWork(t *testing.T) marshalizer := &mock.MarshalizerMock{} headers := &mock.CacherStub{} + hasher := mock.HasherMock{} multisigner := mock.NewMultiSigner() headersNonces := &mock.Uint64CacherStub{} headersNonces.HasOrAddCalled = func(u uint64, i interface{}) (b bool, b2 bool) { @@ -196,7 +197,7 @@ func TestHeaderInterceptor_ProcessReceivedMessageValsOkShouldWork(t *testing.T) nodesCoordinator, ) - hdr := block.NewInterceptedHeader(multisigner, nodesCoordinator, marshalizer) + hdr := block.NewInterceptedHeader(multisigner, nodesCoordinator, marshalizer, hasher) hdr.Nonce = testedNonce hdr.ShardId = 0 hdr.PrevHash = make([]byte, 0) @@ -243,6 +244,7 @@ func TestHeaderInterceptor_ProcessReceivedMessageIsInStorageShouldNotAdd(t *test marshalizer := &mock.MarshalizerMock{} headers := &mock.CacherStub{} + hasher := mock.HasherMock{} multisigner := mock.NewMultiSigner() headersNonces := &mock.Uint64CacherStub{} headersNonces.HasOrAddCalled = func(u uint64, i interface{}) (b bool, b2 bool) { @@ -273,7 +275,7 @@ func TestHeaderInterceptor_ProcessReceivedMessageIsInStorageShouldNotAdd(t *test nodesCoordinator, ) - hdr := block.NewInterceptedHeader(multisigner, nodesCoordinator, marshalizer) + hdr := block.NewInterceptedHeader(multisigner, nodesCoordinator, marshalizer, hasher) hdr.Nonce = testedNonce hdr.ShardId = 0 hdr.PrevHash = make([]byte, 0) @@ -315,6 +317,7 @@ func TestHeaderInterceptor_ProcessReceivedMessageNotForCurrentShardShouldNotAdd( marshalizer := &mock.MarshalizerMock{} headers := &mock.CacherStub{} + hasher := mock.HasherMock{} multisigner := mock.NewMultiSigner() headersNonces := &mock.Uint64CacherStub{} headersNonces.HasOrAddCalled = func(u uint64, i interface{}) (b bool, b2 bool) { @@ -352,7 +355,7 @@ func TestHeaderInterceptor_ProcessReceivedMessageNotForCurrentShardShouldNotAdd( nodesCoordinator, ) - hdr := block.NewInterceptedHeader(multisigner, nodesCoordinator, marshalizer) + hdr := block.NewInterceptedHeader(multisigner, nodesCoordinator, marshalizer, hasher) hdr.Nonce = testedNonce hdr.ShardId = 0 hdr.PrevHash = make([]byte, 0) diff --git a/process/metablock/shardHeaderInterceptor_test.go b/process/metablock/shardHeaderInterceptor_test.go index 641ef867763..cd9d803309b 100644 --- a/process/metablock/shardHeaderInterceptor_test.go +++ b/process/metablock/shardHeaderInterceptor_test.go @@ -131,7 +131,7 @@ func TestShardHeaderInterceptor_ProcessReceivedMessageValsOkShouldWork(t *testin nodesCoordinator, ) - hdr := block.NewInterceptedHeader(multisigner, nodesCoordinator, marshalizer) + hdr := block.NewInterceptedHeader(multisigner, nodesCoordinator, marshalizer, mock.HasherMock{}) hdr.Nonce = testedNonce hdr.ShardId = 0 hdr.PrevHash = make([]byte, 0) @@ -191,7 +191,7 @@ func TestShardHeaderInterceptor_ProcessReceivedMessageTestHdrNonces(t *testing.T nodesCoordinator, ) - hdr := block.NewInterceptedHeader(multisigner, nodesCoordinator, marshalizer) + hdr := 
block.NewInterceptedHeader(multisigner, nodesCoordinator, marshalizer, mock.HasherMock{}) hdr.Nonce = testedNonce hdr.ShardId = 0 hdr.PrevHash = make([]byte, 0) @@ -257,7 +257,7 @@ func TestShardHeaderInterceptor_ProcessReceivedMessageIsInStorageShouldNotAdd(t nodesCoordinator, ) - hdr := block.NewInterceptedHeader(multisigner, nodesCoordinator, marshalizer) + hdr := block.NewInterceptedHeader(multisigner, nodesCoordinator, marshalizer, mock.HasherMock{}) hdr.Nonce = testedNonce hdr.ShardId = 0 hdr.PrevHash = make([]byte, 0) From 21f2d29dec1dfe6469a545541e2ac9f0ab45c2c8 Mon Sep 17 00:00:00 2001 From: Robert Sasu Date: Thu, 18 Jul 2019 09:31:48 +0300 Subject: [PATCH 029/234] started implementation of metachain state --- data/state/factory/peerAccountCreator.go | 1 + data/state/factory/peerAccountCreator_test.go | 1 + data/state/peerAccount.go | 166 ++++++++++++++++++ data/state/peerAccount_test.go | 1 + data/state/peerJournalEntries.go | 1 + data/state/peerJournalEntries_test.go | 1 + 6 files changed, 171 insertions(+) create mode 100644 data/state/factory/peerAccountCreator.go create mode 100644 data/state/factory/peerAccountCreator_test.go create mode 100644 data/state/peerAccount.go create mode 100644 data/state/peerAccount_test.go create mode 100644 data/state/peerJournalEntries.go create mode 100644 data/state/peerJournalEntries_test.go diff --git a/data/state/factory/peerAccountCreator.go b/data/state/factory/peerAccountCreator.go new file mode 100644 index 00000000000..7312cd2e2d8 --- /dev/null +++ b/data/state/factory/peerAccountCreator.go @@ -0,0 +1 @@ +package factory diff --git a/data/state/factory/peerAccountCreator_test.go b/data/state/factory/peerAccountCreator_test.go new file mode 100644 index 00000000000..7312cd2e2d8 --- /dev/null +++ b/data/state/factory/peerAccountCreator_test.go @@ -0,0 +1 @@ +package factory diff --git a/data/state/peerAccount.go b/data/state/peerAccount.go new file mode 100644 index 00000000000..09f2f5fa8b3 --- /dev/null +++ b/data/state/peerAccount.go @@ -0,0 +1,166 @@ +package state + +import ( + "math/big" + + "github.com/ElrondNetwork/elrond-go/data" +) + +// PeerAccount is the struct used in serialization/deserialization +type PeerAccount struct { + BLSPublicKey []byte + SchnorrPublicKey []byte + Address []byte + JailStartTime uint64 + JailEndTime uint64 + CurrentShardId uint32 + NextShardId uint32 + Stake *big.Int + + addressContainer AddressContainer + accountTracker AccountTracker + dataTrieTracker DataTrieTracker +} + +// NewPeerAccount creates new simple account wrapper for an PeerAccountContainer (that has just been initialized) +func NewPeerAccount(addressContainer AddressContainer, tracker AccountTracker) (*PeerAccount, error) { + if addressContainer == nil { + return nil, ErrNilAddressContainer + } + if tracker == nil { + return nil, ErrNilAccountTracker + } + + return &PeerAccount{ + Balance: big.NewInt(0), + addressContainer: addressContainer, + accountTracker: tracker, + dataTrieTracker: NewTrackableDataTrie(nil), + }, nil +} + +// IsInterfaceNil return if there is no value under the interface +func (a *PeerAccount) IsInterfaceNil() bool { + if a == nil { + return true + } + return false +} + +// AddressContainer returns the address associated with the account +func (a *PeerAccount) AddressContainer() AddressContainer { + return a.addressContainer +} + +// SetNonceWithJournal sets the account's nonce, saving the old nonce before changing +func (a *PeerAccount) SetNonceWithJournal(nonce uint64) error { + entry, err := 
NewJournalEntryNonce(a, a.Nonce) + if err != nil { + return err + } + + a.accountTracker.Journalize(entry) + a.Nonce = nonce + + return a.accountTracker.SavePeerAccount(a) +} + +//SetNonce saves the nonce to the account +func (a *PeerAccount) SetNonce(nonce uint64) { + a.Nonce = nonce +} + +// GetNonce gets the nonce of the account +func (a *PeerAccount) GetNonce() uint64 { + return a.Nonce +} + +// SetBalanceWithJournal sets the account's balance, saving the old balance before changing +func (a *PeerAccount) SetBalanceWithJournal(balance *big.Int) error { + entry, err := NewJournalEntryBalance(a, a.Balance) + if err != nil { + return err + } + + a.accountTracker.Journalize(entry) + a.Balance = balance + + return a.accountTracker.SavePeerAccount(a) +} + +//------- code / code hash + +// GetCodeHash returns the code hash associated with this account +func (a *PeerAccount) GetCodeHash() []byte { + return a.CodeHash +} + +// SetCodeHash sets the code hash associated with the account +func (a *PeerAccount) SetCodeHash(codeHash []byte) { + a.CodeHash = codeHash +} + +// SetCodeHashWithJournal sets the account's code hash, saving the old code hash before changing +func (a *PeerAccount) SetCodeHashWithJournal(codeHash []byte) error { + entry, err := NewBaseJournalEntryCodeHash(a, a.CodeHash) + if err != nil { + return err + } + + a.accountTracker.Journalize(entry) + a.CodeHash = codeHash + + return a.accountTracker.SavePeerAccount(a) +} + +// GetCode gets the actual code that needs to be run in the VM +func (a *PeerAccount) GetCode() []byte { + return a.code +} + +// SetCode sets the actual code that needs to be run in the VM +func (a *PeerAccount) SetCode(code []byte) { + a.code = code +} + +//------- data trie / root hash + +// GetRootHash returns the root hash associated with this account +func (a *PeerAccount) GetRootHash() []byte { + return a.RootHash +} + +// SetRootHash sets the root hash associated with the account +func (a *PeerAccount) SetRootHash(roothash []byte) { + a.RootHash = roothash +} + +// SetRootHashWithJournal sets the account's root hash, saving the old root hash before changing +func (a *PeerAccount) SetRootHashWithJournal(rootHash []byte) error { + entry, err := NewBaseJournalEntryRootHash(a, a.RootHash) + if err != nil { + return err + } + + a.accountTracker.Journalize(entry) + a.RootHash = rootHash + + return a.accountTracker.SavePeerAccount(a) +} + +// DataTrie returns the trie that holds the current account's data +func (a *PeerAccount) DataTrie() data.Trie { + return a.dataTrieTracker.DataTrie() +} + +// SetDataTrie sets the trie that holds the current account's data +func (a *PeerAccount) SetDataTrie(trie data.Trie) { + a.dataTrieTracker.SetDataTrie(trie) +} + +// DataTrieTracker returns the trie wrapper used in managing the SC data +func (a *PeerAccount) DataTrieTracker() DataTrieTracker { + return a.dataTrieTracker +} + +//TODO add Cap'N'Proto converter funcs diff --git a/data/state/peerAccount_test.go b/data/state/peerAccount_test.go new file mode 100644 index 00000000000..7bf2df5b486 --- /dev/null +++ b/data/state/peerAccount_test.go @@ -0,0 +1 @@ +package state diff --git a/data/state/peerJournalEntries.go b/data/state/peerJournalEntries.go new file mode 100644 index 00000000000..7bf2df5b486 --- /dev/null +++ b/data/state/peerJournalEntries.go @@ -0,0 +1 @@ +package state diff --git a/data/state/peerJournalEntries_test.go b/data/state/peerJournalEntries_test.go new file mode 100644 index 00000000000..7bf2df5b486 --- /dev/null +++ 
b/data/state/peerJournalEntries_test.go @@ -0,0 +1 @@ +package state From ff723b59f2aa817f81e68882bc5032cdf760595a Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Thu, 18 Jul 2019 20:25:53 +0900 Subject: [PATCH 030/234] fixes after merge, cleanup. --- api/api.go | 2 +- api/middleware/middleware.go | 1 - cmd/node/factory/structs.go | 3 +- cmd/node/main.go | 5 +- consensus/spos/bls/subroundEndRound.go | 1 - core/indexer/data.go | 30 ++-- crypto/interface.go | 2 +- go.mod | 2 +- go.sum | 6 +- .../frontend/wallet/txInterception_test.go | 2 +- .../block/executingMiniblocks_test.go | 4 +- .../smartContract/executingSCCalls_test.go | 63 +++++---- .../smartContract/testInitilalizer.go | 132 ++++++++++++++---- .../transaction/interceptedBulkTx_test.go | 2 +- integrationTests/vm/iele/vmDeploy_test.go | 2 +- .../vm/iele/vmRunContract_test.go | 4 +- integrationTests/vm/testInitializer.go | 5 +- process/block/interceptedBlockHeader.go | 6 +- process/block/interceptedMetaBlockHeader.go | 2 +- .../interceptors/headerInterceptor_test.go | 8 +- .../metachainHeaderInterceptor_test.go | 2 +- process/mock/multiSigMock.go | 3 +- 22 files changed, 178 insertions(+), 109 deletions(-) diff --git a/api/api.go b/api/api.go index 157eee53332..0bdad6282e6 100644 --- a/api/api.go +++ b/api/api.go @@ -6,11 +6,11 @@ import ( "reflect" "strings" - "github.com/ElrondNetwork/elrond-go/api/vmValues" "github.com/ElrondNetwork/elrond-go/api/address" "github.com/ElrondNetwork/elrond-go/api/middleware" "github.com/ElrondNetwork/elrond-go/api/node" "github.com/ElrondNetwork/elrond-go/api/transaction" + "github.com/ElrondNetwork/elrond-go/api/vmValues" "github.com/gin-contrib/cors" "github.com/gin-gonic/gin" "github.com/gin-gonic/gin/binding" diff --git a/api/middleware/middleware.go b/api/middleware/middleware.go index bcfc8a9f8e9..967813bbb58 100644 --- a/api/middleware/middleware.go +++ b/api/middleware/middleware.go @@ -6,7 +6,6 @@ import ( // ElrondHandler interface defines methods that can be used from `elrondFacade` context variable type ElrondHandler interface { - } // WithElrondFacade middleware will set up an ElrondFacade object in the gin context diff --git a/cmd/node/factory/structs.go b/cmd/node/factory/structs.go index a0d0aa612d6..ab10752dc8b 100644 --- a/cmd/node/factory/structs.go +++ b/cmd/node/factory/structs.go @@ -66,7 +66,6 @@ import ( "github.com/ElrondNetwork/elrond-go/storage" "github.com/ElrondNetwork/elrond-go/storage/memorydb" "github.com/ElrondNetwork/elrond-go/storage/storageUnit" - "github.com/ElrondNetwork/elrond-vm/iele/common" "github.com/ElrondNetwork/elrond-vm/iele/elrond/node/endpoint" "github.com/btcsuite/btcd/btcec" libp2pCrypto "github.com/libp2p/go-libp2p-core/crypto" @@ -1330,7 +1329,7 @@ func newShardBlockProcessorAndTracker( //TODO replace this with a vm factory cryptoHook := hooks.NewVMCryptoHook() - ieleVM := endpoint.NewElrondIeleVM(vmAccountsDB, cryptoHook, ielecommon.Danse) + ieleVM := endpoint.NewElrondIeleVM(vmAccountsDB, cryptoHook, endpoint.Danse) scProcessor, err := smartContract.NewSmartContractProcessor( ieleVM, diff --git a/cmd/node/main.go b/cmd/node/main.go index 89921802bb8..6da7042b9ce 100644 --- a/cmd/node/main.go +++ b/cmd/node/main.go @@ -39,7 +39,6 @@ import ( "github.com/ElrondNetwork/elrond-go/process/smartContract/hooks" "github.com/ElrondNetwork/elrond-go/sharding" vmcommon "github.com/ElrondNetwork/elrond-vm-common" - ielecommon "github.com/ElrondNetwork/elrond-vm/iele/common" "github.com/ElrondNetwork/elrond-vm/iele/elrond/node/endpoint" 
"github.com/google/gops/agent" "github.com/pkg/profile" @@ -726,7 +725,7 @@ func createNodesCoordinator( ) (sharding.NodesCoordinator, error) { shardId, err := getShardIdFromNodePubKey(pubKey, nodesConfig) - if err == sharding.ErrNoValidPublicKey { + if err == sharding.ErrPublicKeyNotFoundInGenesis { shardId, err = processDestinationShardAsObserver(settingsConfig) } if err != nil { @@ -1002,7 +1001,7 @@ func startStatisticsMonitor(file *os.File, config config.ResourceStatsConfig, lo func createApiResolver(vmAccountsDB vmcommon.BlockchainHook) (facade.ApiResolver, error) { //TODO replace this with a vm factory cryptoHook := hooks.NewVMCryptoHook() - ieleVM := endpoint.NewElrondIeleVM(vmAccountsDB, cryptoHook, ielecommon.Danse) + ieleVM := endpoint.NewElrondIeleVM(vmAccountsDB, cryptoHook, endpoint.Danse) scDataGetter, err := smartContract.NewSCDataGetter(ieleVM) if err != nil { diff --git a/consensus/spos/bls/subroundEndRound.go b/consensus/spos/bls/subroundEndRound.go index 6de10e4ed93..cfc59ac2098 100644 --- a/consensus/spos/bls/subroundEndRound.go +++ b/consensus/spos/bls/subroundEndRound.go @@ -5,7 +5,6 @@ import ( "time" "github.com/ElrondNetwork/elrond-go/consensus/spos" - "github.com/ElrondNetwork/elrond-go/core" ) type subroundEndRound struct { diff --git a/core/indexer/data.go b/core/indexer/data.go index b3603fc1507..21917298663 100644 --- a/core/indexer/data.go +++ b/core/indexer/data.go @@ -9,21 +9,21 @@ import ( // to be saved for a transaction. It has all the default fields // plus some extra information for ease of search and filter type Transaction struct { - Hash string `json:"hash"` - MBHash string `json:"miniBlockHash"` - BlockHash string `json:"blockHash"` - Nonce uint64 `json:"nonce"` - Value *big.Int `json:"value"` - Receiver string `json:"receiver"` - Sender string `json:"sender"` - ReceiverShard uint32 `json:"receiverShard"` - SenderShard uint32 `json:"senderShard"` - GasPrice uint64 `json:"gasPrice"` - GasLimit uint64 `json:"gasLimit"` - Data string `json:"data"` - Signature string `json:"signature"` - Timestamp time.Duration `json:"timestamp"` - Status string `json:"status"` + Hash string `json:"hash"` + MBHash string `json:"miniBlockHash"` + BlockHash string `json:"blockHash"` + Nonce uint64 `json:"nonce"` + Value *big.Int `json:"value"` + Receiver string `json:"receiver"` + Sender string `json:"sender"` + ReceiverShard uint32 `json:"receiverShard"` + SenderShard uint32 `json:"senderShard"` + GasPrice uint64 `json:"gasPrice"` + GasLimit uint64 `json:"gasLimit"` + Data string `json:"data"` + Signature string `json:"signature"` + Timestamp time.Duration `json:"timestamp"` + Status string `json:"status"` } // Block is a structure containing all the fields that need diff --git a/crypto/interface.go b/crypto/interface.go index 01c44ab4155..94fc7c5fd09 100644 --- a/crypto/interface.go +++ b/crypto/interface.go @@ -184,7 +184,7 @@ type MultiSigVerifier interface { // SetAggregatedSig sets the aggregated signature SetAggregatedSig([]byte) error // Verify verifies the aggregated signature - Verify(msg []byte, bitmap []byte, ) error + Verify(msg []byte, bitmap []byte) error } // LowLevelSignerBLS provides functionality to sign and verify BLS single/multi-signatures diff --git a/go.mod b/go.mod index 68775c43052..b69dae292c8 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.12 require ( github.com/360EntSecGroup-Skylar/excelize v1.4.1 github.com/ElrondNetwork/concurrent-map v0.1.2 - github.com/ElrondNetwork/elrond-vm v0.0.7 + github.com/ElrondNetwork/elrond-vm v0.0.10 
github.com/ElrondNetwork/elrond-vm-common v0.0.5 github.com/beevik/ntp v0.2.0 github.com/boltdb/bolt v1.3.1 diff --git a/go.sum b/go.sum index d2d512dff0c..c1b30b6c072 100644 --- a/go.sum +++ b/go.sum @@ -7,10 +7,8 @@ github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/ElrondNetwork/concurrent-map v0.1.2 h1:mr2sVF2IPDsJO8DNGzCUiNQOJcadHuIRVZn+QFnCBlE= github.com/ElrondNetwork/concurrent-map v0.1.2/go.mod h1:3XwSwn4JHI0lrKxWLZvtp53Emr8BXYTmNQGwcukHJEE= -github.com/ElrondNetwork/elrond-vm v0.0.6 h1:vWULWtPal4DoFkKyevdqZYdzQedEZeoqsrbuUZaIlHM= -github.com/ElrondNetwork/elrond-vm v0.0.6/go.mod h1:A+rISXkeBCTI3F8FtOYMyyp60vqf4OjMhL3u2a2q4sE= -github.com/ElrondNetwork/elrond-vm v0.0.7 h1:gbq5G0jWtdZoTNrzi4H+g5wSz64ie4WjgXcCNcHCpaw= -github.com/ElrondNetwork/elrond-vm v0.0.7/go.mod h1:A+rISXkeBCTI3F8FtOYMyyp60vqf4OjMhL3u2a2q4sE= +github.com/ElrondNetwork/elrond-vm v0.0.10 h1:6a5GFXf9UFxOYEG3LUpLe2zU2ezgpJYKqT/yZCUyB8M= +github.com/ElrondNetwork/elrond-vm v0.0.10/go.mod h1:A+rISXkeBCTI3F8FtOYMyyp60vqf4OjMhL3u2a2q4sE= github.com/ElrondNetwork/elrond-vm-common v0.0.4/go.mod h1:VqCCN0cX0e4D/KDc7MGNV9ElrOsfnjuJnGvcODVjzbk= github.com/ElrondNetwork/elrond-vm-common v0.0.5 h1:AOWJyMYDPtGbwsd2+BpDBYkEQE+QBvsKF0acozrNCko= github.com/ElrondNetwork/elrond-vm-common v0.0.5/go.mod h1:VqCCN0cX0e4D/KDc7MGNV9ElrOsfnjuJnGvcODVjzbk= diff --git a/integrationTests/frontend/wallet/txInterception_test.go b/integrationTests/frontend/wallet/txInterception_test.go index 9c2af022779..3a031ccf7e1 100644 --- a/integrationTests/frontend/wallet/txInterception_test.go +++ b/integrationTests/frontend/wallet/txInterception_test.go @@ -6,12 +6,12 @@ import ( "testing" "time" + "github.com/ElrondNetwork/elrond-go/core/mock" "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/data/state/addressConverters" "github.com/ElrondNetwork/elrond-go/data/transaction" "github.com/ElrondNetwork/elrond-go/sharding" "github.com/stretchr/testify/assert" - "github.com/ElrondNetwork/elrond-go/core/mock" ) func TestInterceptedTxFromFrontendGeneratedParamsWithoutData(t *testing.T) { diff --git a/integrationTests/multiShard/block/executingMiniblocks_test.go b/integrationTests/multiShard/block/executingMiniblocks_test.go index d01352b945f..740b291ad21 100644 --- a/integrationTests/multiShard/block/executingMiniblocks_test.go +++ b/integrationTests/multiShard/block/executingMiniblocks_test.go @@ -49,7 +49,7 @@ func TestShouldProcessBlocksInMultiShardArchitecture(t *testing.T) { _ = advertiser.Close() for _, nodeList := range nodes { for _, n := range nodeList { - _ =n.node.Stop() + _ = n.node.Stop() } } }() @@ -86,7 +86,7 @@ func TestShouldProcessBlocksInMultiShardArchitecture(t *testing.T) { time.Sleep(time.Second * 5) fmt.Println("Step 4. Minting sender addresses...") - createMintingForSenders(nodes, senderShard, sendersPrivateKeys, valMinting) + createMintingForSenders(nodes[senderShard], senderShard, sendersPrivateKeys, valMinting) fmt.Println("Step 5. 
Proposer creates block body and header with all available transactions...") blockBody, blockHeader := proposeBlock(t, proposerNode, uint32(1)) diff --git a/integrationTests/multiShard/smartContract/executingSCCalls_test.go b/integrationTests/multiShard/smartContract/executingSCCalls_test.go index 9223781f19d..7902405a16a 100644 --- a/integrationTests/multiShard/smartContract/executingSCCalls_test.go +++ b/integrationTests/multiShard/smartContract/executingSCCalls_test.go @@ -21,7 +21,7 @@ var gasPrice = 1 var gasLimit = 1000 var initialValueForInternalVariable = uint64(45) -func createScCallsNodes() (p2p.Messenger, []*testNode) { +func createScCallsNodes() (p2p.Messenger, map[uint32][]*testNode) { advertiser := createMessengerWithKadDht(context.Background(), "") advertiser.Bootstrap() @@ -85,9 +85,10 @@ func haveTime() time.Duration { } // Test within a network of two shards the following situation -// 1. Node in first shard deploys a smart contract -> we also make sure that the resulting smart contract address falls within the same shard -// 2. The same account within the first shard calls the smart contract, we make sure the smart contract is updated and the gas -// is substracted from the caller's balance +// 1. Node in first shard deploys a smart contract -> we also make sure that the resulting smart contract address falls +// within the same shard +// 2. The same account within the first shard calls the smart contract, we make sure the smart contract is updated and +// the gas is substracted from the caller's balance func TestProcessSCCallsInMultiShardArchitecture_FirstShard(t *testing.T) { if testing.Short() { t.Skip("this is not a short test") @@ -102,13 +103,15 @@ func TestProcessSCCallsInMultiShardArchitecture_FirstShard(t *testing.T) { advertiser, nodes := createScCallsNodes() defer func() { - advertiser.Close() - for _, n := range nodes { - n.node.Stop() + _ = advertiser.Close() + for _, nodeList := range nodes { + for _, n := range nodeList { + _ = n.node.Stop() + } } }() - proposerNodeShard1 := nodes[0] + proposerNodeShard1 := nodes[0][0] // delay for bootstrapping and topic announcement fmt.Println("Delaying for node bootstrap and topic announcement...") @@ -117,7 +120,7 @@ func TestProcessSCCallsInMultiShardArchitecture_FirstShard(t *testing.T) { senderAddressBytes := []byte("12345678901234567890123456789012") // Minting sender account - createMintingForSenders(nodes, senderShard, [][]byte{senderAddressBytes}, senderMintingValue) + createMintingForSenders(nodes[0], senderShard, [][]byte{senderAddressBytes}, senderMintingValue) // should deploy smart contract -> we process a block containing only the sc deployment tx deploySmartContract(t, proposerNodeShard1, generalRoundNumber, senderAddressBytes, senderNonce) @@ -183,14 +186,16 @@ func TestProcessSCCallsInMultiShardArchitecture_FirstShardReceivesCallFromSecond advertiser, nodes := createScCallsNodes() defer func() { - advertiser.Close() - for _, n := range nodes { - n.node.Stop() + _ = advertiser.Close() + for _, nodeList := range nodes { + for _, n := range nodeList { + _ = n.node.Stop() + } } }() - proposerNodeShard1 := nodes[0] - proposerNodeShard2 := nodes[1] + proposerNodeShard1 := nodes[0][0] + proposerNodeShard2 := nodes[1][0] // delay for bootstrapping and topic announcement fmt.Println("Delaying for node bootstrap and topic announcement...") @@ -200,8 +205,8 @@ func TestProcessSCCallsInMultiShardArchitecture_FirstShardReceivesCallFromSecond secondShardAddressBytes := []byte("12345678901234567890123456789011") // 
Minting sender account - createMintingForSenders(nodes, senderShard, [][]byte{senderAddressBytes}, mintingValue) - createMintingForSenders(nodes, receiverShard, [][]byte{secondShardAddressBytes}, mintingValue) + createMintingForSenders(nodes[0], senderShard, [][]byte{senderAddressBytes}, mintingValue) + createMintingForSenders(nodes[1], receiverShard, [][]byte{secondShardAddressBytes}, mintingValue) // should deploy smart contract -> we process a block containing only the sc deployment tx deploySmartContract(t, proposerNodeShard1, generalRoundNumber, senderAddressBytes, senderNonce) @@ -257,7 +262,7 @@ func TestProcessSCCallsInMultiShardArchitecture_FirstShardReceivesCallFromSecond storedVal, _ := scAccount.DataTrieTracker().RetrieveValue([]byte("a")) storedValBI := big.NewInt(0).SetBytes(storedVal) - assert.Equal(t, big.NewInt(int64(initialValueForInternalVariable + addValue)), storedValBI) + assert.Equal(t, big.NewInt(int64(initialValueForInternalVariable+addValue)), storedValBI) } // Test within a network of two shards the following situation @@ -280,14 +285,16 @@ func TestProcessSCCallsInMultiShardArchitecture_FirstShardReceivesCallFromSecond advertiser, nodes := createScCallsNodes() defer func() { - advertiser.Close() - for _, n := range nodes { - n.node.Stop() + _ = advertiser.Close() + for _, nodeList := range nodes { + for _, n := range nodeList { + _ = n.node.Stop() + } } }() - proposerNodeShardSC := nodes[0] - proposerNodeShardAccount := nodes[1] + proposerNodeShardSC := nodes[0][0] + proposerNodeShardAccount := nodes[1][0] // delay for bootstrapping and topic announcement fmt.Println("Delaying for node bootstrap and topic announcement...") @@ -297,8 +304,8 @@ func TestProcessSCCallsInMultiShardArchitecture_FirstShardReceivesCallFromSecond accountShardAddressBytes := []byte("12345678901234567890123456789011") // Minting sender account - createMintingForSenders(nodes, scShard, [][]byte{scAccountAddressBytes}, mintingValue) - createMintingForSenders(nodes, accShard, [][]byte{accountShardAddressBytes}, mintingValue) + createMintingForSenders(nodes[0], scShard, [][]byte{scAccountAddressBytes}, mintingValue) + createMintingForSenders(nodes[1], accShard, [][]byte{accountShardAddressBytes}, mintingValue) // should deploy smart contract -> we process a block containing only the sc deployment tx deploySmartContract(t, proposerNodeShardSC, generalRoundNumber, scAccountAddressBytes, accNonce) @@ -317,7 +324,7 @@ func TestProcessSCCallsInMultiShardArchitecture_FirstShardReceivesCallFromSecond scDeploymentAdddress, _ := hex.DecodeString("ca26d3e6152af91949295cc89f419413e08aa04ba2d5e1ed2b199b2ca8aabc2a") // Update the SC account balance so we can call withdraw function - createMintingForSenders(nodes, scShard, [][]byte{scDeploymentAdddress}, mintingValue) + createMintingForSenders(nodes[0], scShard, [][]byte{scDeploymentAdddress}, mintingValue) // Now that the SC is deployed, we test a call from an account located in the second shard withdrawValue := uint64(100) @@ -392,7 +399,7 @@ func processAndTestSmartContractCallInSender( } func processAndTestSmartContractCallInDestination(t *testing.T, contractCallTx *transaction.Transaction, - proposerNodeShardSC *testNode, scDeploymentAdddress []byte, scShard, accShard uint32, scNonce uint64, ) { + proposerNodeShardSC *testNode, scDeploymentAdddress []byte, scShard, accShard uint32, scNonce uint64) { txBytes, _ := testMarshalizer.Marshal(contractCallTx) txHash := testHasher.Compute(string(txBytes)) blockBody := block.Body{ @@ -400,7 +407,7 @@ func 
processAndTestSmartContractCallInDestination(t *testing.T, contractCallTx * TxHashes: [][]byte{txHash}, ReceiverShardID: scShard, SenderShardID: accShard, - Type: block.TxBlock, + Type: block.TxBlock, }, } // Before processing make sure to add the tx into the pool of the scShard @@ -446,7 +453,7 @@ func processAndTestIntermediateResults(t *testing.T, proposerNodeShardSC *testNo // - Initial balance + withdraw value - fees // TODO: Fees and gas should be taken into consideration when the fees are implemented - now we have extra money // from the gas returned since the gas was not substracted in the first place - finalValue := big.NewInt(0).Add(mintingValue, big.NewInt(int64(withdrawValue + uint64(gasLimit - 1*gasPrice)))) + finalValue := big.NewInt(0).Add(mintingValue, big.NewInt(int64(withdrawValue+uint64(gasLimit-1*gasPrice)))) acc, _ := proposerNodeShardAccount.node.GetAccount(hex.EncodeToString(accountShardAddressBytes)) assert.Equal(t, finalValue, acc.Balance) } diff --git a/integrationTests/multiShard/smartContract/testInitilalizer.go b/integrationTests/multiShard/smartContract/testInitilalizer.go index d25d8dd48b0..37a0a57755f 100644 --- a/integrationTests/multiShard/smartContract/testInitilalizer.go +++ b/integrationTests/multiShard/smartContract/testInitilalizer.go @@ -191,12 +191,14 @@ func createNetNode( dPool dataRetriever.PoolsHolder, accntAdapter state.AccountsAdapter, shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, targetShardId uint32, initialAddr string, ) ( *node.Node, p2p.Messenger, crypto.PrivateKey, + crypto.PublicKey, dataRetriever.ResolversFinder, process.BlockProcessor, process.TransactionProcessor, @@ -230,6 +232,7 @@ func createNetNode( interceptorContainerFactory, _ := shard.NewInterceptorsContainerFactory( shardCoordinator, + nodesCoordinator, messenger, store, testMarshalizer, @@ -239,7 +242,6 @@ func createNetNode( testMultiSig, dPool, testAddressConverter, - &mock.ChronologyValidatorMock{}, ) interceptorsContainer, err := interceptorContainerFactory.Create() if err != nil { @@ -257,7 +259,14 @@ func createNetNode( ) resolversContainer, _ := resolversContainerFactory.Create() resolversFinder, _ := containers.NewResolversFinder(resolversContainer, shardCoordinator) - requestHandler, _ := requestHandlers.NewShardResolverRequestHandler(resolversFinder, factory.TransactionTopic, factory.UnsignedTransactionTopic, factory.MiniBlocksTopic, factory.MetachainBlocksTopic, 100) + requestHandler, _ := requestHandlers.NewShardResolverRequestHandler( + resolversFinder, + factory.TransactionTopic, + factory.UnsignedTransactionTopic, + factory.MiniBlocksTopic, + factory.MetachainBlocksTopic, + 100, + ) interimProcFactory, _ := shard.NewIntermediateProcessorsContainerFactory( shardCoordinator, @@ -381,7 +390,7 @@ func createNetNode( fmt.Println(err.Error()) } - return n, messenger, sk, resolversFinder, blockProcessor, txProcessor, tc, scForwarder, blkc, store + return n, messenger, sk, pk, resolversFinder, blockProcessor, txProcessor, tc, scForwarder, blkc, store } func createMessengerWithKadDht(ctx context.Context, initialAddr string) p2p.Messenger { @@ -412,33 +421,56 @@ func getConnectableAddress(mes p2p.Messenger) string { return "" } -func displayAndStartNodes(nodes []*testNode) { - for _, n := range nodes { - skBuff, _ := n.sk.ToByteArray() - pkBuff, _ := n.pk.ToByteArray() +func displayAndStartNodes(nodes map[uint32][]*testNode) { + for _, nodeList := range nodes { - fmt.Printf("Shard ID: %v, sk: %s, pk: %s\n", - n.shardId, - 
hex.EncodeToString(skBuff), - hex.EncodeToString(pkBuff), - ) - _ = n.node.Start() - _ = n.node.P2PBootstrap() + for _, n := range nodeList { + skBuff, _ := n.sk.ToByteArray() + pkBuff, _ := n.pk.ToByteArray() + + fmt.Printf("Shard ID: %v, sk: %s, pk: %s\n", + n.shardId, + hex.EncodeToString(skBuff), + hex.EncodeToString(pkBuff), + ) + _ = n.node.Start() + _ = n.node.P2PBootstrap() + } } } +func genValidatorsFromPubKeys(pubKeysMap map[uint32][]string) map[uint32][]sharding.Validator { + validatorsMap := make(map[uint32][]sharding.Validator) + + for shardId, shardNodesPks := range pubKeysMap { + shardValidators := make([]sharding.Validator, 0) + for i := 0; i < len(shardNodesPks); i++ { + v, _ := consensus.NewValidator(big.NewInt(0), 1, []byte(shardNodesPks[i])) + shardValidators = append(shardValidators, v) + } + validatorsMap[shardId] = shardValidators + } + + return validatorsMap +} + func createNodes( numOfShards int, nodesPerShard int, serviceID string, -) []*testNode { +) map[uint32][]*testNode { //first node generated will have is pk belonging to firstSkShardId numMetaChainNodes := 1 - nodes := make([]*testNode, int(numOfShards)*nodesPerShard+numMetaChainNodes) + nodes := make(map[uint32][]*testNode) + nodesCoordinators := make(map[uint32][]sharding.NodesCoordinator) + nodesPublicKeys := make(map[uint32][]string) - idx := 0 for shardId := 0; shardId < numOfShards; shardId++ { + shardNodesCoordinators := make([]sharding.NodesCoordinator, 0) + shardPubKeys := make([]string, 0) + shardNodes := make([]*testNode, nodesPerShard) + for j := 0; j < nodesPerShard; j++ { testNode := &testNode{ dPool: createTestShardDataPool(), @@ -446,14 +478,25 @@ func createNodes( } shardCoordinator, _ := sharding.NewMultiShardCoordinator(uint32(numOfShards), uint32(shardId)) + nodesCoordinator, _ := sharding.NewIndexHashedNodesCoordinator( + 1, + testHasher, + uint32(shardId), + uint32(numOfShards), + ) + shardNodesCoordinators = append(shardNodesCoordinators, nodesCoordinator) + accntAdapter := createAccountsDB() - n, mes, sk, resFinder, blkProcessor, txProcessor, transactionCoordinator, scrForwarder, blkc, store := createNetNode( + n, mes, sk, pk, resFinder, blkProcessor, txProcessor, transactionCoordinator, scrForwarder, blkc, store := createNetNode( testNode.dPool, accntAdapter, shardCoordinator, + nodesCoordinator, testNode.shardId, serviceID, ) + pubKeyBytes, _ := pk.ToByteArray() + shardPubKeys = append(shardPubKeys, string(pubKeyBytes)) _ = n.CreateShardedStores() testNode.node = n @@ -499,21 +542,49 @@ func createNodes( &singlesig.SchnorrSigner{}, ) - nodes[idx] = testNode - idx++ + shardNodes[j] = testNode } + + nodes[uint32(shardId)] = shardNodes + nodesCoordinators[uint32(shardId)] = shardNodesCoordinators + nodesPublicKeys[uint32(shardId)] = shardPubKeys } - shardCoordinatorMeta, _ := sharding.NewMultiShardCoordinator(uint32(numOfShards), sharding.MetachainShardId) - tn := createMetaNetNode( - createTestMetaDataPool(), - createAccountsDB(), - shardCoordinatorMeta, - serviceID, - ) + metaNodesCoordinators := make([]sharding.NodesCoordinator, 0) + metaNodesPubKeys := make([]string, 0) + + metaNodes := make([]*testNode, numMetaChainNodes) for i := 0; i < numMetaChainNodes; i++ { - idx := i + int(numOfShards)*nodesPerShard - nodes[idx] = tn + shardCoordinatorMeta, _ := sharding.NewMultiShardCoordinator(uint32(numOfShards), sharding.MetachainShardId) + nodesCoordinator, _ := sharding.NewIndexHashedNodesCoordinator( + 1, + testHasher, + sharding.MetachainShardId, + uint32(numOfShards), + ) + + 
metaNodes[i] = createMetaNetNode( + createTestMetaDataPool(), + createAccountsDB(), + shardCoordinatorMeta, + nodesCoordinator, + serviceID, + ) + + metaNodesCoordinators = append(metaNodesCoordinators, nodesCoordinator) + pkBytes, _ := metaNodes[i].pk.ToByteArray() + metaNodesPubKeys = append(metaNodesPubKeys, string(pkBytes)) + } + + nodes[sharding.MetachainShardId] = metaNodes + nodesCoordinators[sharding.MetachainShardId] = metaNodesCoordinators + nodesPublicKeys[sharding.MetachainShardId] = metaNodesPubKeys + mapValidators := genValidatorsFromPubKeys(nodesPublicKeys) + + for _, shardCoord := range nodesCoordinators { + for j := 0; j < len(shardCoord); j++ { + shardCoord[j].LoadNodesPerShards(mapValidators) + } } return nodes @@ -569,6 +640,7 @@ func createMetaNetNode( dPool dataRetriever.MetaPoolsHolder, accntAdapter state.AccountsAdapter, shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, initialAddr string, ) *testNode { @@ -589,13 +661,13 @@ func createMetaNetNode( interceptorContainerFactory, _ := metaProcess.NewInterceptorsContainerFactory( shardCoordinator, + nodesCoordinator, tn.messenger, store, testMarshalizer, testHasher, testMultiSig, dPool, - &mock.ChronologyValidatorMock{}, ) interceptorsContainer, err := interceptorContainerFactory.Create() if err != nil { diff --git a/integrationTests/singleShard/transaction/interceptedBulkTx_test.go b/integrationTests/singleShard/transaction/interceptedBulkTx_test.go index 8d365613950..96ae7e2d89e 100644 --- a/integrationTests/singleShard/transaction/interceptedBulkTx_test.go +++ b/integrationTests/singleShard/transaction/interceptedBulkTx_test.go @@ -11,10 +11,10 @@ import ( "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/data/state/addressConverters" "github.com/ElrondNetwork/elrond-go/data/transaction" + "github.com/ElrondNetwork/elrond-go/hashing/sha256" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/sharding" "github.com/stretchr/testify/assert" - "github.com/ElrondNetwork/elrond-go/hashing/sha256" ) func TestNode_GenerateSendInterceptBulkTransactionsWithMessenger(t *testing.T) { diff --git a/integrationTests/vm/iele/vmDeploy_test.go b/integrationTests/vm/iele/vmDeploy_test.go index 9330d49b0aa..1d0099c9e43 100644 --- a/integrationTests/vm/iele/vmDeploy_test.go +++ b/integrationTests/vm/iele/vmDeploy_test.go @@ -42,7 +42,7 @@ func TestVmDeployWithTransferAndGasShouldDeploySCCode(t *testing.T) { _, err = accnts.Commit() assert.Nil(t, err) - expectedBalance := big.NewInt(99978648) + expectedBalance := big.NewInt(99922044) vm.TestAccount( t, accnts, diff --git a/integrationTests/vm/iele/vmRunContract_test.go b/integrationTests/vm/iele/vmRunContract_test.go index fc926ff4d99..9b51188d158 100644 --- a/integrationTests/vm/iele/vmRunContract_test.go +++ b/integrationTests/vm/iele/vmRunContract_test.go @@ -61,7 +61,7 @@ func TestRunWithTransferAndGasShouldRunSCCode(t *testing.T) { _, err = accnts.Commit() assert.Nil(t, err) - expectedBalance := big.NewInt(0).SetUint64(99973491) + expectedBalance := big.NewInt(0).SetUint64(99895339) vm.TestAccount( t, accnts, @@ -130,7 +130,7 @@ func TestRunWithTransferWithInsufficientGasShouldReturnErr(t *testing.T) { _, err = accnts.Commit() assert.Nil(t, err) - expectedBalance := big.NewInt(0).SetUint64(99978598) + expectedBalance := big.NewInt(0).SetUint64(99921994) //following operations happened: deploy and call, deploy succeed, call failed, transfer has been reverted, gas consumed vm.TestAccount( 
t, diff --git a/integrationTests/vm/testInitializer.go b/integrationTests/vm/testInitializer.go index 4bf56d46600..70f5f0e150e 100644 --- a/integrationTests/vm/testInitializer.go +++ b/integrationTests/vm/testInitializer.go @@ -19,8 +19,7 @@ import ( "github.com/ElrondNetwork/elrond-go/storage" "github.com/ElrondNetwork/elrond-go/storage/memorydb" "github.com/ElrondNetwork/elrond-go/storage/storageUnit" - vmcommon "github.com/ElrondNetwork/elrond-vm-common" - ielecommon "github.com/ElrondNetwork/elrond-vm/iele/common" + "github.com/ElrondNetwork/elrond-vm-common" "github.com/ElrondNetwork/elrond-vm/iele/elrond/node/endpoint" "github.com/stretchr/testify/assert" ) @@ -102,7 +101,7 @@ func CreateOneSCExecutorMockVM(accnts state.AccountsAdapter) vmcommon.VMExecutio func CreateVMAndBlockchainHook(accnts state.AccountsAdapter) (vmcommon.VMExecutionHandler, *hooks.VMAccountsDB) { blockChainHook, _ := hooks.NewVMAccountsDB(accnts, addrConv) cryptoHook := hooks.NewVMCryptoHook() - vm := endpoint.NewElrondIeleVM(blockChainHook, cryptoHook, ielecommon.Danse) + vm := endpoint.NewElrondIeleVM(blockChainHook, cryptoHook, endpoint.Danse) //Uncomment this to enable trace printing of the vm //vm.SetTracePretty() diff --git a/process/block/interceptedBlockHeader.go b/process/block/interceptedBlockHeader.go index 0244089d5b6..8d762ad82fe 100644 --- a/process/block/interceptedBlockHeader.go +++ b/process/block/interceptedBlockHeader.go @@ -3,7 +3,7 @@ package block import ( "github.com/ElrondNetwork/elrond-go/crypto" "github.com/ElrondNetwork/elrond-go/data/block" - "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/hashing" "github.com/ElrondNetwork/elrond-go/marshal" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/sharding" @@ -132,10 +132,6 @@ func (inHdr *InterceptedHeader) VerifySig() error { return err } - for i, pubKey := range consensusPubKeys { - log.Info(fmt.Sprintf("[%d]: %s\n", i, core.ToHex([]byte(pubKey)))) - } - verifier, err := inHdr.multiSigVerifier.Create(consensusPubKeys, 0) if err != nil { return err diff --git a/process/block/interceptedMetaBlockHeader.go b/process/block/interceptedMetaBlockHeader.go index bf0c94a89c1..6dc385f2e38 100644 --- a/process/block/interceptedMetaBlockHeader.go +++ b/process/block/interceptedMetaBlockHeader.go @@ -3,9 +3,9 @@ package block import ( "github.com/ElrondNetwork/elrond-go/crypto" "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/marshal" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/sharding" - "github.com/ElrondNetwork/elrond-go/marshal" ) // InterceptedHeader represents the wrapper over HeaderWrapper struct. 
diff --git a/process/block/interceptors/headerInterceptor_test.go b/process/block/interceptors/headerInterceptor_test.go index a3317ff486f..303168a3cda 100644 --- a/process/block/interceptors/headerInterceptor_test.go +++ b/process/block/interceptors/headerInterceptor_test.go @@ -3,21 +3,21 @@ package interceptors_test import ( "bytes" "errors" + "fmt" + "math/big" "sync" "testing" "time" + "github.com/ElrondNetwork/elrond-go/consensus" dataBlock "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/block" "github.com/ElrondNetwork/elrond-go/process/block/interceptors" "github.com/ElrondNetwork/elrond-go/process/mock" - "github.com/stretchr/testify/assert" "github.com/ElrondNetwork/elrond-go/sharding" - "github.com/ElrondNetwork/elrond-go/consensus" - "math/big" - "fmt" + "github.com/stretchr/testify/assert" ) var durTimeout = time.Duration(time.Second) diff --git a/process/block/interceptors/metachainHeaderInterceptor_test.go b/process/block/interceptors/metachainHeaderInterceptor_test.go index 65a528e0c82..726de1683df 100644 --- a/process/block/interceptors/metachainHeaderInterceptor_test.go +++ b/process/block/interceptors/metachainHeaderInterceptor_test.go @@ -170,7 +170,7 @@ func TestNewMetachainHeaderInterceptor_NilNodesCoordinatorShouldErr(t *testing.T mhi, err := interceptors.NewMetachainHeaderInterceptor( &mock.MarshalizerMock{}, metachainHeaders, - &mock.Uint64CacherStub{}, + &mock.Uint64SyncMapCacherStub{}, metachainStorer, mock.NewMultiSigner(), mock.HasherMock{}, diff --git a/process/mock/multiSigMock.go b/process/mock/multiSigMock.go index 6e12a9b2388..e2e7a83ad79 100644 --- a/process/mock/multiSigMock.go +++ b/process/mock/multiSigMock.go @@ -1,9 +1,10 @@ package mock import ( + "bytes" + "github.com/ElrondNetwork/elrond-go/crypto" "github.com/ElrondNetwork/elrond-go/hashing" - "bytes" ) // BelNevMock is used to mock belare neven multisignature scheme From e5c361372267e3f15e32354df6d85c7fbc4f8b46 Mon Sep 17 00:00:00 2001 From: Robert Sasu Date: Thu, 18 Jul 2019 16:02:09 +0300 Subject: [PATCH 031/234] peer account update --- data/state/errors.go | 12 +++++++++ data/state/peerAccount.go | 54 +++++++++++++++++++++++++++++++-------- 2 files changed, 56 insertions(+), 10 deletions(-) diff --git a/data/state/errors.go b/data/state/errors.go index c801162cc21..635f40ab18c 100644 --- a/data/state/errors.go +++ b/data/state/errors.go @@ -130,3 +130,15 @@ var ErrBech32ConvertError = errors.New("can't convert bech32 string") // ErrBech32WrongAddr signals that the string provided might not be in bech32 format var ErrBech32WrongAddr = errors.New("wrong bech32 string") + +// ErrNilStake signals that the provided stake is nil +var ErrNilStake = errors.New("stake is nil") + +// ErrNilAddress signals that the provided addres is nil +var ErrNilAddress = errors.New("address is nil") + +// ErrNilSchnorrPublicKey signals that the provided schnorr public is nil +var ErrNilSchnorrPublicKey = errors.New("schnorr public key is nil") + +// ErrNilBLSPublicKey signals that the provided BLS public key is nil +var ErrNilBLSPublicKey = errors.New("bls public key is nil") diff --git a/data/state/peerAccount.go b/data/state/peerAccount.go index 09f2f5fa8b3..984f888c35f 100644 --- a/data/state/peerAccount.go +++ b/data/state/peerAccount.go @@ -11,28 +11,62 @@ type PeerAccount struct { BLSPublicKey []byte SchnorrPublicKey []byte Address []byte - JailStartTime uint64 - 
JailEndTime uint64 - CurrentShardId uint32 - NextShardId uint32 Stake *big.Int + JailTime uint64 + PastJailTimes uint64 + + CurrentShardId uint32 + NextShardId uint32 + NodeInWaitingList bool + + Uptime uint64 + NrSignedBlocks uint32 + NrMissedBlocks uint32 + LeaderSuccessRate uint32 + + Rating uint32 + RootHash []byte + Nonce uint64 + addressContainer AddressContainer accountTracker AccountTracker dataTrieTracker DataTrieTracker } // NewPeerAccount creates new simple account wrapper for an PeerAccountContainer (that has just been initialized) -func NewPeerAccount(addressContainer AddressContainer, tracker AccountTracker) (*PeerAccount, error) { +func NewPeerAccount( + addressContainer AddressContainer, + tracker AccountTracker, + stake *big.Int, + address []byte, + schnorr []byte, + bls []byte, +) (*PeerAccount, error) { if addressContainer == nil { return nil, ErrNilAddressContainer } if tracker == nil { return nil, ErrNilAccountTracker } + if stake == nil { + return nil, ErrNilStake + } + if address == nil { + return nil, ErrNilAddress + } + if schnorr == nil { + return nil, ErrNilSchnorrPublicKey + } + if bls == nil { + return nil, ErrNilBLSPublicKey + } return &PeerAccount{ - Balance: big.NewInt(0), + Stake: big.NewInt(0).Set(stake), + Address: address, + SchnorrPublicKey: schnorr, + BLSPublicKey: bls, addressContainer: addressContainer, accountTracker: tracker, dataTrieTracker: NewTrackableDataTrie(nil), @@ -62,7 +96,7 @@ func (a *PeerAccount) SetNonceWithJournal(nonce uint64) error { a.accountTracker.Journalize(entry) a.Nonce = nonce - return a.accountTracker.SavePeerAccount(a) + return a.accountTracker.SaveAccount(a) } //SetNonce saves the nonce to the account @@ -85,7 +119,7 @@ func (a *PeerAccount) SetBalanceWithJournal(balance *big.Int) error { a.accountTracker.Journalize(entry) a.Balance = balance - return a.accountTracker.SavePeerAccount(a) + return a.accountTracker.SaveAccount(a) } //------- code / code hash @@ -110,7 +144,7 @@ func (a *PeerAccount) SetCodeHashWithJournal(codeHash []byte) error { a.accountTracker.Journalize(entry) a.CodeHash = codeHash - return a.accountTracker.SavePeerAccount(a) + return a.accountTracker.SaveAccount(a) } // GetCode gets the actual code that needs to be run in the VM @@ -145,7 +179,7 @@ func (a *PeerAccount) SetRootHashWithJournal(rootHash []byte) error { a.accountTracker.Journalize(entry) a.RootHash = rootHash - return a.accountTracker.SavePeerAccount(a) + return a.accountTracker.SaveAccount(a) } // DataTrie returns the trie that holds the current account's data From 1a56e7a67ef845e1deb45f6387fc6c4e32eb9029 Mon Sep 17 00:00:00 2001 From: Robert Sasu Date: Fri, 19 Jul 2019 13:25:37 +0300 Subject: [PATCH 032/234] peer account update --- data/state/peerAccount.go | 229 ++++++++++++++++++++++++++++++++------ 1 file changed, 194 insertions(+), 35 deletions(-) diff --git a/data/state/peerAccount.go b/data/state/peerAccount.go index 984f888c35f..e3b1cebbf53 100644 --- a/data/state/peerAccount.go +++ b/data/state/peerAccount.go @@ -6,6 +6,21 @@ import ( "github.com/ElrondNetwork/elrond-go/data" ) +type TimeStamp struct { + Epoch uint64 + Round uint64 +} + +type TimePeriod struct { + StartTime TimeStamp + EndTime TimeStamp +} + +type SignRate struct { + NrSuccess uint32 + NrFailure uint32 +} + // PeerAccount is the struct used in serialization/deserialization type PeerAccount struct { BLSPublicKey []byte @@ -13,17 +28,15 @@ type PeerAccount struct { Address []byte Stake *big.Int - JailTime uint64 - PastJailTimes uint64 + JailTime TimePeriod + 
PastJailTimes []TimePeriod CurrentShardId uint32 NextShardId uint32 NodeInWaitingList bool - Uptime uint64 - NrSignedBlocks uint32 - NrMissedBlocks uint32 - LeaderSuccessRate uint32 + ValidatorSuccessRate SignRate + LeaderSuccessRate SignRate Rating uint32 RootHash []byte @@ -109,52 +122,27 @@ func (a *PeerAccount) GetNonce() uint64 { return a.Nonce } -// SetBalanceWithJournal sets the account's balance, saving the old balance before changing -func (a *PeerAccount) SetBalanceWithJournal(balance *big.Int) error { - entry, err := NewJournalEntryBalance(a, a.Balance) - if err != nil { - return err - } - - a.accountTracker.Journalize(entry) - a.Balance = balance - - return a.accountTracker.SaveAccount(a) -} - -//------- code / code hash - // GetCodeHash returns the code hash associated with this account func (a *PeerAccount) GetCodeHash() []byte { - return a.CodeHash + return nil } // SetCodeHash sets the code hash associated with the account func (a *PeerAccount) SetCodeHash(codeHash []byte) { - a.CodeHash = codeHash } // SetCodeHashWithJournal sets the account's code hash, saving the old code hash before changing func (a *PeerAccount) SetCodeHashWithJournal(codeHash []byte) error { - entry, err := NewBaseJournalEntryCodeHash(a, a.CodeHash) - if err != nil { - return err - } - - a.accountTracker.Journalize(entry) - a.CodeHash = codeHash - - return a.accountTracker.SaveAccount(a) + return nil } // GetCode gets the actual code that needs to be run in the VM func (a *PeerAccount) GetCode() []byte { - return a.code + return nil } // SetCode sets the actual code that needs to be run in the VM func (a *PeerAccount) SetCode(code []byte) { - a.code = code } //------- data trie / root hash @@ -197,4 +185,175 @@ func (a *PeerAccount) DataTrieTracker() DataTrieTracker { return a.dataTrieTracker } -//TODO add Cap'N'Proto converter funcs +// SetAddressWithJournal sets the account's address, saving the old address before changing +func (a *PeerAccount) SetAddressWithJournal(address []byte) error { + entry, err := NewJournalEntryAddress(a, a.Address) + if err != nil { + return err + } + + a.accountTracker.Journalize(entry) + a.Address = address + + return a.accountTracker.SaveAccount(a) +} + +// SetSchnorrPublicKeyWithJournal sets the account's address, saving the old address before changing +func (a *PeerAccount) SetSchnorrPublicKeyWithJournal(pubKey []byte) error { + entry, err := NewJournalEntrySchnorrPublicKey(a, a.SchnorrPublicKey) + if err != nil { + return err + } + + a.accountTracker.Journalize(entry) + a.SchnorrPublicKey = pubKey + + return a.accountTracker.SaveAccount(a) +} + +// SetSchnorrPublicKeyWithJournal sets the account's address, saving the old address before changing +func (a *PeerAccount) SetBLSPublicKeyWithJournal(pubKey []byte) error { + entry, err := NewJournalEntryBLSPublicKey(a, a.BLSPublicKey) + if err != nil { + return err + } + + a.accountTracker.Journalize(entry) + a.BLSPublicKey = pubKey + + return a.accountTracker.SaveAccount(a) +} + +// SetStakeWithJournal sets the account's stake, saving the old stake before changing +func (a *PeerAccount) SetStakeWithJournal(stake *big.Int) error { + entry, err := NewJournalEntryStake(a, a.Stake) + if err != nil { + return err + } + + a.accountTracker.Journalize(entry) + a.Stake = stake + + return a.accountTracker.SaveAccount(a) +} + +// SetJailTimeWithJournal sets the account's jail time, saving the old state before changing +func (a *PeerAccount) SetJailTimeWithJournal(jailTime TimePeriod) error { + entry, err := 
NewJournalEntryJailTime(a, a.JailTime) + if err != nil { + return err + } + + a.accountTracker.Journalize(entry) + a.JailTime = jailTime + + return a.accountTracker.SaveAccount(a) +} + +// SetCurrentShardIdWithJournal sets the account's shard id, saving the old state before changing +func (a *PeerAccount) SetCurrentShardIdWithJournal(shId uint32) error { + entry, err := NewJournalEntryCurrentShardId(a, a.CurrentShardId) + if err != nil { + return err + } + + a.accountTracker.Journalize(entry) + a.CurrentShardId = shId + + return a.accountTracker.SaveAccount(a) +} + +// SetNextShardIdWithJournal sets the account's shard id, saving the old state before changing +func (a *PeerAccount) SetNextShardIdWithJournal(shId uint32) error { + entry, err := NewJournalEntryNexttShardId(a, a.NextShardId) + if err != nil { + return err + } + + a.accountTracker.Journalize(entry) + a.NextShardId = shId + + return a.accountTracker.SaveAccount(a) +} + +// SetNodeInWaitingListWithJournal sets the account's nodes status whether in waiting list, saving the old state before +func (a *PeerAccount) SetNodeInWaitingListWithJournal(nodeInWaitingList bool) error { + entry, err := NewJournalEntryNodeInWaitingList(a, a.NodeInWaitingList) + if err != nil { + return err + } + + a.accountTracker.Journalize(entry) + a.NodeInWaitingList = nodeInWaitingList + + return a.accountTracker.SaveAccount(a) +} + +// IncreaseValidatorSuccessRateWithJournal increases the account's number of successful signing, +// saving the old state before changing +func (a *PeerAccount) IncreaseValidatorSuccessRateWithJournal() error { + entry, err := NewJournalEntryValidatorSuccessRate(a, a.ValidatorSuccessRate) + if err != nil { + return err + } + + a.accountTracker.Journalize(entry) + a.ValidatorSuccessRate.NrSuccess++ + + return a.accountTracker.SaveAccount(a) +} + +// DecreaseValidatorSuccessRateWithJournal increases the account's number of missed signing, +// saving the old state before changing +func (a *PeerAccount) DecreaseValidatorSuccessRateWithJournal() error { + entry, err := NewJournalEntryValidatorSuccessRate(a, a.ValidatorSuccessRate) + if err != nil { + return err + } + + a.accountTracker.Journalize(entry) + a.ValidatorSuccessRate.NrFailure-- + + return a.accountTracker.SaveAccount(a) +} + +// IncreaseLeaderSuccessRateWithJournal increases the account's number of successful signing, +// saving the old state before changing +func (a *PeerAccount) IncreaseLeaderSuccessRateWithJournal() error { + entry, err := NewJournalEntryValidatorSuccessRate(a, a.LeaderSuccessRate) + if err != nil { + return err + } + + a.accountTracker.Journalize(entry) + a.LeaderSuccessRate.NrSuccess++ + + return a.accountTracker.SaveAccount(a) +} + +// DecreaseLeaderSuccessRateWithJournal increases the account's number of missing signing, +// saving the old state before changing +func (a *PeerAccount) DecreaseLeaderSuccessRateWithJournal() error { + entry, err := NewJournalEntryValidatorSuccessRate(a, a.LeaderSuccessRate) + if err != nil { + return err + } + + a.accountTracker.Journalize(entry) + a.LeaderSuccessRate.NrFailure++ + + return a.accountTracker.SaveAccount(a) +} + +// SetRatingWithJournal sets the account's rating id, saving the old state before changing +func (a *PeerAccount) SetRatingWithJournal(rating uint32) error { + entry, err := NewJournalEntryRating(a, a.Rating) + if err != nil { + return err + } + + a.accountTracker.Journalize(entry) + a.Rating = rating + + return a.accountTracker.SaveAccount(a) +} From 265ede097b2e549e6f63027717bfaba3bc290b0e 
Mon Sep 17 00:00:00 2001 From: Robert Sasu Date: Fri, 19 Jul 2019 13:34:39 +0300 Subject: [PATCH 033/234] fix after review. --- cmd/node/factory/structs.go | 2 +- process/transaction/process.go | 25 ++++++++++++++++--------- 2 files changed, 17 insertions(+), 10 deletions(-) diff --git a/cmd/node/factory/structs.go b/cmd/node/factory/structs.go index 7e89e56df5c..c89d8afb917 100644 --- a/cmd/node/factory/structs.go +++ b/cmd/node/factory/structs.go @@ -7,7 +7,6 @@ import ( "encoding/hex" "errors" "fmt" - "github.com/ElrondNetwork/elrond-go/data/address" "io" "math/big" "path/filepath" @@ -28,6 +27,7 @@ import ( "github.com/ElrondNetwork/elrond-go/crypto/signing/kyber/singlesig" "github.com/ElrondNetwork/elrond-go/crypto/signing/multisig" "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/address" dataBlock "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/data/blockchain" "github.com/ElrondNetwork/elrond-go/data/state" diff --git a/process/transaction/process.go b/process/transaction/process.go index 4e90697c128..0c8c3772d6a 100644 --- a/process/transaction/process.go +++ b/process/transaction/process.go @@ -87,11 +87,6 @@ func (txProc *txProcessor) ProcessTransaction(tx data.TransactionHandler, roundI return process.ErrNilTransaction } - currTxFee, ok := tx.(*feeTx.FeeTx) - if ok { - txProc.txFeeHandler.AddTxFeeFromBlock(currTxFee) - } - adrSrc, adrDst, err := txProc.getAddresses(tx) if err != nil { return err @@ -104,16 +99,28 @@ func (txProc *txProcessor) ProcessTransaction(tx data.TransactionHandler, roundI switch txType { case process.MoveBalance: - currTx := tx.(*transaction.Transaction) + currTx, ok := tx.(*transaction.Transaction) + if !ok { + return process.ErrWrongTypeAssertion + } return txProc.processMoveBalance(currTx, adrSrc, adrDst) case process.SCDeployment: - currTx := tx.(*transaction.Transaction) + currTx, ok := tx.(*transaction.Transaction) + if !ok { + return process.ErrWrongTypeAssertion + } return txProc.processSCDeployment(currTx, adrSrc, roundIndex) case process.SCInvoking: - currTx := tx.(*transaction.Transaction) + currTx, ok := tx.(*transaction.Transaction) + if !ok { + return process.ErrWrongTypeAssertion + } return txProc.processSCInvoking(currTx, adrSrc, adrDst, roundIndex) case process.TxFee: - currTxFee := tx.(*feeTx.FeeTx) + currTxFee, ok := tx.(*feeTx.FeeTx) + if !ok { + return process.ErrWrongTypeAssertion + } return txProc.processAccumulatedTxFees(currTxFee, adrSrc) } From bdf242df87902f5b82057b4a264a706ced7ac2f4 Mon Sep 17 00:00:00 2001 From: Robert Sasu Date: Fri, 19 Jul 2019 14:27:45 +0300 Subject: [PATCH 034/234] fix after review. 
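The ProcessTransaction change above replaces bare type assertions with the two-value form so that a payload of the wrong concrete type is reported as ErrWrongTypeAssertion instead of panicking. The following is a small self-contained sketch of the same guard pattern, using hypothetical stand-in types and constants (moveBalanceTx, feeTx, processMoveBalance, processFee) rather than the real process package.

package example

import "errors"

var errWrongTypeAssertion = errors.New("wrong type assertion")

const (
	moveBalance = iota
	txFee
)

type moveBalanceTx struct{ Value int64 }

type feeTx struct{ Value int64 }

// processTx dispatches on a previously computed transaction type and guards
// every concrete cast: the two-value assertion turns a mismatched payload
// into a recoverable error rather than a runtime panic.
func processTx(txType int, tx interface{}) error {
	switch txType {
	case moveBalance:
		currTx, ok := tx.(*moveBalanceTx)
		if !ok {
			return errWrongTypeAssertion
		}
		return processMoveBalance(currTx)
	case txFee:
		currFee, ok := tx.(*feeTx)
		if !ok {
			return errWrongTypeAssertion
		}
		return processFee(currFee)
	}

	return errors.New("unknown transaction type")
}

// processMoveBalance and processFee are placeholders for the real handlers.
func processMoveBalance(tx *moveBalanceTx) error { return nil }

func processFee(tx *feeTx) error { return nil }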
--- cmd/node/factory/structs.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/cmd/node/factory/structs.go b/cmd/node/factory/structs.go index 5edc4c685f3..29a8fc5e916 100644 --- a/cmd/node/factory/structs.go +++ b/cmd/node/factory/structs.go @@ -67,7 +67,6 @@ import ( "github.com/ElrondNetwork/elrond-go/storage" "github.com/ElrondNetwork/elrond-go/storage/memorydb" "github.com/ElrondNetwork/elrond-go/storage/storageUnit" - "github.com/ElrondNetwork/elrond-vm/iele/common" "github.com/ElrondNetwork/elrond-vm/iele/elrond/node/endpoint" "github.com/btcsuite/btcd/btcec" libp2pCrypto "github.com/libp2p/go-libp2p-core/crypto" @@ -1285,7 +1284,7 @@ func newShardBlockProcessorAndTracker( //TODO replace this with a vm factory cryptoHook := hooks.NewVMCryptoHook() - ieleVM := endpoint.NewElrondIeleVM(vmAccountsDB, cryptoHook, ielecommon.Danse) + ieleVM := endpoint.NewElrondIeleVM(vmAccountsDB, cryptoHook, endpoint.ElrondTestnet) scProcessor, err := smartContract.NewSmartContractProcessor( ieleVM, From 96b04c8c0a36595cce6edfacf77066cadfe046ce Mon Sep 17 00:00:00 2001 From: Robert Sasu Date: Fri, 19 Jul 2019 15:18:27 +0300 Subject: [PATCH 035/234] peer account and journalize entries --- data/state/peerAccount.go | 26 +-- data/state/peerJournalEntries.go | 285 +++++++++++++++++++++++++++++++ 2 files changed, 298 insertions(+), 13 deletions(-) diff --git a/data/state/peerAccount.go b/data/state/peerAccount.go index e3b1cebbf53..f8ec8c6336e 100644 --- a/data/state/peerAccount.go +++ b/data/state/peerAccount.go @@ -187,7 +187,7 @@ func (a *PeerAccount) DataTrieTracker() DataTrieTracker { // SetAddressWithJournal sets the account's address, saving the old address before changing func (a *PeerAccount) SetAddressWithJournal(address []byte) error { - entry, err := NewJournalEntryAddress(a, a.Address) + entry, err := NewPeerJournalEntryAddress(a, a.Address) if err != nil { return err } @@ -200,7 +200,7 @@ func (a *PeerAccount) SetAddressWithJournal(address []byte) error { // SetSchnorrPublicKeyWithJournal sets the account's address, saving the old address before changing func (a *PeerAccount) SetSchnorrPublicKeyWithJournal(pubKey []byte) error { - entry, err := NewJournalEntrySchnorrPublicKey(a, a.SchnorrPublicKey) + entry, err := NewPeerJournalEntrySchnorrPublicKey(a, a.SchnorrPublicKey) if err != nil { return err } @@ -213,7 +213,7 @@ func (a *PeerAccount) SetSchnorrPublicKeyWithJournal(pubKey []byte) error { // SetSchnorrPublicKeyWithJournal sets the account's address, saving the old address before changing func (a *PeerAccount) SetBLSPublicKeyWithJournal(pubKey []byte) error { - entry, err := NewJournalEntryBLSPublicKey(a, a.BLSPublicKey) + entry, err := NewPeerJournalEntryBLSPublicKey(a, a.BLSPublicKey) if err != nil { return err } @@ -226,7 +226,7 @@ func (a *PeerAccount) SetBLSPublicKeyWithJournal(pubKey []byte) error { // SetStakeWithJournal sets the account's stake, saving the old stake before changing func (a *PeerAccount) SetStakeWithJournal(stake *big.Int) error { - entry, err := NewJournalEntryStake(a, a.Stake) + entry, err := NewPeerJournalEntryStake(a, a.Stake) if err != nil { return err } @@ -239,7 +239,7 @@ func (a *PeerAccount) SetStakeWithJournal(stake *big.Int) error { // SetJailTimeWithJournal sets the account's jail time, saving the old state before changing func (a *PeerAccount) SetJailTimeWithJournal(jailTime TimePeriod) error { - entry, err := NewJournalEntryJailTime(a, a.JailTime) + entry, err := NewPeerJournalEntryJailTime(a, a.JailTime) if err != nil { return 
err } @@ -252,7 +252,7 @@ func (a *PeerAccount) SetJailTimeWithJournal(jailTime TimePeriod) error { // SetCurrentShardIdWithJournal sets the account's shard id, saving the old state before changing func (a *PeerAccount) SetCurrentShardIdWithJournal(shId uint32) error { - entry, err := NewJournalEntryCurrentShardId(a, a.CurrentShardId) + entry, err := NewPeerJournalEntryCurrentShardId(a, a.CurrentShardId) if err != nil { return err } @@ -265,7 +265,7 @@ func (a *PeerAccount) SetCurrentShardIdWithJournal(shId uint32) error { // SetNextShardIdWithJournal sets the account's shard id, saving the old state before changing func (a *PeerAccount) SetNextShardIdWithJournal(shId uint32) error { - entry, err := NewJournalEntryNexttShardId(a, a.NextShardId) + entry, err := NewPeerJournalEntryNextShardId(a, a.NextShardId) if err != nil { return err } @@ -278,7 +278,7 @@ func (a *PeerAccount) SetNextShardIdWithJournal(shId uint32) error { // SetNodeInWaitingListWithJournal sets the account's nodes status whether in waiting list, saving the old state before func (a *PeerAccount) SetNodeInWaitingListWithJournal(nodeInWaitingList bool) error { - entry, err := NewJournalEntryNodeInWaitingList(a, a.NodeInWaitingList) + entry, err := NewPeerJournalEntryInWaitingList(a, a.NodeInWaitingList) if err != nil { return err } @@ -292,7 +292,7 @@ func (a *PeerAccount) SetNodeInWaitingListWithJournal(nodeInWaitingList bool) er // IncreaseValidatorSuccessRateWithJournal increases the account's number of successful signing, // saving the old state before changing func (a *PeerAccount) IncreaseValidatorSuccessRateWithJournal() error { - entry, err := NewJournalEntryValidatorSuccessRate(a, a.ValidatorSuccessRate) + entry, err := NewPeerJournalEntryValidatorSuccessRate(a, a.ValidatorSuccessRate) if err != nil { return err } @@ -306,7 +306,7 @@ func (a *PeerAccount) IncreaseValidatorSuccessRateWithJournal() error { // DecreaseValidatorSuccessRateWithJournal increases the account's number of missed signing, // saving the old state before changing func (a *PeerAccount) DecreaseValidatorSuccessRateWithJournal() error { - entry, err := NewJournalEntryValidatorSuccessRate(a, a.ValidatorSuccessRate) + entry, err := NewPeerJournalEntryValidatorSuccessRate(a, a.ValidatorSuccessRate) if err != nil { return err } @@ -320,7 +320,7 @@ func (a *PeerAccount) DecreaseValidatorSuccessRateWithJournal() error { // IncreaseLeaderSuccessRateWithJournal increases the account's number of successful signing, // saving the old state before changing func (a *PeerAccount) IncreaseLeaderSuccessRateWithJournal() error { - entry, err := NewJournalEntryValidatorSuccessRate(a, a.LeaderSuccessRate) + entry, err := NewPeerJournalEntryLeaderSuccessRate(a, a.LeaderSuccessRate) if err != nil { return err } @@ -334,7 +334,7 @@ func (a *PeerAccount) IncreaseLeaderSuccessRateWithJournal() error { // DecreaseLeaderSuccessRateWithJournal increases the account's number of missing signing, // saving the old state before changing func (a *PeerAccount) DecreaseLeaderSuccessRateWithJournal() error { - entry, err := NewJournalEntryValidatorSuccessRate(a, a.LeaderSuccessRate) + entry, err := NewPeerJournalEntryLeaderSuccessRate(a, a.LeaderSuccessRate) if err != nil { return err } @@ -347,7 +347,7 @@ func (a *PeerAccount) DecreaseLeaderSuccessRateWithJournal() error { // SetRatingWithJournal sets the account's rating id, saving the old state before changing func (a *PeerAccount) SetRatingWithJournal(rating uint32) error { - entry, err := NewJournalEntryRating(a, a.Rating) + 
entry, err := NewPeerJournalEntryRating(a, a.Rating) if err != nil { return err } diff --git a/data/state/peerJournalEntries.go b/data/state/peerJournalEntries.go index 7bf2df5b486..9d714cb541b 100644 --- a/data/state/peerJournalEntries.go +++ b/data/state/peerJournalEntries.go @@ -1 +1,286 @@ package state + +import "math/big" + +//------- PeerJournalEntryAddress + +// PeerJournalEntryAddress is used to revert a round change +type PeerJournalEntryAddress struct { + account *PeerAccount + oldAddress []byte +} + +// NewPeerJournalEntryAddress outputs a new PeerJournalEntry implementation used to revert a round change +func NewPeerJournalEntryAddress(account *PeerAccount, oldAddress []byte) (*PeerJournalEntryAddress, error) { + if account == nil { + return nil, ErrNilAccountHandler + } + + return &PeerJournalEntryAddress{ + account: account, + oldAddress: oldAddress, + }, nil +} + +// Revert applies undo operation +func (jen *PeerJournalEntryAddress) Revert() (AccountHandler, error) { + jen.account.Address = jen.oldAddress + + return jen.account, nil +} + +//------- PeerJournalEntrySchnorrPublicKey + +// PeerJournalEntrySchnorrPublicKey is used to revert a round change +type PeerJournalEntrySchnorrPublicKey struct { + account *PeerAccount + oldSchnorrPubKey []byte +} + +// NewPeerJournalEntryAddress outputs a new PeerJournalEntry implementation used to revert a round change +func NewPeerJournalEntrySchnorrPublicKey(account *PeerAccount, oldSchnorrPubKey []byte) (*PeerJournalEntrySchnorrPublicKey, error) { + if account == nil { + return nil, ErrNilAccountHandler + } + + return &PeerJournalEntrySchnorrPublicKey{ + account: account, + oldSchnorrPubKey: oldSchnorrPubKey, + }, nil +} + +// Revert applies undo operation +func (jen *PeerJournalEntrySchnorrPublicKey) Revert() (AccountHandler, error) { + jen.account.SchnorrPublicKey = jen.oldSchnorrPubKey + + return jen.account, nil +} + +//------- PeerJournalEntryBLSPublicKey + +// PeerJournalEntryBLSPublicKey is used to revert a round change +type PeerJournalEntryBLSPublicKey struct { + account *PeerAccount + oldBLSPubKey []byte +} + +// NewPeerJournalEntryBLSPublicKey outputs a new PeerJournalEntry implementation used to revert a round change +func NewPeerJournalEntryBLSPublicKey(account *PeerAccount, oldPubKey []byte) (*PeerJournalEntryBLSPublicKey, error) { + if account == nil { + return nil, ErrNilAccountHandler + } + + return &PeerJournalEntryBLSPublicKey{ + account: account, + oldBLSPubKey: oldPubKey, + }, nil +} + +// Revert applies undo operation +func (jen *PeerJournalEntryBLSPublicKey) Revert() (AccountHandler, error) { + jen.account.BLSPublicKey = jen.oldBLSPubKey + + return jen.account, nil +} + +//------- PeerJournalEntryStake + +// PeerJournalEntryStake is used to revert a stake change +type PeerJournalEntryStake struct { + account *PeerAccount + oldStake *big.Int +} + +// NewPeerJournalEntryStake outputs a new PeerJournalEntry implementation used to revert a stake change +func NewPeerJournalEntryStake(account *PeerAccount, oldStake *big.Int) (*PeerJournalEntryStake, error) { + if account == nil { + return nil, ErrNilAccountHandler + } + + return &PeerJournalEntryStake{ + account: account, + oldStake: oldStake, + }, nil +} + +// Revert applies undo operation +func (jeb *PeerJournalEntryStake) Revert() (AccountHandler, error) { + jeb.account.Stake = jeb.oldStake + + return jeb.account, nil +} + +// PeerJournalEntryJailTime is used to revert a balance change +type PeerJournalEntryJailTime struct { + account *PeerAccount + oldPeriod 
TimePeriod +} + +// NewPeerJournalEntryJailTime outputs a new PeerJournalEntry implementation used to revert a state change +func NewPeerJournalEntryJailTime(account *PeerAccount, oldJailTime TimePeriod) (*PeerJournalEntryJailTime, error) { + if account == nil { + return nil, ErrNilAccountHandler + } + + return &PeerJournalEntryJailTime{ + account: account, + oldPeriod: oldJailTime, + }, nil +} + +// Revert applies undo operation +func (jeb *PeerJournalEntryJailTime) Revert() (AccountHandler, error) { + jeb.account.JailTime = jeb.oldPeriod + + return jeb.account, nil +} + +// PeerJournalEntryCurrentShardId is used to revert a shardId change +type PeerJournalEntryCurrentShardId struct { + account *PeerAccount + oldShardId uint32 +} + +// NewPeerJournalEntryCurrentShardId outputs a new PeerJournalEntry implementation used to revert a state change +func NewPeerJournalEntryCurrentShardId(account *PeerAccount, oldShId uint32) (*PeerJournalEntryCurrentShardId, error) { + if account == nil { + return nil, ErrNilAccountHandler + } + + return &PeerJournalEntryCurrentShardId{ + account: account, + oldShardId: oldShId, + }, nil +} + +// Revert applies undo operation +func (jeb *PeerJournalEntryCurrentShardId) Revert() (AccountHandler, error) { + jeb.account.CurrentShardId = jeb.oldShardId + + return jeb.account, nil +} + +// PeerJournalEntryNextShardId is used to revert a shardId change +type PeerJournalEntryNextShardId struct { + account *PeerAccount + oldShardId uint32 +} + +// NewPeerJournalEntryNextShardId outputs a new PeerJournalEntry implementation used to revert a state change +func NewPeerJournalEntryNextShardId(account *PeerAccount, oldShId uint32) (*PeerJournalEntryNextShardId, error) { + if account == nil { + return nil, ErrNilAccountHandler + } + + return &PeerJournalEntryNextShardId{ + account: account, + oldShardId: oldShId, + }, nil +} + +// Revert applies undo operation +func (jeb *PeerJournalEntryNextShardId) Revert() (AccountHandler, error) { + jeb.account.NextShardId = jeb.oldShardId + + return jeb.account, nil +} + +// PeerJournalEntryInWaitingList is used to revert a shardId change +type PeerJournalEntryInWaitingList struct { + account *PeerAccount + oldInWaitingList bool +} + +// NewPeerJournalEntryInWaitingList outputs a new PeerJournalEntry implementation used to revert a state change +func NewPeerJournalEntryInWaitingList(account *PeerAccount, oldInWaitingList bool) (*PeerJournalEntryInWaitingList, error) { + if account == nil { + return nil, ErrNilAccountHandler + } + + return &PeerJournalEntryInWaitingList{ + account: account, + oldInWaitingList: oldInWaitingList, + }, nil +} + +// Revert applies undo operation +func (jeb *PeerJournalEntryInWaitingList) Revert() (AccountHandler, error) { + jeb.account.NodeInWaitingList = jeb.oldInWaitingList + + return jeb.account, nil +} + +// PeerJournalEntryValidatorSuccessRate is used to revert a success rate change +type PeerJournalEntryValidatorSuccessRate struct { + account *PeerAccount + oldSuccessRate SignRate +} + +// NewPeerJournalEntryInWaitingList outputs a new PeerJournalEntry implementation used to revert a state change +func NewPeerJournalEntryValidatorSuccessRate(account *PeerAccount, oldSuccessRate SignRate) (*PeerJournalEntryValidatorSuccessRate, error) { + if account == nil { + return nil, ErrNilAccountHandler + } + + return &PeerJournalEntryValidatorSuccessRate{ + account: account, + oldSuccessRate: oldSuccessRate, + }, nil +} + +// Revert applies undo operation +func (jeb *PeerJournalEntryValidatorSuccessRate) 
Revert() (AccountHandler, error) { + jeb.account.ValidatorSuccessRate = jeb.oldSuccessRate + + return jeb.account, nil +} + +// PeerJournalEntryLeaderSuccessRate is used to revert a success rate change +type PeerJournalEntryLeaderSuccessRate struct { + account *PeerAccount + oldSuccessRate SignRate +} + +// NewPeerJournalEntryInWaitingList outputs a new PeerJournalEntry implementation used to revert a state change +func NewPeerJournalEntryLeaderSuccessRate(account *PeerAccount, oldSuccessRate SignRate) (*PeerJournalEntryLeaderSuccessRate, error) { + if account == nil { + return nil, ErrNilAccountHandler + } + + return &PeerJournalEntryLeaderSuccessRate{ + account: account, + oldSuccessRate: oldSuccessRate, + }, nil +} + +// Revert applies undo operation +func (jeb *PeerJournalEntryLeaderSuccessRate) Revert() (AccountHandler, error) { + jeb.account.LeaderSuccessRate = jeb.oldSuccessRate + + return jeb.account, nil +} + +// PeerJournalEntryRating is used to revert a rating change +type PeerJournalEntryRating struct { + account *PeerAccount + oldRating uint32 +} + +// NewPeerJournalEntryRating outputs a new PeerJournalEntry implementation used to revert a state change +func NewPeerJournalEntryRating(account *PeerAccount, oldRating uint32) (*PeerJournalEntryRating, error) { + if account == nil { + return nil, ErrNilAccountHandler + } + + return &PeerJournalEntryRating{ + account: account, + oldRating: oldRating, + }, nil +} + +// Revert applies undo operation +func (jeb *PeerJournalEntryRating) Revert() (AccountHandler, error) { + jeb.account.Rating = jeb.oldRating + + return jeb.account, nil +} From 0a34d8fa8a9aa5a356099da6f07eef75fb17a9fc Mon Sep 17 00:00:00 2001 From: Robert Sasu Date: Fri, 19 Jul 2019 16:11:48 +0300 Subject: [PATCH 036/234] added fees to burn --- data/address/specialAddresses.go | 30 ++++++++++++++----- data/errors.go | 4 +-- .../mock/specialAddressHandlerMock.go | 21 +++++++++++-- process/interface.go | 3 ++ process/mock/specialAddressHandlerMock.go | 21 +++++++++++-- process/unsigned/feeTxHandler.go | 20 ++++++++++--- process/unsigned/feeTxHandler_test.go | 6 ++-- 7 files changed, 84 insertions(+), 21 deletions(-) diff --git a/data/address/specialAddresses.go b/data/address/specialAddresses.go index d8b109262b2..733049e0f00 100644 --- a/data/address/specialAddresses.go +++ b/data/address/specialAddresses.go @@ -8,7 +8,8 @@ import ( type specialAddresses struct { elrond []byte - ownAddress []byte + leaderAddress []byte + burnAddress []byte adrConv state.AddressConverter shardCoordinator sharding.Coordinator } @@ -16,15 +17,15 @@ type specialAddresses struct { // NewSpecialAddressHolder creates a special address holder func NewSpecialAddressHolder( elrond []byte, - ownAddress []byte, + burnAddress []byte, adrConv state.AddressConverter, shardCoordinator sharding.Coordinator, ) (*specialAddresses, error) { if elrond == nil { return nil, data.ErrNilElrondAddress } - if ownAddress == nil { - return nil, data.ErrNilOwnAddress + if burnAddress == nil { + return nil, data.ErrNilBurnAddress } if adrConv == nil { return nil, data.ErrNilAddressConverter @@ -35,7 +36,7 @@ func NewSpecialAddressHolder( sp := &specialAddresses{ elrond: elrond, - ownAddress: ownAddress, + burnAddress: burnAddress, adrConv: adrConv, shardCoordinator: shardCoordinator, } @@ -43,14 +44,29 @@ func NewSpecialAddressHolder( return sp, nil } +// SetElrondCommunityAddress sets elrond address +func (sp *specialAddresses) SetElrondCommunityAddress(elrond []byte) { + sp.elrond = elrond +} + // 
ElrondCommunityAddress provides elrond address func (sp *specialAddresses) ElrondCommunityAddress() []byte { return sp.elrond } -// OwnAddress provides own address +// BurnAddress provides burn address +func (sp *specialAddresses) BurnAddress() []byte { + return sp.burnAddress +} + +// SetLeaderAddress provides leaders address +func (sp *specialAddresses) SetLeaderAddress(leader []byte) { + sp.leaderAddress = leader +} + +// LeaderAddress provides leader address func (sp *specialAddresses) LeaderAddress() []byte { - return sp.ownAddress + return sp.leaderAddress } // ShardIdForAddress calculates shard id for address diff --git a/data/errors.go b/data/errors.go index 110a1da5250..b1e0620102f 100644 --- a/data/errors.go +++ b/data/errors.go @@ -31,8 +31,8 @@ var ErrWrongTypeAssertion = errors.New("wrong type assertion") // ErrNilElrondAddress signals that nil elrond address was provided var ErrNilElrondAddress = errors.New("nil elrond address") -// ErrNilOwnAddress signals that nil own address was provided -var ErrNilOwnAddress = errors.New("nil own address") +// ErrNilBurnAddress signals that nil burn address was provided +var ErrNilBurnAddress = errors.New("nil burn address") // ErrNilAddressConverter signals that nil address converter was provided var ErrNilAddressConverter = errors.New("nil address converter") diff --git a/integrationTests/mock/specialAddressHandlerMock.go b/integrationTests/mock/specialAddressHandlerMock.go index 734df59496d..c9848b44ddd 100644 --- a/integrationTests/mock/specialAddressHandlerMock.go +++ b/integrationTests/mock/specialAddressHandlerMock.go @@ -2,10 +2,25 @@ package mock type SpecialAddressHandlerMock struct { ElrondCommunityAddressCalled func() []byte - OwnAddressCalled func() []byte + LeaderAddressCalled func() []byte + BurnAddressCalled func() []byte ShardIdForAddressCalled func([]byte) uint32 } +func (sh *SpecialAddressHandlerMock) SetElrondCommunityAddress(elrond []byte) { +} + +func (sh *SpecialAddressHandlerMock) SetLeaderAddress(leader []byte) { +} + +func (sh *SpecialAddressHandlerMock) BurnAddress() []byte { + if sh.BurnAddressCalled == nil { + return []byte("burn") + } + + return sh.BurnAddressCalled() +} + func (sh *SpecialAddressHandlerMock) ElrondCommunityAddress() []byte { if sh.ElrondCommunityAddressCalled == nil { return []byte("elrond") @@ -15,11 +30,11 @@ func (sh *SpecialAddressHandlerMock) ElrondCommunityAddress() []byte { } func (sh *SpecialAddressHandlerMock) LeaderAddress() []byte { - if sh.OwnAddressCalled == nil { + if sh.LeaderAddressCalled == nil { return []byte("leader") } - return sh.OwnAddressCalled() + return sh.LeaderAddressCalled() } func (sh *SpecialAddressHandlerMock) ShardIdForAddress(addr []byte) uint32 { diff --git a/process/interface.go b/process/interface.go index f9f0161fe1e..73cc70ada41 100644 --- a/process/interface.go +++ b/process/interface.go @@ -83,8 +83,11 @@ type UnsignedTxHandler interface { // SpecialAddressHandler responds with needed special addresses type SpecialAddressHandler interface { + SetElrondCommunityAddress(elrond []byte) ElrondCommunityAddress() []byte + SetLeaderAddress(leader []byte) LeaderAddress() []byte + BurnAddress() []byte ShardIdForAddress([]byte) uint32 } diff --git a/process/mock/specialAddressHandlerMock.go b/process/mock/specialAddressHandlerMock.go index 734df59496d..c9848b44ddd 100644 --- a/process/mock/specialAddressHandlerMock.go +++ b/process/mock/specialAddressHandlerMock.go @@ -2,10 +2,25 @@ package mock type SpecialAddressHandlerMock struct { 
ElrondCommunityAddressCalled func() []byte - OwnAddressCalled func() []byte + LeaderAddressCalled func() []byte + BurnAddressCalled func() []byte ShardIdForAddressCalled func([]byte) uint32 } +func (sh *SpecialAddressHandlerMock) SetElrondCommunityAddress(elrond []byte) { +} + +func (sh *SpecialAddressHandlerMock) SetLeaderAddress(leader []byte) { +} + +func (sh *SpecialAddressHandlerMock) BurnAddress() []byte { + if sh.BurnAddressCalled == nil { + return []byte("burn") + } + + return sh.BurnAddressCalled() +} + func (sh *SpecialAddressHandlerMock) ElrondCommunityAddress() []byte { if sh.ElrondCommunityAddressCalled == nil { return []byte("elrond") @@ -15,11 +30,11 @@ func (sh *SpecialAddressHandlerMock) ElrondCommunityAddress() []byte { } func (sh *SpecialAddressHandlerMock) LeaderAddress() []byte { - if sh.OwnAddressCalled == nil { + if sh.LeaderAddressCalled == nil { return []byte("leader") } - return sh.OwnAddressCalled() + return sh.LeaderAddressCalled() } func (sh *SpecialAddressHandlerMock) ShardIdForAddress(addr []byte) uint32 { diff --git a/process/unsigned/feeTxHandler.go b/process/unsigned/feeTxHandler.go index 26f22a0bf43..fd1154b0c2b 100644 --- a/process/unsigned/feeTxHandler.go +++ b/process/unsigned/feeTxHandler.go @@ -12,8 +12,9 @@ import ( "sync" ) -const communityPercentage = 0.1 // 10 = 100%, 0 = 0% -const leaderPercentage = 0.4 // 10 = 100%, 0 = 0% +const communityPercentage = 0.1 // 1 = 100%, 0 = 0% +const leaderPercentage = 0.4 // 1 = 100%, 0 = 0% +const burnPercentage = 0.5 // 1 = 100%, 0 = 0% type feeTxHandler struct { address process.SpecialAddressHandler @@ -154,6 +155,15 @@ func (ftxh *feeTxHandler) createLeaderTx(totalGathered *big.Int) *feeTx.FeeTx { return currTx } +func (ftxh *feeTxHandler) createBurnTx(totalGathered *big.Int) *feeTx.FeeTx { + currTx := &feeTx.FeeTx{} + + currTx.Value = getPercentageOfValue(totalGathered, burnPercentage) + currTx.RcvAddr = ftxh.address.BurnAddress() + + return currTx +} + func (ftxh *feeTxHandler) createCommunityTx(totalGathered *big.Int) *feeTx.FeeTx { currTx := &feeTx.FeeTx{} @@ -181,10 +191,12 @@ func (ftxh *feeTxHandler) CreateAllUTxs() []data.TransactionHandler { leaderTx := ftxh.createLeaderTx(totalFee) communityTx := ftxh.createCommunityTx(totalFee) + burnTx := ftxh.createBurnTx(totalFee) currFeeTxs := make([]data.TransactionHandler, 0) currFeeTxs = append(currFeeTxs, leaderTx) currFeeTxs = append(currFeeTxs, communityTx) + currFeeTxs = append(currFeeTxs, burnTx) ftxh.feeTxs = make([]*feeTx.FeeTx, 0) @@ -207,11 +219,11 @@ func (ftxh *feeTxHandler) VerifyCreatedUTxs() error { for _, value := range calculatedFeeTxs { totalCalculatedFees = totalCalculatedFees.Add(totalCalculatedFees, value.GetValue()) - commTxFromBlock, ok := ftxh.feeTxsFromBlock[string(value.GetRecvAddress())] + txFromBlock, ok := ftxh.feeTxsFromBlock[string(value.GetRecvAddress())] if !ok { return process.ErrTxsFeesDoesNotMatch } - if commTxFromBlock.Value.Cmp(value.GetValue()) != 0 { + if txFromBlock.Value.Cmp(value.GetValue()) != 0 { return process.ErrTxsFeesDoesNotMatch } } diff --git a/process/unsigned/feeTxHandler_test.go b/process/unsigned/feeTxHandler_test.go index 4f522855590..8422784605f 100644 --- a/process/unsigned/feeTxHandler_test.go +++ b/process/unsigned/feeTxHandler_test.go @@ -163,12 +163,13 @@ func TestFeeTxHandler_CreateAllUTxs(t *testing.T) { th.AddProcessedUTx(&feeTx.FeeTx{Value: currTxFee}) txs = th.CreateAllUTxs() - assert.Equal(t, 2, len(txs)) + assert.Equal(t, 3, len(txs)) totalSum := txs[0].GetValue().Uint64() totalSum += 
txs[1].GetValue().Uint64() + totalSum += txs[2].GetValue().Uint64() - assert.Equal(t, currTxFee.Uint64()/2, totalSum) + assert.Equal(t, currTxFee.Uint64(), totalSum) } func TestFeeTxHandler_VerifyCreatedUTxs(t *testing.T) { @@ -215,6 +216,7 @@ func TestFeeTxHandler_VerifyCreatedUTxs(t *testing.T) { th.AddProcessedUTx(&feeTx.FeeTx{Value: currTxFee}) th.AddTxFeeFromBlock(&feeTx.FeeTx{Value: big.NewInt(5), RcvAddr: addr.ElrondCommunityAddress()}) th.AddTxFeeFromBlock(&feeTx.FeeTx{Value: big.NewInt(20), RcvAddr: addr.LeaderAddress()}) + th.AddTxFeeFromBlock(&feeTx.FeeTx{Value: big.NewInt(25), RcvAddr: addr.BurnAddress()}) err = th.VerifyCreatedUTxs() assert.Nil(t, err) From 140b6548fc144468e4293232cce7d77ba8a9ee36 Mon Sep 17 00:00:00 2001 From: Robert Sasu Date: Fri, 19 Jul 2019 18:02:45 +0300 Subject: [PATCH 037/234] added fees to burn --- data/state/factory/peerAccountCreator.go | 21 + data/state/peerAccount.go | 41 +- data/state/peerAccount_test.go | 593 ++++++++++++++++++++++- data/state/peerAccountsDB.go | 1 + 4 files changed, 630 insertions(+), 26 deletions(-) create mode 100644 data/state/peerAccountsDB.go diff --git a/data/state/factory/peerAccountCreator.go b/data/state/factory/peerAccountCreator.go index 7312cd2e2d8..c81ed74b129 100644 --- a/data/state/factory/peerAccountCreator.go +++ b/data/state/factory/peerAccountCreator.go @@ -1 +1,22 @@ package factory + +import "github.com/ElrondNetwork/elrond-go/data/state" + +// MetaAccountCreator has a method to create a new meta accound +type PeerAccountCreator struct { +} + +// NewPeerAccountCreator creates a meta account creator +func NewPeerAccountCreator() state.AccountFactory { + return &PeerAccountCreator{} +} + +// CreateAccount calls the new Account creator and returns the result +func (c *PeerAccountCreator) CreateAccount(address state.AddressContainer, tracker state.AccountTracker) (state.AccountHandler, error) { + account, err := state.NewPeerAccount(address, tracker) + if err != nil { + return nil, err + } + + return account, nil +} diff --git a/data/state/peerAccount.go b/data/state/peerAccount.go index f8ec8c6336e..45dbd4a8cb9 100644 --- a/data/state/peerAccount.go +++ b/data/state/peerAccount.go @@ -38,11 +38,14 @@ type PeerAccount struct { ValidatorSuccessRate SignRate LeaderSuccessRate SignRate + CodeHash []byte + Rating uint32 RootHash []byte Nonce uint64 addressContainer AddressContainer + code []byte accountTracker AccountTracker dataTrieTracker DataTrieTracker } @@ -51,10 +54,6 @@ type PeerAccount struct { func NewPeerAccount( addressContainer AddressContainer, tracker AccountTracker, - stake *big.Int, - address []byte, - schnorr []byte, - bls []byte, ) (*PeerAccount, error) { if addressContainer == nil { return nil, ErrNilAddressContainer @@ -62,24 +61,8 @@ func NewPeerAccount( if tracker == nil { return nil, ErrNilAccountTracker } - if stake == nil { - return nil, ErrNilStake - } - if address == nil { - return nil, ErrNilAddress - } - if schnorr == nil { - return nil, ErrNilSchnorrPublicKey - } - if bls == nil { - return nil, ErrNilBLSPublicKey - } return &PeerAccount{ - Stake: big.NewInt(0).Set(stake), - Address: address, - SchnorrPublicKey: schnorr, - BLSPublicKey: bls, addressContainer: addressContainer, accountTracker: tracker, dataTrieTracker: NewTrackableDataTrie(nil), @@ -124,29 +107,37 @@ func (a *PeerAccount) GetNonce() uint64 { // GetCodeHash returns the code hash associated with this account func (a *PeerAccount) GetCodeHash() []byte { - return nil + return a.CodeHash } // SetCodeHash sets the code hash 
associated with the account func (a *PeerAccount) SetCodeHash(codeHash []byte) { + a.CodeHash = codeHash } // SetCodeHashWithJournal sets the account's code hash, saving the old code hash before changing func (a *PeerAccount) SetCodeHashWithJournal(codeHash []byte) error { - return nil + entry, err := NewBaseJournalEntryCodeHash(a, a.CodeHash) + if err != nil { + return err + } + + a.accountTracker.Journalize(entry) + a.CodeHash = codeHash + + return a.accountTracker.SaveAccount(a) } // GetCode gets the actual code that needs to be run in the VM func (a *PeerAccount) GetCode() []byte { - return nil + return a.code } // SetCode sets the actual code that needs to be run in the VM func (a *PeerAccount) SetCode(code []byte) { + a.code = code } -//------- data trie / root hash - // GetRootHash returns the root hash associated with this account func (a *PeerAccount) GetRootHash() []byte { return a.RootHash diff --git a/data/state/peerAccount_test.go b/data/state/peerAccount_test.go index 7bf2df5b486..e5b669fb7f1 100644 --- a/data/state/peerAccount_test.go +++ b/data/state/peerAccount_test.go @@ -1 +1,592 @@ -package state +package state_test + +import ( + "math/big" + "testing" + + "github.com/ElrondNetwork/elrond-go/data/mock" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/stretchr/testify/assert" +) + +func TestPeerAccount_MarshalUnmarshal_ShouldWork(t *testing.T) { + t.Parallel() + + addr := &mock.AddressMock{} + addrTr := &mock.AccountTrackerStub{} + acnt, _ := state.NewPeerAccount(addr, addrTr) + + marshalizer := mock.MarshalizerMock{} + buff, _ := marshalizer.Marshal(&acnt) + + acntRecovered, _ := state.NewPeerAccount(addr, addrTr) + _ = marshalizer.Unmarshal(acntRecovered, buff) + + assert.Equal(t, acnt, acntRecovered) +} + +func TestPeerAccount_NewAccountNilAddress(t *testing.T) { + t.Parallel() + + acc, err := state.NewPeerAccount(nil, &mock.AccountTrackerStub{}) + + assert.Nil(t, acc) + assert.Equal(t, err, state.ErrNilAddressContainer) +} + +func TestPeerAccount_NewPeerAccountNilAaccountTracker(t *testing.T) { + t.Parallel() + + acc, err := state.NewPeerAccount(&mock.AddressMock{}, nil) + + assert.Nil(t, acc) + assert.Equal(t, err, state.ErrNilAccountTracker) +} + +func TestPeerAccount_NewPeerAccountOk(t *testing.T) { + t.Parallel() + + acc, err := state.NewPeerAccount(&mock.AddressMock{}, &mock.AccountTrackerStub{}) + + assert.NotNil(t, acc) + assert.Nil(t, err) +} + +func TestPeerAccount_AddressContainer(t *testing.T) { + t.Parallel() + + addr := &mock.AddressMock{} + acc, err := state.NewPeerAccount(addr, &mock.AccountTrackerStub{}) + + assert.NotNil(t, acc) + assert.Nil(t, err) + assert.Equal(t, addr, acc.AddressContainer()) +} + +func TestPeerAccount_GetCode(t *testing.T) { + t.Parallel() + + acc, err := state.NewPeerAccount(&mock.AddressMock{}, &mock.AccountTrackerStub{}) + assert.Nil(t, err) + + code := []byte("code") + acc.SetCode(code) + + assert.NotNil(t, acc) + assert.Equal(t, code, acc.GetCode()) +} + +func TestPeerAccount_GetCodeHash(t *testing.T) { + t.Parallel() + + acc, err := state.NewPeerAccount(&mock.AddressMock{}, &mock.AccountTrackerStub{}) + assert.Nil(t, err) + + code := []byte("code") + acc.CodeHash = code + + assert.NotNil(t, acc) + assert.Equal(t, code, acc.GetCodeHash()) +} + +func TestPeerAccount_SetCodeHash(t *testing.T) { + t.Parallel() + + acc, err := state.NewPeerAccount(&mock.AddressMock{}, &mock.AccountTrackerStub{}) + assert.Nil(t, err) + + code := []byte("code") + acc.SetCodeHash(code) + + assert.NotNil(t, acc) + assert.Equal(t, 
code, acc.GetCodeHash()) +} + +func TestPeerAccount_GetRootHash(t *testing.T) { + t.Parallel() + + acc, err := state.NewPeerAccount(&mock.AddressMock{}, &mock.AccountTrackerStub{}) + assert.Nil(t, err) + + root := []byte("root") + acc.RootHash = root + + assert.NotNil(t, acc) + assert.Equal(t, root, acc.GetRootHash()) +} + +func TestPeerAccount_SetRootHash(t *testing.T) { + t.Parallel() + + acc, err := state.NewPeerAccount(&mock.AddressMock{}, &mock.AccountTrackerStub{}) + assert.Nil(t, err) + + root := []byte("root") + acc.SetRootHash(root) + + assert.NotNil(t, acc) + assert.Equal(t, root, acc.GetRootHash()) +} + +func TestPeerAccount_DataTrie(t *testing.T) { + t.Parallel() + + acc, err := state.NewPeerAccount(&mock.AddressMock{}, &mock.AccountTrackerStub{}) + assert.Nil(t, err) + + trie := &mock.TrieStub{} + acc.SetDataTrie(trie) + + assert.NotNil(t, acc) + assert.Equal(t, trie, acc.DataTrie()) +} + +func TestPeerAccount_SetNonceWithJournal(t *testing.T) { + t.Parallel() + + journalizeCalled := 0 + saveAccountCalled := 0 + tracker := &mock.AccountTrackerStub{ + JournalizeCalled: func(entry state.JournalEntry) { + journalizeCalled++ + }, + SaveAccountCalled: func(accountHandler state.AccountHandler) error { + saveAccountCalled++ + return nil + }, + } + + acc, err := state.NewPeerAccount(&mock.AddressMock{}, tracker) + assert.Nil(t, err) + + nonce := uint64(0) + err = acc.SetNonceWithJournal(nonce) + + assert.NotNil(t, acc) + assert.Nil(t, err) + assert.Equal(t, nonce, acc.Nonce) + assert.Equal(t, 1, journalizeCalled) + assert.Equal(t, 1, saveAccountCalled) +} + +func TestPeerAccount_SetCodeHashWithJournal(t *testing.T) { + t.Parallel() + + journalizeCalled := 0 + saveAccountCalled := 0 + tracker := &mock.AccountTrackerStub{ + JournalizeCalled: func(entry state.JournalEntry) { + journalizeCalled++ + }, + SaveAccountCalled: func(accountHandler state.AccountHandler) error { + saveAccountCalled++ + return nil + }, + } + + acc, err := state.NewPeerAccount(&mock.AddressMock{}, tracker) + assert.Nil(t, err) + + codeHash := []byte("codehash") + err = acc.SetCodeHashWithJournal(codeHash) + + assert.NotNil(t, acc) + assert.Nil(t, err) + assert.Equal(t, codeHash, acc.CodeHash) + assert.Equal(t, 1, journalizeCalled) + assert.Equal(t, 1, saveAccountCalled) +} + +func TestPeerAccount_SetRootHashWithJournal(t *testing.T) { + t.Parallel() + + journalizeCalled := 0 + saveAccountCalled := 0 + tracker := &mock.AccountTrackerStub{ + JournalizeCalled: func(entry state.JournalEntry) { + journalizeCalled++ + }, + SaveAccountCalled: func(accountHandler state.AccountHandler) error { + saveAccountCalled++ + return nil + }, + } + + acc, err := state.NewPeerAccount(&mock.AddressMock{}, tracker) + assert.Nil(t, err) + + rootHash := []byte("roothash") + err = acc.SetRootHashWithJournal(rootHash) + + assert.NotNil(t, acc) + assert.Nil(t, err) + assert.Equal(t, rootHash, acc.RootHash) + assert.Equal(t, 1, journalizeCalled) + assert.Equal(t, 1, saveAccountCalled) +} + +func TestPeerAccount_SetAddressWithJournal(t *testing.T) { + t.Parallel() + + journalizeCalled := 0 + saveAccountCalled := 0 + tracker := &mock.AccountTrackerStub{ + JournalizeCalled: func(entry state.JournalEntry) { + journalizeCalled++ + }, + SaveAccountCalled: func(accountHandler state.AccountHandler) error { + saveAccountCalled++ + return nil + }, + } + + acc, err := state.NewPeerAccount(&mock.AddressMock{}, tracker) + assert.Nil(t, err) + + address := []byte("address") + err = acc.SetAddressWithJournal(address) + + assert.NotNil(t, acc) + 
assert.Nil(t, err) + assert.Equal(t, address, acc.Address) + assert.Equal(t, 1, journalizeCalled) + assert.Equal(t, 1, saveAccountCalled) +} + +func TestPeerAccount_SetSchnorrPublicKeyWithJournalWithJournal(t *testing.T) { + t.Parallel() + + journalizeCalled := 0 + saveAccountCalled := 0 + tracker := &mock.AccountTrackerStub{ + JournalizeCalled: func(entry state.JournalEntry) { + journalizeCalled++ + }, + SaveAccountCalled: func(accountHandler state.AccountHandler) error { + saveAccountCalled++ + return nil + }, + } + + acc, err := state.NewPeerAccount(&mock.AddressMock{}, tracker) + assert.Nil(t, err) + + pubKey := []byte("pubkey") + err = acc.SetSchnorrPublicKeyWithJournal(pubKey) + + assert.NotNil(t, acc) + assert.Nil(t, err) + assert.Equal(t, pubKey, acc.SchnorrPublicKey) + assert.Equal(t, 1, journalizeCalled) + assert.Equal(t, 1, saveAccountCalled) +} + +func TestPeerAccount_SetBLSPublicKeyWithJournalWithJournal(t *testing.T) { + t.Parallel() + + journalizeCalled := 0 + saveAccountCalled := 0 + tracker := &mock.AccountTrackerStub{ + JournalizeCalled: func(entry state.JournalEntry) { + journalizeCalled++ + }, + SaveAccountCalled: func(accountHandler state.AccountHandler) error { + saveAccountCalled++ + return nil + }, + } + + acc, err := state.NewPeerAccount(&mock.AddressMock{}, tracker) + assert.Nil(t, err) + + pubKey := []byte("pubkey") + err = acc.SetBLSPublicKeyWithJournal(pubKey) + + assert.NotNil(t, acc) + assert.Nil(t, err) + assert.Equal(t, pubKey, acc.BLSPublicKey) + assert.Equal(t, 1, journalizeCalled) + assert.Equal(t, 1, saveAccountCalled) +} + +func TestPeerAccount_SetStakeWithJournal(t *testing.T) { + t.Parallel() + + journalizeCalled := 0 + saveAccountCalled := 0 + tracker := &mock.AccountTrackerStub{ + JournalizeCalled: func(entry state.JournalEntry) { + journalizeCalled++ + }, + SaveAccountCalled: func(accountHandler state.AccountHandler) error { + saveAccountCalled++ + return nil + }, + } + + acc, err := state.NewPeerAccount(&mock.AddressMock{}, tracker) + assert.Nil(t, err) + + stake := big.NewInt(250000) + err = acc.SetStakeWithJournal(stake) + + assert.NotNil(t, acc) + assert.Nil(t, err) + assert.Equal(t, stake.Uint64(), acc.Stake.Uint64()) + assert.Equal(t, 1, journalizeCalled) + assert.Equal(t, 1, saveAccountCalled) +} + +func TestPeerAccount_SetCurrentShardIdWithJournal(t *testing.T) { + t.Parallel() + + journalizeCalled := 0 + saveAccountCalled := 0 + tracker := &mock.AccountTrackerStub{ + JournalizeCalled: func(entry state.JournalEntry) { + journalizeCalled++ + }, + SaveAccountCalled: func(accountHandler state.AccountHandler) error { + saveAccountCalled++ + return nil + }, + } + + acc, err := state.NewPeerAccount(&mock.AddressMock{}, tracker) + assert.Nil(t, err) + + shId := uint32(10) + err = acc.SetCurrentShardIdWithJournal(shId) + + assert.NotNil(t, acc) + assert.Nil(t, err) + assert.Equal(t, shId, acc.CurrentShardId) + assert.Equal(t, 1, journalizeCalled) + assert.Equal(t, 1, saveAccountCalled) +} + +func TestPeerAccount_SetNextShardIdWithJournal(t *testing.T) { + t.Parallel() + + journalizeCalled := 0 + saveAccountCalled := 0 + tracker := &mock.AccountTrackerStub{ + JournalizeCalled: func(entry state.JournalEntry) { + journalizeCalled++ + }, + SaveAccountCalled: func(accountHandler state.AccountHandler) error { + saveAccountCalled++ + return nil + }, + } + + acc, err := state.NewPeerAccount(&mock.AddressMock{}, tracker) + assert.Nil(t, err) + + shId := uint32(10) + err = acc.SetNextShardIdWithJournal(shId) + + assert.NotNil(t, acc) + assert.Nil(t, err) + 
assert.Equal(t, shId, acc.NextShardId) + assert.Equal(t, 1, journalizeCalled) + assert.Equal(t, 1, saveAccountCalled) +} + +func TestPeerAccount_SetNodeInWaitingListWithJournal(t *testing.T) { + t.Parallel() + + journalizeCalled := 0 + saveAccountCalled := 0 + tracker := &mock.AccountTrackerStub{ + JournalizeCalled: func(entry state.JournalEntry) { + journalizeCalled++ + }, + SaveAccountCalled: func(accountHandler state.AccountHandler) error { + saveAccountCalled++ + return nil + }, + } + + acc, err := state.NewPeerAccount(&mock.AddressMock{}, tracker) + assert.Nil(t, err) + + err = acc.SetNodeInWaitingListWithJournal(true) + + assert.NotNil(t, acc) + assert.Nil(t, err) + assert.Equal(t, true, acc.NodeInWaitingList) + assert.Equal(t, 1, journalizeCalled) + assert.Equal(t, 1, saveAccountCalled) +} + +func TestPeerAccount_SetRatingWithJournal(t *testing.T) { + t.Parallel() + + journalizeCalled := 0 + saveAccountCalled := 0 + tracker := &mock.AccountTrackerStub{ + JournalizeCalled: func(entry state.JournalEntry) { + journalizeCalled++ + }, + SaveAccountCalled: func(accountHandler state.AccountHandler) error { + saveAccountCalled++ + return nil + }, + } + + acc, err := state.NewPeerAccount(&mock.AddressMock{}, tracker) + assert.Nil(t, err) + + rating := uint32(10) + err = acc.SetRatingWithJournal(rating) + + assert.NotNil(t, acc) + assert.Nil(t, err) + assert.Equal(t, rating, acc.Rating) + assert.Equal(t, 1, journalizeCalled) + assert.Equal(t, 1, saveAccountCalled) +} + +func TestPeerAccount_SetJailTimeWithJournal(t *testing.T) { + t.Parallel() + + journalizeCalled := 0 + saveAccountCalled := 0 + tracker := &mock.AccountTrackerStub{ + JournalizeCalled: func(entry state.JournalEntry) { + journalizeCalled++ + }, + SaveAccountCalled: func(accountHandler state.AccountHandler) error { + saveAccountCalled++ + return nil + }, + } + + acc, err := state.NewPeerAccount(&mock.AddressMock{}, tracker) + assert.Nil(t, err) + + jailTime := state.TimePeriod{ + StartTime: state.TimeStamp{Epoch: 12, Round: 12}, + EndTime: state.TimeStamp{Epoch: 13, Round: 13}, + } + err = acc.SetJailTimeWithJournal(jailTime) + + assert.NotNil(t, acc) + assert.Nil(t, err) + assert.Equal(t, jailTime, acc.JailTime) + assert.Equal(t, 1, journalizeCalled) + assert.Equal(t, 1, saveAccountCalled) +} + +func TestPeerAccount_IncreaseLeaderSuccessRateWithJournal(t *testing.T) { + t.Parallel() + + journalizeCalled := 0 + saveAccountCalled := 0 + tracker := &mock.AccountTrackerStub{ + JournalizeCalled: func(entry state.JournalEntry) { + journalizeCalled++ + }, + SaveAccountCalled: func(accountHandler state.AccountHandler) error { + saveAccountCalled++ + return nil + }, + } + + acc, err := state.NewPeerAccount(&mock.AddressMock{}, tracker) + assert.Nil(t, err) + + acc.LeaderSuccessRate = state.SignRate{NrSuccess: 10, NrFailure: 10} + err = acc.IncreaseLeaderSuccessRateWithJournal() + + assert.NotNil(t, acc) + assert.Nil(t, err) + assert.Equal(t, 11, acc.LeaderSuccessRate.NrSuccess) + assert.Equal(t, 1, journalizeCalled) + assert.Equal(t, 1, saveAccountCalled) +} + +func TestPeerAccount_IncreaseValidatorSuccessRateWithJournal(t *testing.T) { + t.Parallel() + + journalizeCalled := 0 + saveAccountCalled := 0 + tracker := &mock.AccountTrackerStub{ + JournalizeCalled: func(entry state.JournalEntry) { + journalizeCalled++ + }, + SaveAccountCalled: func(accountHandler state.AccountHandler) error { + saveAccountCalled++ + return nil + }, + } + + acc, err := state.NewPeerAccount(&mock.AddressMock{}, tracker) + assert.Nil(t, err) + + 
acc.ValidatorSuccessRate = state.SignRate{NrSuccess: 10, NrFailure: 10} + err = acc.IncreaseValidatorSuccessRateWithJournal() + + assert.NotNil(t, acc) + assert.Nil(t, err) + assert.Equal(t, 11, acc.ValidatorSuccessRate.NrSuccess) + assert.Equal(t, 1, journalizeCalled) + assert.Equal(t, 1, saveAccountCalled) +} + +func TestPeerAccount_DecreaseLeaderSuccessRateWithJournal(t *testing.T) { + t.Parallel() + + journalizeCalled := 0 + saveAccountCalled := 0 + tracker := &mock.AccountTrackerStub{ + JournalizeCalled: func(entry state.JournalEntry) { + journalizeCalled++ + }, + SaveAccountCalled: func(accountHandler state.AccountHandler) error { + saveAccountCalled++ + return nil + }, + } + + acc, err := state.NewPeerAccount(&mock.AddressMock{}, tracker) + assert.Nil(t, err) + + acc.LeaderSuccessRate = state.SignRate{NrSuccess: 10, NrFailure: 10} + err = acc.DecreaseLeaderSuccessRateWithJournal() + + assert.NotNil(t, acc) + assert.Nil(t, err) + assert.Equal(t, 11, acc.LeaderSuccessRate.NrFailure) + assert.Equal(t, 1, journalizeCalled) + assert.Equal(t, 1, saveAccountCalled) +} + +func TestPeerAccount_DecreaseValidatorSuccessRateWithJournal(t *testing.T) { + t.Parallel() + + journalizeCalled := 0 + saveAccountCalled := 0 + tracker := &mock.AccountTrackerStub{ + JournalizeCalled: func(entry state.JournalEntry) { + journalizeCalled++ + }, + SaveAccountCalled: func(accountHandler state.AccountHandler) error { + saveAccountCalled++ + return nil + }, + } + + acc, err := state.NewPeerAccount(&mock.AddressMock{}, tracker) + assert.Nil(t, err) + + acc.ValidatorSuccessRate = state.SignRate{NrSuccess: 10, NrFailure: 10} + err = acc.DecreaseValidatorSuccessRateWithJournal() + + assert.NotNil(t, acc) + assert.Nil(t, err) + assert.Equal(t, 11, acc.ValidatorSuccessRate.NrFailure) + assert.Equal(t, 1, journalizeCalled) + assert.Equal(t, 1, saveAccountCalled) +} diff --git a/data/state/peerAccountsDB.go b/data/state/peerAccountsDB.go new file mode 100644 index 00000000000..7bf2df5b486 --- /dev/null +++ b/data/state/peerAccountsDB.go @@ -0,0 +1 @@ +package state From e6f79f9f8dfa4be1a15d37397cd26c34eb6b8a18 Mon Sep 17 00:00:00 2001 From: Robert Sasu Date: Mon, 22 Jul 2019 12:20:48 +0300 Subject: [PATCH 038/234] peer account factory --- cmd/node/factory/structs.go | 7 +-- data/state/errors.go | 2 + data/state/factory/accountCreatorFactory.go | 31 ++++++---- .../factory/accountCreatorFactory_test.go | 43 +++++++------- data/state/factory/accountCreator_test.go | 18 +----- data/state/factory/metaAccountCreator_test.go | 19 +------ data/state/factory/peerAccountCreator_test.go | 56 ++++++++++++++++++- data/state/peerAccount.go | 18 +++++- data/state/peerAccount_test.go | 8 +-- 9 files changed, 128 insertions(+), 74 deletions(-) diff --git a/cmd/node/factory/structs.go b/cmd/node/factory/structs.go index 53a68947bb7..8af4e3aeca3 100644 --- a/cmd/node/factory/structs.go +++ b/cmd/node/factory/structs.go @@ -66,7 +66,6 @@ import ( "github.com/ElrondNetwork/elrond-go/storage" "github.com/ElrondNetwork/elrond-go/storage/memorydb" "github.com/ElrondNetwork/elrond-go/storage/storageUnit" - "github.com/ElrondNetwork/elrond-vm/iele/common" "github.com/ElrondNetwork/elrond-vm/iele/elrond/node/endpoint" "github.com/btcsuite/btcd/btcec" libp2pCrypto "github.com/libp2p/go-libp2p-core/crypto" @@ -204,7 +203,7 @@ func StateComponentsFactory(args *stateComponentsFactoryArgs) (*State, error) { return nil, errors.New("could not create address converter: " + err.Error()) } - accountFactory, err := 
factoryState.NewAccountFactoryCreator(args.shardCoordinator) + accountFactory, err := factoryState.NewAccountFactoryCreator(factoryState.UserAccount) if err != nil { return nil, errors.New("could not create account factory: " + err.Error()) } @@ -1161,7 +1160,7 @@ func generateGenesisHeadersForInit( return nil, err } - accountFactory, err := factoryState.NewAccountFactoryCreator(newShardCoordinator) + accountFactory, err := factoryState.NewAccountFactoryCreator(factoryState.UserAccount) if err != nil { return nil, err } @@ -1263,7 +1262,7 @@ func newShardBlockProcessorAndTracker( //TODO replace this with a vm factory cryptoHook := hooks.NewVMCryptoHook() - ieleVM := endpoint.NewElrondIeleVM(vmAccountsDB, cryptoHook, ielecommon.Danse) + ieleVM := endpoint.NewElrondIeleVM(vmAccountsDB, cryptoHook, endpoint.Danse) scProcessor, err := smartContract.NewSmartContractProcessor( ieleVM, diff --git a/data/state/errors.go b/data/state/errors.go index 635f40ab18c..c2a4ef1f514 100644 --- a/data/state/errors.go +++ b/data/state/errors.go @@ -142,3 +142,5 @@ var ErrNilSchnorrPublicKey = errors.New("schnorr public key is nil") // ErrNilBLSPublicKey signals that the provided BLS public key is nil var ErrNilBLSPublicKey = errors.New("bls public key is nil") + +var ErrUnknownAccountType = errors.New("account type is unknown") diff --git a/data/state/factory/accountCreatorFactory.go b/data/state/factory/accountCreatorFactory.go index 8aa90bc777f..a1452003aa0 100644 --- a/data/state/factory/accountCreatorFactory.go +++ b/data/state/factory/accountCreatorFactory.go @@ -2,22 +2,29 @@ package factory import ( "github.com/ElrondNetwork/elrond-go/data/state" - "github.com/ElrondNetwork/elrond-go/sharding" ) -// NewAccountFactoryCreator returns an account factory depending on shard coordinator self id -func NewAccountFactoryCreator(coordinator sharding.Coordinator) (state.AccountFactory, error) { - if coordinator == nil { - return nil, state.ErrNilShardCoordinator - } +type Type uint8 - if coordinator.SelfId() < coordinator.NumberOfShards() { - return NewAccountCreator(), nil - } +const ( + UserAccount Type = 0 + ShardStatistics Type = 1 + ValidatorAccount Type = 2 + InvalidType Type = 3 +) - if coordinator.SelfId() == sharding.MetachainShardId { +// NewAccountFactoryCreator returns an account factory depending on shard coordinator self id +func NewAccountFactoryCreator(accountType Type) (state.AccountFactory, error) { + switch accountType { + case UserAccount: + return NewAccountCreator(), nil + case ShardStatistics: return NewMetaAccountCreator(), nil + case ValidatorAccount: + return NewPeerAccountCreator(), nil + case InvalidType: + return nil, state.ErrUnknownAccountType + default: + return nil, state.ErrUnknownAccountType } - - return nil, state.ErrUnknownShardId } diff --git a/data/state/factory/accountCreatorFactory_test.go b/data/state/factory/accountCreatorFactory_test.go index 67d4f06151e..ebf94f1de76 100644 --- a/data/state/factory/accountCreatorFactory_test.go +++ b/data/state/factory/accountCreatorFactory_test.go @@ -6,27 +6,22 @@ import ( "github.com/ElrondNetwork/elrond-go/data/mock" "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/data/state/factory" - "github.com/ElrondNetwork/elrond-go/sharding" "github.com/stretchr/testify/assert" ) -func TestNewAccountFactoryCreator_NilShardCoordinator(t *testing.T) { +func TestNewAccountFactoryCreator_BadType(t *testing.T) { t.Parallel() - accF, err := factory.NewAccountFactoryCreator(nil) + accF, err := 
factory.NewAccountFactoryCreator(factory.InvalidType) - assert.Equal(t, err, state.ErrNilShardCoordinator) + assert.Equal(t, err, state.ErrUnknownAccountType) assert.Nil(t, accF) } func TestNewAccountFactoryCreator_NormalAccount(t *testing.T) { t.Parallel() - shardC := mock.ShardCoordinatorMock{ - SelfID: 0, - NrOfShards: 1, - } - accF, err := factory.NewAccountFactoryCreator(shardC) + accF, err := factory.NewAccountFactoryCreator(factory.UserAccount) assert.Nil(t, err) accWrp, err := accF.CreateAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) @@ -40,11 +35,7 @@ func TestNewAccountFactoryCreator_NormalAccount(t *testing.T) { func TestNewAccountFactoryCreator_MetaAccount(t *testing.T) { t.Parallel() - shardC := mock.ShardCoordinatorMock{ - SelfID: sharding.MetachainShardId, - NrOfShards: 1, - } - accF, err := factory.NewAccountFactoryCreator(shardC) + accF, err := factory.NewAccountFactoryCreator(factory.ShardStatistics) assert.Nil(t, err) accWrp, err := accF.CreateAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) @@ -55,14 +46,24 @@ func TestNewAccountFactoryCreator_MetaAccount(t *testing.T) { assert.NotNil(t, accF) } -func TestNewAccountFactoryCreator_BadShardID(t *testing.T) { +func TestNewAccountFactoryCreator_PeerAccount(t *testing.T) { t.Parallel() - shardC := mock.ShardCoordinatorMock{ - SelfID: 10, - NrOfShards: 5, - } - accF, err := factory.NewAccountFactoryCreator(shardC) + accF, err := factory.NewAccountFactoryCreator(factory.ValidatorAccount) + assert.Nil(t, err) + + accWrp, err := accF.CreateAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) + _, ok := accWrp.(*state.PeerAccount) + assert.Equal(t, true, ok) + + assert.Nil(t, err) + assert.NotNil(t, accF) +} + +func TestNewAccountFactoryCreator_UnknownType(t *testing.T) { + t.Parallel() + + accF, err := factory.NewAccountFactoryCreator(10) assert.Nil(t, accF) - assert.Equal(t, state.ErrUnknownShardId, err) + assert.Equal(t, state.ErrUnknownAccountType, err) } diff --git a/data/state/factory/accountCreator_test.go b/data/state/factory/accountCreator_test.go index eaf40d4f4c1..1ffc6d27a7e 100644 --- a/data/state/factory/accountCreator_test.go +++ b/data/state/factory/accountCreator_test.go @@ -12,11 +12,7 @@ import ( func TestAccountCreator_CreateAccountNilAddress(t *testing.T) { t.Parallel() - shardC := mock.ShardCoordinatorMock{ - SelfID: 0, - NrOfShards: 1, - } - accF, err := factory.NewAccountFactoryCreator(shardC) + accF, err := factory.NewAccountFactoryCreator(factory.UserAccount) assert.Nil(t, err) _, ok := accF.(*factory.AccountCreator) @@ -31,11 +27,7 @@ func TestAccountCreator_CreateAccountNilAddress(t *testing.T) { func TestAccountCreator_CreateAccountNilAccountTraccer(t *testing.T) { t.Parallel() - shardC := mock.ShardCoordinatorMock{ - SelfID: 0, - NrOfShards: 1, - } - accF, err := factory.NewAccountFactoryCreator(shardC) + accF, err := factory.NewAccountFactoryCreator(factory.UserAccount) assert.Nil(t, err) _, ok := accF.(*factory.AccountCreator) @@ -50,11 +42,7 @@ func TestAccountCreator_CreateAccountNilAccountTraccer(t *testing.T) { func TestAccountCreator_CreateAccountOk(t *testing.T) { t.Parallel() - shardC := mock.ShardCoordinatorMock{ - SelfID: 0, - NrOfShards: 1, - } - accF, err := factory.NewAccountFactoryCreator(shardC) + accF, err := factory.NewAccountFactoryCreator(factory.UserAccount) assert.Nil(t, err) _, ok := accF.(*factory.AccountCreator) diff --git a/data/state/factory/metaAccountCreator_test.go b/data/state/factory/metaAccountCreator_test.go index c8d661b3503..326ba3e719c 
100644 --- a/data/state/factory/metaAccountCreator_test.go +++ b/data/state/factory/metaAccountCreator_test.go @@ -6,18 +6,13 @@ import ( "github.com/ElrondNetwork/elrond-go/data/mock" "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/data/state/factory" - "github.com/ElrondNetwork/elrond-go/sharding" "github.com/stretchr/testify/assert" ) func TestMetaAccountCreator_CreateAccountNilAddress(t *testing.T) { t.Parallel() - shardC := mock.ShardCoordinatorMock{ - SelfID: sharding.MetachainShardId, - NrOfShards: 1, - } - accF, err := factory.NewAccountFactoryCreator(shardC) + accF, err := factory.NewAccountFactoryCreator(factory.ShardStatistics) assert.Nil(t, err) _, ok := accF.(*factory.MetaAccountCreator) @@ -32,11 +27,7 @@ func TestMetaAccountCreator_CreateAccountNilAddress(t *testing.T) { func TestMetaAccountCreator_CreateAccountNilAccountTraccer(t *testing.T) { t.Parallel() - shardC := mock.ShardCoordinatorMock{ - SelfID: sharding.MetachainShardId, - NrOfShards: 1, - } - accF, err := factory.NewAccountFactoryCreator(shardC) + accF, err := factory.NewAccountFactoryCreator(factory.ShardStatistics) assert.Nil(t, err) _, ok := accF.(*factory.MetaAccountCreator) @@ -51,11 +42,7 @@ func TestMetaAccountCreator_CreateAccountNilAccountTraccer(t *testing.T) { func TestMetaAccountCreator_CreateAccountOk(t *testing.T) { t.Parallel() - shardC := mock.ShardCoordinatorMock{ - SelfID: sharding.MetachainShardId, - NrOfShards: 1, - } - accF, err := factory.NewAccountFactoryCreator(shardC) + accF, err := factory.NewAccountFactoryCreator(factory.ShardStatistics) assert.Nil(t, err) _, ok := accF.(*factory.MetaAccountCreator) diff --git a/data/state/factory/peerAccountCreator_test.go b/data/state/factory/peerAccountCreator_test.go index 7312cd2e2d8..4496bcdae3e 100644 --- a/data/state/factory/peerAccountCreator_test.go +++ b/data/state/factory/peerAccountCreator_test.go @@ -1 +1,55 @@ -package factory +package factory_test + +import ( + "testing" + + "github.com/ElrondNetwork/elrond-go/data/mock" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/data/state/factory" + "github.com/stretchr/testify/assert" +) + +func TestPeerAccountCreator_CreateAccountNilAddress(t *testing.T) { + t.Parallel() + + accF, err := factory.NewAccountFactoryCreator(factory.ValidatorAccount) + assert.Nil(t, err) + + _, ok := accF.(*factory.PeerAccountCreator) + assert.Equal(t, true, ok) + + acc, err := accF.CreateAccount(nil, &mock.AccountTrackerStub{}) + + assert.Nil(t, acc) + assert.Equal(t, err, state.ErrNilAddressContainer) +} + +func TestPeerAccountCreator_CreateAccountNilAccountTraccer(t *testing.T) { + t.Parallel() + + accF, err := factory.NewAccountFactoryCreator(factory.ValidatorAccount) + assert.Nil(t, err) + + _, ok := accF.(*factory.PeerAccountCreator) + assert.Equal(t, true, ok) + + acc, err := accF.CreateAccount(&mock.AddressMock{}, nil) + + assert.Nil(t, acc) + assert.Equal(t, err, state.ErrNilAccountTracker) +} + +func TestPeerAccountCreator_CreateAccountOk(t *testing.T) { + t.Parallel() + + accF, err := factory.NewAccountFactoryCreator(factory.ValidatorAccount) + assert.Nil(t, err) + + _, ok := accF.(*factory.PeerAccountCreator) + assert.Equal(t, true, ok) + + acc, err := accF.CreateAccount(&mock.AddressMock{}, &mock.AccountTrackerStub{}) + + assert.NotNil(t, acc) + assert.Nil(t, err) +} diff --git a/data/state/peerAccount.go b/data/state/peerAccount.go index 45dbd4a8cb9..9d78f253495 100644 --- a/data/state/peerAccount.go +++ 
b/data/state/peerAccount.go @@ -178,6 +178,10 @@ func (a *PeerAccount) DataTrieTracker() DataTrieTracker { // SetAddressWithJournal sets the account's address, saving the old address before changing func (a *PeerAccount) SetAddressWithJournal(address []byte) error { + if len(address) < 1 { + return ErrNilAddress + } + entry, err := NewPeerJournalEntryAddress(a, a.Address) if err != nil { return err @@ -191,6 +195,10 @@ func (a *PeerAccount) SetAddressWithJournal(address []byte) error { // SetSchnorrPublicKeyWithJournal sets the account's address, saving the old address before changing func (a *PeerAccount) SetSchnorrPublicKeyWithJournal(pubKey []byte) error { + if len(pubKey) < 1 { + return ErrNilSchnorrPublicKey + } + entry, err := NewPeerJournalEntrySchnorrPublicKey(a, a.SchnorrPublicKey) if err != nil { return err @@ -204,6 +212,10 @@ func (a *PeerAccount) SetSchnorrPublicKeyWithJournal(pubKey []byte) error { // SetSchnorrPublicKeyWithJournal sets the account's address, saving the old address before changing func (a *PeerAccount) SetBLSPublicKeyWithJournal(pubKey []byte) error { + if len(pubKey) < 1 { + return ErrNilBLSPublicKey + } + entry, err := NewPeerJournalEntryBLSPublicKey(a, a.BLSPublicKey) if err != nil { return err @@ -217,6 +229,10 @@ func (a *PeerAccount) SetBLSPublicKeyWithJournal(pubKey []byte) error { // SetStakeWithJournal sets the account's stake, saving the old stake before changing func (a *PeerAccount) SetStakeWithJournal(stake *big.Int) error { + if stake == nil { + return ErrNilStake + } + entry, err := NewPeerJournalEntryStake(a, a.Stake) if err != nil { return err @@ -303,7 +319,7 @@ func (a *PeerAccount) DecreaseValidatorSuccessRateWithJournal() error { } a.accountTracker.Journalize(entry) - a.ValidatorSuccessRate.NrFailure-- + a.ValidatorSuccessRate.NrFailure++ return a.accountTracker.SaveAccount(a) } diff --git a/data/state/peerAccount_test.go b/data/state/peerAccount_test.go index e5b669fb7f1..8c762964d6d 100644 --- a/data/state/peerAccount_test.go +++ b/data/state/peerAccount_test.go @@ -502,7 +502,7 @@ func TestPeerAccount_IncreaseLeaderSuccessRateWithJournal(t *testing.T) { assert.NotNil(t, acc) assert.Nil(t, err) - assert.Equal(t, 11, acc.LeaderSuccessRate.NrSuccess) + assert.Equal(t, uint32(11), acc.LeaderSuccessRate.NrSuccess) assert.Equal(t, 1, journalizeCalled) assert.Equal(t, 1, saveAccountCalled) } @@ -530,7 +530,7 @@ func TestPeerAccount_IncreaseValidatorSuccessRateWithJournal(t *testing.T) { assert.NotNil(t, acc) assert.Nil(t, err) - assert.Equal(t, 11, acc.ValidatorSuccessRate.NrSuccess) + assert.Equal(t, uint32(11), acc.ValidatorSuccessRate.NrSuccess) assert.Equal(t, 1, journalizeCalled) assert.Equal(t, 1, saveAccountCalled) } @@ -558,7 +558,7 @@ func TestPeerAccount_DecreaseLeaderSuccessRateWithJournal(t *testing.T) { assert.NotNil(t, acc) assert.Nil(t, err) - assert.Equal(t, 11, acc.LeaderSuccessRate.NrFailure) + assert.Equal(t, uint32(11), acc.LeaderSuccessRate.NrFailure) assert.Equal(t, 1, journalizeCalled) assert.Equal(t, 1, saveAccountCalled) } @@ -586,7 +586,7 @@ func TestPeerAccount_DecreaseValidatorSuccessRateWithJournal(t *testing.T) { assert.NotNil(t, acc) assert.Nil(t, err) - assert.Equal(t, 11, acc.ValidatorSuccessRate.NrFailure) + assert.Equal(t, uint32(11), acc.ValidatorSuccessRate.NrFailure) assert.Equal(t, 1, journalizeCalled) assert.Equal(t, 1, saveAccountCalled) } From b629911170fc95c86b34f86b221701a7d74496d6 Mon Sep 17 00:00:00 2001 From: Robert Sasu Date: Mon, 22 Jul 2019 12:40:40 +0300 Subject: [PATCH 039/234] peer account 
factory --- data/state/peerAccountsDB.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/data/state/peerAccountsDB.go b/data/state/peerAccountsDB.go index 7bf2df5b486..5b202313394 100644 --- a/data/state/peerAccountsDB.go +++ b/data/state/peerAccountsDB.go @@ -1 +1,10 @@ package state + +import "github.com/ElrondNetwork/elrond-go/sharding" + +// peerAccountsDB will save and synchronize data from peer processor, plus will synchronize with nodesCoordinator +type peerAccountsDB struct { + *AccountsDB + + shardCoordinator sharding.Coordinator +} From 6f467b524c10ca4c2fcd74d50bd2bf1307be5db1 Mon Sep 17 00:00:00 2001 From: Robert Sasu Date: Mon, 22 Jul 2019 16:27:22 +0300 Subject: [PATCH 040/234] fix after review --- data/address/specialAddresses.go | 6 +- data/feeTx/feeTx.go | 2 +- .../mock/specialAddressHandlerMock.go | 6 +- process/block/preprocess/transactions.go | 3 +- process/coordinator/process.go | 8 +- process/interface.go | 2 +- process/mock/specialAddressHandlerMock.go | 6 +- process/transaction/export_test.go | 9 ++- process/transaction/process.go | 75 +++++++++---------- process/unsigned/feeTxHandler.go | 23 ++++-- 10 files changed, 78 insertions(+), 62 deletions(-) diff --git a/data/address/specialAddresses.go b/data/address/specialAddresses.go index 733049e0f00..10d4aa6dee5 100644 --- a/data/address/specialAddresses.go +++ b/data/address/specialAddresses.go @@ -70,11 +70,11 @@ func (sp *specialAddresses) LeaderAddress() []byte { } // ShardIdForAddress calculates shard id for address -func (sp *specialAddresses) ShardIdForAddress(pubKey []byte) uint32 { +func (sp *specialAddresses) ShardIdForAddress(pubKey []byte) (uint32, error) { convAdr, err := sp.adrConv.CreateAddressFromPublicKeyBytes(pubKey) if err != nil { - return sp.shardCoordinator.NumberOfShards() + return 0, err } - return sp.shardCoordinator.ComputeId(convAdr) + return sp.shardCoordinator.ComputeId(convAdr), nil } diff --git a/data/feeTx/feeTx.go b/data/feeTx/feeTx.go index 1ef6f311eba..4560447e4c2 100644 --- a/data/feeTx/feeTx.go +++ b/data/feeTx/feeTx.go @@ -13,7 +13,7 @@ type FeeTx struct { Nonce uint64 `capid:"0" json:"nonce"` Value *big.Int `capid:"1" json:"value"` RcvAddr []byte `capid:"2" json:"receiver"` - ShardId uint32 `capid:"3" json:"ShardId"` + ShardId uint32 `capid:"3" json:"shardId"` } // Save saves the serialized data of a FeeTx into a stream through Capnp protocol diff --git a/integrationTests/mock/specialAddressHandlerMock.go b/integrationTests/mock/specialAddressHandlerMock.go index c9848b44ddd..2c6f4207c50 100644 --- a/integrationTests/mock/specialAddressHandlerMock.go +++ b/integrationTests/mock/specialAddressHandlerMock.go @@ -4,7 +4,7 @@ type SpecialAddressHandlerMock struct { ElrondCommunityAddressCalled func() []byte LeaderAddressCalled func() []byte BurnAddressCalled func() []byte - ShardIdForAddressCalled func([]byte) uint32 + ShardIdForAddressCalled func([]byte) (uint32, error) } func (sh *SpecialAddressHandlerMock) SetElrondCommunityAddress(elrond []byte) { @@ -37,9 +37,9 @@ func (sh *SpecialAddressHandlerMock) LeaderAddress() []byte { return sh.LeaderAddressCalled() } -func (sh *SpecialAddressHandlerMock) ShardIdForAddress(addr []byte) uint32 { +func (sh *SpecialAddressHandlerMock) ShardIdForAddress(addr []byte) (uint32, error) { if sh.ShardIdForAddressCalled == nil { - return 0 + return 0, nil } return sh.ShardIdForAddressCalled(addr) diff --git a/process/block/preprocess/transactions.go b/process/block/preprocess/transactions.go index 4f8bf40c839..c364b522349 100644 --- 
a/process/block/preprocess/transactions.go +++ b/process/block/preprocess/transactions.go @@ -210,10 +210,9 @@ func (txs *transactions) ProcessBlockTransactions(body block.Body, round uint32, return process.ErrMissingTransaction } - currTx := txInfo.tx err := txs.processAndRemoveBadTransaction( txHash, - currTx, + txInfo.tx, round, miniBlock.SenderShardID, miniBlock.ReceiverShardID, diff --git a/process/coordinator/process.go b/process/coordinator/process.go index 8ccb09e57c0..3ef278caf0b 100644 --- a/process/coordinator/process.go +++ b/process/coordinator/process.go @@ -330,7 +330,6 @@ func (tc *transactionCoordinator) ProcessBlockTransaction(body block.Body, round var errFound error errMutex := sync.Mutex{} - // TODO: think if it is good in parallel or it is needed in sequences wg := sync.WaitGroup{} wg.Add(len(separatedBodies)) @@ -496,13 +495,13 @@ func (tc *transactionCoordinator) processAddedInterimTransactions() block.MiniBl tc.mutInterimProcessors.Lock() resMutex := sync.Mutex{} - // TODO: think if it is good in parallel or it is needed in sequences wg := sync.WaitGroup{} wg.Add(len(tc.interimProcessors)) for key, interimProc := range tc.interimProcessors { if key == block.TxFeeBlock { // this has to be processed last + wg.Done() continue } @@ -669,10 +668,14 @@ func (tc *transactionCoordinator) VerifyCreatedBlockTransactions(body block.Body tc.mutInterimProcessors.Lock() var errFound error + errMutex := sync.Mutex{} + wg := sync.WaitGroup{} + wg.Add(len(tc.interimProcessors)) for key, interimProc := range tc.interimProcessors { if key == block.TxFeeBlock { // this has to be processed last + wg.Done() continue } @@ -687,6 +690,7 @@ func (tc *transactionCoordinator) VerifyCreatedBlockTransactions(body block.Body }(interimProc) } + wg.Wait() tc.mutInterimProcessors.Unlock() if errFound != nil { diff --git a/process/interface.go b/process/interface.go index af73480ef3c..6b36fef70df 100644 --- a/process/interface.go +++ b/process/interface.go @@ -89,7 +89,7 @@ type SpecialAddressHandler interface { SetLeaderAddress(leader []byte) LeaderAddress() []byte BurnAddress() []byte - ShardIdForAddress([]byte) uint32 + ShardIdForAddress([]byte) (uint32, error) } // PreProcessor is an interface used to prepare and process transaction data diff --git a/process/mock/specialAddressHandlerMock.go b/process/mock/specialAddressHandlerMock.go index c9848b44ddd..2c6f4207c50 100644 --- a/process/mock/specialAddressHandlerMock.go +++ b/process/mock/specialAddressHandlerMock.go @@ -4,7 +4,7 @@ type SpecialAddressHandlerMock struct { ElrondCommunityAddressCalled func() []byte LeaderAddressCalled func() []byte BurnAddressCalled func() []byte - ShardIdForAddressCalled func([]byte) uint32 + ShardIdForAddressCalled func([]byte) (uint32, error) } func (sh *SpecialAddressHandlerMock) SetElrondCommunityAddress(elrond []byte) { @@ -37,9 +37,9 @@ func (sh *SpecialAddressHandlerMock) LeaderAddress() []byte { return sh.LeaderAddressCalled() } -func (sh *SpecialAddressHandlerMock) ShardIdForAddress(addr []byte) uint32 { +func (sh *SpecialAddressHandlerMock) ShardIdForAddress(addr []byte) (uint32, error) { if sh.ShardIdForAddressCalled == nil { - return 0 + return 0, nil } return sh.ShardIdForAddressCalled(addr) diff --git a/process/transaction/export_test.go b/process/transaction/export_test.go index b7d4684c65f..5bd3c1a577b 100644 --- a/process/transaction/export_test.go +++ b/process/transaction/export_test.go @@ -6,6 +6,7 @@ import ( "github.com/ElrondNetwork/elrond-go/data/state" 
"github.com/ElrondNetwork/elrond-go/data/transaction" + "github.com/ElrondNetwork/elrond-go/process/unsigned" ) type TxProcessor *txProcessor @@ -33,14 +34,14 @@ func (txProc *txProcessor) IncreaseNonce(acntSrc *state.Account) error { return txProc.increaseNonce(acntSrc) } -func (txProc *txProcessor) SetMinTxFee(minTxFee int64) { +func (txProc *txProcessor) SetMinTxFee(minTxFee uint64) { mutex.Lock() - MinTxFee = minTxFee + unsigned.MinTxFee = minTxFee mutex.Unlock() } -func (txProc *txProcessor) SetMinGasPrice(minGasPrice int64) { +func (txProc *txProcessor) SetMinGasPrice(minGasPrice uint64) { mutex.Lock() - MinGasPrice = minGasPrice + unsigned.MinGasPrice = minGasPrice mutex.Unlock() } diff --git a/process/transaction/process.go b/process/transaction/process.go index 0c8c3772d6a..72b2103dc54 100644 --- a/process/transaction/process.go +++ b/process/transaction/process.go @@ -2,24 +2,22 @@ package transaction import ( "bytes" - "github.com/ElrondNetwork/elrond-go/data" - "github.com/ElrondNetwork/elrond-go/data/feeTx" "math/big" "github.com/ElrondNetwork/elrond-go/core/logger" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/feeTx" "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/data/transaction" "github.com/ElrondNetwork/elrond-go/hashing" "github.com/ElrondNetwork/elrond-go/marshal" "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/unsigned" "github.com/ElrondNetwork/elrond-go/sharding" ) var log = logger.DefaultLogger() -var MinGasPrice = int64(1) -var MinTxFee = int64(1) - // txProcessor implements TransactionProcessor interface and can modify account states according to a transaction type txProcessor struct { accounts state.AccountsAdapter @@ -99,29 +97,13 @@ func (txProc *txProcessor) ProcessTransaction(tx data.TransactionHandler, roundI switch txType { case process.MoveBalance: - currTx, ok := tx.(*transaction.Transaction) - if !ok { - return process.ErrWrongTypeAssertion - } - return txProc.processMoveBalance(currTx, adrSrc, adrDst) + return txProc.processMoveBalance(tx, adrSrc, adrDst) case process.SCDeployment: - currTx, ok := tx.(*transaction.Transaction) - if !ok { - return process.ErrWrongTypeAssertion - } - return txProc.processSCDeployment(currTx, adrSrc, roundIndex) + return txProc.processSCDeployment(tx, adrSrc, roundIndex) case process.SCInvoking: - currTx, ok := tx.(*transaction.Transaction) - if !ok { - return process.ErrWrongTypeAssertion - } - return txProc.processSCInvoking(currTx, adrSrc, adrDst, roundIndex) + return txProc.processSCInvoking(tx, adrSrc, adrDst, roundIndex) case process.TxFee: - currTxFee, ok := tx.(*feeTx.FeeTx) - if !ok { - return process.ErrWrongTypeAssertion - } - return txProc.processAccumulatedTxFees(currTxFee, adrSrc) + return txProc.processAccumulatedTxFees(tx, adrSrc) } return process.ErrWrongTransaction @@ -135,10 +117,10 @@ func (txProc *txProcessor) processTxFee(tx *transaction.Transaction, acntSnd *st cost := big.NewInt(0) cost = cost.Mul(big.NewInt(0).SetUint64(tx.GasPrice), big.NewInt(0).SetUint64(tx.GasLimit)) - txDataLen := int64(len(tx.Data)) + 1 + txDataLen := int64(len(tx.Data)) minFee := big.NewInt(0) - minFee = minFee.Mul(big.NewInt(txDataLen), big.NewInt(MinGasPrice)) - minFee = minFee.Add(minFee, big.NewInt(MinTxFee)) + minFee = minFee.Mul(big.NewInt(txDataLen), big.NewInt(0).SetUint64(unsigned.MinGasPrice)) + minFee = minFee.Add(minFee, big.NewInt(0).SetUint64(unsigned.MinTxFee)) if minFee.Cmp(cost) > 0 { 
return nil, process.ErrNotEnoughFeeInTransactions @@ -163,9 +145,14 @@ func (txProc *txProcessor) processTxFee(tx *transaction.Transaction, acntSnd *st } func (txProc *txProcessor) processAccumulatedTxFees( - currTxFee *feeTx.FeeTx, + tx data.TransactionHandler, adrSrc state.AddressContainer, ) error { + currTxFee, ok := tx.(*feeTx.FeeTx) + if !ok { + return process.ErrWrongTypeAssertion + } + acntSrc, _, err := txProc.getAccounts(adrSrc, adrSrc) if err != nil { return err @@ -188,10 +175,13 @@ func (txProc *txProcessor) processAccumulatedTxFees( } func (txProc *txProcessor) processMoveBalance( - tx *transaction.Transaction, + tx data.TransactionHandler, adrSrc, adrDst state.AddressContainer, ) error { - + currTx, ok := tx.(*transaction.Transaction) + if !ok { + return process.ErrWrongTypeAssertion + } // getAccounts returns acntSrc not nil if the adrSrc is in the node shard, the same, acntDst will be not nil // if adrDst is in the node shard. If an error occurs it will be signaled in err variable. acntSrc, acntDst, err := txProc.getAccounts(adrSrc, adrDst) @@ -199,15 +189,15 @@ func (txProc *txProcessor) processMoveBalance( return err } - currFeeTx, err := txProc.processTxFee(tx, acntSrc) + currFeeTx, err := txProc.processTxFee(currTx, acntSrc) if err != nil { return err } - value := tx.Value + value := currTx.Value // is sender address in node shard if acntSrc != nil { - err = txProc.checkTxValues(acntSrc, value, tx.Nonce) + err = txProc.checkTxValues(acntSrc, value, currTx.Nonce) if err != nil { return err } @@ -232,10 +222,15 @@ func (txProc *txProcessor) processMoveBalance( } func (txProc *txProcessor) processSCDeployment( - tx *transaction.Transaction, + tx data.TransactionHandler, adrSrc state.AddressContainer, roundIndex uint32, ) error { + currTx, ok := tx.(*transaction.Transaction) + if !ok { + return process.ErrWrongTypeAssertion + } + // getAccounts returns acntSrc not nil if the adrSrc is in the node shard, the same, acntDst will be not nil // if adrDst is in the node shard. If an error occurs it will be signaled in err variable. acntSrc, err := txProc.getAccountFromAddress(adrSrc) @@ -243,15 +238,19 @@ func (txProc *txProcessor) processSCDeployment( return err } - err = txProc.scProcessor.DeploySmartContract(tx, acntSrc, roundIndex) + err = txProc.scProcessor.DeploySmartContract(currTx, acntSrc, roundIndex) return err } func (txProc *txProcessor) processSCInvoking( - tx *transaction.Transaction, + tx data.TransactionHandler, adrSrc, adrDst state.AddressContainer, roundIndex uint32, ) error { + currTx, ok := tx.(*transaction.Transaction) + if !ok { + return process.ErrWrongTypeAssertion + } // getAccounts returns acntSrc not nil if the adrSrc is in the node shard, the same, acntDst will be not nil // if adrDst is in the node shard. If an error occurs it will be signaled in err variable. 
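The fee floor enforced in processTxFee above is simple arithmetic: a transaction pays GasPrice * GasLimit and must cover MinTxFee plus MinGasPrice per byte of data, otherwise ErrNotEnoughFeeInTransactions is returned. A hedged sketch of that rule using plain uint64 instead of big.Int; enoughFee and its parameters are illustrative names, and the constants are the defaults declared in process/unsigned (both 1):

// Sketch of the minimum-fee rule enforced by processTxFee, using uint64
// arithmetic instead of big.Int for readability. Values are illustrative.
func enoughFee(gasPrice, gasLimit uint64, dataLen int) bool {
	const minGasPrice, minTxFee = 1, 1 // defaults from process/unsigned

	cost := gasPrice * gasLimit                      // what the sender pays
	minFee := uint64(dataLen)*minGasPrice + minTxFee // floor for this tx

	return cost >= minFee
}

// Example: a tx carrying 100 bytes of data needs cost >= 101, so
// enoughFee(1, 101, 100) == true while enoughFee(1, 100, 100) == false.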
acntSrc, acntDst, err := txProc.getAccounts(adrSrc, adrDst) @@ -259,7 +258,7 @@ func (txProc *txProcessor) processSCInvoking( return err } - err = txProc.scProcessor.ExecuteSmartContractTransaction(tx, acntSrc, acntDst, roundIndex) + err = txProc.scProcessor.ExecuteSmartContractTransaction(currTx, acntSrc, acntDst, roundIndex) return err } diff --git a/process/unsigned/feeTxHandler.go b/process/unsigned/feeTxHandler.go index fd1154b0c2b..20cd00ca581 100644 --- a/process/unsigned/feeTxHandler.go +++ b/process/unsigned/feeTxHandler.go @@ -12,6 +12,12 @@ import ( "sync" ) +// MinGasPrice is the minimal gas price to be paid for any transaction +var MinGasPrice = uint64(1) + +// MinTxFee is the minimal fee to be paid for any transaction +var MinTxFee = uint64(1) + const communityPercentage = 0.1 // 1 = 100%, 0 = 0% const leaderPercentage = 0.4 // 1 = 100%, 0 = 0% const burnPercentage = 0.5 // 1 = 100%, 0 = 0% @@ -70,7 +76,11 @@ func (ftxh *feeTxHandler) CreateAllInterMiniBlocks() map[uint32]*block.MiniBlock miniBlocks := make(map[uint32]*block.MiniBlock) for _, value := range calculatedFeeTxs { - dstShId := ftxh.address.ShardIdForAddress(value.GetRecvAddress()) + dstShId, err := ftxh.address.ShardIdForAddress(value.GetRecvAddress()) + if err != nil { + log.Debug(err.Error()) + continue + } txHash, err := core.CalculateHash(ftxh.marshalizer, ftxh.hasher, value) if err != nil { @@ -101,6 +111,10 @@ func (ftxh *feeTxHandler) VerifyInterMiniBlocks(body block.Body) error { return err } +func (ftxh *feeTxHandler) CreateBlockStarted() { + ftxh.CleanProcessedUTxs() +} + // CleanProcessedUTxs deletes the cached data func (ftxh *feeTxHandler) CleanProcessedUTxs() { ftxh.mutTxs.Lock() @@ -109,6 +123,7 @@ func (ftxh *feeTxHandler) CleanProcessedUTxs() { ftxh.mutTxs.Unlock() } +// AddTxFeeFromBlock adds an existing txfee from block into local cache func (ftxh *feeTxHandler) AddTxFeeFromBlock(tx data.TransactionHandler) { currFeeTx, ok := tx.(*feeTx.FeeTx) if !ok { @@ -173,7 +188,7 @@ func (ftxh *feeTxHandler) createCommunityTx(totalGathered *big.Int) *feeTx.FeeTx return currTx } -// CreateAllUtxs creates all the needed fee transactions +// CreateAllUTxs creates all the needed fee transactions // According to economic paper 50% burn, 40% to the leader, 10% to Elrond community fund func (ftxh *feeTxHandler) CreateAllUTxs() []data.TransactionHandler { ftxh.mutTxs.Lock() @@ -194,9 +209,7 @@ func (ftxh *feeTxHandler) CreateAllUTxs() []data.TransactionHandler { burnTx := ftxh.createBurnTx(totalFee) currFeeTxs := make([]data.TransactionHandler, 0) - currFeeTxs = append(currFeeTxs, leaderTx) - currFeeTxs = append(currFeeTxs, communityTx) - currFeeTxs = append(currFeeTxs, burnTx) + currFeeTxs = append(currFeeTxs, leaderTx, communityTx, burnTx) ftxh.feeTxs = make([]*feeTx.FeeTx, 0) From 6395dfa891b9fc6f3e2bb4c5cd7676ecd3592de1 Mon Sep 17 00:00:00 2001 From: Robert Sasu Date: Mon, 22 Jul 2019 17:30:08 +0300 Subject: [PATCH 041/234] fix after review --- process/coordinator/process.go | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/process/coordinator/process.go b/process/coordinator/process.go index 3ef278caf0b..a34526378c8 100644 --- a/process/coordinator/process.go +++ b/process/coordinator/process.go @@ -458,20 +458,27 @@ func (tc *transactionCoordinator) CreateMbsAndProcessTransactionsFromMe(maxTxRem miniBlocks = append(miniBlocks, interMBs...) 
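CreateAllUTxs above converts the accumulated fee into exactly three unsigned transactions, using the percentages declared at the top of feeTxHandler.go: 10% to the community fund, 40% to the leader, 50% burned. A minimal sketch of those proportions in plain uint64 arithmetic; splitFee is an illustrative name, while the real code builds feeTx.FeeTx values from *big.Int amounts:

// Sketch of the fee split performed by CreateAllUTxs. Giving the burn part
// the remainder avoids losing dust to integer rounding.
func splitFee(totalFee uint64) (community, leader, burn uint64) {
	community = totalFee * 10 / 100      // communityPercentage = 0.1
	leader = totalFee * 40 / 100         // leaderPercentage = 0.4
	burn = totalFee - community - leader // burnPercentage = 0.5
	return
}

// splitFee(1000) => community=100, leader=400, burn=500.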
} + tc.addTxFeeToMatchingMiniBlocks(&miniBlocks) + + return miniBlocks +} + +func (tc *transactionCoordinator) addTxFeeToMatchingMiniBlocks(miniBlocks *block.MiniBlockSlice) { // add txfee transactions to matching blocks interimProc := tc.getInterimProcessor(block.TxFeeBlock) if interimProc == nil { - return miniBlocks + return } txFeeMbs := interimProc.CreateAllInterMiniBlocks() for key, mb := range txFeeMbs { var matchingMBFound bool - for i := 0; i < len(miniBlocks); i++ { - if miniBlocks[i].ReceiverShardID == key && - miniBlocks[i].SenderShardID == tc.shardCoordinator.SelfId() && - miniBlocks[i].Type == block.TxBlock { - miniBlocks[i].TxHashes = append(miniBlocks[i].TxHashes, mb.TxHashes...) + for i := 0; i < len(*miniBlocks); i++ { + currMb := (*miniBlocks)[i] + if currMb.ReceiverShardID == key && + currMb.SenderShardID == tc.shardCoordinator.SelfId() && + currMb.Type == block.TxBlock { + currMb.TxHashes = append(currMb.TxHashes, mb.TxHashes...) matchingMBFound = true break } @@ -482,11 +489,9 @@ func (tc *transactionCoordinator) CreateMbsAndProcessTransactionsFromMe(maxTxRem mb.SenderShardID = tc.shardCoordinator.SelfId() mb.Type = block.TxBlock - miniBlocks = append(miniBlocks, mb) + *miniBlocks = append(*miniBlocks, mb) } } - - return miniBlocks } func (tc *transactionCoordinator) processAddedInterimTransactions() block.MiniBlockSlice { From 1708276cf2dad08a02d4e49581392bafb09fb291 Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Wed, 24 Jul 2019 13:29:42 +0900 Subject: [PATCH 042/234] sharding, consensus, process: Fix review findings Move validator from consensus to sharding Signature verification for metachain blocks should be done on block hash Adapt unit and integration tests --- cmd/node/main.go | 3 +-- consensus/errors.go | 14 ------------ .../multiShard/block/testInitializer.go | 2 +- .../multiShard/metablock/testInitializer.go | 6 ++--- .../smartContract/testInitilalizer.go | 4 ++-- .../block/interceptedRequestHdr_test.go | 7 +----- node/node.go | 4 +--- process/block/interceptedBlockHeader.go | 7 +++--- process/block/interceptedMetaBlockHeader.go | 10 +++++++-- .../block/interceptedMetaBlockHeader_test.go | 1 + .../headerInterceptorBase_test.go | 5 ++--- .../interceptors/headerInterceptor_test.go | 6 ++--- .../metachainHeaderInterceptor.go | 7 +++++- .../metachainHeaderInterceptor_test.go | 15 ++++++++----- sharding/errors.go | 9 ++++++++ sharding/indexHashedNodesCoordinator.go | 15 +++++++++++-- ...dinatorMock.go => nodesCoordinatorMock.go} | 0 {consensus => sharding}/validator.go | 2 +- {consensus => sharding}/validator_test.go | 22 +++++++++---------- 19 files changed, 75 insertions(+), 64 deletions(-) delete mode 100644 consensus/errors.go rename sharding/mock/{NodesCoordinatorMock.go => nodesCoordinatorMock.go} (100%) rename {consensus => sharding}/validator.go (97%) rename {consensus => sharding}/validator_test.go (56%) diff --git a/cmd/node/main.go b/cmd/node/main.go index 6da7042b9ce..7cf5beec2aa 100644 --- a/cmd/node/main.go +++ b/cmd/node/main.go @@ -20,7 +20,6 @@ import ( "github.com/ElrondNetwork/elrond-go/cmd/node/factory" "github.com/ElrondNetwork/elrond-go/config" - "github.com/ElrondNetwork/elrond-go/consensus" "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/core/indexer" "github.com/ElrondNetwork/elrond-go/core/logger" @@ -758,7 +757,7 @@ func createNodesCoordinator( validators := make([]sharding.Validator, 0) for _, pubKey := range pubKeyList { // TODO: the stake needs to be associated to the staking account - validator, err := 
consensus.NewValidator(big.NewInt(0), 0, []byte(pubKey)) + validator, err := sharding.NewValidator(big.NewInt(0), 0, []byte(pubKey)) if err != nil { return nil, err } diff --git a/consensus/errors.go b/consensus/errors.go deleted file mode 100644 index a280f1a5f80..00000000000 --- a/consensus/errors.go +++ /dev/null @@ -1,14 +0,0 @@ -package consensus - -import ( - "errors" -) - -// ErrNilStake signals that a nil stake structure has been provided -var ErrNilStake = errors.New("nil stake") - -// ErrNegativeStake signals that the stake is negative -var ErrNegativeStake = errors.New("negative stake") - -// ErrNilPubKey signals that the public key is nil -var ErrNilPubKey = errors.New("nil public key") diff --git a/integrationTests/multiShard/block/testInitializer.go b/integrationTests/multiShard/block/testInitializer.go index 4065a86618b..395b5e81f62 100644 --- a/integrationTests/multiShard/block/testInitializer.go +++ b/integrationTests/multiShard/block/testInitializer.go @@ -433,7 +433,7 @@ func genValidatorsFromPubKeys(pubKeysMap map[uint32][]string) map[uint32][]shard for shardId, shardNodesPks := range pubKeysMap { shardValidators := make([]sharding.Validator, 0) for i := 0; i < len(shardNodesPks); i++ { - v, _ := consensus.NewValidator(big.NewInt(0), 1, []byte(shardNodesPks[i])) + v, _ := sharding.NewValidator(big.NewInt(0), 1, []byte(shardNodesPks[i])) shardValidators = append(shardValidators, v) } validatorsMap[shardId] = shardValidators diff --git a/integrationTests/multiShard/metablock/testInitializer.go b/integrationTests/multiShard/metablock/testInitializer.go index f9cdcea3abe..fff103f1715 100644 --- a/integrationTests/multiShard/metablock/testInitializer.go +++ b/integrationTests/multiShard/metablock/testInitializer.go @@ -5,7 +5,7 @@ import ( "crypto/ecdsa" "encoding/hex" "fmt" - "github.com/ElrondNetwork/elrond-go/process/coordinator" + "math/big" "math/rand" "strings" "sync/atomic" @@ -44,6 +44,7 @@ import ( "github.com/ElrondNetwork/elrond-go/p2p/loadBalancer" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/block" + "github.com/ElrondNetwork/elrond-go/process/coordinator" "github.com/ElrondNetwork/elrond-go/process/factory" metaProcess "github.com/ElrondNetwork/elrond-go/process/factory/metachain" "github.com/ElrondNetwork/elrond-go/process/factory/shard" @@ -53,7 +54,6 @@ import ( "github.com/ElrondNetwork/elrond-go/storage/storageUnit" "github.com/btcsuite/btcd/btcec" libp2pCrypto "github.com/libp2p/go-libp2p-core/crypto" - "math/big" ) var r *rand.Rand @@ -89,7 +89,7 @@ func genValidatorsFromPubKeys(pubKeysMap map[uint32][]string) map[uint32][]shard for shardId, shardNodesPks := range pubKeysMap { shardValidators := make([]sharding.Validator, 0) for i := 0; i < len(shardNodesPks); i++ { - v, _ := consensus.NewValidator(big.NewInt(0), 1, []byte(shardNodesPks[i])) + v, _ := sharding.NewValidator(big.NewInt(0), 1, []byte(shardNodesPks[i])) shardValidators = append(shardValidators, v) } validatorsMap[shardId] = shardValidators diff --git a/integrationTests/multiShard/smartContract/testInitilalizer.go b/integrationTests/multiShard/smartContract/testInitilalizer.go index 37a0a57755f..4c84e06e17e 100644 --- a/integrationTests/multiShard/smartContract/testInitilalizer.go +++ b/integrationTests/multiShard/smartContract/testInitilalizer.go @@ -55,7 +55,7 @@ import ( "github.com/ElrondNetwork/elrond-go/storage" "github.com/ElrondNetwork/elrond-go/storage/memorydb" "github.com/ElrondNetwork/elrond-go/storage/storageUnit" - 
"github.com/ElrondNetwork/elrond-vm-common" + vmcommon "github.com/ElrondNetwork/elrond-vm-common" "github.com/btcsuite/btcd/btcec" libp2pCrypto "github.com/libp2p/go-libp2p-core/crypto" ) @@ -445,7 +445,7 @@ func genValidatorsFromPubKeys(pubKeysMap map[uint32][]string) map[uint32][]shard for shardId, shardNodesPks := range pubKeysMap { shardValidators := make([]sharding.Validator, 0) for i := 0; i < len(shardNodesPks); i++ { - v, _ := consensus.NewValidator(big.NewInt(0), 1, []byte(shardNodesPks[i])) + v, _ := sharding.NewValidator(big.NewInt(0), 1, []byte(shardNodesPks[i])) shardValidators = append(shardValidators, v) } validatorsMap[shardId] = shardValidators diff --git a/integrationTests/singleShard/block/interceptedRequestHdr_test.go b/integrationTests/singleShard/block/interceptedRequestHdr_test.go index 7c1bbdafcc6..30405fa8b78 100644 --- a/integrationTests/singleShard/block/interceptedRequestHdr_test.go +++ b/integrationTests/singleShard/block/interceptedRequestHdr_test.go @@ -9,7 +9,6 @@ import ( "testing" "time" - "github.com/ElrondNetwork/elrond-go/consensus" "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/data/typeConverters/uint64ByteSlice" "github.com/ElrondNetwork/elrond-go/dataRetriever" @@ -28,7 +27,7 @@ func genValidatorsFromPubKeys(pubKeysMap map[uint32][]string) map[uint32][]shard for shardId, shardNodesPks := range pubKeysMap { shardValidators := make([]sharding.Validator, 0) for i := 0; i < len(shardNodesPks); i++ { - v, _ := consensus.NewValidator(big.NewInt(0), 1, []byte(shardNodesPks[i])) + v, _ := sharding.NewValidator(big.NewInt(0), 1, []byte(shardNodesPks[i])) shardValidators = append(shardValidators, v) } validatorsMap[shardId] = shardValidators @@ -98,8 +97,6 @@ func TestNode_GenerateSendInterceptHeaderByNonceWithNetMessenger(t *testing.T) { //Step 1. 
Generate 2 headers, one will be stored in datapool, the other one in storage hdr1 := block.Header{ Nonce: 0, - PubKeysBitmap: nil, - Signature: nil, PrevHash: []byte("prev hash"), TimeStamp: uint64(time.Now().Unix()), Round: 1, @@ -114,8 +111,6 @@ func TestNode_GenerateSendInterceptHeaderByNonceWithNetMessenger(t *testing.T) { hdr2 := block.Header{ Nonce: 0, - PubKeysBitmap: nil, - Signature: nil, PrevHash: []byte("prev hash"), TimeStamp: uint64(time.Now().Unix()), Round: 1, diff --git a/node/node.go b/node/node.go index bd5aeef1dc8..cd83c0b1d94 100644 --- a/node/node.go +++ b/node/node.go @@ -88,7 +88,6 @@ type Node struct { metaDataPool dataRetriever.MetaPoolsHolder store dataRetriever.StorageService shardCoordinator sharding.Coordinator - nodesCoordinator sharding.NodesCoordinator consensusTopic string consensusType string @@ -290,7 +289,6 @@ func (n *Node) StartConsensus() error { n.syncTimer, nCoordinator, ) - if err != nil { return err } @@ -528,7 +526,7 @@ func (n *Node) createValidatorsForShard(nodesMap map[uint32][]sharding.Validator nodesMap[shId] = make([]sharding.Validator, nodesInShard) for i := 0; i < nodesInShard; i++ { - validator, err := consensus.NewValidator(big.NewInt(0), 0, []byte(n.initialNodesPubkeys[shId][i])) + validator, err := sharding.NewValidator(big.NewInt(0), 0, []byte(n.initialNodesPubkeys[shId][i])) if err != nil { return err } diff --git a/process/block/interceptedBlockHeader.go b/process/block/interceptedBlockHeader.go index 8d762ad82fe..7940ca88fbb 100644 --- a/process/block/interceptedBlockHeader.go +++ b/process/block/interceptedBlockHeader.go @@ -1,6 +1,7 @@ package block import ( + "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/crypto" "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/hashing" @@ -114,7 +115,7 @@ func (inHdr *InterceptedHeader) Integrity(coordinator sharding.Coordinator) erro } } -// VerifySig verifies a signature +// VerifySig verifies the intercepted Header block signature func (inHdr *InterceptedHeader) VerifySig() error { randSeed := inHdr.GetPrevRandSeed() bitmap := inHdr.GetPubKeysBitmap() @@ -127,6 +128,7 @@ func (inHdr *InterceptedHeader) VerifySig() error { return process.ErrBlockProposerSignatureMissing } + consensusPubKeys, err := inHdr.nodesCoordinator.GetValidatorsPublicKeys(randSeed) if err != nil { return err @@ -148,12 +150,11 @@ func (inHdr *InterceptedHeader) VerifySig() error { headerCopy.Signature = nil headerCopy.PubKeysBitmap = nil - headerBytes, err := inHdr.marshalizer.Marshal(headerCopy) + hash, err := core.CalculateHash(inHdr.marshalizer, inHdr.hasher, headerCopy) if err != nil { return err } - hash := inHdr.hasher.Compute(string(headerBytes)) err = verifier.Verify(hash, bitmap) return err diff --git a/process/block/interceptedMetaBlockHeader.go b/process/block/interceptedMetaBlockHeader.go index 6dc385f2e38..b033cf4c16b 100644 --- a/process/block/interceptedMetaBlockHeader.go +++ b/process/block/interceptedMetaBlockHeader.go @@ -1,8 +1,10 @@ package block import ( + "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/crypto" "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/hashing" "github.com/ElrondNetwork/elrond-go/marshal" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/sharding" @@ -16,6 +18,7 @@ type InterceptedMetaHeader struct { hash []byte nodesCoordinator sharding.NodesCoordinator marshalizer marshal.Marshalizer + hasher hashing.Hasher } // 
NewInterceptedHeader creates a new instance of InterceptedHeader struct @@ -23,6 +26,7 @@ func NewInterceptedMetaHeader( multiSigVerifier crypto.MultiSigVerifier, nodesCoordinator sharding.NodesCoordinator, marshalizer marshal.Marshalizer, + hasher hashing.Hasher, ) *InterceptedMetaHeader { return &InterceptedMetaHeader{ @@ -30,6 +34,7 @@ func NewInterceptedMetaHeader( multiSigVerifier: multiSigVerifier, nodesCoordinator: nodesCoordinator, marshalizer: marshalizer, + hasher: hasher, } } @@ -115,6 +120,7 @@ func (imh *InterceptedMetaHeader) VerifySig() error { return process.ErrBlockProposerSignatureMissing } + consensusPubKeys, err := imh.nodesCoordinator.GetValidatorsPublicKeys(randSeed) if err != nil { return err @@ -136,12 +142,12 @@ func (imh *InterceptedMetaHeader) VerifySig() error { headerCopy.Signature = nil headerCopy.PubKeysBitmap = nil - headerBytes, err := imh.marshalizer.Marshal(headerCopy) + hash, err := core.CalculateHash(imh.marshalizer, imh.hasher, headerCopy) if err != nil { return err } - err = verifier.Verify(headerBytes, bitmap) + err = verifier.Verify(hash, bitmap) return err } diff --git a/process/block/interceptedMetaBlockHeader_test.go b/process/block/interceptedMetaBlockHeader_test.go index 11a3b91fed0..43dd2810320 100644 --- a/process/block/interceptedMetaBlockHeader_test.go +++ b/process/block/interceptedMetaBlockHeader_test.go @@ -15,6 +15,7 @@ func createTestInterceptedMetaHeader() *block.InterceptedMetaHeader { mock.NewMultiSigner(), &mock.NodesCoordinatorMock{}, &mock.MarshalizerMock{Fail: false}, + mock.HasherMock{}, ) } diff --git a/process/block/interceptors/headerInterceptorBase_test.go b/process/block/interceptors/headerInterceptorBase_test.go index c9ba001875b..d4073faa9fe 100644 --- a/process/block/interceptors/headerInterceptorBase_test.go +++ b/process/block/interceptors/headerInterceptorBase_test.go @@ -6,7 +6,6 @@ import ( "math/big" "testing" - "github.com/ElrondNetwork/elrond-go/consensus" dataBlock "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/block" @@ -241,13 +240,13 @@ func createNodesCoordinator() sharding.NodesCoordinator { shardValidators := make([]sharding.Validator, 0) for i := 0; i < 16; i++ { pubKeyStr := fmt.Sprintf("pk_shard0_%d", i) - v, _ := consensus.NewValidator(big.NewInt(0), 1, []byte(pubKeyStr)) + v, _ := sharding.NewValidator(big.NewInt(0), 1, []byte(pubKeyStr)) shardValidators = append(shardValidators, v) } //metachain pubKeyBytes := []byte("pk_meta") - v, _ := consensus.NewValidator(big.NewInt(0), 1, pubKeyBytes) + v, _ := sharding.NewValidator(big.NewInt(0), 1, pubKeyBytes) validators[0] = shardValidators validators[sharding.MetachainShardId] = []sharding.Validator{v} diff --git a/process/block/interceptors/headerInterceptor_test.go b/process/block/interceptors/headerInterceptor_test.go index 303168a3cda..c95111b8272 100644 --- a/process/block/interceptors/headerInterceptor_test.go +++ b/process/block/interceptors/headerInterceptor_test.go @@ -9,7 +9,6 @@ import ( "testing" "time" - "github.com/ElrondNetwork/elrond-go/consensus" dataBlock "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/process" @@ -25,12 +24,11 @@ var durTimeout = time.Duration(time.Second) func generateValidatorsMap(shardSize, metachainSize, nbShards uint32) map[uint32][]sharding.Validator { nodes := make(map[uint32][]sharding.Validator) - // shards for shard := uint32(0); shard 
< nbShards; shard++ { shardNodes := make([]sharding.Validator, 0) for valIdx := uint32(0); valIdx < shardSize; valIdx++ { pk := fmt.Sprintf("pubKey_sh%d_node%d", shard, valIdx) - v, _ := consensus.NewValidator(big.NewInt(0), 1, []byte(pk)) + v, _ := sharding.NewValidator(big.NewInt(0), 1, []byte(pk)) shardNodes = append(shardNodes, v) } nodes[shard] = shardNodes @@ -39,7 +37,7 @@ func generateValidatorsMap(shardSize, metachainSize, nbShards uint32) map[uint32 metaNodes := make([]sharding.Validator, 0) for mValIdx := uint32(0); mValIdx < metachainSize; mValIdx++ { pk := fmt.Sprintf("pubKey_meta_node%d", mValIdx) - v, _ := consensus.NewValidator(big.NewInt(0), 1, []byte(pk)) + v, _ := sharding.NewValidator(big.NewInt(0), 1, []byte(pk)) metaNodes = append(metaNodes, v) } nodes[sharding.MetachainShardId] = metaNodes diff --git a/process/block/interceptors/metachainHeaderInterceptor.go b/process/block/interceptors/metachainHeaderInterceptor.go index 405c4d16432..dd78f932036 100644 --- a/process/block/interceptors/metachainHeaderInterceptor.go +++ b/process/block/interceptors/metachainHeaderInterceptor.go @@ -85,7 +85,12 @@ func (mhi *MetachainHeaderInterceptor) ProcessReceivedMessage(message p2p.Messag return err } - metaHdrIntercepted := block.NewInterceptedMetaHeader(mhi.multiSigVerifier, mhi.nodesCoordinator, mhi.marshalizer) + metaHdrIntercepted := block.NewInterceptedMetaHeader( + mhi.multiSigVerifier, + mhi.nodesCoordinator, + mhi.marshalizer, + mhi.hasher, + ) err = mhi.marshalizer.Unmarshal(metaHdrIntercepted, message.Data()) if err != nil { return err diff --git a/process/block/interceptors/metachainHeaderInterceptor_test.go b/process/block/interceptors/metachainHeaderInterceptor_test.go index 726de1683df..31514485734 100644 --- a/process/block/interceptors/metachainHeaderInterceptor_test.go +++ b/process/block/interceptors/metachainHeaderInterceptor_test.go @@ -282,6 +282,7 @@ func TestMetachainHeaderInterceptor_ProcessReceivedMessageSanityCheckFailedShoul metachainHeaders := &mock.CacherStub{} metachainStorer := &mock.StorerStub{} marshalizer := &mock.MarshalizerMock{} + hasher := mock.HasherMock{} multisigner := mock.NewMultiSigner() nodesCoordinator := mock.NewNodesCoordinatorMock() @@ -291,12 +292,12 @@ func TestMetachainHeaderInterceptor_ProcessReceivedMessageSanityCheckFailedShoul &mock.Uint64SyncMapCacherStub{}, metachainStorer, multisigner, - mock.HasherMock{}, + hasher, mock.NewOneShardCoordinatorMock(), nodesCoordinator, ) - hdr := block.NewInterceptedMetaHeader(multisigner, nodesCoordinator, marshalizer) + hdr := block.NewInterceptedMetaHeader(multisigner, nodesCoordinator, marshalizer, hasher) buff, _ := marshalizer.Marshal(hdr) msg := &mock.P2PMessageMock{ DataField: buff, @@ -309,6 +310,7 @@ func TestMetachainHeaderInterceptor_ProcessReceivedMessageValsOkShouldWork(t *te t.Parallel() marshalizer := &mock.MarshalizerMock{} + hasher := mock.HasherMock{} chanDone := make(chan struct{}, 1) testedNonce := uint64(67) metachainHeaders := &mock.CacherStub{} @@ -327,12 +329,12 @@ func TestMetachainHeaderInterceptor_ProcessReceivedMessageValsOkShouldWork(t *te metachainHeadersNonces, metachainStorer, multisigner, - mock.HasherMock{}, + hasher, mock.NewOneShardCoordinatorMock(), nodesCoordinator, ) - hdr := block.NewInterceptedMetaHeader(multisigner, nodesCoordinator, marshalizer) + hdr := block.NewInterceptedMetaHeader(multisigner, nodesCoordinator, marshalizer, hasher) hdr.Nonce = testedNonce hdr.PrevHash = make([]byte, 0) hdr.PubKeysBitmap = []byte{1, 0, 0} @@ -391,6 +393,7 @@ 
func TestMetachainHeaderInterceptor_ProcessReceivedMessageIsInStorageShouldNotAd t.Parallel() marshalizer := &mock.MarshalizerMock{} + hasher := mock.HasherMock{} chanDone := make(chan struct{}, 1) testedNonce := uint64(67) multisigner := mock.NewMultiSigner() @@ -410,12 +413,12 @@ func TestMetachainHeaderInterceptor_ProcessReceivedMessageIsInStorageShouldNotAd metachainHeadersNonces, metachainStorer, multisigner, - mock.HasherMock{}, + hasher, mock.NewOneShardCoordinatorMock(), nodesCoordinator, ) - hdr := block.NewInterceptedMetaHeader(multisigner, nodesCoordinator, marshalizer) + hdr := block.NewInterceptedMetaHeader(multisigner, nodesCoordinator, marshalizer, hasher) hdr.Nonce = testedNonce hdr.PrevHash = make([]byte, 0) hdr.PubKeysBitmap = []byte{1, 0, 0} diff --git a/sharding/errors.go b/sharding/errors.go index 291653691b6..8a9314e5514 100644 --- a/sharding/errors.go +++ b/sharding/errors.go @@ -13,6 +13,9 @@ var ErrInvalidShardId = errors.New("shard id must be smaller than the total numb // ErrShardIdOutOfRange signals an error when shard id is out of range var ErrShardIdOutOfRange = errors.New("shard id out of range") +// ErrNilPubKey signals that the public key is nil +var ErrNilPubKey = errors.New("nil public key") + // ErrNoPubKeys signals an error when public keys are missing var ErrNoPubKeys = errors.New("no public keys defined") @@ -60,3 +63,9 @@ var ErrNilRandomness = errors.New("nil randomness source") // ErrNilHasher signals that a nil hasher has been provided var ErrNilHasher = errors.New("nil hasher") + +// ErrNilStake signals that a nil stake structure has been provided +var ErrNilStake = errors.New("nil stake") + +// ErrNegativeStake signals that the stake is negative +var ErrNegativeStake = errors.New("negative stake") diff --git a/sharding/indexHashedNodesCoordinator.go b/sharding/indexHashedNodesCoordinator.go index 8ddd2cb84bc..cfb8c45b879 100644 --- a/sharding/indexHashedNodesCoordinator.go +++ b/sharding/indexHashedNodesCoordinator.go @@ -24,6 +24,18 @@ func NewIndexHashedNodesCoordinator( shardId uint32, nbShards uint32, ) (*indexHashedNodesCoordinator, error) { + if consensusGroupSize < 1 { + return nil, ErrInvalidConsensusGroupSize + } + + if nbShards < 1 { + return nil, ErrInvalidNumberOfShards + } + + if shardId >= nbShards && shardId != MetachainShardId { + return nil, ErrInvalidShardId + } + if hasher == nil { return nil, ErrNilHasher } @@ -51,6 +63,7 @@ func (ihgs *indexHashedNodesCoordinator) LoadNodesPerShards(nodes map[uint32][]V } ihgs.nodesMap = nodes + ihgs.expandedEligibleList = ihgs.expandEligibleList() return nil } @@ -73,8 +86,6 @@ func (ihgs *indexHashedNodesCoordinator) ComputeValidatorsGroup(randomness []byt return nil, ErrNilRandomness } - ihgs.expandedEligibleList = ihgs.expandEligibleList() - tempList := make([]Validator, 0) for startIdx := 0; startIdx < ihgs.consensusGroupSize; startIdx++ { diff --git a/sharding/mock/NodesCoordinatorMock.go b/sharding/mock/nodesCoordinatorMock.go similarity index 100% rename from sharding/mock/NodesCoordinatorMock.go rename to sharding/mock/nodesCoordinatorMock.go diff --git a/consensus/validator.go b/sharding/validator.go similarity index 97% rename from consensus/validator.go rename to sharding/validator.go index ad182a66154..e6d4a65913f 100644 --- a/consensus/validator.go +++ b/sharding/validator.go @@ -1,4 +1,4 @@ -package consensus +package sharding import ( "math/big" diff --git a/consensus/validator_test.go b/sharding/validator_test.go similarity index 56% rename from consensus/validator_test.go 
rename to sharding/validator_test.go index 73dbf7ff572..a0db5f5e122 100644 --- a/consensus/validator_test.go +++ b/sharding/validator_test.go @@ -1,44 +1,44 @@ -package consensus_test +package sharding_test import ( "math/big" "testing" - "github.com/ElrondNetwork/elrond-go/consensus" + "github.com/ElrondNetwork/elrond-go/sharding" "github.com/stretchr/testify/assert" ) func TestValidator_NewValidatorShouldFailOnNilStake(t *testing.T) { t.Parallel() - validator, err := consensus.NewValidator(nil, 0, []byte("pk1")) + validator, err := sharding.NewValidator(nil, 0, []byte("pk1")) assert.Nil(t, validator) - assert.Equal(t, consensus.ErrNilStake, err) + assert.Equal(t, sharding.ErrNilStake, err) } func TestValidator_NewValidatorShouldFailOnNegativeStake(t *testing.T) { t.Parallel() - validator, err := consensus.NewValidator(big.NewInt(-1), 0, []byte("pk1")) + validator, err := sharding.NewValidator(big.NewInt(-1), 0, []byte("pk1")) assert.Nil(t, validator) - assert.Equal(t, consensus.ErrNegativeStake, err) + assert.Equal(t, sharding.ErrNegativeStake, err) } func TestValidator_NewValidatorShouldFailOnNilPublickKey(t *testing.T) { t.Parallel() - validator, err := consensus.NewValidator(big.NewInt(0), 0, nil) + validator, err := sharding.NewValidator(big.NewInt(0), 0, nil) assert.Nil(t, validator) - assert.Equal(t, consensus.ErrNilPubKey, err) + assert.Equal(t, sharding.ErrNilPubKey, err) } func TestValidator_NewValidatorShouldWork(t *testing.T) { t.Parallel() - validator, err := consensus.NewValidator(big.NewInt(0), 0, []byte("pk1")) + validator, err := sharding.NewValidator(big.NewInt(0), 0, []byte("pk1")) assert.NotNil(t, validator) assert.Nil(t, err) @@ -47,7 +47,7 @@ func TestValidator_NewValidatorShouldWork(t *testing.T) { func TestValidator_StakeShouldWork(t *testing.T) { t.Parallel() - validator, _ := consensus.NewValidator(big.NewInt(1), 0, []byte("pk1")) + validator, _ := sharding.NewValidator(big.NewInt(1), 0, []byte("pk1")) assert.Equal(t, big.NewInt(1), validator.Stake()) } @@ -55,7 +55,7 @@ func TestValidator_StakeShouldWork(t *testing.T) { func TestValidator_PubKeyShouldWork(t *testing.T) { t.Parallel() - validator, _ := consensus.NewValidator(big.NewInt(0), 0, []byte("pk1")) + validator, _ := sharding.NewValidator(big.NewInt(0), 0, []byte("pk1")) assert.Equal(t, []byte("pk1"), validator.PubKey()) } From 8d6dbc0a3485844b2d6fdddbcc6a09718669abd6 Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Wed, 24 Jul 2019 14:50:52 +0900 Subject: [PATCH 043/234] sharding: rename LoadNodesPerShards to SetNodesPerShards --- cmd/node/main.go | 24 ++- consensus/mock/validatorGroupSelectorMock.go | 2 +- .../frontend/wallet/txInterception_test.go | 8 +- .../multiShard/block/testInitializer.go | 4 +- .../multiShard/metablock/testInitializer.go | 18 +- .../smartContract/testInitilalizer.go | 4 +- .../multiShard/transaction/testInitializer.go | 8 +- .../block/interceptedRequestHdr_test.go | 20 ++- .../interceptedRequestTxBlockBody_test.go | 8 +- .../transaction/interceptedBulkTx_test.go | 8 +- .../transaction/interceptedResolvedTx_test.go | 8 +- node/node.go | 18 +- .../headerInterceptorBase_test.go | 2 +- .../interceptors/headerInterceptor_test.go | 6 +- process/mock/nodesCoordinatorMock.go | 2 +- sharding/indexHashedNodesCoordinator.go | 10 +- sharding/indexHashedNodesCoordinator_test.go | 167 ++++++++++-------- sharding/interface.go | 2 +- sharding/mock/nodesCoordinatorMock.go | 2 +- 19 files changed, 201 insertions(+), 120 deletions(-) diff --git a/cmd/node/main.go b/cmd/node/main.go index 
7cf5beec2aa..9ea1914b2af 100644 --- a/cmd/node/main.go +++ b/cmd/node/main.go @@ -37,7 +37,7 @@ import ( "github.com/ElrondNetwork/elrond-go/process/smartContract" "github.com/ElrondNetwork/elrond-go/process/smartContract/hooks" "github.com/ElrondNetwork/elrond-go/sharding" - vmcommon "github.com/ElrondNetwork/elrond-vm-common" + "github.com/ElrondNetwork/elrond-vm-common" "github.com/ElrondNetwork/elrond-vm/iele/elrond/node/endpoint" "github.com/google/gops/agent" "github.com/pkg/profile" @@ -689,7 +689,7 @@ func createShardCoordinator( pubKey crypto.PublicKey, settingsConfig config.GeneralSettingsConfig, log *logger.Logger, -) (shardCoordinator sharding.Coordinator, err error) { +) (sharding.Coordinator, error) { selfShardId, err := getShardIdFromNodePubKey(pubKey, nodesConfig) if err == sharding.ErrPublicKeyNotFoundInGenesis { log.Info("Starting as observer node...") @@ -708,7 +708,7 @@ func createShardCoordinator( } log.Info(fmt.Sprintf("Starting in shard: %s", shardName)) - shardCoordinator, err = sharding.NewMultiShardCoordinator(nodesConfig.NumberOfShards(), selfShardId) + shardCoordinator, err := sharding.NewMultiShardCoordinator(nodesConfig.NumberOfShards(), selfShardId) if err != nil { return nil, err } @@ -740,16 +740,6 @@ func createNodesCoordinator( consensusGroupSize = int(nodesConfig.ConsensusGroupSize) } - nodesCoordinator, err := sharding.NewIndexHashedNodesCoordinator( - consensusGroupSize, - hasher, - shardId, - nbShards, - ) - if err != nil { - return nil, err - } - initNodesPubKeys := nodesConfig.InitialNodesPubKeys() initValidators := make(map[uint32][]sharding.Validator) @@ -767,7 +757,13 @@ func createNodesCoordinator( initValidators[shardId] = validators } - err = nodesCoordinator.LoadNodesPerShards(initValidators) + nodesCoordinator, err := sharding.NewIndexHashedNodesCoordinator( + consensusGroupSize, + hasher, + shardId, + nbShards, + initValidators, + ) if err != nil { return nil, err } diff --git a/consensus/mock/validatorGroupSelectorMock.go b/consensus/mock/validatorGroupSelectorMock.go index b662c91f48d..3f16e81f374 100644 --- a/consensus/mock/validatorGroupSelectorMock.go +++ b/consensus/mock/validatorGroupSelectorMock.go @@ -57,7 +57,7 @@ func (ncm NodesCoordinatorMock) ConsensusGroupSize() int { panic("implement me") } -func (ncm NodesCoordinatorMock) LoadNodesPerShards(map[uint32][]sharding.Validator) error { +func (ncm NodesCoordinatorMock) SetNodesPerShards(map[uint32][]sharding.Validator) error { return nil } diff --git a/integrationTests/frontend/wallet/txInterception_test.go b/integrationTests/frontend/wallet/txInterception_test.go index 3a031ccf7e1..dbdc4395cbe 100644 --- a/integrationTests/frontend/wallet/txInterception_test.go +++ b/integrationTests/frontend/wallet/txInterception_test.go @@ -108,7 +108,13 @@ func testInterceptedTxFromFrontendGeneratedParams( accntAdapter := createAccountsDB() shardCoordinator := &sharding.OneShardCoordinator{} - nodesCoordinator, _ := sharding.NewIndexHashedNodesCoordinator(1, mock.HasherMock{}, 0, 1) + nodesCoordinator, _ := sharding.NewIndexHashedNodesCoordinator( + 1, + mock.HasherMock{}, + 0, + 1, + make(map[uint32][]sharding.Validator), + ) n, _, sk, _ := createNetNode(dPool, accntAdapter, shardCoordinator, nodesCoordinator) diff --git a/integrationTests/multiShard/block/testInitializer.go b/integrationTests/multiShard/block/testInitializer.go index 395b5e81f62..0d5e81c8103 100644 --- a/integrationTests/multiShard/block/testInitializer.go +++ b/integrationTests/multiShard/block/testInitializer.go @@ -471,6 
+471,7 @@ func createNodes( testHasher, uint32(shardId), uint32(numOfShards), + make(map[uint32][]sharding.Validator), ) shardNodesCoordinators = append(shardNodesCoordinators, nodesCoordinator) @@ -546,6 +547,7 @@ func createNodes( testHasher, sharding.MetachainShardId, uint32(numOfShards), + make(map[uint32][]sharding.Validator), ) metaNodes[i] = createMetaNetNode( @@ -568,7 +570,7 @@ func createNodes( for _, shardCoord := range nodesCoordinators { for j := 0; j < len(shardCoord); j++ { - shardCoord[j].LoadNodesPerShards(mapValidators) + shardCoord[j].SetNodesPerShards(mapValidators) } } diff --git a/integrationTests/multiShard/metablock/testInitializer.go b/integrationTests/multiShard/metablock/testInitializer.go index fff103f1715..bd2b280b938 100644 --- a/integrationTests/multiShard/metablock/testInitializer.go +++ b/integrationTests/multiShard/metablock/testInitializer.go @@ -196,7 +196,13 @@ func createNodes( nodes := make([]*testNode, nodesInMetachain+1) //first node is a shard node shardCoordinator, _ := sharding.NewMultiShardCoordinator(1, senderShard) - nodesCoordinator, _ := sharding.NewIndexHashedNodesCoordinator(1, hasher, senderShard, 1) + nodesCoordinator, _ := sharding.NewIndexHashedNodesCoordinator( + 1, + hasher, + senderShard, + 1, + make(map[uint32][]sharding.Validator), + ) nodes[0] = createShardNetNode( createTestShardDataPool(), @@ -214,7 +220,13 @@ func createNodes( metaPubKeys := make([]string, 0) for i := 0; i < nodesInMetachain; i++ { shardCoordinator, _ = sharding.NewMultiShardCoordinator(1, sharding.MetachainShardId) - nodesCoordinator, _ := sharding.NewIndexHashedNodesCoordinator(1, hasher, sharding.MetachainShardId, 1) + nodesCoordinator, _ := sharding.NewIndexHashedNodesCoordinator( + 1, + hasher, + sharding.MetachainShardId, + 1, + make(map[uint32][]sharding.Validator), + ) nodes[i+1] = createMetaNetNode( createTestMetaDataPool(), createAccountsDB(), @@ -233,7 +245,7 @@ func createNodes( for _, nodeCoordList := range nodesCoordMap { for _, nodeCoord := range nodeCoordList { - nodeCoord.LoadNodesPerShards(valMap) + nodeCoord.SetNodesPerShards(valMap) } } diff --git a/integrationTests/multiShard/smartContract/testInitilalizer.go b/integrationTests/multiShard/smartContract/testInitilalizer.go index 4c84e06e17e..0a840087cce 100644 --- a/integrationTests/multiShard/smartContract/testInitilalizer.go +++ b/integrationTests/multiShard/smartContract/testInitilalizer.go @@ -483,6 +483,7 @@ func createNodes( testHasher, uint32(shardId), uint32(numOfShards), + make(map[uint32][]sharding.Validator), ) shardNodesCoordinators = append(shardNodesCoordinators, nodesCoordinator) @@ -561,6 +562,7 @@ func createNodes( testHasher, sharding.MetachainShardId, uint32(numOfShards), + make(map[uint32][]sharding.Validator), ) metaNodes[i] = createMetaNetNode( @@ -583,7 +585,7 @@ func createNodes( for _, shardCoord := range nodesCoordinators { for j := 0; j < len(shardCoord); j++ { - shardCoord[j].LoadNodesPerShards(mapValidators) + shardCoord[j].SetNodesPerShards(mapValidators) } } diff --git a/integrationTests/multiShard/transaction/testInitializer.go b/integrationTests/multiShard/transaction/testInitializer.go index 0c4f8a5cbbd..e19cdd18353 100644 --- a/integrationTests/multiShard/transaction/testInitializer.go +++ b/integrationTests/multiShard/transaction/testInitializer.go @@ -369,7 +369,13 @@ func createNode( } shardCoordinator, _ := sharding.NewMultiShardCoordinator(uint32(numOfShards), uint32(shardId)) - nodesCoordinator, _ := sharding.NewIndexHashedNodesCoordinator(3, hasher, 
uint32(shardId), uint32(numOfShards)) + nodesCoordinator, _ := sharding.NewIndexHashedNodesCoordinator( + 3, + hasher, + uint32(shardId), + uint32(numOfShards), + make(map[uint32][]sharding.Validator), + ) accntAdapter := createAccountsDB() var n *node.Node var mes p2p.Messenger diff --git a/integrationTests/singleShard/block/interceptedRequestHdr_test.go b/integrationTests/singleShard/block/interceptedRequestHdr_test.go index 30405fa8b78..dce6244c950 100644 --- a/integrationTests/singleShard/block/interceptedRequestHdr_test.go +++ b/integrationTests/singleShard/block/interceptedRequestHdr_test.go @@ -50,8 +50,20 @@ func TestNode_GenerateSendInterceptHeaderByNonceWithNetMessenger(t *testing.T) { storeResolver := createTestStore() shardCoordinator := &sharding.OneShardCoordinator{} - nodesCoordinator1, _ := sharding.NewIndexHashedNodesCoordinator(1, hasher, 0, 1) - nodesCoordinator2, _ := sharding.NewIndexHashedNodesCoordinator(1, hasher, 0, 1) + nodesCoordinator1, _ := sharding.NewIndexHashedNodesCoordinator( + 1, + hasher, + 0, + 1, + make(map[uint32][]sharding.Validator), + ) + nodesCoordinator2, _ := sharding.NewIndexHashedNodesCoordinator( + 1, + hasher, + 0, + 1, + make(map[uint32][]sharding.Validator), + ) fmt.Println("Requester:") nRequester, mesRequester, _, pk1, multiSigner, resolversFinder := createNetNode( @@ -77,8 +89,8 @@ func TestNode_GenerateSendInterceptHeaderByNonceWithNetMessenger(t *testing.T) { pubKeyMap[0] = []string{string(pk1Bytes), string(pk2Bytes)} validatorsMap := genValidatorsFromPubKeys(pubKeyMap) - nodesCoordinator1.LoadNodesPerShards(validatorsMap) - nodesCoordinator2.LoadNodesPerShards(validatorsMap) + nodesCoordinator1.SetNodesPerShards(validatorsMap) + nodesCoordinator2.SetNodesPerShards(validatorsMap) _ = nRequester.Start() _ = nResolver.Start() diff --git a/integrationTests/singleShard/block/interceptedRequestTxBlockBody_test.go b/integrationTests/singleShard/block/interceptedRequestTxBlockBody_test.go index 6cbf538ff7f..3bf25d84b0a 100644 --- a/integrationTests/singleShard/block/interceptedRequestTxBlockBody_test.go +++ b/integrationTests/singleShard/block/interceptedRequestTxBlockBody_test.go @@ -28,7 +28,13 @@ func TestNode_GenerateSendInterceptTxBlockBodyWithNetMessenger(t *testing.T) { dPoolResolver := createTestDataPool() shardCoordinator := &sharding.OneShardCoordinator{} - nodesCoordinator, _ := sharding.NewIndexHashedNodesCoordinator(1, hasher, 0, 1) + nodesCoordinator, _ := sharding.NewIndexHashedNodesCoordinator( + 1, + hasher, + 0, + 1, + make(map[uint32][]sharding.Validator), + ) fmt.Println("Requester: ") nRequester, mesRequester, _, _, _, resolversFinder := createNetNode( diff --git a/integrationTests/singleShard/transaction/interceptedBulkTx_test.go b/integrationTests/singleShard/transaction/interceptedBulkTx_test.go index 96ae7e2d89e..dcab25d5bb3 100644 --- a/integrationTests/singleShard/transaction/interceptedBulkTx_test.go +++ b/integrationTests/singleShard/transaction/interceptedBulkTx_test.go @@ -31,7 +31,13 @@ func TestNode_GenerateSendInterceptBulkTransactionsWithMessenger(t *testing.T) { accntAdapter := createAccountsDB() shardCoordinator := &sharding.OneShardCoordinator{} - nodesCoordinator, _ := sharding.NewIndexHashedNodesCoordinator(1, hasher, 0, 1) + nodesCoordinator, _ := sharding.NewIndexHashedNodesCoordinator( + 1, + hasher, + 0, + 1, + make(map[uint32][]sharding.Validator), + ) n, _, sk, _ := createNetNode(dPool, accntAdapter, shardCoordinator, nodesCoordinator) _ = n.Start() diff --git 
a/integrationTests/singleShard/transaction/interceptedResolvedTx_test.go b/integrationTests/singleShard/transaction/interceptedResolvedTx_test.go index d060d738163..d88b5864a77 100644 --- a/integrationTests/singleShard/transaction/interceptedResolvedTx_test.go +++ b/integrationTests/singleShard/transaction/interceptedResolvedTx_test.go @@ -29,7 +29,13 @@ func TestNode_RequestInterceptTransactionWithMessenger(t *testing.T) { dPoolResolver := createTestDataPool() shardCoordinator := &sharding.OneShardCoordinator{} - nodesCoordinator, _ := sharding.NewIndexHashedNodesCoordinator(1, hasher, 0, 1) + nodesCoordinator, _ := sharding.NewIndexHashedNodesCoordinator( + 1, + hasher, + 0, + 1, + make(map[uint32][]sharding.Validator), + ) fmt.Println("Requester:") nRequester, mesRequester, sk1, resolversFinder := createNetNode( diff --git a/node/node.go b/node/node.go index cd83c0b1d94..4921bf05e85 100644 --- a/node/node.go +++ b/node/node.go @@ -488,15 +488,7 @@ func (n *Node) createConsensusState() (*spos.ConsensusState, error) { // createNodesCoordinator creates a index hashed group selector object func (n *Node) createNodesCoordinator() (sharding.NodesCoordinator, error) { - nCoordinator, err := sharding.NewIndexHashedNodesCoordinator( - n.consensusGroupSize, - n.hasher, - n.shardCoordinator.SelfId(), - n.shardCoordinator.NumberOfShards(), - ) - if err != nil { - return nil, err - } + var err error nodesMap := make(map[uint32][]sharding.Validator) nbShards := n.shardCoordinator.NumberOfShards() @@ -513,7 +505,13 @@ func (n *Node) createNodesCoordinator() (sharding.NodesCoordinator, error) { return nil, err } - err = nCoordinator.LoadNodesPerShards(nodesMap) + nCoordinator, err := sharding.NewIndexHashedNodesCoordinator( + n.consensusGroupSize, + n.hasher, + n.shardCoordinator.SelfId(), + n.shardCoordinator.NumberOfShards(), + nodesMap, + ) if err != nil { return nil, err } diff --git a/process/block/interceptors/headerInterceptorBase_test.go b/process/block/interceptors/headerInterceptorBase_test.go index d4073faa9fe..ece315a1f57 100644 --- a/process/block/interceptors/headerInterceptorBase_test.go +++ b/process/block/interceptors/headerInterceptorBase_test.go @@ -252,7 +252,7 @@ func createNodesCoordinator() sharding.NodesCoordinator { validators[sharding.MetachainShardId] = []sharding.Validator{v} nodesCoordinator := mock.NewNodesCoordinatorMock() - nodesCoordinator.LoadNodesPerShards(validators) + nodesCoordinator.SetNodesPerShards(validators) return nodesCoordinator } diff --git a/process/block/interceptors/headerInterceptor_test.go b/process/block/interceptors/headerInterceptor_test.go index c95111b8272..a7e5fc7d7e4 100644 --- a/process/block/interceptors/headerInterceptor_test.go +++ b/process/block/interceptors/headerInterceptor_test.go @@ -182,7 +182,7 @@ func TestHeaderInterceptor_ProcessReceivedMessageValsOkShouldWork(t *testing.T) nodesCoordinator := mock.NewNodesCoordinatorMock() nodes := generateValidatorsMap(3, 3, 1) - nodesCoordinator.LoadNodesPerShards(nodes) + nodesCoordinator.SetNodesPerShards(nodes) hi, _ := interceptors.NewHeaderInterceptor( marshalizer, @@ -258,7 +258,7 @@ func TestHeaderInterceptor_ProcessReceivedMessageIsInStorageShouldNotAdd(t *test nodesCoordinator := mock.NewNodesCoordinatorMock() nodes := generateValidatorsMap(3, 3, 1) - nodesCoordinator.LoadNodesPerShards(nodes) + nodesCoordinator.SetNodesPerShards(nodes) hi, _ := interceptors.NewHeaderInterceptor( marshalizer, @@ -337,7 +337,7 @@ func 
TestHeaderInterceptor_ProcessReceivedMessageNotForCurrentShardShouldNotAdd( } nodes := generateValidatorsMap(3, 3, 5) - nodesCoordinator.LoadNodesPerShards(nodes) + nodesCoordinator.SetNodesPerShards(nodes) hi, _ := interceptors.NewHeaderInterceptor( marshalizer, diff --git a/process/mock/nodesCoordinatorMock.go b/process/mock/nodesCoordinatorMock.go index c49aeb99563..14167833a88 100644 --- a/process/mock/nodesCoordinatorMock.go +++ b/process/mock/nodesCoordinatorMock.go @@ -68,7 +68,7 @@ func (ncm *NodesCoordinatorMock) GetValidatorsPublicKeys(randomness []byte) ([]s return valGrStr, nil } -func (ncm *NodesCoordinatorMock) LoadNodesPerShards(nodes map[uint32][]sharding.Validator) error { +func (ncm *NodesCoordinatorMock) SetNodesPerShards(nodes map[uint32][]sharding.Validator) error { if ncm.LoadNodesPerShardsCalled != nil { return ncm.LoadNodesPerShardsCalled(nodes) } diff --git a/sharding/indexHashedNodesCoordinator.go b/sharding/indexHashedNodesCoordinator.go index cfb8c45b879..30c5d2e5ab4 100644 --- a/sharding/indexHashedNodesCoordinator.go +++ b/sharding/indexHashedNodesCoordinator.go @@ -23,6 +23,7 @@ func NewIndexHashedNodesCoordinator( hasher hashing.Hasher, shardId uint32, nbShards uint32, + nodes map[uint32][]Validator, ) (*indexHashedNodesCoordinator, error) { if consensusGroupSize < 1 { return nil, ErrInvalidConsensusGroupSize @@ -53,11 +54,16 @@ func NewIndexHashedNodesCoordinator( return nil, err } + err = ihgs.SetNodesPerShards(nodes) + if err != nil { + return nil, err + } + return ihgs, nil } -// LoadNodesPerShards loads the distribution of nodes per shard into the nodes management component -func (ihgs *indexHashedNodesCoordinator) LoadNodesPerShards(nodes map[uint32][]Validator) error { +// SetNodesPerShards loads the distribution of nodes per shard into the nodes management component +func (ihgs *indexHashedNodesCoordinator) SetNodesPerShards(nodes map[uint32][]Validator) error { if nodes == nil { return ErrNilInputNodesMap } diff --git a/sharding/indexHashedNodesCoordinator_test.go b/sharding/indexHashedNodesCoordinator_test.go index e2fe103453c..5a983cc38e3 100644 --- a/sharding/indexHashedNodesCoordinator_test.go +++ b/sharding/indexHashedNodesCoordinator_test.go @@ -22,12 +22,26 @@ func uint64ToBytes(value uint64) []byte { return buff } +func createDummyNodesMap() map[uint32][]sharding.Validator { + list := []sharding.Validator{ + mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0")), + mock.NewValidatorMock(big.NewInt(2), 3, []byte("pk1")), + } + + nodesMap := make(map[uint32][]sharding.Validator) + nodesMap[0] = list + + return nodesMap +} + //------- NewIndexHashedNodesCoordinator func TestNewIndexHashedGroupSelector_NilHasherShouldErr(t *testing.T) { t.Parallel() - ihgs, err := sharding.NewIndexHashedNodesCoordinator(1, nil, 0, 1) + nodesMap := createDummyNodesMap() + + ihgs, err := sharding.NewIndexHashedNodesCoordinator(1, nil, 0, 1, nodesMap) assert.Nil(t, ihgs) assert.Equal(t, sharding.ErrNilHasher, err) @@ -36,7 +50,14 @@ func TestNewIndexHashedGroupSelector_NilHasherShouldErr(t *testing.T) { func TestNewIndexHashedGroupSelector_InvalidConsensusGroupSizeShouldErr(t *testing.T) { t.Parallel() - ihgs, err := sharding.NewIndexHashedNodesCoordinator(0, mock.HasherMock{}, 0, 1) + nodesMap := createDummyNodesMap() + ihgs, err := sharding.NewIndexHashedNodesCoordinator( + 0, + mock.HasherMock{}, + 0, + 1, + nodesMap, + ) assert.Nil(t, ihgs) assert.Equal(t, sharding.ErrInvalidConsensusGroupSize, err) @@ -45,7 +66,14 @@ func 
TestNewIndexHashedGroupSelector_InvalidConsensusGroupSizeShouldErr(t *testi func TestNewIndexHashedGroupSelector_OkValsShouldWork(t *testing.T) { t.Parallel() - ihgs, err := sharding.NewIndexHashedNodesCoordinator(1, mock.HasherMock{}, 0, 1) + nodesMap := createDummyNodesMap() + ihgs, err := sharding.NewIndexHashedNodesCoordinator( + 1, + mock.HasherMock{}, + 0, + 1, + nodesMap, + ) assert.NotNil(t, ihgs) assert.Nil(t, err) @@ -56,27 +84,32 @@ func TestNewIndexHashedGroupSelector_OkValsShouldWork(t *testing.T) { func TestIndexHashedGroupSelector_LoadEligibleListNilListShouldErr(t *testing.T) { t.Parallel() - ihgs, _ := sharding.NewIndexHashedNodesCoordinator(10, mock.HasherMock{}, 0, 1) + nodesMap := createDummyNodesMap() + ihgs, _ := sharding.NewIndexHashedNodesCoordinator( + 10, + mock.HasherMock{}, + 0, + 1, + nodesMap, + ) - assert.Equal(t, sharding.ErrNilInputNodesMap, ihgs.LoadNodesPerShards(nil)) + assert.Equal(t, sharding.ErrNilInputNodesMap, ihgs.SetNodesPerShards(nil)) } func TestIndexHashedGroupSelector_OkValShouldWork(t *testing.T) { t.Parallel() - ihgs, _ := sharding.NewIndexHashedNodesCoordinator(10, mock.HasherMock{}, 0, 1) - - list := []sharding.Validator{ - mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0")), - mock.NewValidatorMock(big.NewInt(2), 3, []byte("pk1")), - } - - nodesMap := make(map[uint32][]sharding.Validator) - nodesMap[0] = list + nodesMap := createDummyNodesMap() + ihgs, err := sharding.NewIndexHashedNodesCoordinator( + 10, + mock.HasherMock{}, + 0, + 1, + nodesMap, + ) - err := ihgs.LoadNodesPerShards(nodesMap) assert.Nil(t, err) - assert.Equal(t, list, ihgs.EligibleList()) + assert.Equal(t, nodesMap[0], ihgs.EligibleList()) } //------- ComputeValidatorsGroup @@ -84,7 +117,13 @@ func TestIndexHashedGroupSelector_OkValShouldWork(t *testing.T) { func TestIndexHashedGroupSelector_ComputeValidatorsGroup0SizeShouldErr(t *testing.T) { t.Parallel() - ihgs, _ := sharding.NewIndexHashedNodesCoordinator(1, mock.HasherMock{}, 0, 1) + ihgs, _ := sharding.NewIndexHashedNodesCoordinator( + 1, + mock.HasherMock{}, + 0, + 1, + make(map[uint32][]sharding.Validator), + ) list := make([]sharding.Validator, 0) @@ -97,16 +136,14 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroup0SizeShouldErr(t *testin func TestIndexHashedGroupSelector_ComputeValidatorsGroupWrongSizeShouldErr(t *testing.T) { t.Parallel() - ihgs, _ := sharding.NewIndexHashedNodesCoordinator(10, mock.HasherMock{}, 0, 1) - - list := []sharding.Validator{ - mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0")), - mock.NewValidatorMock(big.NewInt(2), 3, []byte("pk1")), - } - - nodesMap := make(map[uint32][]sharding.Validator) - nodesMap[0] = list - _ = ihgs.LoadNodesPerShards(nodesMap) + nodesMap := createDummyNodesMap() + ihgs, _ := sharding.NewIndexHashedNodesCoordinator( + 10, + mock.HasherMock{}, + 0, + 1, + nodesMap, + ) list, err := ihgs.ComputeValidatorsGroup([]byte("randomness")) @@ -117,16 +154,14 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroupWrongSizeShouldErr(t *te func TestIndexHashedGroupSelector_ComputeValidatorsGroupNilRandomnessShouldErr(t *testing.T) { t.Parallel() - ihgs, _ := sharding.NewIndexHashedNodesCoordinator(2, mock.HasherMock{}, 0, 1) - - list := []sharding.Validator{ - mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0")), - mock.NewValidatorMock(big.NewInt(2), 3, []byte("pk1")), - } - - nodesMap := make(map[uint32][]sharding.Validator) - nodesMap[0] = list - _ = ihgs.LoadNodesPerShards(nodesMap) + nodesMap := createDummyNodesMap() + ihgs, _ := 
sharding.NewIndexHashedNodesCoordinator( + 2, + mock.HasherMock{}, + 0, + 1, + nodesMap, + ) list2, err := ihgs.ComputeValidatorsGroup(nil) @@ -139,15 +174,19 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroupNilRandomnessShouldErr(t func TestIndexHashedGroupSelector_ComputeValidatorsGroup1ValidatorShouldReturnSame(t *testing.T) { t.Parallel() - ihgs, _ := sharding.NewIndexHashedNodesCoordinator(1, mock.HasherMock{}, 0, 1) - list := []sharding.Validator{ mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0")), } nodesMap := make(map[uint32][]sharding.Validator) nodesMap[0] = list - _ = ihgs.LoadNodesPerShards(nodesMap) + ihgs, _ := sharding.NewIndexHashedNodesCoordinator( + 1, + mock.HasherMock{}, + 0, + 1, + nodesMap, + ) list2, err := ihgs.ComputeValidatorsGroup([]byte("randomness")) @@ -177,21 +216,13 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest2Validators(t *testi return nil } - ihgs, _ := sharding.NewIndexHashedNodesCoordinator(2, hasher, 0, 1) - - list := []sharding.Validator{ - mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0")), - mock.NewValidatorMock(big.NewInt(2), 3, []byte("pk1")), - } - - nodesMap := make(map[uint32][]sharding.Validator) - nodesMap[0] = list - _ = ihgs.LoadNodesPerShards(nodesMap) + nodesMap := createDummyNodesMap() + ihgs, _ := sharding.NewIndexHashedNodesCoordinator(2, hasher, 0, 1, nodesMap) list2, err := ihgs.ComputeValidatorsGroup([]byte(randomness)) assert.Nil(t, err) - assert.Equal(t, list, list2) + assert.Equal(t, nodesMap[0], list2) } func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest2ValidatorsRevertOrder(t *testing.T) { @@ -216,8 +247,6 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest2ValidatorsRevertOrd return nil } - ihgs, _ := sharding.NewIndexHashedNodesCoordinator(2, hasher, 0, 1) - validator0 := mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0")) validator1 := mock.NewValidatorMock(big.NewInt(2), 3, []byte("pk1")) @@ -228,7 +257,7 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest2ValidatorsRevertOrd nodesMap := make(map[uint32][]sharding.Validator) nodesMap[0] = list - _ = ihgs.LoadNodesPerShards(nodesMap) + ihgs, _ := sharding.NewIndexHashedNodesCoordinator(2, hasher, 0, 1, nodesMap) list2, err := ihgs.ComputeValidatorsGroup([]byte(randomness)) @@ -259,21 +288,13 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest2ValidatorsSameIndex return nil } - ihgs, _ := sharding.NewIndexHashedNodesCoordinator(2, hasher, 0, 1) - - list := []sharding.Validator{ - mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0")), - mock.NewValidatorMock(big.NewInt(2), 3, []byte("pk1")), - } - - nodesMap := make(map[uint32][]sharding.Validator) - nodesMap[0] = list - _ = ihgs.LoadNodesPerShards(nodesMap) + nodesMap := createDummyNodesMap() + ihgs, _ := sharding.NewIndexHashedNodesCoordinator(2, hasher, 0, 1, nodesMap) list2, err := ihgs.ComputeValidatorsGroup([]byte(randomness)) assert.Nil(t, err) - assert.Equal(t, list, list2) + assert.Equal(t, nodesMap[0], list2) } func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest6From10ValidatorsShouldWork(t *testing.T) { @@ -310,8 +331,6 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest6From10ValidatorsSho return convertBigIntToBytes(val) } - ihgs, _ := sharding.NewIndexHashedNodesCoordinator(6, hasher, 0, 1) - validator0 := mock.NewValidatorMock(big.NewInt(1), 1, []byte("pk0")) validator1 := mock.NewValidatorMock(big.NewInt(2), 2, []byte("pk1")) validator2 := mock.NewValidatorMock(big.NewInt(3), 3, []byte("pk2")) @@ -338,7 
+357,7 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest6From10ValidatorsSho nodesMap := make(map[uint32][]sharding.Validator) nodesMap[0] = list - _ = ihgs.LoadNodesPerShards(nodesMap) + ihgs, _ := sharding.NewIndexHashedNodesCoordinator(6, hasher, 0, 1, nodesMap) list2, err := ihgs.ComputeValidatorsGroup([]byte(randomness)) @@ -356,9 +375,6 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest6From10ValidatorsSho func BenchmarkIndexHashedGroupSelector_ComputeValidatorsGroup21of400(b *testing.B) { consensusGroupSize := 21 - - ihgs, _ := sharding.NewIndexHashedNodesCoordinator(consensusGroupSize, mock.HasherMock{}, 0, 1) - list := make([]sharding.Validator, 0) //generate 400 validators @@ -368,7 +384,14 @@ func BenchmarkIndexHashedGroupSelector_ComputeValidatorsGroup21of400(b *testing. nodesMap := make(map[uint32][]sharding.Validator) nodesMap[0] = list - _ = ihgs.LoadNodesPerShards(nodesMap) + + ihgs, _ := sharding.NewIndexHashedNodesCoordinator( + consensusGroupSize, + mock.HasherMock{}, + 0, + 1, + nodesMap, + ) b.ResetTimer() diff --git a/sharding/interface.go b/sharding/interface.go index 0fccc459eb2..a4400995c34 100644 --- a/sharding/interface.go +++ b/sharding/interface.go @@ -29,7 +29,7 @@ type Validator interface { // NodesCoordinator defines the behaviour of a struct able to do validator group selection type NodesCoordinator interface { PublicKeysSelector - LoadNodesPerShards(nodes map[uint32][]Validator) error + SetNodesPerShards(nodes map[uint32][]Validator) error ComputeValidatorsGroup(randomness []byte) (validatorsGroup []Validator, err error) ConsensusGroupSize() int SetConsensusGroupSize(int) error diff --git a/sharding/mock/nodesCoordinatorMock.go b/sharding/mock/nodesCoordinatorMock.go index b5f0ba1e03c..b72e78be5a1 100644 --- a/sharding/mock/nodesCoordinatorMock.go +++ b/sharding/mock/nodesCoordinatorMock.go @@ -54,7 +54,7 @@ func (ncm NodesCoordinatorMock) ConsensusGroupSize() int { panic("implement me") } -func (ncm NodesCoordinatorMock) LoadNodesPerShards(map[uint32][]sharding.Validator) error { +func (ncm NodesCoordinatorMock) SetNodesPerShards(map[uint32][]sharding.Validator) error { return nil } From c91a16f37c825d831b4d7a807a3bbf7ec9953e4f Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Fri, 26 Jul 2019 10:59:49 +0300 Subject: [PATCH 044/234] integrationTests: fix unhandled error warnings --- consensus/spos/commonSubround/subroundStartRound_test.go | 3 ++- integrationTests/multiShard/block/testInitializer.go | 2 +- integrationTests/multiShard/metablock/testInitializer.go | 2 +- .../multiShard/smartContract/testInitilalizer.go | 5 ++--- .../singleShard/block/interceptedRequestHdr_test.go | 4 ++-- sharding/indexHashedNodesCoordinator.go | 2 +- 6 files changed, 9 insertions(+), 9 deletions(-) diff --git a/consensus/spos/commonSubround/subroundStartRound_test.go b/consensus/spos/commonSubround/subroundStartRound_test.go index 8c67041100f..7a9defc8bf3 100644 --- a/consensus/spos/commonSubround/subroundStartRound_test.go +++ b/consensus/spos/commonSubround/subroundStartRound_test.go @@ -27,7 +27,8 @@ func defaultSubroundStartRoundFromSubround(sr *spos.Subround) (*commonSubround.S func defaultSubround( consensusState *spos.ConsensusState, - ch chan bool, container spos.ConsensusCoreHandler, + ch chan bool, + container spos.ConsensusCoreHandler, ) (*spos.Subround, error) { return spos.NewSubround( diff --git a/integrationTests/multiShard/block/testInitializer.go b/integrationTests/multiShard/block/testInitializer.go index 0d5e81c8103..2be94da9d4a 
100644 --- a/integrationTests/multiShard/block/testInitializer.go +++ b/integrationTests/multiShard/block/testInitializer.go @@ -570,7 +570,7 @@ func createNodes( for _, shardCoord := range nodesCoordinators { for j := 0; j < len(shardCoord); j++ { - shardCoord[j].SetNodesPerShards(mapValidators) + _ = shardCoord[j].SetNodesPerShards(mapValidators) } } diff --git a/integrationTests/multiShard/metablock/testInitializer.go b/integrationTests/multiShard/metablock/testInitializer.go index bd2b280b938..04a505c0ce1 100644 --- a/integrationTests/multiShard/metablock/testInitializer.go +++ b/integrationTests/multiShard/metablock/testInitializer.go @@ -245,7 +245,7 @@ func createNodes( for _, nodeCoordList := range nodesCoordMap { for _, nodeCoord := range nodeCoordList { - nodeCoord.SetNodesPerShards(valMap) + _ = nodeCoord.SetNodesPerShards(valMap) } } diff --git a/integrationTests/multiShard/smartContract/testInitilalizer.go b/integrationTests/multiShard/smartContract/testInitilalizer.go index 0a840087cce..b409ce95807 100644 --- a/integrationTests/multiShard/smartContract/testInitilalizer.go +++ b/integrationTests/multiShard/smartContract/testInitilalizer.go @@ -55,7 +55,7 @@ import ( "github.com/ElrondNetwork/elrond-go/storage" "github.com/ElrondNetwork/elrond-go/storage/memorydb" "github.com/ElrondNetwork/elrond-go/storage/storageUnit" - vmcommon "github.com/ElrondNetwork/elrond-vm-common" + "github.com/ElrondNetwork/elrond-vm-common" "github.com/btcsuite/btcd/btcec" libp2pCrypto "github.com/libp2p/go-libp2p-core/crypto" ) @@ -423,7 +423,6 @@ func getConnectableAddress(mes p2p.Messenger) string { func displayAndStartNodes(nodes map[uint32][]*testNode) { for _, nodeList := range nodes { - for _, n := range nodeList { skBuff, _ := n.sk.ToByteArray() pkBuff, _ := n.pk.ToByteArray() @@ -585,7 +584,7 @@ func createNodes( for _, shardCoord := range nodesCoordinators { for j := 0; j < len(shardCoord); j++ { - shardCoord[j].SetNodesPerShards(mapValidators) + _ = shardCoord[j].SetNodesPerShards(mapValidators) } } diff --git a/integrationTests/singleShard/block/interceptedRequestHdr_test.go b/integrationTests/singleShard/block/interceptedRequestHdr_test.go index dce6244c950..cc0f44f5337 100644 --- a/integrationTests/singleShard/block/interceptedRequestHdr_test.go +++ b/integrationTests/singleShard/block/interceptedRequestHdr_test.go @@ -89,8 +89,8 @@ func TestNode_GenerateSendInterceptHeaderByNonceWithNetMessenger(t *testing.T) { pubKeyMap[0] = []string{string(pk1Bytes), string(pk2Bytes)} validatorsMap := genValidatorsFromPubKeys(pubKeyMap) - nodesCoordinator1.SetNodesPerShards(validatorsMap) - nodesCoordinator2.SetNodesPerShards(validatorsMap) + _ = nodesCoordinator1.SetNodesPerShards(validatorsMap) + _ = nodesCoordinator2.SetNodesPerShards(validatorsMap) _ = nRequester.Start() _ = nResolver.Start() diff --git a/sharding/indexHashedNodesCoordinator.go b/sharding/indexHashedNodesCoordinator.go index 30c5d2e5ab4..6a72c2c416c 100644 --- a/sharding/indexHashedNodesCoordinator.go +++ b/sharding/indexHashedNodesCoordinator.go @@ -104,7 +104,7 @@ func (ihgs *indexHashedNodesCoordinator) ComputeValidatorsGroup(randomness []byt return tempList, nil } -// GetSelectedValidatorsPublicKeys calculates the validators group for a specific randomness, +// GetValidatorsPublicKeys calculates the validators group for a specific randomness, // returning their public keys func (ihgs *indexHashedNodesCoordinator) GetValidatorsPublicKeys(randomness []byte) ([]string, error) { consensusNodes, err := 
ihgs.ComputeValidatorsGroup(randomness) From 72422cdd9c4b64bac35e8b4cb5542e5f0209fe08 Mon Sep 17 00:00:00 2001 From: Robert Sasu Date: Mon, 29 Jul 2019 13:41:55 +0300 Subject: [PATCH 045/234] fix after review --- data/state/errors.go | 4 +- data/state/factory/accountCreatorFactory.go | 11 +- .../factory/accountCreatorFactory_test.go | 2 +- data/state/factory/peerAccountCreator.go | 4 +- data/state/peerAccount.go | 9 +- data/state/peerAccountsDB.go | 4 - data/state/peerJournalEntries.go | 40 +- data/state/peerJournalEntries_test.go | 361 +++++++++++++++++- 8 files changed, 402 insertions(+), 33 deletions(-) diff --git a/data/state/errors.go b/data/state/errors.go index c2a4ef1f514..2d7196f5353 100644 --- a/data/state/errors.go +++ b/data/state/errors.go @@ -134,13 +134,11 @@ var ErrBech32WrongAddr = errors.New("wrong bech32 string") // ErrNilStake signals that the provided stake is nil var ErrNilStake = errors.New("stake is nil") -// ErrNilAddress signals that the provided addres is nil -var ErrNilAddress = errors.New("address is nil") - // ErrNilSchnorrPublicKey signals that the provided schnorr public is nil var ErrNilSchnorrPublicKey = errors.New("schnorr public key is nil") // ErrNilBLSPublicKey signals that the provided BLS public key is nil var ErrNilBLSPublicKey = errors.New("bls public key is nil") +// ErrUnknownAccountType signals that the provided account type is unknown var ErrUnknownAccountType = errors.New("account type is unknown") diff --git a/data/state/factory/accountCreatorFactory.go b/data/state/factory/accountCreatorFactory.go index a1452003aa0..069e51445a8 100644 --- a/data/state/factory/accountCreatorFactory.go +++ b/data/state/factory/accountCreatorFactory.go @@ -4,13 +4,16 @@ import ( "github.com/ElrondNetwork/elrond-go/data/state" ) +// Type defines account types type Type uint8 const ( - UserAccount Type = 0 - ShardStatistics Type = 1 + // UserAccount identifies an account holding balance, storage updates, code + UserAccount Type = 0 + // ShardStatistics identifies a shard, keeps the statistics + ShardStatistics Type = 1 + // ValidatorAccount identifies an account holding stake, crypto public keys, assigned shard, rating ValidatorAccount Type = 2 - InvalidType Type = 3 ) // NewAccountFactoryCreator returns an account factory depending on shard coordinator self id @@ -22,8 +25,6 @@ func NewAccountFactoryCreator(accountType Type) (state.AccountFactory, error) { return NewMetaAccountCreator(), nil case ValidatorAccount: return NewPeerAccountCreator(), nil - case InvalidType: - return nil, state.ErrUnknownAccountType default: return nil, state.ErrUnknownAccountType } diff --git a/data/state/factory/accountCreatorFactory_test.go b/data/state/factory/accountCreatorFactory_test.go index ebf94f1de76..49d4a2a3e2e 100644 --- a/data/state/factory/accountCreatorFactory_test.go +++ b/data/state/factory/accountCreatorFactory_test.go @@ -12,7 +12,7 @@ import ( func TestNewAccountFactoryCreator_BadType(t *testing.T) { t.Parallel() - accF, err := factory.NewAccountFactoryCreator(factory.InvalidType) + accF, err := factory.NewAccountFactoryCreator(5) assert.Equal(t, err, state.ErrUnknownAccountType) assert.Nil(t, accF) diff --git a/data/state/factory/peerAccountCreator.go b/data/state/factory/peerAccountCreator.go index c81ed74b129..12c4fd510ce 100644 --- a/data/state/factory/peerAccountCreator.go +++ b/data/state/factory/peerAccountCreator.go @@ -2,11 +2,11 @@ package factory import "github.com/ElrondNetwork/elrond-go/data/state" -// MetaAccountCreator has a method to create a new 
meta accound +// PeerAccountCreator has a method to create a new peer account type PeerAccountCreator struct { } -// NewPeerAccountCreator creates a meta account creator +// NewPeerAccountCreator creates a peer account creator func NewPeerAccountCreator() state.AccountFactory { return &PeerAccountCreator{} } diff --git a/data/state/peerAccount.go b/data/state/peerAccount.go index 9d78f253495..8c0a9b06353 100644 --- a/data/state/peerAccount.go +++ b/data/state/peerAccount.go @@ -6,16 +6,19 @@ import ( "github.com/ElrondNetwork/elrond-go/data" ) +// TimeStamp is a moment defined by epoch and round type TimeStamp struct { Epoch uint64 Round uint64 } +// TimePeriod holds start and end time type TimePeriod struct { StartTime TimeStamp EndTime TimeStamp } +// SignRate is used to keep the number of success and failed signings type SignRate struct { NrSuccess uint32 NrFailure uint32 @@ -179,7 +182,7 @@ func (a *PeerAccount) DataTrieTracker() DataTrieTracker { // SetAddressWithJournal sets the account's address, saving the old address before changing func (a *PeerAccount) SetAddressWithJournal(address []byte) error { if len(address) < 1 { - return ErrNilAddress + return ErrEmptyAddress } entry, err := NewPeerJournalEntryAddress(a, a.Address) @@ -193,7 +196,7 @@ func (a *PeerAccount) SetAddressWithJournal(address []byte) error { return a.accountTracker.SaveAccount(a) } -// SetSchnorrPublicKeyWithJournal sets the account's address, saving the old address before changing +// SetSchnorrPublicKeyWithJournal sets the account's public key, saving the old key before changing func (a *PeerAccount) SetSchnorrPublicKeyWithJournal(pubKey []byte) error { if len(pubKey) < 1 { return ErrNilSchnorrPublicKey @@ -210,7 +213,7 @@ func (a *PeerAccount) SetSchnorrPublicKeyWithJournal(pubKey []byte) error { return a.accountTracker.SaveAccount(a) } -// SetSchnorrPublicKeyWithJournal sets the account's address, saving the old address before changing +// SetBLSPublicKeyWithJournal sets the account's bls public key, saving the old key before changing func (a *PeerAccount) SetBLSPublicKeyWithJournal(pubKey []byte) error { if len(pubKey) < 1 { return ErrNilBLSPublicKey diff --git a/data/state/peerAccountsDB.go b/data/state/peerAccountsDB.go index 5b202313394..0dd6b35c4b8 100644 --- a/data/state/peerAccountsDB.go +++ b/data/state/peerAccountsDB.go @@ -1,10 +1,6 @@ package state -import "github.com/ElrondNetwork/elrond-go/sharding" - // peerAccountsDB will save and synchronize data from peer processor, plus will synchronize with nodesCoordinator type peerAccountsDB struct { *AccountsDB - - shardCoordinator sharding.Coordinator } diff --git a/data/state/peerJournalEntries.go b/data/state/peerJournalEntries.go index 9d714cb541b..b0ceeb2f33b 100644 --- a/data/state/peerJournalEntries.go +++ b/data/state/peerJournalEntries.go @@ -37,8 +37,11 @@ type PeerJournalEntrySchnorrPublicKey struct { oldSchnorrPubKey []byte } -// NewPeerJournalEntryAddress outputs a new PeerJournalEntry implementation used to revert a round change -func NewPeerJournalEntrySchnorrPublicKey(account *PeerAccount, oldSchnorrPubKey []byte) (*PeerJournalEntrySchnorrPublicKey, error) { +// NewPeerJournalEntrySchnorrPublicKey outputs a new PeerJournalEntrySchnorrPublicKey implementation used to revert a round change +func NewPeerJournalEntrySchnorrPublicKey( + account *PeerAccount, + oldSchnorrPubKey []byte, +) (*PeerJournalEntrySchnorrPublicKey, error) { if account == nil { return nil, ErrNilAccountHandler } @@ -64,7 +67,7 @@ type PeerJournalEntryBLSPublicKey 
struct { oldBLSPubKey []byte } -// NewPeerJournalEntryBLSPublicKey outputs a new PeerJournalEntry implementation used to revert a round change +// NewPeerJournalEntryBLSPublicKey outputs a new PeerJournalEntryBLSPublicKey implementation used to revert a round change func NewPeerJournalEntryBLSPublicKey(account *PeerAccount, oldPubKey []byte) (*PeerJournalEntryBLSPublicKey, error) { if account == nil { return nil, ErrNilAccountHandler @@ -91,7 +94,7 @@ type PeerJournalEntryStake struct { oldStake *big.Int } -// NewPeerJournalEntryStake outputs a new PeerJournalEntry implementation used to revert a stake change +// NewPeerJournalEntryStake outputs a new PeerJournalEntryStake implementation used to revert a stake change func NewPeerJournalEntryStake(account *PeerAccount, oldStake *big.Int) (*PeerJournalEntryStake, error) { if account == nil { return nil, ErrNilAccountHandler @@ -116,7 +119,7 @@ type PeerJournalEntryJailTime struct { oldPeriod TimePeriod } -// NewPeerJournalEntryJailTime outputs a new PeerJournalEntry implementation used to revert a state change +// NewPeerJournalEntryJailTime outputs a new PeerJournalEntryJailTime implementation used to revert a state change func NewPeerJournalEntryJailTime(account *PeerAccount, oldJailTime TimePeriod) (*PeerJournalEntryJailTime, error) { if account == nil { return nil, ErrNilAccountHandler @@ -141,7 +144,7 @@ type PeerJournalEntryCurrentShardId struct { oldShardId uint32 } -// NewPeerJournalEntryCurrentShardId outputs a new PeerJournalEntry implementation used to revert a state change +// NewPeerJournalEntryCurrentShardId outputs a new PeerJournalEntryCurrentShardId implementation used to revert a state change func NewPeerJournalEntryCurrentShardId(account *PeerAccount, oldShId uint32) (*PeerJournalEntryCurrentShardId, error) { if account == nil { return nil, ErrNilAccountHandler @@ -166,7 +169,7 @@ type PeerJournalEntryNextShardId struct { oldShardId uint32 } -// NewPeerJournalEntryNextShardId outputs a new PeerJournalEntry implementation used to revert a state change +// NewPeerJournalEntryNextShardId outputs a new PeerJournalEntryNextShardId implementation used to revert a state change func NewPeerJournalEntryNextShardId(account *PeerAccount, oldShId uint32) (*PeerJournalEntryNextShardId, error) { if account == nil { return nil, ErrNilAccountHandler @@ -191,8 +194,11 @@ type PeerJournalEntryInWaitingList struct { oldInWaitingList bool } -// NewPeerJournalEntryInWaitingList outputs a new PeerJournalEntry implementation used to revert a state change -func NewPeerJournalEntryInWaitingList(account *PeerAccount, oldInWaitingList bool) (*PeerJournalEntryInWaitingList, error) { +// NewPeerJournalEntryInWaitingList outputs a new PeerJournalEntryInWaitingList implementation used to revert a state change +func NewPeerJournalEntryInWaitingList( + account *PeerAccount, + oldInWaitingList bool, +) (*PeerJournalEntryInWaitingList, error) { if account == nil { return nil, ErrNilAccountHandler } @@ -216,8 +222,11 @@ type PeerJournalEntryValidatorSuccessRate struct { oldSuccessRate SignRate } -// NewPeerJournalEntryInWaitingList outputs a new PeerJournalEntry implementation used to revert a state change -func NewPeerJournalEntryValidatorSuccessRate(account *PeerAccount, oldSuccessRate SignRate) (*PeerJournalEntryValidatorSuccessRate, error) { +// NewPeerJournalEntryInWaitingList outputs a new PeerJournalEntryValidatorSuccessRate implementation used to revert a state change +func NewPeerJournalEntryValidatorSuccessRate( + account *PeerAccount, + 
oldSuccessRate SignRate, +) (*PeerJournalEntryValidatorSuccessRate, error) { if account == nil { return nil, ErrNilAccountHandler } @@ -241,8 +250,11 @@ type PeerJournalEntryLeaderSuccessRate struct { oldSuccessRate SignRate } -// NewPeerJournalEntryInWaitingList outputs a new PeerJournalEntry implementation used to revert a state change -func NewPeerJournalEntryLeaderSuccessRate(account *PeerAccount, oldSuccessRate SignRate) (*PeerJournalEntryLeaderSuccessRate, error) { +// NewPeerJournalEntryInWaitingList outputs a new PeerJournalEntryLeaderSuccessRate implementation used to revert a state change +func NewPeerJournalEntryLeaderSuccessRate( + account *PeerAccount, + oldSuccessRate SignRate, +) (*PeerJournalEntryLeaderSuccessRate, error) { if account == nil { return nil, ErrNilAccountHandler } @@ -266,7 +278,7 @@ type PeerJournalEntryRating struct { oldRating uint32 } -// NewPeerJournalEntryRating outputs a new PeerJournalEntry implementation used to revert a state change +// NewPeerJournalEntryRating outputs a new PeerJournalEntryRating implementation used to revert a state change func NewPeerJournalEntryRating(account *PeerAccount, oldRating uint32) (*PeerJournalEntryRating, error) { if account == nil { return nil, ErrNilAccountHandler diff --git a/data/state/peerJournalEntries_test.go b/data/state/peerJournalEntries_test.go index 7bf2df5b486..06f5edf097d 100644 --- a/data/state/peerJournalEntries_test.go +++ b/data/state/peerJournalEntries_test.go @@ -1 +1,360 @@ -package state +package state_test + +import ( + "math/big" + "testing" + + "github.com/ElrondNetwork/elrond-go/data/mock" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/stretchr/testify/assert" +) + +func TestPeerJournalEntryAddress_NilAccountShouldErr(t *testing.T) { + t.Parallel() + + entry, err := state.NewPeerJournalEntryAddress(nil, nil) + + assert.Nil(t, entry) + assert.Equal(t, state.ErrNilAccountHandler, err) +} + +func TestPeerJournalEntryAddress_ShouldWork(t *testing.T) { + t.Parallel() + accnt, _ := state.NewPeerAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) + entry, err := state.NewPeerJournalEntryAddress(accnt, []byte("address")) + + assert.NotNil(t, entry) + assert.Nil(t, err) +} + +func TestPeerJournalEntryAddress_RevertOkValsShouldWork(t *testing.T) { + t.Parallel() + + shardRootHash := []byte("address") + accnt, _ := state.NewPeerAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) + entry, _ := state.NewPeerJournalEntryAddress(accnt, shardRootHash) + _, err := entry.Revert() + + assert.Nil(t, err) + assert.Equal(t, shardRootHash, accnt.Address) +} + +func TestPeerJournalEntrySchnorrPublicKey_NilAccountShouldErr(t *testing.T) { + t.Parallel() + + entry, err := state.NewPeerJournalEntrySchnorrPublicKey(nil, nil) + + assert.Nil(t, entry) + assert.Equal(t, state.ErrNilAccountHandler, err) +} + +func TestPeerJournalEntrySchnorrPublicKey_ShouldWork(t *testing.T) { + t.Parallel() + accnt, _ := state.NewPeerAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) + entry, err := state.NewPeerJournalEntrySchnorrPublicKey(accnt, []byte("address")) + + assert.NotNil(t, entry) + assert.Nil(t, err) +} + +func TestPeerJournalEntrySchnorrPublicKey_RevertOkValsShouldWork(t *testing.T) { + t.Parallel() + + shardRootHash := []byte("address") + accnt, _ := state.NewPeerAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) + entry, _ := state.NewPeerJournalEntrySchnorrPublicKey(accnt, shardRootHash) + _, err := entry.Revert() + + assert.Nil(t, err) + assert.Equal(t, shardRootHash, 
accnt.SchnorrPublicKey) +} + +func TestPeerJournalEntryBLSPublicKey_NilAccountShouldErr(t *testing.T) { + t.Parallel() + + entry, err := state.NewPeerJournalEntryBLSPublicKey(nil, nil) + + assert.Nil(t, entry) + assert.Equal(t, state.ErrNilAccountHandler, err) +} + +func TestPeerJournalEntryBLSPublicKey_ShouldWork(t *testing.T) { + t.Parallel() + accnt, _ := state.NewPeerAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) + entry, err := state.NewPeerJournalEntryBLSPublicKey(accnt, []byte("address")) + + assert.NotNil(t, entry) + assert.Nil(t, err) +} + +func TestPeerJournalEntryBLSPublicKey_RevertOkValsShouldWork(t *testing.T) { + t.Parallel() + + shardRootHash := []byte("address") + accnt, _ := state.NewPeerAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) + entry, _ := state.NewPeerJournalEntryBLSPublicKey(accnt, shardRootHash) + _, err := entry.Revert() + + assert.Nil(t, err) + assert.Equal(t, shardRootHash, accnt.BLSPublicKey) +} + +func TestPeerJournalEntryStake_NilAccountShouldErr(t *testing.T) { + t.Parallel() + + entry, err := state.NewPeerJournalEntryStake(nil, nil) + + assert.Nil(t, entry) + assert.Equal(t, state.ErrNilAccountHandler, err) +} + +func TestPeerJournalEntryStake_ShouldWork(t *testing.T) { + t.Parallel() + accnt, _ := state.NewPeerAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) + entry, err := state.NewPeerJournalEntryStake(accnt, big.NewInt(9)) + + assert.NotNil(t, entry) + assert.Nil(t, err) +} + +func TestPeerJournalEntryStake_RevertOkValsShouldWork(t *testing.T) { + t.Parallel() + + stake := big.NewInt(999) + accnt, _ := state.NewPeerAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) + entry, _ := state.NewPeerJournalEntryStake(accnt, stake) + _, err := entry.Revert() + + assert.Nil(t, err) + assert.Equal(t, stake.Uint64(), accnt.Stake.Uint64()) +} + +func TestPeerJournalEntryJailTime_NilAccountShouldErr(t *testing.T) { + t.Parallel() + + startTime := state.TimeStamp{Round: 10, Epoch: 10} + endTime := state.TimeStamp{Round: 11, Epoch: 10} + jailTime := state.TimePeriod{StartTime: startTime, EndTime: endTime} + + entry, err := state.NewPeerJournalEntryJailTime(nil, jailTime) + + assert.Nil(t, entry) + assert.Equal(t, state.ErrNilAccountHandler, err) +} + +func TestPeerJournalEntryJailTime_ShouldWork(t *testing.T) { + t.Parallel() + + startTime := state.TimeStamp{Round: 10, Epoch: 10} + endTime := state.TimeStamp{Round: 11, Epoch: 10} + jailTime := state.TimePeriod{StartTime: startTime, EndTime: endTime} + + accnt, _ := state.NewPeerAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) + entry, err := state.NewPeerJournalEntryJailTime(accnt, jailTime) + + assert.NotNil(t, entry) + assert.Nil(t, err) +} + +func TestPeerJournalEntryJailTime_RevertOkValsShouldWork(t *testing.T) { + t.Parallel() + + startTime := state.TimeStamp{Round: 10, Epoch: 10} + endTime := state.TimeStamp{Round: 11, Epoch: 10} + jailTime := state.TimePeriod{StartTime: startTime, EndTime: endTime} + + accnt, _ := state.NewPeerAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) + entry, _ := state.NewPeerJournalEntryJailTime(accnt, jailTime) + _, err := entry.Revert() + + assert.Nil(t, err) + assert.Equal(t, jailTime, accnt.JailTime) +} + +func TestPeerJournalEntryCurrentShardId_NilAccountShouldErr(t *testing.T) { + t.Parallel() + + entry, err := state.NewPeerJournalEntryCurrentShardId(nil, 0) + + assert.Nil(t, entry) + assert.Equal(t, state.ErrNilAccountHandler, err) +} + +func TestPeerJournalEntryCurrentShardId_ShouldWork(t *testing.T) { + 
t.Parallel() + + accnt, _ := state.NewPeerAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) + entry, err := state.NewPeerJournalEntryCurrentShardId(accnt, 0) + + assert.NotNil(t, entry) + assert.Nil(t, err) +} + +func TestPeerJournalEntryCurrentShardId_RevertOkValsShouldWork(t *testing.T) { + t.Parallel() + + accnt, _ := state.NewPeerAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) + entry, _ := state.NewPeerJournalEntryCurrentShardId(accnt, 10) + _, err := entry.Revert() + + assert.Nil(t, err) + assert.Equal(t, uint32(10), accnt.CurrentShardId) +} + +func TestPeerJournalEntryNextShardId_NilAccountShouldErr(t *testing.T) { + t.Parallel() + + entry, err := state.NewPeerJournalEntryNextShardId(nil, 0) + + assert.Nil(t, entry) + assert.Equal(t, state.ErrNilAccountHandler, err) +} + +func TestPeerJournalEntryNextShardId_ShouldWork(t *testing.T) { + t.Parallel() + + accnt, _ := state.NewPeerAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) + entry, err := state.NewPeerJournalEntryNextShardId(accnt, 0) + + assert.NotNil(t, entry) + assert.Nil(t, err) +} + +func TestPeerJournalEntryNextShardId_RevertOkValsShouldWork(t *testing.T) { + t.Parallel() + + accnt, _ := state.NewPeerAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) + entry, _ := state.NewPeerJournalEntryNextShardId(accnt, 10) + _, err := entry.Revert() + + assert.Nil(t, err) + assert.Equal(t, uint32(10), accnt.NextShardId) +} + +func TestPeerJournalEntryInWaitingList_NilAccountShouldErr(t *testing.T) { + t.Parallel() + + entry, err := state.NewPeerJournalEntryInWaitingList(nil, true) + + assert.Nil(t, entry) + assert.Equal(t, state.ErrNilAccountHandler, err) +} + +func TestPeerJournalEntryInWaitingList_ShouldWork(t *testing.T) { + t.Parallel() + + accnt, _ := state.NewPeerAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) + entry, err := state.NewPeerJournalEntryInWaitingList(accnt, true) + + assert.NotNil(t, entry) + assert.Nil(t, err) +} + +func TestPeerJournalEntryInWaitingList_RevertOkValsShouldWork(t *testing.T) { + t.Parallel() + + accnt, _ := state.NewPeerAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) + entry, _ := state.NewPeerJournalEntryInWaitingList(accnt, true) + _, err := entry.Revert() + + assert.Nil(t, err) + assert.True(t, accnt.NodeInWaitingList) +} + +func TestPeerJournalEntryValidatorSuccessRate_NilAccountShouldErr(t *testing.T) { + t.Parallel() + + successRate := state.SignRate{NrFailure: 10, NrSuccess: 10} + + entry, err := state.NewPeerJournalEntryValidatorSuccessRate(nil, successRate) + + assert.Nil(t, entry) + assert.Equal(t, state.ErrNilAccountHandler, err) +} + +func TestPeerJournalEntryValidatorSuccessRate_ShouldWork(t *testing.T) { + t.Parallel() + + successRate := state.SignRate{NrFailure: 10, NrSuccess: 10} + accnt, _ := state.NewPeerAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) + entry, err := state.NewPeerJournalEntryValidatorSuccessRate(accnt, successRate) + + assert.NotNil(t, entry) + assert.Nil(t, err) +} + +func TestPeerJournalEntryValidatorSuccessRate_RevertOkValsShouldWork(t *testing.T) { + t.Parallel() + + successRate := state.SignRate{NrFailure: 10, NrSuccess: 10} + accnt, _ := state.NewPeerAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) + entry, _ := state.NewPeerJournalEntryValidatorSuccessRate(accnt, successRate) + _, err := entry.Revert() + + assert.Nil(t, err) + assert.Equal(t, successRate, accnt.ValidatorSuccessRate) +} + +func TestPeerJournalEntryLeaderSuccessRate_NilAccountShouldErr(t *testing.T) { + 
t.Parallel() + + successRate := state.SignRate{NrFailure: 10, NrSuccess: 10} + + entry, err := state.NewPeerJournalEntryLeaderSuccessRate(nil, successRate) + + assert.Nil(t, entry) + assert.Equal(t, state.ErrNilAccountHandler, err) +} + +func TestPeerJournalEntryLeaderSuccessRate_ShouldWork(t *testing.T) { + t.Parallel() + + successRate := state.SignRate{NrFailure: 10, NrSuccess: 10} + accnt, _ := state.NewPeerAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) + entry, err := state.NewPeerJournalEntryLeaderSuccessRate(accnt, successRate) + + assert.NotNil(t, entry) + assert.Nil(t, err) +} + +func TestPeerJournalEntryLeaderSuccessRate_RevertOkValsShouldWork(t *testing.T) { + t.Parallel() + + successRate := state.SignRate{NrFailure: 10, NrSuccess: 10} + accnt, _ := state.NewPeerAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) + entry, _ := state.NewPeerJournalEntryLeaderSuccessRate(accnt, successRate) + _, err := entry.Revert() + + assert.Nil(t, err) + assert.Equal(t, successRate, accnt.LeaderSuccessRate) +} + +func TestPeerJournalEntryRating_NilAccountShouldErr(t *testing.T) { + t.Parallel() + + entry, err := state.NewPeerJournalEntryRating(nil, 10) + + assert.Nil(t, entry) + assert.Equal(t, state.ErrNilAccountHandler, err) +} + +func TestPeerJournalEntryRating_ShouldWork(t *testing.T) { + t.Parallel() + + accnt, _ := state.NewPeerAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) + entry, err := state.NewPeerJournalEntryRating(accnt, 10) + + assert.NotNil(t, entry) + assert.Nil(t, err) +} + +func TestPeerJournalEntryRating_RevertOkValsShouldWork(t *testing.T) { + t.Parallel() + + accnt, _ := state.NewPeerAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) + entry, _ := state.NewPeerJournalEntryRating(accnt, 10) + _, err := entry.Revert() + + assert.Nil(t, err) + assert.Equal(t, uint32(10), accnt.Rating) +} From 3783d5ce1f4a28f04719f579e4da691420121053 Mon Sep 17 00:00:00 2001 From: Robert Sasu Date: Mon, 29 Jul 2019 14:15:56 +0300 Subject: [PATCH 046/234] added comment --- data/state/factory/accountCreatorFactory.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data/state/factory/accountCreatorFactory.go b/data/state/factory/accountCreatorFactory.go index 069e51445a8..70e297b53b0 100644 --- a/data/state/factory/accountCreatorFactory.go +++ b/data/state/factory/accountCreatorFactory.go @@ -4,7 +4,7 @@ import ( "github.com/ElrondNetwork/elrond-go/data/state" ) -// Type defines account types +// Type defines account types to save in accounts trie type Type uint8 const ( From 0aa0fabb7e753e5e7bed8bd8bf2281ea80e5234e Mon Sep 17 00:00:00 2001 From: Robert Sasu Date: Mon, 29 Jul 2019 17:40:42 +0300 Subject: [PATCH 047/234] fix after review --- data/state/peerJournalEntries.go | 62 ++++++++++++++++---------------- 1 file changed, 31 insertions(+), 31 deletions(-) diff --git a/data/state/peerJournalEntries.go b/data/state/peerJournalEntries.go index b0ceeb2f33b..583995d0be2 100644 --- a/data/state/peerJournalEntries.go +++ b/data/state/peerJournalEntries.go @@ -68,14 +68,14 @@ type PeerJournalEntryBLSPublicKey struct { } // NewPeerJournalEntryBLSPublicKey outputs a new PeerJournalEntryBLSPublicKey implementation used to revert a round change -func NewPeerJournalEntryBLSPublicKey(account *PeerAccount, oldPubKey []byte) (*PeerJournalEntryBLSPublicKey, error) { +func NewPeerJournalEntryBLSPublicKey(account *PeerAccount, oldBLSPubKey []byte) (*PeerJournalEntryBLSPublicKey, error) { if account == nil { return nil, ErrNilAccountHandler } return 
&PeerJournalEntryBLSPublicKey{ account: account, - oldBLSPubKey: oldPubKey, + oldBLSPubKey: oldBLSPubKey, }, nil } @@ -115,8 +115,8 @@ func (jeb *PeerJournalEntryStake) Revert() (AccountHandler, error) { // PeerJournalEntryJailTime is used to revert a balance change type PeerJournalEntryJailTime struct { - account *PeerAccount - oldPeriod TimePeriod + account *PeerAccount + oldJailTime TimePeriod } // NewPeerJournalEntryJailTime outputs a new PeerJournalEntryJailTime implementation used to revert a state change @@ -126,14 +126,14 @@ func NewPeerJournalEntryJailTime(account *PeerAccount, oldJailTime TimePeriod) ( } return &PeerJournalEntryJailTime{ - account: account, - oldPeriod: oldJailTime, + account: account, + oldJailTime: oldJailTime, }, nil } // Revert applies undo operation func (jeb *PeerJournalEntryJailTime) Revert() (AccountHandler, error) { - jeb.account.JailTime = jeb.oldPeriod + jeb.account.JailTime = jeb.oldJailTime return jeb.account, nil } @@ -145,14 +145,14 @@ type PeerJournalEntryCurrentShardId struct { } // NewPeerJournalEntryCurrentShardId outputs a new PeerJournalEntryCurrentShardId implementation used to revert a state change -func NewPeerJournalEntryCurrentShardId(account *PeerAccount, oldShId uint32) (*PeerJournalEntryCurrentShardId, error) { +func NewPeerJournalEntryCurrentShardId(account *PeerAccount, oldShardId uint32) (*PeerJournalEntryCurrentShardId, error) { if account == nil { return nil, ErrNilAccountHandler } return &PeerJournalEntryCurrentShardId{ account: account, - oldShardId: oldShId, + oldShardId: oldShardId, }, nil } @@ -170,14 +170,14 @@ type PeerJournalEntryNextShardId struct { } // NewPeerJournalEntryNextShardId outputs a new PeerJournalEntryNextShardId implementation used to revert a state change -func NewPeerJournalEntryNextShardId(account *PeerAccount, oldShId uint32) (*PeerJournalEntryNextShardId, error) { +func NewPeerJournalEntryNextShardId(account *PeerAccount, oldShardId uint32) (*PeerJournalEntryNextShardId, error) { if account == nil { return nil, ErrNilAccountHandler } return &PeerJournalEntryNextShardId{ account: account, - oldShardId: oldShId, + oldShardId: oldShardId, }, nil } @@ -190,84 +190,84 @@ func (jeb *PeerJournalEntryNextShardId) Revert() (AccountHandler, error) { // PeerJournalEntryInWaitingList is used to revert a shardId change type PeerJournalEntryInWaitingList struct { - account *PeerAccount - oldInWaitingList bool + account *PeerAccount + oldNodeInWaitingList bool } // NewPeerJournalEntryInWaitingList outputs a new PeerJournalEntryInWaitingList implementation used to revert a state change func NewPeerJournalEntryInWaitingList( account *PeerAccount, - oldInWaitingList bool, + oldNodeInWaitingList bool, ) (*PeerJournalEntryInWaitingList, error) { if account == nil { return nil, ErrNilAccountHandler } return &PeerJournalEntryInWaitingList{ - account: account, - oldInWaitingList: oldInWaitingList, + account: account, + oldNodeInWaitingList: oldNodeInWaitingList, }, nil } // Revert applies undo operation func (jeb *PeerJournalEntryInWaitingList) Revert() (AccountHandler, error) { - jeb.account.NodeInWaitingList = jeb.oldInWaitingList + jeb.account.NodeInWaitingList = jeb.oldNodeInWaitingList return jeb.account, nil } // PeerJournalEntryValidatorSuccessRate is used to revert a success rate change type PeerJournalEntryValidatorSuccessRate struct { - account *PeerAccount - oldSuccessRate SignRate + account *PeerAccount + oldValidatorSuccessRate SignRate } -// NewPeerJournalEntryInWaitingList outputs a new 
PeerJournalEntryValidatorSuccessRate implementation used to revert a state change +// NewPeerJournalEntryValidatorSuccessRate outputs a new PeerJournalEntryValidatorSuccessRate implementation used to revert a state change func NewPeerJournalEntryValidatorSuccessRate( account *PeerAccount, - oldSuccessRate SignRate, + oldValidatorSuccessRate SignRate, ) (*PeerJournalEntryValidatorSuccessRate, error) { if account == nil { return nil, ErrNilAccountHandler } return &PeerJournalEntryValidatorSuccessRate{ - account: account, - oldSuccessRate: oldSuccessRate, + account: account, + oldValidatorSuccessRate: oldValidatorSuccessRate, }, nil } // Revert applies undo operation func (jeb *PeerJournalEntryValidatorSuccessRate) Revert() (AccountHandler, error) { - jeb.account.ValidatorSuccessRate = jeb.oldSuccessRate + jeb.account.ValidatorSuccessRate = jeb.oldValidatorSuccessRate return jeb.account, nil } // PeerJournalEntryLeaderSuccessRate is used to revert a success rate change type PeerJournalEntryLeaderSuccessRate struct { - account *PeerAccount - oldSuccessRate SignRate + account *PeerAccount + oldLeaderSuccessRate SignRate } -// NewPeerJournalEntryInWaitingList outputs a new PeerJournalEntryLeaderSuccessRate implementation used to revert a state change +// NewPeerJournalEntryLeaderSuccessRate outputs a new PeerJournalEntryLeaderSuccessRate implementation used to revert a state change func NewPeerJournalEntryLeaderSuccessRate( account *PeerAccount, - oldSuccessRate SignRate, + oldLeaderSuccessRate SignRate, ) (*PeerJournalEntryLeaderSuccessRate, error) { if account == nil { return nil, ErrNilAccountHandler } return &PeerJournalEntryLeaderSuccessRate{ - account: account, - oldSuccessRate: oldSuccessRate, + account: account, + oldLeaderSuccessRate: oldLeaderSuccessRate, }, nil } // Revert applies undo operation func (jeb *PeerJournalEntryLeaderSuccessRate) Revert() (AccountHandler, error) { - jeb.account.LeaderSuccessRate = jeb.oldSuccessRate + jeb.account.LeaderSuccessRate = jeb.oldLeaderSuccessRate return jeb.account, nil } From 1abd226195ec2807633151c1c99ea0c6c0e4feef Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Wed, 31 Jul 2019 10:55:43 +0300 Subject: [PATCH 048/234] node, main, process, sharding, integrationTests: add nodesCoordinator on the node Move checks for nodes list on the nodesCoordinator creation Adapt unit tests and integration tests. 
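Sketch of the intended wiring (illustrative only, not part of the diff below: the wrapper
function name and its parameter list are made up for the example, and the elrond-go
node, sharding and hashing packages plus math/big are assumed to be imported). It only
combines the calls introduced by this commit - building the per-shard validators map,
creating the coordinator with both consensus group sizes, and passing it to the node
through the new option:

    // wireNodesCoordinator is an illustrative helper, not part of this patch.
    func wireNodesCoordinator(
        nodesConfig *sharding.NodesSetup,
        hasher hashing.Hasher,
        shardId uint32,
        nbShards uint32,
        shardConsensusGroupSize int,
        metaConsensusGroupSize int,
        shardCoordinator sharding.Coordinator,
    ) (*node.Node, error) {
        // full eligible list, for every shard, keyed by shard id
        validatorsMap := make(map[uint32][]sharding.Validator)
        for shId, pubKeys := range nodesConfig.InitialNodesPubKeys() {
            shardValidators := make([]sharding.Validator, 0, len(pubKeys))
            for _, pk := range pubKeys {
                // stake and rating are not used yet, so placeholder values are passed
                v, err := sharding.NewValidator(big.NewInt(0), 1, []byte(pk))
                if err != nil {
                    return nil, err
                }
                shardValidators = append(shardValidators, v)
            }
            validatorsMap[shId] = shardValidators
        }

        // the coordinator now receives both consensus group sizes and the whole map,
        // instead of only the eligible list of its own shard
        nodesCoordinator, err := sharding.NewIndexHashedNodesCoordinator(
            shardConsensusGroupSize,
            metaConsensusGroupSize,
            hasher,
            shardId,
            nbShards,
            validatorsMap,
        )
        if err != nil {
            return nil, err
        }

        // the node receives the coordinator through a dedicated option
        return node.NewNode(
            node.WithShardCoordinator(shardCoordinator),
            node.WithNodesCoordinator(nodesCoordinator),
            // ... the remaining node options are unchanged ...
        )
    }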
--- cmd/node/main.go | 16 +- consensus/spos/consensusCore.go | 37 ++-- consensus/spos/consensusCoreValidator_test.go | 28 +-- consensus/spos/consensusCore_test.go | 58 +++-- integrationTests/consensus/consensus_test.go | 44 +++- integrationTests/consensus/testInitializer.go | 119 +++++++++-- .../frontend/wallet/txInterception_test.go | 1 + .../multiShard/block/testInitializer.go | 178 +++++++++------- .../multiShard/metablock/testInitializer.go | 165 ++++++++++----- .../smartContract/testInitilalizer.go | 199 ++++++++++-------- .../multiShard/transaction/testInitializer.go | 1 + .../block/interceptedRequestHdr_test.go | 47 ++--- .../interceptedRequestTxBlockBody_test.go | 14 +- .../singleShard/block/testInitializer.go | 101 +++++++-- .../transaction/interceptedBulkTx_test.go | 1 + .../transaction/interceptedResolvedTx_test.go | 1 + node/defineOptions.go | 10 + node/errors.go | 6 +- node/node.go | 57 +---- .../interceptors/headerInterceptor_test.go | 3 +- .../interceptorsContainerFactory_test.go | 7 +- .../interceptorsContainerFactory_test.go | 7 +- process/mock/nodesCoordinatorMock.go | 44 ++-- sharding/errors.go | 7 +- sharding/indexHashedNodesCoordinator.go | 94 +++++---- sharding/indexHashedNodesCoordinator_test.go | 95 ++++++--- sharding/interface.go | 2 - 27 files changed, 826 insertions(+), 516 deletions(-) diff --git a/cmd/node/main.go b/cmd/node/main.go index 9ea1914b2af..b028f435b31 100644 --- a/cmd/node/main.go +++ b/cmd/node/main.go @@ -556,6 +556,7 @@ func startNode(ctx *cli.Context, log *logger.Logger, version string) error { privKey, pubKey, shardCoordinator, + nodesCoordinator, coreComponents, stateComponents, dataComponents, @@ -731,15 +732,9 @@ func createNodesCoordinator( return nil, err } - var consensusGroupSize int nbShards := nodesConfig.NumberOfShards() - - if shardId == sharding.MetachainShardId { - consensusGroupSize = int(nodesConfig.MetaChainConsensusGroupSize) - } else { - consensusGroupSize = int(nodesConfig.ConsensusGroupSize) - } - + shardConsensusGroupSize := int(nodesConfig.MetaChainConsensusGroupSize) + metaConsensusGroupSize := int(nodesConfig.ConsensusGroupSize) initNodesPubKeys := nodesConfig.InitialNodesPubKeys() initValidators := make(map[uint32][]sharding.Validator) @@ -758,7 +753,8 @@ func createNodesCoordinator( } nodesCoordinator, err := sharding.NewIndexHashedNodesCoordinator( - consensusGroupSize, + shardConsensusGroupSize, + metaConsensusGroupSize, hasher, shardId, nbShards, @@ -836,6 +832,7 @@ func createNode( privKey crypto.PrivateKey, pubKey crypto.PublicKey, shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, core *factory.Core, state *factory.State, data *factory.Data, @@ -866,6 +863,7 @@ func createNode( node.WithGenesisTime(time.Unix(nodesConfig.StartTime, 0)), node.WithRounder(process.Rounder), node.WithShardCoordinator(shardCoordinator), + node.WithNodesCoordinator(nodesCoordinator), node.WithUint64ByteSliceConverter(core.Uint64ByteSliceConverter), node.WithSingleSigner(crypto.SingleSigner), node.WithMultiSigner(crypto.MultiSigner), diff --git a/consensus/spos/consensusCore.go b/consensus/spos/consensusCore.go index 6c4b066d384..948cf78120c 100644 --- a/consensus/spos/consensusCore.go +++ b/consensus/spos/consensusCore.go @@ -14,21 +14,21 @@ import ( // ConsensusCore implements ConsensusCoreHandler and provides access to common functionalities // for the rest of the consensus structures type ConsensusCore struct { - blockChain data.ChainHandler - blockProcessor process.BlockProcessor - blocksTracker 
process.BlocksTracker - bootstrapper process.Bootstrapper - broadcastMessenger consensus.BroadcastMessenger - chronologyHandler consensus.ChronologyHandler - hasher hashing.Hasher - marshalizer marshal.Marshalizer - blsPrivateKey crypto.PrivateKey - blsSingleSigner crypto.SingleSigner - multiSigner crypto.MultiSigner - rounder consensus.Rounder - shardCoordinator sharding.Coordinator - syncTimer ntp.SyncTimer - validatorGroupSelector sharding.NodesCoordinator + blockChain data.ChainHandler + blockProcessor process.BlockProcessor + blocksTracker process.BlocksTracker + bootstrapper process.Bootstrapper + broadcastMessenger consensus.BroadcastMessenger + chronologyHandler consensus.ChronologyHandler + hasher hashing.Hasher + marshalizer marshal.Marshalizer + blsPrivateKey crypto.PrivateKey + blsSingleSigner crypto.SingleSigner + multiSigner crypto.MultiSigner + rounder consensus.Rounder + shardCoordinator sharding.Coordinator + nodesCoordinator sharding.NodesCoordinator + syncTimer ntp.SyncTimer } // NewConsensusCore creates a new ConsensusCore instance @@ -46,8 +46,9 @@ func NewConsensusCore( multiSigner crypto.MultiSigner, rounder consensus.Rounder, shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, syncTimer ntp.SyncTimer, - validatorGroupSelector sharding.NodesCoordinator) (*ConsensusCore, error) { +) (*ConsensusCore, error) { consensusCore := &ConsensusCore{ blockChain, @@ -63,8 +64,8 @@ func NewConsensusCore( multiSigner, rounder, shardCoordinator, + nodesCoordinator, syncTimer, - validatorGroupSelector, } err := ValidateConsensusCore(consensusCore) @@ -137,7 +138,7 @@ func (cc *ConsensusCore) SyncTimer() ntp.SyncTimer { // NodesCoordinator gets the NodesCoordinator stored in the ConsensusCore func (cc *ConsensusCore) NodesCoordinator() sharding.NodesCoordinator { - return cc.validatorGroupSelector + return cc.nodesCoordinator } // RandomnessPrivateKey returns the BLS private key stored in the ConsensusStore diff --git a/consensus/spos/consensusCoreValidator_test.go b/consensus/spos/consensusCoreValidator_test.go index dfcf67a632b..8b908126912 100644 --- a/consensus/spos/consensusCoreValidator_test.go +++ b/consensus/spos/consensusCoreValidator_test.go @@ -27,19 +27,19 @@ func initConsensusDataContainer() *ConsensusCore { return &ConsensusCore{ blockChain: blockChain, blockProcessor: blockProcessorMock, - blocksTracker: blocksTrackerMock, - bootstrapper: bootstrapperMock, - broadcastMessenger: broadcastMessengerMock, - chronologyHandler: chronologyHandlerMock, - hasher: hasherMock, - marshalizer: marshalizerMock, - blsPrivateKey: blsPrivateKeyMock, - blsSingleSigner: blsSingleSignerMock, - multiSigner: multiSignerMock, - rounder: rounderMock, - shardCoordinator: shardCoordinatorMock, - syncTimer: syncTimerMock, - validatorGroupSelector: validatorGroupSelector, + blocksTracker: blocksTrackerMock, + bootstrapper: bootstrapperMock, + broadcastMessenger: broadcastMessengerMock, + chronologyHandler: chronologyHandlerMock, + hasher: hasherMock, + marshalizer: marshalizerMock, + blsPrivateKey: blsPrivateKeyMock, + blsSingleSigner: blsSingleSignerMock, + multiSigner: multiSignerMock, + rounder: rounderMock, + shardCoordinator: shardCoordinatorMock, + syncTimer: syncTimerMock, + nodesCoordinator: validatorGroupSelector, } } @@ -157,7 +157,7 @@ func TestConsensusContainerValidator_ValidateNilValidatorGroupSelectorShouldFail t.Parallel() container := initConsensusDataContainer() - container.validatorGroupSelector = nil + container.nodesCoordinator = nil err := 
ValidateConsensusCore(container) diff --git a/consensus/spos/consensusCore_test.go b/consensus/spos/consensusCore_test.go index 0828c83661b..11cd925ffdd 100644 --- a/consensus/spos/consensusCore_test.go +++ b/consensus/spos/consensusCore_test.go @@ -27,8 +27,9 @@ func TestConsensusCore_WithNilBlockchainShouldFail(t *testing.T) { consensusCoreMock.MultiSigner(), consensusCoreMock.Rounder(), consensusCoreMock.ShardCoordinator(), + consensusCoreMock.NodesCoordinator(), consensusCoreMock.SyncTimer(), - consensusCoreMock.NodesCoordinator()) + ) assert.Nil(t, consensusCore) assert.Equal(t, spos.ErrNilBlockChain, err) @@ -53,8 +54,9 @@ func TestConsensusCore_WithNilBlockProcessorShouldFail(t *testing.T) { consensusCoreMock.MultiSigner(), consensusCoreMock.Rounder(), consensusCoreMock.ShardCoordinator(), + consensusCoreMock.NodesCoordinator(), consensusCoreMock.SyncTimer(), - consensusCoreMock.NodesCoordinator()) + ) assert.Nil(t, consensusCore) assert.Equal(t, spos.ErrNilBlockProcessor, err) @@ -79,8 +81,9 @@ func TestConsensusCore_WithNilBlocksTrackerShouldFail(t *testing.T) { consensusCoreMock.MultiSigner(), consensusCoreMock.Rounder(), consensusCoreMock.ShardCoordinator(), + consensusCoreMock.NodesCoordinator(), consensusCoreMock.SyncTimer(), - consensusCoreMock.NodesCoordinator()) + ) assert.Nil(t, consensusCore) assert.Equal(t, spos.ErrNilBlocksTracker, err) @@ -105,8 +108,9 @@ func TestConsensusCore_WithNilBootstrapperShouldFail(t *testing.T) { consensusCoreMock.MultiSigner(), consensusCoreMock.Rounder(), consensusCoreMock.ShardCoordinator(), + consensusCoreMock.NodesCoordinator(), consensusCoreMock.SyncTimer(), - consensusCoreMock.NodesCoordinator()) + ) assert.Nil(t, consensusCore) assert.Equal(t, spos.ErrNilBootstrapper, err) @@ -131,8 +135,9 @@ func TestConsensusCore_WithNilBroadcastMessengerShouldFail(t *testing.T) { consensusCoreMock.MultiSigner(), consensusCoreMock.Rounder(), consensusCoreMock.ShardCoordinator(), + consensusCoreMock.NodesCoordinator(), consensusCoreMock.SyncTimer(), - consensusCoreMock.NodesCoordinator()) + ) assert.Nil(t, consensusCore) assert.Equal(t, spos.ErrNilBroadcastMessenger, err) @@ -157,8 +162,9 @@ func TestConsensusCore_WithNilChronologyShouldFail(t *testing.T) { consensusCoreMock.MultiSigner(), consensusCoreMock.Rounder(), consensusCoreMock.ShardCoordinator(), + consensusCoreMock.NodesCoordinator(), consensusCoreMock.SyncTimer(), - consensusCoreMock.NodesCoordinator()) + ) assert.Nil(t, consensusCore) assert.Equal(t, spos.ErrNilChronologyHandler, err) @@ -183,8 +189,9 @@ func TestConsensusCore_WithNilHasherShouldFail(t *testing.T) { consensusCoreMock.MultiSigner(), consensusCoreMock.Rounder(), consensusCoreMock.ShardCoordinator(), + consensusCoreMock.NodesCoordinator(), consensusCoreMock.SyncTimer(), - consensusCoreMock.NodesCoordinator()) + ) assert.Nil(t, consensusCore) assert.Equal(t, spos.ErrNilHasher, err) @@ -209,8 +216,9 @@ func TestConsensusCore_WithNilMarshalizerShouldFail(t *testing.T) { consensusCoreMock.MultiSigner(), consensusCoreMock.Rounder(), consensusCoreMock.ShardCoordinator(), + consensusCoreMock.NodesCoordinator(), consensusCoreMock.SyncTimer(), - consensusCoreMock.NodesCoordinator()) + ) assert.Nil(t, consensusCore) assert.Equal(t, spos.ErrNilMarshalizer, err) @@ -235,8 +243,9 @@ func TestConsensusCore_WithNilBlsPrivateKeyShouldFail(t *testing.T) { consensusCoreMock.MultiSigner(), consensusCoreMock.Rounder(), consensusCoreMock.ShardCoordinator(), + consensusCoreMock.NodesCoordinator(), consensusCoreMock.SyncTimer(), - 
consensusCoreMock.NodesCoordinator()) + ) assert.Nil(t, consensusCore) assert.Equal(t, spos.ErrNilBlsPrivateKey, err) @@ -261,8 +270,9 @@ func TestConsensusCore_WithNilBlsSingleSignerShouldFail(t *testing.T) { consensusCoreMock.MultiSigner(), consensusCoreMock.Rounder(), consensusCoreMock.ShardCoordinator(), + consensusCoreMock.NodesCoordinator(), consensusCoreMock.SyncTimer(), - consensusCoreMock.NodesCoordinator()) + ) assert.Nil(t, consensusCore) assert.Equal(t, spos.ErrNilBlsSingleSigner, err) @@ -287,8 +297,9 @@ func TestConsensusCore_WithNilMultiSignerShouldFail(t *testing.T) { nil, consensusCoreMock.Rounder(), consensusCoreMock.ShardCoordinator(), + consensusCoreMock.NodesCoordinator(), consensusCoreMock.SyncTimer(), - consensusCoreMock.NodesCoordinator()) + ) assert.Nil(t, consensusCore) assert.Equal(t, spos.ErrNilMultiSigner, err) @@ -313,8 +324,9 @@ func TestConsensusCore_WithNilRounderShouldFail(t *testing.T) { consensusCoreMock.MultiSigner(), nil, consensusCoreMock.ShardCoordinator(), + consensusCoreMock.NodesCoordinator(), consensusCoreMock.SyncTimer(), - consensusCoreMock.NodesCoordinator()) + ) assert.Nil(t, consensusCore) assert.Equal(t, spos.ErrNilRounder, err) @@ -339,14 +351,15 @@ func TestConsensusCore_WithNilShardCoordinatorShouldFail(t *testing.T) { consensusCoreMock.MultiSigner(), consensusCoreMock.Rounder(), nil, + consensusCoreMock.NodesCoordinator(), consensusCoreMock.SyncTimer(), - consensusCoreMock.NodesCoordinator()) + ) assert.Nil(t, consensusCore) assert.Equal(t, spos.ErrNilShardCoordinator, err) } -func TestConsensusCore_WithNilSyncTimerShouldFail(t *testing.T) { +func TestConsensusCore_WithNilValidatorGroupSelectorShouldFail(t *testing.T) { t.Parallel() consensusCoreMock := mock.InitConsensusCore() @@ -366,13 +379,14 @@ func TestConsensusCore_WithNilSyncTimerShouldFail(t *testing.T) { consensusCoreMock.Rounder(), consensusCoreMock.ShardCoordinator(), nil, - consensusCoreMock.NodesCoordinator()) + consensusCoreMock.SyncTimer(), + ) assert.Nil(t, consensusCore) - assert.Equal(t, spos.ErrNilSyncTimer, err) + assert.Equal(t, spos.ErrNilValidatorGroupSelector, err) } -func TestConsensusCore_WithNilValidatorGroupSelectorShouldFail(t *testing.T) { +func TestConsensusCore_WithNilSyncTimerShouldFail(t *testing.T) { t.Parallel() consensusCoreMock := mock.InitConsensusCore() @@ -391,11 +405,12 @@ func TestConsensusCore_WithNilValidatorGroupSelectorShouldFail(t *testing.T) { consensusCoreMock.MultiSigner(), consensusCoreMock.Rounder(), consensusCoreMock.ShardCoordinator(), - consensusCoreMock.SyncTimer(), - nil) + consensusCoreMock.NodesCoordinator(), + nil, + ) assert.Nil(t, consensusCore) - assert.Equal(t, spos.ErrNilValidatorGroupSelector, err) + assert.Equal(t, spos.ErrNilSyncTimer, err) } func TestConsensusCore_CreateConsensusCoreShouldWork(t *testing.T) { @@ -417,8 +432,9 @@ func TestConsensusCore_CreateConsensusCoreShouldWork(t *testing.T) { consensusCoreMock.MultiSigner(), consensusCoreMock.Rounder(), consensusCoreMock.ShardCoordinator(), + consensusCoreMock.NodesCoordinator(), consensusCoreMock.SyncTimer(), - consensusCoreMock.NodesCoordinator()) + ) assert.NotNil(t, consensusCore) assert.Nil(t, err) diff --git a/integrationTests/consensus/consensus_test.go b/integrationTests/consensus/consensus_test.go index 01a5305b102..a3eb789e9d5 100644 --- a/integrationTests/consensus/consensus_test.go +++ b/integrationTests/consensus/consensus_test.go @@ -28,7 +28,14 @@ func getPkEncoded(pubKey crypto.PublicKey) string { return encodeAddress(pk) } -func 
initNodesAndTest(numNodes, consensusSize, numInvalid uint32, roundTime uint64, consensusType string) ([]*testNode, p2p.Messenger, *sync.Map) { +func initNodesAndTest( + numNodes, + consensusSize, + numInvalid uint32, + roundTime uint64, + consensusType string, +) ([]*testNode, p2p.Messenger, *sync.Map) { + fmt.Println("Step 1. Setup nodes...") advertiser := createMessengerWithKadDht(context.Background(), "") @@ -43,24 +50,47 @@ func initNodesAndTest(numNodes, consensusSize, numInvalid uint32, roundTime uint getConnectableAddress(advertiser), consensusType, ) - displayAndStartNodes(nodes) + + for _, nodesList := range nodes { + displayAndStartNodes(nodesList) + } if numInvalid < numNodes { for i := uint32(0); i < numInvalid; i++ { - nodes[i].blkProcessor.ProcessBlockCalled = func(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler, haveTime func() time.Duration) error { - fmt.Println("process block invalid ", header.GetRound(), header.GetNonce(), getPkEncoded(nodes[i].pk)) + nodes[0][i].blkProcessor.ProcessBlockCalled = func( + blockChain data.ChainHandler, + header data.HeaderHandler, + body data.BodyHandler, + haveTime func() time.Duration, + ) error { + + fmt.Println( + "process block invalid ", + header.GetRound(), + header.GetNonce(), + getPkEncoded(nodes[0][i].pk), + ) return process.ErrInvalidBlockHash } - nodes[i].blkProcessor.CreateBlockHeaderCalled = func(body data.BodyHandler, round uint32, haveTime func() bool) (handler data.HeaderHandler, e error) { + nodes[0][i].blkProcessor.CreateBlockHeaderCalled = func( + body data.BodyHandler, + round uint32, + haveTime func() bool, + ) (handler data.HeaderHandler, e error) { + return nil, process.ErrAccountStateDirty } - nodes[i].blkProcessor.CreateBlockCalled = func(round uint32, haveTime func() bool) (handler data.BodyHandler, e error) { + nodes[0][i].blkProcessor.CreateBlockCalled = func( + round uint32, + haveTime func() bool, + ) (handler data.BodyHandler, e error) { + return nil, process.ErrWrongTypeAssertion } } } - return nodes, advertiser, concMap + return nodes[0], advertiser, concMap } func startNodesWithCommitBlock(nodes []*testNode, mutex *sync.Mutex, nonceForRoundMap map[uint32]uint64, totalCalled *int) { diff --git a/integrationTests/consensus/testInitializer.go b/integrationTests/consensus/testInitializer.go index df081773bf0..8615d95bc8e 100644 --- a/integrationTests/consensus/testInitializer.go +++ b/integrationTests/consensus/testInitializer.go @@ -44,6 +44,7 @@ import ( "github.com/ElrondNetwork/elrond-go/storage/storageUnit" "github.com/btcsuite/btcd/btcec" libp2pCrypto "github.com/libp2p/go-libp2p-core/crypto" + "math/big" ) const blsConsensusType = "bls" @@ -73,6 +74,47 @@ type testNode struct { metachainHdrRecv int32 } +type keyPair struct { + sk crypto.PrivateKey + pk crypto.PublicKey +} + +type cryptoParams struct { + keyGen crypto.KeyGenerator + keys map[uint32][]*keyPair + singleSigner crypto.SingleSigner +} + +func genValidatorsFromPubKeys(pubKeysMap map[uint32][]string) map[uint32][]sharding.Validator { + validatorsMap := make(map[uint32][]sharding.Validator) + + for shardId, shardNodesPks := range pubKeysMap { + shardValidators := make([]sharding.Validator, 0) + for i := 0; i < len(shardNodesPks); i++ { + v, _ := sharding.NewValidator(big.NewInt(0), 1, []byte(shardNodesPks[i])) + shardValidators = append(shardValidators, v) + } + validatorsMap[shardId] = shardValidators + } + + return validatorsMap +} + +func pubKeysMapFromKeysMap(keyPairMap map[uint32][]*keyPair) 
map[uint32][]string { + keysMap := make(map[uint32][]string, 0) + + for shardId, pairList := range keyPairMap { + shardKeys := make([]string, len(pairList)) + for i, pair := range pairList { + b, _ := pair.pk.ToByteArray() + shardKeys[i] = string(b) + } + keysMap[shardId] = shardKeys + } + + return keysMap +} + func createMessengerWithKadDht(ctx context.Context, initialAddr string) p2p.Messenger { prvKey, _ := ecdsa.GenerateKey(btcec.S256(), r) sk := (*libp2pCrypto.Secp256k1PrivateKey)(prvKey) @@ -191,21 +233,37 @@ func createAccountsDB(marshalizer marshal.Marshalizer) state.AccountsAdapter { return adb } -func initialPrivPubKeys(numConsensus int) ([]crypto.PrivateKey, []crypto.PublicKey, crypto.KeyGenerator) { - privKeys := make([]crypto.PrivateKey, 0) - pubKeys := make([]crypto.PublicKey, 0) - - testSuite := kyber.NewSuitePairingBn256() - testKeyGen := signing.NewKeyGenerator(testSuite) +func createCryptoParams(nodesPerShard int, nbMetaNodes int, nbShards int) *cryptoParams { + suite := kyber.NewSuitePairingBn256() + singleSigner := &singlesig.SchnorrSigner{} + keyGen := signing.NewKeyGenerator(suite) + + keysMap := make(map[uint32][]*keyPair) + keyPairs := make([]*keyPair, nodesPerShard) + for shardId := 0; shardId < nbShards; shardId++ { + for n := 0; n < nodesPerShard; n++ { + kp := &keyPair{} + kp.sk, kp.pk = keyGen.GeneratePair() + keyPairs[n] = kp + } + keysMap[uint32(shardId)] = keyPairs + } - for i := 0; i < numConsensus; i++ { - sk, pk := testKeyGen.GeneratePair() + keyPairs = make([]*keyPair, nbMetaNodes) + for n := 0; n < nbMetaNodes; n++ { + kp := &keyPair{} + kp.sk, kp.pk = keyGen.GeneratePair() + keyPairs[n] = kp + } + keysMap[sharding.MetachainShardId] = keyPairs - privKeys = append(privKeys, sk) - pubKeys = append(pubKeys, pk) + params := &cryptoParams{ + keys: keysMap, + keyGen: keyGen, + singleSigner: singleSigner, } - return privKeys, pubKeys, testKeyGen + return params } func createHasher(consensusType string) hashing.Hasher { @@ -217,6 +275,7 @@ func createHasher(consensusType string) hashing.Hasher { func createConsensusOnlyNode( shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, shardId uint32, selfId uint32, initialAddr string, @@ -348,6 +407,7 @@ func createConsensusOnlyNode( node.WithAccountsAdapter(accntAdapter), node.WithKeyGen(testKeyGen), node.WithShardCoordinator(shardCoordinator), + node.WithNodesCoordinator(nodesCoordinator), node.WithBlockChain(blockChain), node.WithMultiSigner(testMultiSig), node.WithTxSingleSigner(singlesigner), @@ -374,41 +434,60 @@ func createNodes( roundTime uint64, serviceID string, consensusType string, -) []*testNode { +) map[uint32][]*testNode { + + nodes := make(map[uint32][]*testNode) + cp := createCryptoParams(nodesPerShard, 1, 1) + keysMap := pubKeysMapFromKeysMap(cp.keys) + validatorsMap := genValidatorsFromPubKeys(keysMap) + nodesList := make([]*testNode, nodesPerShard) - privKeys, pubKeys, testKeyGen := initialPrivPubKeys(nodesPerShard) - //first node generated will have is pk belonging to firstSkShardId - nodes := make([]*testNode, nodesPerShard) + pubKeys := make([]crypto.PublicKey, len(cp.keys[0])) + for idx, keyPairShard := range cp.keys[0] { + pubKeys[idx] = keyPairShard.pk + } for i := 0; i < nodesPerShard; i++ { testNode := &testNode{ shardId: uint32(0), } + kp := cp.keys[0][i] shardCoordinator, _ := sharding.NewMultiShardCoordinator(uint32(1), uint32(0)) + nodesCoordinator, _ := sharding.NewIndexHashedNodesCoordinator( + consensusSize, + 1, + createHasher(consensusType), + 0, + 1, 
+ validatorsMap, + ) + n, mes, blkProcessor, blkc := createConsensusOnlyNode( shardCoordinator, + nodesCoordinator, testNode.shardId, uint32(i), serviceID, uint32(consensusSize), roundTime, - privKeys[i], + kp.sk, pubKeys, - testKeyGen, + cp.keyGen, consensusType, ) testNode.node = n testNode.node = n - testNode.sk = privKeys[i] + testNode.sk = kp.sk testNode.mesenger = mes - testNode.pk = pubKeys[i] + testNode.pk = kp.pk testNode.blkProcessor = blkProcessor testNode.blkc = blkc - nodes[i] = testNode + nodesList[i] = testNode } + nodes[0] = nodesList return nodes } diff --git a/integrationTests/frontend/wallet/txInterception_test.go b/integrationTests/frontend/wallet/txInterception_test.go index dbdc4395cbe..cf66a6ffd0c 100644 --- a/integrationTests/frontend/wallet/txInterception_test.go +++ b/integrationTests/frontend/wallet/txInterception_test.go @@ -109,6 +109,7 @@ func testInterceptedTxFromFrontendGeneratedParams( shardCoordinator := &sharding.OneShardCoordinator{} nodesCoordinator, _ := sharding.NewIndexHashedNodesCoordinator( + 1, 1, mock.HasherMock{}, 0, diff --git a/integrationTests/multiShard/block/testInitializer.go b/integrationTests/multiShard/block/testInitializer.go index 2be94da9d4a..f5352c0adb7 100644 --- a/integrationTests/multiShard/block/testInitializer.go +++ b/integrationTests/multiShard/block/testInitializer.go @@ -95,6 +95,80 @@ type testNode struct { txsRecv int32 } +type keyPair struct { + sk crypto.PrivateKey + pk crypto.PublicKey +} + +type cryptoParams struct { + keyGen crypto.KeyGenerator + keys map[uint32][]*keyPair + singleSigner crypto.SingleSigner +} + +func genValidatorsFromPubKeys(pubKeysMap map[uint32][]string) map[uint32][]sharding.Validator { + validatorsMap := make(map[uint32][]sharding.Validator) + + for shardId, shardNodesPks := range pubKeysMap { + shardValidators := make([]sharding.Validator, 0) + for i := 0; i < len(shardNodesPks); i++ { + v, _ := sharding.NewValidator(big.NewInt(0), 1, []byte(shardNodesPks[i])) + shardValidators = append(shardValidators, v) + } + validatorsMap[shardId] = shardValidators + } + + return validatorsMap +} + +func createCryptoParams(nodesPerShard int, nbMetaNodes int, nbShards int) *cryptoParams { + suite := kyber.NewBlakeSHA256Ed25519() + singleSigner := &singlesig.SchnorrSigner{} + keyGen := signing.NewKeyGenerator(suite) + + keysMap := make(map[uint32][]*keyPair) + keyPairs := make([]*keyPair, nodesPerShard) + for shardId := 0; shardId < nbShards; shardId++ { + for n := 0; n < nodesPerShard; n++ { + kp := &keyPair{} + kp.sk, kp.pk = keyGen.GeneratePair() + keyPairs[n] = kp + } + keysMap[uint32(shardId)] = keyPairs + } + + keyPairs = make([]*keyPair, nbMetaNodes) + for n := 0; n < nbMetaNodes; n++ { + kp := &keyPair{} + kp.sk, kp.pk = keyGen.GeneratePair() + keyPairs[n] = kp + } + keysMap[sharding.MetachainShardId] = keyPairs + + params := &cryptoParams{ + keys: keysMap, + keyGen: keyGen, + singleSigner: singleSigner, + } + + return params +} + +func pubKeysMapFromKeysMap(keyPairMap map[uint32][]*keyPair) map[uint32][]string { + keysMap := make(map[uint32][]string, 0) + + for shardId, pairList := range keyPairMap { + shardKeys := make([]string, len(pairList)) + for i, pair := range pairList { + b, _ := pair.pk.ToByteArray() + shardKeys[i] = string(b) + } + keysMap[shardId] = shardKeys + } + + return keysMap +} + func createTestShardChain() *blockchain.BlockChain { cfgCache := storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache} badBlockCache, _ := storageUnit.NewCache(cfgCache.Type, cfgCache.Size, 
cfgCache.Shards) @@ -187,32 +261,19 @@ func createNetNode( nodesCoordinator sharding.NodesCoordinator, targetShardId uint32, initialAddr string, + params *cryptoParams, + keysIndex int, ) ( *node.Node, p2p.Messenger, - crypto.PrivateKey, - crypto.PublicKey, dataRetriever.ResolversFinder, process.BlockProcessor, process.TransactionProcessor, data.ChainHandler) { messenger := createMessengerWithKadDht(context.Background(), initialAddr) - suite := kyber.NewBlakeSHA256Ed25519() - singleSigner := &singlesig.SchnorrSigner{} - keyGen := signing.NewKeyGenerator(suite) - sk, pk := keyGen.GeneratePair() - - for { - pkBytes, _ := pk.ToByteArray() - addr, _ := testAddressConverter.CreateAddressFromPublicKeyBytes(pkBytes) - if shardCoordinator.ComputeId(addr) == targetShardId { - break - } - sk, pk = keyGen.GeneratePair() - } - - pkBuff, _ := pk.ToByteArray() + keyPair := params.keys[targetShardId][keysIndex] + pkBuff, _ := keyPair.pk.ToByteArray() fmt.Printf("Found pk: %s\n", hex.EncodeToString(pkBuff)) blkc := createTestShardChain() @@ -227,8 +288,8 @@ func createNetNode( store, testMarshalizer, testHasher, - keyGen, - singleSigner, + params.keyGen, + params.singleSigner, testMultiSig, dPool, testAddressConverter, @@ -249,7 +310,14 @@ func createNetNode( ) resolversContainer, _ := resolversContainerFactory.Create() resolversFinder, _ := containers.NewResolversFinder(resolversContainer, shardCoordinator) - requestHandler, _ := requestHandlers.NewShardResolverRequestHandler(resolversFinder, factory.TransactionTopic, factory.UnsignedTransactionTopic, factory.MiniBlocksTopic, factory.MetachainBlocksTopic, 100) + requestHandler, _ := requestHandlers.NewShardResolverRequestHandler( + resolversFinder, + factory.TransactionTopic, + factory.UnsignedTransactionTopic, + factory.MiniBlocksTopic, + factory.MetachainBlocksTopic, + 100, + ) txProcessor, _ := transaction.NewTxProcessor( accntAdapter, @@ -330,14 +398,14 @@ func createNetNode( node.WithDataPool(dPool), node.WithAddressConverter(testAddressConverter), node.WithAccountsAdapter(accntAdapter), - node.WithKeyGen(keyGen), + node.WithKeyGen(params.keyGen), node.WithShardCoordinator(shardCoordinator), node.WithBlockChain(blkc), node.WithUint64ByteSliceConverter(uint64Converter), node.WithMultiSigner(testMultiSig), - node.WithSingleSigner(singleSigner), - node.WithTxSignPrivKey(sk), - node.WithTxSignPubKey(pk), + node.WithSingleSigner(params.singleSigner), + node.WithTxSignPrivKey(keyPair.sk), + node.WithTxSignPubKey(keyPair.pk), node.WithInterceptorsContainer(interceptorsContainer), node.WithResolversFinder(resolversFinder), node.WithBlockProcessor(blockProcessor), @@ -349,7 +417,7 @@ func createNetNode( fmt.Println(err.Error()) } - return n, messenger, sk, pk, resolversFinder, blockProcessor, txProcessor, blkc + return n, messenger, resolversFinder, blockProcessor, txProcessor, blkc } func createMessengerWithKadDht(ctx context.Context, initialAddr string) p2p.Messenger { @@ -427,21 +495,6 @@ func displayAndStartNodes(nodes map[uint32][]*testNode) { } } -func genValidatorsFromPubKeys(pubKeysMap map[uint32][]string) map[uint32][]sharding.Validator { - validatorsMap := make(map[uint32][]sharding.Validator) - - for shardId, shardNodesPks := range pubKeysMap { - shardValidators := make([]sharding.Validator, 0) - for i := 0; i < len(shardNodesPks); i++ { - v, _ := sharding.NewValidator(big.NewInt(0), 1, []byte(shardNodesPks[i])) - shardValidators = append(shardValidators, v) - } - validatorsMap[shardId] = shardValidators - } - - return validatorsMap -} - func 
createNodes( numOfShards int, nodesPerShard int, @@ -451,12 +504,11 @@ func createNodes( //first node generated will have is pk belonging to firstSkShardId numMetaChainNodes := 1 nodes := make(map[uint32][]*testNode) - nodesCoordinators := make(map[uint32][]sharding.NodesCoordinator) - nodesPublicKeys := make(map[uint32][]string) + cp := createCryptoParams(nodesPerShard, numMetaChainNodes, numOfShards) + keysMap := pubKeysMapFromKeysMap(cp.keys) + validatorsMap := genValidatorsFromPubKeys(keysMap) for shardId := 0; shardId < numOfShards; shardId++ { - shardNodesCoordinators := make([]sharding.NodesCoordinator, 0) - shardPubKeys := make([]string, 0) shardNodes := make([]*testNode, nodesPerShard) for j := 0; j < nodesPerShard; j++ { @@ -467,31 +519,30 @@ func createNodes( shardCoordinator, _ := sharding.NewMultiShardCoordinator(uint32(numOfShards), uint32(shardId)) nodesCoordinator, _ := sharding.NewIndexHashedNodesCoordinator( + 1, 1, testHasher, uint32(shardId), uint32(numOfShards), - make(map[uint32][]sharding.Validator), + validatorsMap, ) - shardNodesCoordinators = append(shardNodesCoordinators, nodesCoordinator) - accntAdapter := createAccountsDB() - n, mes, sk, pk, resFinder, blkProcessor, txProcessor, blkc := createNetNode( + n, mes, resFinder, blkProcessor, txProcessor, blkc := createNetNode( testNode.dPool, accntAdapter, shardCoordinator, nodesCoordinator, testNode.shardId, serviceID, + cp, + j, ) - pubKeyBytes, _ := pk.ToByteArray() - shardPubKeys = append(shardPubKeys, string(pubKeyBytes)) _ = n.CreateShardedStores() testNode.node = n - testNode.sk = sk + testNode.sk = cp.keys[uint32(shardId)][j].sk testNode.messenger = mes - testNode.pk = sk.GeneratePublic() + testNode.pk = cp.keys[uint32(shardId)][j].pk testNode.resFinder = resFinder testNode.accntState = accntAdapter testNode.blkProcessor = blkProcessor @@ -524,7 +575,7 @@ func createNodes( testMarshalizer, mes, shardCoordinator, - sk, + testNode.sk, &singlesig.SchnorrSigner{}, ) @@ -532,22 +583,18 @@ func createNodes( } nodes[uint32(shardId)] = shardNodes - nodesCoordinators[uint32(shardId)] = shardNodesCoordinators - nodesPublicKeys[uint32(shardId)] = shardPubKeys } - metaNodesCoordinators := make([]sharding.NodesCoordinator, 0) - metaNodesPubKeys := make([]string, 0) - metaNodes := make([]*testNode, numMetaChainNodes) for i := 0; i < numMetaChainNodes; i++ { shardCoordinatorMeta, _ := sharding.NewMultiShardCoordinator(uint32(numOfShards), sharding.MetachainShardId) nodesCoordinator, _ := sharding.NewIndexHashedNodesCoordinator( + 1, 1, testHasher, sharding.MetachainShardId, uint32(numOfShards), - make(map[uint32][]sharding.Validator), + validatorsMap, ) metaNodes[i] = createMetaNetNode( @@ -557,22 +604,9 @@ func createNodes( nodesCoordinator, serviceID, ) - - metaNodesCoordinators = append(metaNodesCoordinators, nodesCoordinator) - pkBytes, _ := metaNodes[i].pk.ToByteArray() - metaNodesPubKeys = append(metaNodesPubKeys, string(pkBytes)) } nodes[sharding.MetachainShardId] = metaNodes - nodesCoordinators[sharding.MetachainShardId] = metaNodesCoordinators - nodesPublicKeys[sharding.MetachainShardId] = metaNodesPubKeys - mapValidators := genValidatorsFromPubKeys(nodesPublicKeys) - - for _, shardCoord := range nodesCoordinators { - for j := 0; j < len(shardCoord); j++ { - _ = shardCoord[j].SetNodesPerShards(mapValidators) - } - } return nodes } diff --git a/integrationTests/multiShard/metablock/testInitializer.go b/integrationTests/multiShard/metablock/testInitializer.go index 04a505c0ce1..d5af011a449 100644 --- 
a/integrationTests/multiShard/metablock/testInitializer.go +++ b/integrationTests/multiShard/metablock/testInitializer.go @@ -81,6 +81,17 @@ type testNode struct { metachainHdrRecv int32 } +type keyPair struct { + sk crypto.PrivateKey + pk crypto.PublicKey +} + +type cryptoParams struct { + keyGen crypto.KeyGenerator + keys map[uint32][]*keyPair + singleSigner crypto.SingleSigner +} + //------- Common func genValidatorsFromPubKeys(pubKeysMap map[uint32][]string) map[uint32][]sharding.Validator { @@ -98,6 +109,54 @@ func genValidatorsFromPubKeys(pubKeysMap map[uint32][]string) map[uint32][]shard return validatorsMap } +func createCryptoParams(nodesPerShard int, nbMetaNodes int, nbShards int) *cryptoParams { + suite := kyber.NewBlakeSHA256Ed25519() + singleSigner := &singlesig.SchnorrSigner{} + keyGen := signing.NewKeyGenerator(suite) + + keysMap := make(map[uint32][]*keyPair) + keyPairs := make([]*keyPair, nodesPerShard) + for shardId := 0; shardId < nbShards; shardId++ { + for n := 0; n < nodesPerShard; n++ { + kp := &keyPair{} + kp.sk, kp.pk = keyGen.GeneratePair() + keyPairs[n] = kp + } + keysMap[uint32(shardId)] = keyPairs + } + + keyPairs = make([]*keyPair, nbMetaNodes) + for n := 0; n < nbMetaNodes; n++ { + kp := &keyPair{} + kp.sk, kp.pk = keyGen.GeneratePair() + keyPairs[n] = kp + } + keysMap[sharding.MetachainShardId] = keyPairs + + params := &cryptoParams{ + keys: keysMap, + keyGen: keyGen, + singleSigner: singleSigner, + } + + return params +} + +func pubKeysMapFromKeysMap(keyPairMap map[uint32][]*keyPair) map[uint32][]string { + keysMap := make(map[uint32][]string, 0) + + for shardId, pairList := range keyPairMap { + shardKeys := make([]string, len(pairList)) + for i, pair := range pairList { + bytes, _ := pair.pk.ToByteArray() + shardKeys[i] = string(bytes) + } + keysMap[shardId] = shardKeys + } + + return keysMap +} + func createMemUnit() storage.Storer { cache, _ := storageUnit.NewCache(storageUnit.LRUCache, 10, 1) persist, _ := memorydb.New() @@ -186,22 +245,25 @@ func displayAndStartNodes(nodes []*testNode) { func createNodes( nodesInMetachain int, - senderShard uint32, + shardId uint32, initialAddr string, hasher hashing.Hasher, ) []*testNode { - nodesCoordMap := make(map[uint32][]sharding.NodesCoordinator) - pkMap := make(map[uint32][]string) nodes := make([]*testNode, nodesInMetachain+1) //first node is a shard node - shardCoordinator, _ := sharding.NewMultiShardCoordinator(1, senderShard) + shardCoordinator, _ := sharding.NewMultiShardCoordinator(1, shardId) + cp := createCryptoParams(1, nodesInMetachain, 1) + keysMap := pubKeysMapFromKeysMap(cp.keys) + validatorsMap := genValidatorsFromPubKeys(keysMap) + nodesCoordinator, _ := sharding.NewIndexHashedNodesCoordinator( + 1, 1, hasher, - senderShard, + shardId, 1, - make(map[uint32][]sharding.Validator), + validatorsMap, ) nodes[0] = createShardNetNode( @@ -210,22 +272,19 @@ func createNodes( shardCoordinator, nodesCoordinator, initialAddr, + cp, + 0, ) - pk, _ := nodes[0].pk.ToByteArray() - nodesCoordMap[0] = []sharding.NodesCoordinator{nodesCoordinator} - pkMap[0] = []string{string(pk)} - - metaNodesCoordinators := make([]sharding.NodesCoordinator, 0) - metaPubKeys := make([]string, 0) for i := 0; i < nodesInMetachain; i++ { shardCoordinator, _ = sharding.NewMultiShardCoordinator(1, sharding.MetachainShardId) nodesCoordinator, _ := sharding.NewIndexHashedNodesCoordinator( + 1, 1, hasher, sharding.MetachainShardId, 1, - make(map[uint32][]sharding.Validator), + validatorsMap, ) nodes[i+1] = createMetaNetNode( 
createTestMetaDataPool(), @@ -233,20 +292,9 @@ func createNodes( shardCoordinator, nodesCoordinator, initialAddr, + cp, + i, ) - metaNodesCoordinators = append(metaNodesCoordinators, nodesCoordinator) - pk, _ := nodes[i+1].pk.ToByteArray() - metaPubKeys = append(metaPubKeys, string(pk)) - } - - nodesCoordMap[sharding.MetachainShardId] = metaNodesCoordinators - pkMap[sharding.MetachainShardId] = metaPubKeys - valMap := genValidatorsFromPubKeys(pkMap) - - for _, nodeCoordList := range nodesCoordMap { - for _, nodeCoord := range nodeCoordList { - _ = nodeCoord.SetNodesPerShards(valMap) - } } return nodes @@ -317,16 +365,14 @@ func createShardNetNode( shardCoordinator sharding.Coordinator, nodesCoordinator sharding.NodesCoordinator, initialAddr string, + params *cryptoParams, + keysIndex int, ) *testNode { tn := testNode{} tn.messenger = createMessengerWithKadDht(context.Background(), initialAddr) - suite := kyber.NewBlakeSHA256Ed25519() - singleSigner := &singlesig.SchnorrSigner{} - keyGen := signing.NewKeyGenerator(suite) - sk, pk := keyGen.GeneratePair() - - pkBuff, _ := pk.ToByteArray() + keyPair := params.keys[shardCoordinator.SelfId()][keysIndex] + pkBuff, _ := keyPair.pk.ToByteArray() fmt.Printf("Found pk: %s\n", hex.EncodeToString(pkBuff)) blkc := createTestShardChain() @@ -342,8 +388,8 @@ func createShardNetNode( store, testMarshalizer, testHasher, - keyGen, - singleSigner, + params.keyGen, + params.singleSigner, testMultiSig, dPool, addConverter, @@ -364,7 +410,14 @@ func createShardNetNode( ) resolversContainer, _ := resolversContainerFactory.Create() tn.resolvers, _ = containers.NewResolversFinder(resolversContainer, shardCoordinator) - requestHandler, _ := requestHandlers.NewShardResolverRequestHandler(tn.resolvers, factory.TransactionTopic, factory.UnsignedTransactionTopic, factory.MiniBlocksTopic, factory.MetachainBlocksTopic, 100) + requestHandler, _ := requestHandlers.NewShardResolverRequestHandler( + tn.resolvers, + factory.TransactionTopic, + factory.UnsignedTransactionTopic, + factory.MiniBlocksTopic, + factory.MetachainBlocksTopic, + 100, + ) fact, _ := shard.NewPreProcessorsContainerFactory( shardCoordinator, @@ -424,8 +477,8 @@ func createShardNetNode( testMarshalizer, tn.messenger, shardCoordinator, - sk, - singleSigner, + keyPair.sk, + params.singleSigner, ) n, err := node.NewNode( @@ -435,14 +488,14 @@ func createShardNetNode( node.WithDataPool(dPool), node.WithAddressConverter(testAddressConverter), node.WithAccountsAdapter(accntAdapter), - node.WithKeyGen(keyGen), + node.WithKeyGen(params.keyGen), node.WithShardCoordinator(shardCoordinator), node.WithBlockChain(blkc), node.WithUint64ByteSliceConverter(uint64Converter), node.WithMultiSigner(testMultiSig), - node.WithSingleSigner(singleSigner), - node.WithPrivKey(sk), - node.WithPubKey(pk), + node.WithSingleSigner(params.singleSigner), + node.WithPrivKey(keyPair.sk), + node.WithPubKey(keyPair.pk), node.WithInterceptorsContainer(interceptorsContainer), node.WithResolversFinder(tn.resolvers), node.WithBlockProcessor(blockProcessor), @@ -454,8 +507,8 @@ func createShardNetNode( } tn.node = n - tn.sk = sk - tn.pk = pk + tn.sk = keyPair.sk + tn.pk = keyPair.pk tn.shard = fmt.Sprintf("%d", shardCoordinator.SelfId()) tn.shardDataPool = dPool @@ -522,17 +575,15 @@ func createMetaNetNode( shardCoordinator sharding.Coordinator, nodesCoordinator sharding.NodesCoordinator, initialAddr string, + params *cryptoParams, + keysIndex int, ) *testNode { tn := testNode{} tn.messenger = createMessengerWithKadDht(context.Background(), 
initialAddr) - suite := kyber.NewBlakeSHA256Ed25519() - singleSigner := &singlesig.SchnorrSigner{} - keyGen := signing.NewKeyGenerator(suite) - sk, pk := keyGen.GeneratePair() - - pkBuff, _ := pk.ToByteArray() + keyPair := params.keys[sharding.MetachainShardId][keysIndex] + pkBuff, _ := keyPair.pk.ToByteArray() fmt.Printf("Found pk: %s\n", hex.EncodeToString(pkBuff)) blkc := createTestMetaChain() @@ -590,8 +641,8 @@ func createMetaNetNode( testMarshalizer, tn.messenger, shardCoordinator, - sk, - singleSigner, + keyPair.sk, + params.singleSigner, ) n, err := node.NewNode( @@ -601,14 +652,14 @@ func createMetaNetNode( node.WithMetaDataPool(dPool), node.WithAddressConverter(testAddressConverter), node.WithAccountsAdapter(accntAdapter), - node.WithKeyGen(keyGen), + node.WithKeyGen(params.keyGen), node.WithShardCoordinator(shardCoordinator), node.WithBlockChain(blkc), node.WithUint64ByteSliceConverter(uint64Converter), node.WithMultiSigner(testMultiSig), - node.WithSingleSigner(singleSigner), - node.WithPrivKey(sk), - node.WithPubKey(pk), + node.WithSingleSigner(params.singleSigner), + node.WithPrivKey(keyPair.sk), + node.WithPubKey(keyPair.pk), node.WithInterceptorsContainer(interceptorsContainer), node.WithResolversFinder(tn.resolvers), node.WithBlockProcessor(blockProcessor), @@ -620,8 +671,8 @@ func createMetaNetNode( } tn.node = n - tn.sk = sk - tn.pk = pk + tn.sk = keyPair.sk + tn.pk = keyPair.pk tn.shard = "meta" tn.metaDataPool = dPool diff --git a/integrationTests/multiShard/smartContract/testInitilalizer.go b/integrationTests/multiShard/smartContract/testInitilalizer.go index b409ce95807..ca11ed82228 100644 --- a/integrationTests/multiShard/smartContract/testInitilalizer.go +++ b/integrationTests/multiShard/smartContract/testInitilalizer.go @@ -102,6 +102,80 @@ type testNode struct { txsRecv int32 } +type keyPair struct { + sk crypto.PrivateKey + pk crypto.PublicKey +} + +type cryptoParams struct { + keyGen crypto.KeyGenerator + keys map[uint32][]*keyPair + singleSigner crypto.SingleSigner +} + +func genValidatorsFromPubKeys(pubKeysMap map[uint32][]string) map[uint32][]sharding.Validator { + validatorsMap := make(map[uint32][]sharding.Validator) + + for shardId, shardNodesPks := range pubKeysMap { + shardValidators := make([]sharding.Validator, 0) + for i := 0; i < len(shardNodesPks); i++ { + v, _ := sharding.NewValidator(big.NewInt(0), 1, []byte(shardNodesPks[i])) + shardValidators = append(shardValidators, v) + } + validatorsMap[shardId] = shardValidators + } + + return validatorsMap +} + +func createCryptoParams(nodesPerShard int, nbMetaNodes int, nbShards int) *cryptoParams { + suite := kyber.NewBlakeSHA256Ed25519() + singleSigner := &singlesig.SchnorrSigner{} + keyGen := signing.NewKeyGenerator(suite) + + keysMap := make(map[uint32][]*keyPair) + keyPairs := make([]*keyPair, nodesPerShard) + for shardId := 0; shardId < nbShards; shardId++ { + for n := 0; n < nodesPerShard; n++ { + kp := &keyPair{} + kp.sk, kp.pk = keyGen.GeneratePair() + keyPairs[n] = kp + } + keysMap[uint32(shardId)] = keyPairs + } + + keyPairs = make([]*keyPair, nbMetaNodes) + for n := 0; n < nbMetaNodes; n++ { + kp := &keyPair{} + kp.sk, kp.pk = keyGen.GeneratePair() + keyPairs[n] = kp + } + keysMap[sharding.MetachainShardId] = keyPairs + + params := &cryptoParams{ + keys: keysMap, + keyGen: keyGen, + singleSigner: singleSigner, + } + + return params +} + +func pubKeysMapFromKeysMap(keyPairMap map[uint32][]*keyPair) map[uint32][]string { + keysMap := make(map[uint32][]string, 0) + + for shardId, pairList := 
range keyPairMap { + shardKeys := make([]string, len(pairList)) + for i, pair := range pairList { + bytes, _ := pair.pk.ToByteArray() + shardKeys[i] = string(bytes) + } + keysMap[shardId] = shardKeys + } + + return keysMap +} + func createTestShardChain() *blockchain.BlockChain { cfgCache := storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache} badBlockCache, _ := storageUnit.NewCache(cfgCache.Type, cfgCache.Size, cfgCache.Shards) @@ -194,11 +268,11 @@ func createNetNode( nodesCoordinator sharding.NodesCoordinator, targetShardId uint32, initialAddr string, + params *cryptoParams, + keysIndex int, ) ( *node.Node, p2p.Messenger, - crypto.PrivateKey, - crypto.PublicKey, dataRetriever.ResolversFinder, process.BlockProcessor, process.TransactionProcessor, @@ -208,22 +282,9 @@ func createNetNode( dataRetriever.StorageService) { messenger := createMessengerWithKadDht(context.Background(), initialAddr) - suite := kyber.NewBlakeSHA256Ed25519() - singleSigner := &singlesig.SchnorrSigner{} - keyGen := signing.NewKeyGenerator(suite) - sk, pk := keyGen.GeneratePair() - - for { - pkBytes, _ := pk.ToByteArray() - addr, _ := testAddressConverter.CreateAddressFromPublicKeyBytes(pkBytes) - if shardCoordinator.ComputeId(addr) == targetShardId { - break - } - sk, pk = keyGen.GeneratePair() - } - - pkBuff, _ := pk.ToByteArray() - fmt.Printf("Found pk: %s\n", hex.EncodeToString(pkBuff)) + keyPair := params.keys[targetShardId][keysIndex] + pkBuff, _ := keyPair.pk.ToByteArray() + fmt.Printf("pk: %s\n", hex.EncodeToString(pkBuff)) blkc := createTestShardChain() store := createTestShardStore(shardCoordinator.NumberOfShards()) @@ -237,8 +298,8 @@ func createNetNode( store, testMarshalizer, testHasher, - keyGen, - singleSigner, + params.keyGen, + params.singleSigner, testMultiSig, dPool, testAddressConverter, @@ -371,14 +432,14 @@ func createNetNode( node.WithDataPool(dPool), node.WithAddressConverter(testAddressConverter), node.WithAccountsAdapter(accntAdapter), - node.WithKeyGen(keyGen), + node.WithKeyGen(params.keyGen), node.WithShardCoordinator(shardCoordinator), node.WithBlockChain(blkc), node.WithUint64ByteSliceConverter(uint64Converter), node.WithMultiSigner(testMultiSig), - node.WithSingleSigner(singleSigner), - node.WithTxSignPrivKey(sk), - node.WithTxSignPubKey(pk), + node.WithSingleSigner(params.singleSigner), + node.WithTxSignPrivKey(keyPair.sk), + node.WithTxSignPubKey(keyPair.pk), node.WithInterceptorsContainer(interceptorsContainer), node.WithResolversFinder(resolversFinder), node.WithBlockProcessor(blockProcessor), @@ -390,7 +451,7 @@ func createNetNode( fmt.Println(err.Error()) } - return n, messenger, sk, pk, resolversFinder, blockProcessor, txProcessor, tc, scForwarder, blkc, store + return n, messenger, resolversFinder, blockProcessor, txProcessor, tc, scForwarder, blkc, store } func createMessengerWithKadDht(ctx context.Context, initialAddr string) p2p.Messenger { @@ -438,21 +499,6 @@ func displayAndStartNodes(nodes map[uint32][]*testNode) { } } -func genValidatorsFromPubKeys(pubKeysMap map[uint32][]string) map[uint32][]sharding.Validator { - validatorsMap := make(map[uint32][]sharding.Validator) - - for shardId, shardNodesPks := range pubKeysMap { - shardValidators := make([]sharding.Validator, 0) - for i := 0; i < len(shardNodesPks); i++ { - v, _ := sharding.NewValidator(big.NewInt(0), 1, []byte(shardNodesPks[i])) - shardValidators = append(shardValidators, v) - } - validatorsMap[shardId] = shardValidators - } - - return validatorsMap -} - func createNodes( numOfShards int, 
nodesPerShard int, @@ -462,12 +508,11 @@ func createNodes( //first node generated will have is pk belonging to firstSkShardId numMetaChainNodes := 1 nodes := make(map[uint32][]*testNode) - nodesCoordinators := make(map[uint32][]sharding.NodesCoordinator) - nodesPublicKeys := make(map[uint32][]string) + cp := createCryptoParams(nodesPerShard, numMetaChainNodes, numOfShards) + keysMap := pubKeysMapFromKeysMap(cp.keys) + validatorsMap := genValidatorsFromPubKeys(keysMap) for shardId := 0; shardId < numOfShards; shardId++ { - shardNodesCoordinators := make([]sharding.NodesCoordinator, 0) - shardPubKeys := make([]string, 0) shardNodes := make([]*testNode, nodesPerShard) for j := 0; j < nodesPerShard; j++ { @@ -478,31 +523,32 @@ func createNodes( shardCoordinator, _ := sharding.NewMultiShardCoordinator(uint32(numOfShards), uint32(shardId)) nodesCoordinator, _ := sharding.NewIndexHashedNodesCoordinator( + 1, 1, testHasher, uint32(shardId), uint32(numOfShards), - make(map[uint32][]sharding.Validator), + validatorsMap, ) - shardNodesCoordinators = append(shardNodesCoordinators, nodesCoordinator) accntAdapter := createAccountsDB() - n, mes, sk, pk, resFinder, blkProcessor, txProcessor, transactionCoordinator, scrForwarder, blkc, store := createNetNode( + n, mes, resFinder, blkProcessor, txProcessor, transactionCoordinator, scrForwarder, blkc, store := createNetNode( testNode.dPool, accntAdapter, shardCoordinator, nodesCoordinator, testNode.shardId, serviceID, + cp, + j, ) - pubKeyBytes, _ := pk.ToByteArray() - shardPubKeys = append(shardPubKeys, string(pubKeyBytes)) _ = n.CreateShardedStores() + KeyPair := cp.keys[uint32(shardId)][j] testNode.node = n - testNode.sk = sk + testNode.sk = KeyPair.sk testNode.messenger = mes - testNode.pk = sk.GeneratePublic() + testNode.pk = KeyPair.pk testNode.resFinder = resFinder testNode.accntState = accntAdapter testNode.blkProcessor = blkProcessor @@ -538,7 +584,7 @@ func createNodes( testMarshalizer, mes, shardCoordinator, - sk, + KeyPair.sk, &singlesig.SchnorrSigner{}, ) @@ -546,22 +592,18 @@ func createNodes( } nodes[uint32(shardId)] = shardNodes - nodesCoordinators[uint32(shardId)] = shardNodesCoordinators - nodesPublicKeys[uint32(shardId)] = shardPubKeys } - metaNodesCoordinators := make([]sharding.NodesCoordinator, 0) - metaNodesPubKeys := make([]string, 0) - metaNodes := make([]*testNode, numMetaChainNodes) for i := 0; i < numMetaChainNodes; i++ { shardCoordinatorMeta, _ := sharding.NewMultiShardCoordinator(uint32(numOfShards), sharding.MetachainShardId) nodesCoordinator, _ := sharding.NewIndexHashedNodesCoordinator( + 1, 1, testHasher, sharding.MetachainShardId, uint32(numOfShards), - make(map[uint32][]sharding.Validator), + validatorsMap, ) metaNodes[i] = createMetaNetNode( @@ -570,23 +612,12 @@ func createNodes( shardCoordinatorMeta, nodesCoordinator, serviceID, + cp, + i, ) - - metaNodesCoordinators = append(metaNodesCoordinators, nodesCoordinator) - pkBytes, _ := metaNodes[i].pk.ToByteArray() - metaNodesPubKeys = append(metaNodesPubKeys, string(pkBytes)) } nodes[sharding.MetachainShardId] = metaNodes - nodesCoordinators[sharding.MetachainShardId] = metaNodesCoordinators - nodesPublicKeys[sharding.MetachainShardId] = metaNodesPubKeys - mapValidators := genValidatorsFromPubKeys(nodesPublicKeys) - - for _, shardCoord := range nodesCoordinators { - for j := 0; j < len(shardCoord); j++ { - _ = shardCoord[j].SetNodesPerShards(mapValidators) - } - } return nodes } @@ -643,17 +674,15 @@ func createMetaNetNode( shardCoordinator sharding.Coordinator, 
nodesCoordinator sharding.NodesCoordinator, initialAddr string, + params *cryptoParams, + keysIndex int, ) *testNode { tn := testNode{} tn.messenger = createMessengerWithKadDht(context.Background(), initialAddr) - suite := kyber.NewBlakeSHA256Ed25519() - singleSigner := &singlesig.SchnorrSigner{} - keyGen := signing.NewKeyGenerator(suite) - sk, pk := keyGen.GeneratePair() - - pkBuff, _ := pk.ToByteArray() + keyPair := params.keys[sharding.MetachainShardId][keysIndex] + pkBuff, _ := keyPair.pk.ToByteArray() fmt.Printf("Found pk: %s\n", hex.EncodeToString(pkBuff)) tn.blkc = createTestMetaChain() @@ -720,8 +749,8 @@ func createMetaNetNode( testMarshalizer, tn.messenger, shardCoordinator, - sk, - singleSigner, + keyPair.sk, + params.singleSigner, ) n, err := node.NewNode( @@ -731,14 +760,14 @@ func createMetaNetNode( node.WithMetaDataPool(dPool), node.WithAddressConverter(testAddressConverter), node.WithAccountsAdapter(accntAdapter), - node.WithKeyGen(keyGen), + node.WithKeyGen(params.keyGen), node.WithShardCoordinator(shardCoordinator), node.WithBlockChain(tn.blkc), node.WithUint64ByteSliceConverter(uint64Converter), node.WithMultiSigner(testMultiSig), - node.WithSingleSigner(singleSigner), - node.WithPrivKey(sk), - node.WithPubKey(pk), + node.WithSingleSigner(params.singleSigner), + node.WithPrivKey(keyPair.sk), + node.WithPubKey(keyPair.pk), node.WithInterceptorsContainer(interceptorsContainer), node.WithResolversFinder(resolvers), node.WithBlockProcessor(tn.blkProcessor), @@ -751,8 +780,8 @@ func createMetaNetNode( } tn.node = n - tn.sk = sk - tn.pk = pk + tn.sk = keyPair.sk + tn.pk = keyPair.pk tn.accntState = accntAdapter tn.shardId = sharding.MetachainShardId diff --git a/integrationTests/multiShard/transaction/testInitializer.go b/integrationTests/multiShard/transaction/testInitializer.go index e19cdd18353..bb29ef6ca40 100644 --- a/integrationTests/multiShard/transaction/testInitializer.go +++ b/integrationTests/multiShard/transaction/testInitializer.go @@ -371,6 +371,7 @@ func createNode( shardCoordinator, _ := sharding.NewMultiShardCoordinator(uint32(numOfShards), uint32(shardId)) nodesCoordinator, _ := sharding.NewIndexHashedNodesCoordinator( 3, + 1, hasher, uint32(shardId), uint32(numOfShards), diff --git a/integrationTests/singleShard/block/interceptedRequestHdr_test.go b/integrationTests/singleShard/block/interceptedRequestHdr_test.go index cc0f44f5337..de48aa4988f 100644 --- a/integrationTests/singleShard/block/interceptedRequestHdr_test.go +++ b/integrationTests/singleShard/block/interceptedRequestHdr_test.go @@ -3,7 +3,6 @@ package block import ( "encoding/base64" "fmt" - "math/big" "reflect" "sync" "testing" @@ -21,21 +20,6 @@ import ( "github.com/stretchr/testify/assert" ) -func genValidatorsFromPubKeys(pubKeysMap map[uint32][]string) map[uint32][]sharding.Validator { - validatorsMap := make(map[uint32][]sharding.Validator) - - for shardId, shardNodesPks := range pubKeysMap { - shardValidators := make([]sharding.Validator, 0) - for i := 0; i < len(shardNodesPks); i++ { - v, _ := sharding.NewValidator(big.NewInt(0), 1, []byte(shardNodesPks[i])) - shardValidators = append(shardValidators, v) - } - validatorsMap[shardId] = shardValidators - } - - return validatorsMap -} - func TestNode_GenerateSendInterceptHeaderByNonceWithNetMessenger(t *testing.T) { if testing.Short() { t.Skip("this is not a short test") @@ -50,48 +34,49 @@ func TestNode_GenerateSendInterceptHeaderByNonceWithNetMessenger(t *testing.T) { storeResolver := createTestStore() shardCoordinator := 
&sharding.OneShardCoordinator{} + cp := createCryptoParams(2, 1, 1) + keysMap := pubKeysMapFromKeysMap(cp.keys) + validatorsMap := genValidatorsFromPubKeys(keysMap) + nodesCoordinator1, _ := sharding.NewIndexHashedNodesCoordinator( + 1, 1, hasher, 0, 1, - make(map[uint32][]sharding.Validator), + validatorsMap, ) nodesCoordinator2, _ := sharding.NewIndexHashedNodesCoordinator( + 1, 1, hasher, 0, 1, - make(map[uint32][]sharding.Validator), + validatorsMap, ) fmt.Println("Requester:") - nRequester, mesRequester, _, pk1, multiSigner, resolversFinder := createNetNode( + nRequester, mesRequester, multiSigner, resolversFinder := createNetNode( dPoolRequester, storeRequester, createAccountsDB(), shardCoordinator, nodesCoordinator1, + cp, + 0, ) fmt.Println("Resolver:") - nResolver, mesResolver, _, pk2, _, _ := createNetNode( + nResolver, mesResolver, _, _ := createNetNode( dPoolResolver, storeResolver, createAccountsDB(), shardCoordinator, nodesCoordinator2, + cp, + 1, ) - pubKeyMap := make(map[uint32][]string) - pk1Bytes, _ := pk1.ToByteArray() - pk2Bytes, _ := pk2.ToByteArray() - - pubKeyMap[0] = []string{string(pk1Bytes), string(pk2Bytes)} - validatorsMap := genValidatorsFromPubKeys(pubKeyMap) - _ = nodesCoordinator1.SetNodesPerShards(validatorsMap) - _ = nodesCoordinator2.SetNodesPerShards(validatorsMap) - _ = nRequester.Start() _ = nResolver.Start() defer func() { @@ -137,8 +122,8 @@ func TestNode_GenerateSendInterceptHeaderByNonceWithNetMessenger(t *testing.T) { hdrBuff, _ := marshalizer.Marshal(&hdr1) hdrHash := hasher.Compute(string(hdrBuff)) - msig, _ := multiSigner.Create(pubKeyMap[0], 0) - bitmap := []byte{1, 0, 0} + msig, _ := multiSigner.Create(keysMap[0], 0) + bitmap := []byte{1} _, _ = msig.CreateSignatureShare(hdrHash, bitmap) aggSig, _ := msig.AggregateSigs(bitmap) diff --git a/integrationTests/singleShard/block/interceptedRequestTxBlockBody_test.go b/integrationTests/singleShard/block/interceptedRequestTxBlockBody_test.go index 3bf25d84b0a..1d45171672a 100644 --- a/integrationTests/singleShard/block/interceptedRequestTxBlockBody_test.go +++ b/integrationTests/singleShard/block/interceptedRequestTxBlockBody_test.go @@ -26,32 +26,40 @@ func TestNode_GenerateSendInterceptTxBlockBodyWithNetMessenger(t *testing.T) { dPoolRequester := createTestDataPool() dPoolResolver := createTestDataPool() + cp := createCryptoParams(2, 1, 1) + keysMap := pubKeysMapFromKeysMap(cp.keys) + validatorsMap := genValidatorsFromPubKeys(keysMap) shardCoordinator := &sharding.OneShardCoordinator{} nodesCoordinator, _ := sharding.NewIndexHashedNodesCoordinator( + 1, 1, hasher, 0, 1, - make(map[uint32][]sharding.Validator), + validatorsMap, ) fmt.Println("Requester: ") - nRequester, mesRequester, _, _, _, resolversFinder := createNetNode( + nRequester, mesRequester, _, resolversFinder := createNetNode( dPoolRequester, createTestStore(), createAccountsDB(), shardCoordinator, nodesCoordinator, + cp, + 0, ) fmt.Println("Resolver:") - nResolver, mesResolver, _, _, _, _ := createNetNode( + nResolver, mesResolver, _, _ := createNetNode( dPoolResolver, createTestStore(), createAccountsDB(), shardCoordinator, nodesCoordinator, + cp, + 1, ) _ = nRequester.Start() diff --git a/integrationTests/singleShard/block/testInitializer.go b/integrationTests/singleShard/block/testInitializer.go index c5ffbc1e39c..5e12ce87b6e 100644 --- a/integrationTests/singleShard/block/testInitializer.go +++ b/integrationTests/singleShard/block/testInitializer.go @@ -43,10 +43,85 @@ import ( 
"github.com/ElrondNetwork/elrond-go/storage/storageUnit" "github.com/btcsuite/btcd/btcec" libp2pCrypto "github.com/libp2p/go-libp2p-core/crypto" + "math/big" ) var r io.Reader +type keyPair struct { + sk crypto.PrivateKey + pk crypto.PublicKey +} + +type cryptoParams struct { + keyGen crypto.KeyGenerator + keys map[uint32][]*keyPair + singleSigner crypto.SingleSigner +} + +func genValidatorsFromPubKeys(pubKeysMap map[uint32][]string) map[uint32][]sharding.Validator { + validatorsMap := make(map[uint32][]sharding.Validator) + + for shardId, shardNodesPks := range pubKeysMap { + shardValidators := make([]sharding.Validator, 0) + for i := 0; i < len(shardNodesPks); i++ { + v, _ := sharding.NewValidator(big.NewInt(0), 1, []byte(shardNodesPks[i])) + shardValidators = append(shardValidators, v) + } + validatorsMap[shardId] = shardValidators + } + + return validatorsMap +} + +func createCryptoParams(nodesPerShard int, nbMetaNodes int, nbShards int) *cryptoParams { + suite := kyber.NewSuitePairingBn256() + singleSigner := &singlesig.SchnorrSigner{} + keyGen := signing.NewKeyGenerator(suite) + + keysMap := make(map[uint32][]*keyPair) + keyPairs := make([]*keyPair, nodesPerShard) + for shardId := 0; shardId < nbShards; shardId++ { + for n := 0; n < nodesPerShard; n++ { + kp := &keyPair{} + kp.sk, kp.pk = keyGen.GeneratePair() + keyPairs[n] = kp + } + keysMap[uint32(shardId)] = keyPairs + } + + keyPairs = make([]*keyPair, nbMetaNodes) + for n := 0; n < nbMetaNodes; n++ { + kp := &keyPair{} + kp.sk, kp.pk = keyGen.GeneratePair() + keyPairs[n] = kp + } + keysMap[sharding.MetachainShardId] = keyPairs + + params := &cryptoParams{ + keys: keysMap, + keyGen: keyGen, + singleSigner: singleSigner, + } + + return params +} + +func pubKeysMapFromKeysMap(keyPairMap map[uint32][]*keyPair) map[uint32][]string { + keysMap := make(map[uint32][]string, 0) + + for shardId, pairList := range keyPairMap { + shardKeys := make([]string, len(pairList)) + for i, pair := range pairList { + bytes, _ := pair.pk.ToByteArray() + shardKeys[i] = string(bytes) + } + keysMap[shardId] = shardKeys + } + + return keysMap +} + func init() { r = rand.New(rand.NewSource(time.Now().UnixNano())) } @@ -153,11 +228,11 @@ func createNetNode( accntAdapter state.AccountsAdapter, shardCoordinator sharding.Coordinator, nodesCoordinator sharding.NodesCoordinator, + params *cryptoParams, + keysIndex int, ) ( *node.Node, p2p.Messenger, - crypto.PrivateKey, - crypto.PublicKey, crypto.MultiSigner, dataRetriever.ResolversFinder) { @@ -168,12 +243,8 @@ func createNetNode( messenger := createMessenger(context.Background()) addrConverter, _ := addressConverters.NewPlainAddressConverter(32, "0x") - - suite := kyber.NewSuitePairingBn256() - singleSigner := &singlesig.SchnorrSigner{} - keyGen := signing.NewKeyGenerator(suite) - sk, pk := keyGen.GeneratePair() - multiSigner, _ := createMultiSigner(sk, pk, keyGen, hasherSigning) + keyPair := params.keys[shardCoordinator.SelfId()][keysIndex] + multiSigner, _ := createMultiSigner(keyPair.sk, keyPair.pk, params.keyGen, hasherSigning) blkc := createTestBlockChain() uint64Converter := uint64ByteSlice.NewBigEndianConverter() dataPacker, _ := partitioning.NewSizeDataPacker(marshalizer) @@ -185,8 +256,8 @@ func createNetNode( store, marshalizer, hasher, - keyGen, - singleSigner, + params.keyGen, + params.singleSigner, multiSigner, dPool, addrConverter, @@ -212,11 +283,11 @@ func createNetNode( node.WithDataPool(dPool), node.WithAddressConverter(addrConverter), node.WithAccountsAdapter(accntAdapter), - 
node.WithSingleSigner(singleSigner), + node.WithSingleSigner(params.singleSigner), node.WithMultiSigner(multiSigner), - node.WithKeyGen(keyGen), - node.WithTxSignPrivKey(sk), - node.WithTxSignPubKey(pk), + node.WithKeyGen(params.keyGen), + node.WithTxSignPrivKey(keyPair.sk), + node.WithTxSignPubKey(keyPair.pk), node.WithShardCoordinator(shardCoordinator), node.WithBlockChain(blkc), node.WithUint64ByteSliceConverter(uint64Converter), @@ -225,7 +296,7 @@ func createNetNode( node.WithDataStore(store), ) - return n, messenger, sk, pk, multiSigner, resolversFinder + return n, messenger, multiSigner, resolversFinder } func createMessenger(ctx context.Context) p2p.Messenger { diff --git a/integrationTests/singleShard/transaction/interceptedBulkTx_test.go b/integrationTests/singleShard/transaction/interceptedBulkTx_test.go index dcab25d5bb3..dd2a712e41e 100644 --- a/integrationTests/singleShard/transaction/interceptedBulkTx_test.go +++ b/integrationTests/singleShard/transaction/interceptedBulkTx_test.go @@ -32,6 +32,7 @@ func TestNode_GenerateSendInterceptBulkTransactionsWithMessenger(t *testing.T) { shardCoordinator := &sharding.OneShardCoordinator{} nodesCoordinator, _ := sharding.NewIndexHashedNodesCoordinator( + 1, 1, hasher, 0, diff --git a/integrationTests/singleShard/transaction/interceptedResolvedTx_test.go b/integrationTests/singleShard/transaction/interceptedResolvedTx_test.go index d88b5864a77..f0707689b0e 100644 --- a/integrationTests/singleShard/transaction/interceptedResolvedTx_test.go +++ b/integrationTests/singleShard/transaction/interceptedResolvedTx_test.go @@ -30,6 +30,7 @@ func TestNode_RequestInterceptTransactionWithMessenger(t *testing.T) { shardCoordinator := &sharding.OneShardCoordinator{} nodesCoordinator, _ := sharding.NewIndexHashedNodesCoordinator( + 1, 1, hasher, 0, diff --git a/node/defineOptions.go b/node/defineOptions.go index 01d6ad60fee..7b1e1e72574 100644 --- a/node/defineOptions.go +++ b/node/defineOptions.go @@ -265,6 +265,16 @@ func WithShardCoordinator(shardCoordinator sharding.Coordinator) Option { } } +func WithNodesCoordinator(nodesCoordinator sharding.NodesCoordinator) Option { + return func(n *Node) error { + if nodesCoordinator == nil { + return ErrNilNodesCoordinator + } + n.nodesCoordinator = nodesCoordinator + return nil + } +} + // WithUint64ByteSliceConverter sets up the uint64 <-> []byte converter func WithUint64ByteSliceConverter(converter typeConverters.Uint64ByteSliceConverter) Option { return func(n *Node) error { diff --git a/node/errors.go b/node/errors.go index e312dedc6ec..22f7c79520e 100644 --- a/node/errors.go +++ b/node/errors.go @@ -58,6 +58,9 @@ var ErrNilDataPool = errors.New("trying to set nil data pool") // ErrNilShardCoordinator signals that a nil shard coordinator has been provided var ErrNilShardCoordinator = errors.New("trying to set nil shard coordinator") +// ErrNilNodesCoordinator signals that a nil nodes coordinator has been provided +var ErrNilNodesCoordinator = errors.New("trying to set nil nodes coordinator") + // ErrNilUint64ByteSliceConverter signals that a nil uint64 <-> byte slice converter has been provided var ErrNilUint64ByteSliceConverter = errors.New("trying to set nil uint64 - byte slice converter") @@ -88,9 +91,6 @@ var ErrNilBlockHeader = errors.New("block header is nil") // ErrNilTxBlockBody is raised when a valid tx block body is expected but nil was used var ErrNilTxBlockBody = errors.New("tx block body is nil") -// ErrNilMetaBlockHeader is raised when a valid metablock is expected but nil was provided 
-var ErrNilMetaBlockHeader = errors.New("meta block header is nil") - // ErrWrongTypeAssertion is raised when a type assertion occurs var ErrWrongTypeAssertion = errors.New("wrong type assertion: expected *block.Header") diff --git a/node/node.go b/node/node.go index 4921bf05e85..cfe80044d2d 100644 --- a/node/node.go +++ b/node/node.go @@ -88,6 +88,7 @@ type Node struct { metaDataPool dataRetriever.MetaPoolsHolder store dataRetriever.StorageService shardCoordinator sharding.Coordinator + nodesCoordinator sharding.NodesCoordinator consensusTopic string consensusType string @@ -267,11 +268,6 @@ func (n *Node) StartConsensus() error { return err } - nCoordinator, err := n.createNodesCoordinator() - if err != nil { - return err - } - consensusDataContainer, err := spos.NewConsensusCore( n.blkc, n.blockProcessor, @@ -286,8 +282,8 @@ func (n *Node) StartConsensus() error { n.multiSigner, n.rounder, n.shardCoordinator, + n.nodesCoordinator, n.syncTimer, - nCoordinator, ) if err != nil { return err @@ -486,55 +482,6 @@ func (n *Node) createConsensusState() (*spos.ConsensusState, error) { return consensusState, nil } -// createNodesCoordinator creates a index hashed group selector object -func (n *Node) createNodesCoordinator() (sharding.NodesCoordinator, error) { - var err error - - nodesMap := make(map[uint32][]sharding.Validator) - nbShards := n.shardCoordinator.NumberOfShards() - - for sh := uint32(0); sh < nbShards; sh++ { - err = n.createValidatorsForShard(nodesMap, sh) - if err != nil { - return nil, err - } - } - - err = n.createValidatorsForShard(nodesMap, sharding.MetachainShardId) - if err != nil { - return nil, err - } - - nCoordinator, err := sharding.NewIndexHashedNodesCoordinator( - n.consensusGroupSize, - n.hasher, - n.shardCoordinator.SelfId(), - n.shardCoordinator.NumberOfShards(), - nodesMap, - ) - if err != nil { - return nil, err - } - - return nCoordinator, nil -} - -func (n *Node) createValidatorsForShard(nodesMap map[uint32][]sharding.Validator, shId uint32) (err error) { - nodesInShard := len(n.initialNodesPubkeys[shId]) - nodesMap[shId] = make([]sharding.Validator, nodesInShard) - - for i := 0; i < nodesInShard; i++ { - validator, err := sharding.NewValidator(big.NewInt(0), 0, []byte(n.initialNodesPubkeys[shId][i])) - if err != nil { - return err - } - - nodesMap[shId][i] = validator - } - - return nil -} - // createConsensusTopic creates a consensus topic for node func (n *Node) createConsensusTopic(messageProcessor p2p.MessageProcessor, shardCoordinator sharding.Coordinator) error { if shardCoordinator == nil { diff --git a/process/block/interceptors/headerInterceptor_test.go b/process/block/interceptors/headerInterceptor_test.go index a7e5fc7d7e4..2d165a07a07 100644 --- a/process/block/interceptors/headerInterceptor_test.go +++ b/process/block/interceptors/headerInterceptor_test.go @@ -332,7 +332,8 @@ func TestHeaderInterceptor_ProcessReceivedMessageNotForCurrentShardShouldNotAdd( nodesCoordinator := &mock.NodesCoordinatorMock{ NbShards: 5, - ConsensusSize: 1, + ShardConsensusSize: 1, + MetaConsensusSize: 1, ShardId: 2, } diff --git a/process/factory/metachain/interceptorsContainerFactory_test.go b/process/factory/metachain/interceptorsContainerFactory_test.go index aa24113c087..6cca5fc63f8 100644 --- a/process/factory/metachain/interceptorsContainerFactory_test.go +++ b/process/factory/metachain/interceptorsContainerFactory_test.go @@ -354,9 +354,10 @@ func TestInterceptorsContainerFactory_With4ShardsShouldWork(t *testing.T) { shardCoordinator.CurrentShard = 1 
nodesCoordinator := &mock.NodesCoordinatorMock{ - ConsensusSize: 1, - NbShards: uint32(noOfShards), - ShardId: 1, + ShardConsensusSize: 1, + MetaConsensusSize: 1, + NbShards: uint32(noOfShards), + ShardId: 1, } icf, _ := metachain.NewInterceptorsContainerFactory( diff --git a/process/factory/shard/interceptorsContainerFactory_test.go b/process/factory/shard/interceptorsContainerFactory_test.go index 4d0639e9f1d..1bd7734935e 100644 --- a/process/factory/shard/interceptorsContainerFactory_test.go +++ b/process/factory/shard/interceptorsContainerFactory_test.go @@ -602,9 +602,10 @@ func TestInterceptorsContainerFactory_With4ShardsShouldWork(t *testing.T) { shardCoordinator.CurrentShard = 1 nodesCoordinator := &mock.NodesCoordinatorMock{ - ShardId: 1, - ConsensusSize: 1, - NbShards: uint32(noOfShards), + ShardId: 1, + ShardConsensusSize: 1, + MetaConsensusSize: 1, + NbShards: uint32(noOfShards), } icf, _ := shard.NewInterceptorsContainerFactory( diff --git a/process/mock/nodesCoordinatorMock.go b/process/mock/nodesCoordinatorMock.go index 14167833a88..26cbd7d33fe 100644 --- a/process/mock/nodesCoordinatorMock.go +++ b/process/mock/nodesCoordinatorMock.go @@ -7,23 +7,23 @@ import ( // NodesCoordinator defines the behaviour of a struct able to do validator group selection type NodesCoordinatorMock struct { Validators map[uint32][]sharding.Validator - ConsensusSize uint32 + ShardConsensusSize uint32 + MetaConsensusSize uint32 ShardId uint32 NbShards uint32 GetSelectedPublicKeysCalled func(selection []byte) (publicKeys []string, err error) GetValidatorsPublicKeysCalled func(randomness []byte) ([]string, error) LoadNodesPerShardsCalled func(nodes map[uint32][]sharding.Validator) error ComputeValidatorsGroupCalled func(randomness []byte) (validatorsGroup []sharding.Validator, err error) - ConsensusGroupSizeCalled func() int - SetConsensusGroupSizeCalled func(int) error } func NewNodesCoordinatorMock() *NodesCoordinatorMock { return &NodesCoordinatorMock{ - ConsensusSize: 1, - ShardId: 0, - NbShards: 1, - Validators: make(map[uint32][]sharding.Validator), + ShardConsensusSize: 1, + MetaConsensusSize: 1, + ShardId: 0, + NbShards: 1, + Validators: make(map[uint32][]sharding.Validator), } } @@ -32,15 +32,11 @@ func (ncm *NodesCoordinatorMock) GetSelectedPublicKeys(selection []byte) (public return ncm.GetSelectedPublicKeysCalled(selection) } - pubKeys := make([]string, 0) - if len(ncm.Validators) == 0 { return nil, sharding.ErrNilInputNodesMap } - if len(ncm.Validators[ncm.ShardId]) < int(ncm.ConsensusSize) { - return nil, sharding.ErrSmallEligibleListSize - } + pubKeys := make([]string, 0) for _, v := range ncm.Validators[ncm.ShardId] { pubKeys = append(pubKeys, string(v.PubKey())) @@ -83,12 +79,16 @@ func (ncm *NodesCoordinatorMock) SetNodesPerShards(nodes map[uint32][]sharding.V } func (ncm *NodesCoordinatorMock) ComputeValidatorsGroup(randomess []byte) ([]sharding.Validator, error) { + var consensusSize uint32 + if ncm.ComputeValidatorsGroupCalled != nil { return ncm.ComputeValidatorsGroupCalled(randomess) } - if len(ncm.Validators[ncm.ShardId]) < int(ncm.ConsensusSize) { - return nil, sharding.ErrSmallEligibleListSize + if ncm.ShardId == sharding.MetachainShardId { + consensusSize = ncm.MetaConsensusSize + } else { + consensusSize = ncm.ShardConsensusSize } if randomess == nil { @@ -97,23 +97,9 @@ func (ncm *NodesCoordinatorMock) ComputeValidatorsGroup(randomess []byte) ([]sha validatorsGroup := make([]sharding.Validator, 0) - for i := uint32(0); i < ncm.ConsensusSize; i++ { + for i := uint32(0); i 
< consensusSize; i++ { validatorsGroup = append(validatorsGroup, ncm.Validators[ncm.ShardId][i]) } return validatorsGroup, nil } - -func (ncm *NodesCoordinatorMock) ConsensusGroupSize() int { - if ncm.ConsensusGroupSizeCalled != nil { - return ncm.ConsensusGroupSizeCalled() - } - - return int(ncm.ConsensusSize) -} - -func (ncm *NodesCoordinatorMock) SetConsensusGroupSize(size int) error { - ncm.ConsensusSize = uint32(size) - - return nil -} diff --git a/sharding/errors.go b/sharding/errors.go index 8a9314e5514..09af0881fce 100644 --- a/sharding/errors.go +++ b/sharding/errors.go @@ -43,8 +43,11 @@ var ErrNodesSizeSmallerThanMinNoOfNodes = errors.New("length of nodes defined is // ErrNilInputNodesMap signals that a nil nodes map was provided var ErrNilInputNodesMap = errors.New("nil input nodes map") -// ErrSmallEligibleListSize signals that the eligible validators list's size is less than the consensus size -var ErrSmallEligibleListSize = errors.New("small eligible list size") +// ErrSmallShardEligibleListSize signals that the eligible validators list's size is less than the consensus size +var ErrSmallShardEligibleListSize = errors.New("small shard eligible list size") + +// ErrSmallMetachainEligibleListSize signals that the eligible validators list's size is less than the consensus size +var ErrSmallMetachainEligibleListSize = errors.New("small metachain eligible list size") // ErrInvalidConsensusGroupSize signals that the consensus size is invalid (e.g. value is negative) var ErrInvalidConsensusGroupSize = errors.New("invalid consensus group size") diff --git a/sharding/indexHashedNodesCoordinator.go b/sharding/indexHashedNodesCoordinator.go index 6a72c2c416c..0f37eca7ed6 100644 --- a/sharding/indexHashedNodesCoordinator.go +++ b/sharding/indexHashedNodesCoordinator.go @@ -9,23 +9,25 @@ import ( ) type indexHashedNodesCoordinator struct { - nbShards uint32 - shardId uint32 - hasher hashing.Hasher - nodesMap map[uint32][]Validator - expandedEligibleList []Validator - consensusGroupSize int + nbShards uint32 + shardId uint32 + hasher hashing.Hasher + nodesMap map[uint32][]Validator + expandedEligibleList []Validator + shardConsensusGroupSize int + metaConsensusGroupSize int } // NewIndexHashedNodesCoordinator creates a new index hashed group selector func NewIndexHashedNodesCoordinator( - consensusGroupSize int, + shardConsensusGroupSize int, + metaConsensusGroupSize int, hasher hashing.Hasher, shardId uint32, nbShards uint32, nodes map[uint32][]Validator, ) (*indexHashedNodesCoordinator, error) { - if consensusGroupSize < 1 { + if shardConsensusGroupSize < 1 || metaConsensusGroupSize < 1 { return nil, ErrInvalidConsensusGroupSize } @@ -42,19 +44,16 @@ func NewIndexHashedNodesCoordinator( } ihgs := &indexHashedNodesCoordinator{ - nbShards: nbShards, - shardId: shardId, - hasher: hasher, - nodesMap: make(map[uint32][]Validator), - expandedEligibleList: make([]Validator, 0), + nbShards: nbShards, + shardId: shardId, + hasher: hasher, + nodesMap: make(map[uint32][]Validator), + expandedEligibleList: make([]Validator, 0), + shardConsensusGroupSize: shardConsensusGroupSize, + metaConsensusGroupSize: metaConsensusGroupSize, } - err := ihgs.SetConsensusGroupSize(consensusGroupSize) - if err != nil { - return nil, err - } - - err = ihgs.SetNodesPerShards(nodes) + err := ihgs.SetNodesPerShards(nodes) if err != nil { return nil, err } @@ -68,6 +67,18 @@ func (ihgs *indexHashedNodesCoordinator) SetNodesPerShards(nodes map[uint32][]Va return ErrNilInputNodesMap } + nodesList, ok := 
nodes[MetachainShardId] + if ok && len(nodesList) < ihgs.metaConsensusGroupSize { + return ErrSmallMetachainEligibleListSize + } + + for shardId := uint32(0); shardId < ihgs.nbShards; shardId++ { + nbNodesShard := len(nodes[shardId]) + if nbNodesShard < ihgs.shardConsensusGroupSize { + return ErrSmallShardEligibleListSize + } + } + ihgs.nodesMap = nodes ihgs.expandedEligibleList = ihgs.expandEligibleList() @@ -84,17 +95,24 @@ func (ihgs *indexHashedNodesCoordinator) SetNodesPerShards(nodes map[uint32][]Va // the item at the new proposed index is not found in the list. This new proposed index will be called checked index // 4. the item at the checked index is appended in the temp validator list func (ihgs *indexHashedNodesCoordinator) ComputeValidatorsGroup(randomness []byte) (validatorsGroup []Validator, err error) { - if len(ihgs.nodesMap[ihgs.shardId]) < ihgs.consensusGroupSize { - return nil, ErrSmallEligibleListSize - } - if randomness == nil { return nil, ErrNilRandomness } tempList := make([]Validator, 0) + var consensusGroupSize int - for startIdx := 0; startIdx < ihgs.consensusGroupSize; startIdx++ { + if ihgs == nil { + return nil, ErrNilRandomness + } + + if ihgs.shardId == MetachainShardId { + consensusGroupSize = ihgs.metaConsensusGroupSize + } else { + consensusGroupSize = ihgs.shardConsensusGroupSize + } + + for startIdx := 0; startIdx < consensusGroupSize; startIdx++ { proposedIndex := ihgs.computeListIndex(startIdx, string(randomness)) checkedIndex := ihgs.checkIndex(proposedIndex, tempList) @@ -127,12 +145,19 @@ func (ihgs *indexHashedNodesCoordinator) GetSelectedPublicKeys(selection []byte) selectionLen := uint16(len(selection) * 8) // 8 selection bits in each byte shardEligibleLen := uint16(len(ihgs.nodesMap[ihgs.shardId])) invalidSelection := selectionLen < shardEligibleLen + var consensusGroupSize int if invalidSelection { return nil, ErrEligibleSelectionMismatch } - publicKeys = make([]string, ihgs.consensusGroupSize) + if ihgs.shardId == MetachainShardId { + consensusGroupSize = ihgs.shardConsensusGroupSize + } else { + consensusGroupSize = ihgs.metaConsensusGroupSize + } + + publicKeys = make([]string, consensusGroupSize) cnt := 0 for i := uint16(0); i < shardEligibleLen; i++ { @@ -145,12 +170,12 @@ func (ihgs *indexHashedNodesCoordinator) GetSelectedPublicKeys(selection []byte) publicKeys[cnt] = string(ihgs.nodesMap[ihgs.shardId][i].PubKey()) cnt++ - if cnt > ihgs.consensusGroupSize { + if cnt > consensusGroupSize { return nil, ErrEligibleTooManySelections } } - if cnt < ihgs.consensusGroupSize { + if cnt < consensusGroupSize { return nil, ErrEligibleTooFewSelections } @@ -205,18 +230,3 @@ func (ihgs *indexHashedNodesCoordinator) validatorIsInList(v Validator, list []V return false } - -// ConsensusGroupSize returns the consensus group size -func (ihgs *indexHashedNodesCoordinator) ConsensusGroupSize() int { - return ihgs.consensusGroupSize -} - -// SetConsensusGroupSize sets the consensus group size -func (ihgs *indexHashedNodesCoordinator) SetConsensusGroupSize(consensusGroupSize int) error { - if consensusGroupSize < 1 { - return ErrInvalidConsensusGroupSize - } - - ihgs.consensusGroupSize = consensusGroupSize - return nil -} diff --git a/sharding/indexHashedNodesCoordinator_test.go b/sharding/indexHashedNodesCoordinator_test.go index 5a983cc38e3..fd12cb7b976 100644 --- a/sharding/indexHashedNodesCoordinator_test.go +++ b/sharding/indexHashedNodesCoordinator_test.go @@ -28,8 +28,14 @@ func createDummyNodesMap() map[uint32][]sharding.Validator { 
mock.NewValidatorMock(big.NewInt(2), 3, []byte("pk1")), } + listMeta := []sharding.Validator{ + mock.NewValidatorMock(big.NewInt(1), 1, []byte("pkMeta1")), + mock.NewValidatorMock(big.NewInt(1), 2, []byte("pkMeta2")), + } + nodesMap := make(map[uint32][]sharding.Validator) nodesMap[0] = list + nodesMap[sharding.MetachainShardId] = listMeta return nodesMap } @@ -41,7 +47,14 @@ func TestNewIndexHashedGroupSelector_NilHasherShouldErr(t *testing.T) { nodesMap := createDummyNodesMap() - ihgs, err := sharding.NewIndexHashedNodesCoordinator(1, nil, 0, 1, nodesMap) + ihgs, err := sharding.NewIndexHashedNodesCoordinator( + 1, + 1, + nil, + 0, + 1, + nodesMap, + ) assert.Nil(t, ihgs) assert.Equal(t, sharding.ErrNilHasher, err) @@ -53,6 +66,7 @@ func TestNewIndexHashedGroupSelector_InvalidConsensusGroupSizeShouldErr(t *testi nodesMap := createDummyNodesMap() ihgs, err := sharding.NewIndexHashedNodesCoordinator( 0, + 1, mock.HasherMock{}, 0, 1, @@ -68,6 +82,7 @@ func TestNewIndexHashedGroupSelector_OkValsShouldWork(t *testing.T) { nodesMap := createDummyNodesMap() ihgs, err := sharding.NewIndexHashedNodesCoordinator( + 1, 1, mock.HasherMock{}, 0, @@ -81,12 +96,13 @@ func TestNewIndexHashedGroupSelector_OkValsShouldWork(t *testing.T) { //------- LoadEligibleList -func TestIndexHashedGroupSelector_LoadEligibleListNilListShouldErr(t *testing.T) { +func TestIndexHashedGroupSelector_SetNilNodesMapShouldErr(t *testing.T) { t.Parallel() nodesMap := createDummyNodesMap() ihgs, _ := sharding.NewIndexHashedNodesCoordinator( - 10, + 2, + 1, mock.HasherMock{}, 0, 1, @@ -101,7 +117,8 @@ func TestIndexHashedGroupSelector_OkValShouldWork(t *testing.T) { nodesMap := createDummyNodesMap() ihgs, err := sharding.NewIndexHashedNodesCoordinator( - 10, + 2, + 1, mock.HasherMock{}, 0, 1, @@ -114,41 +131,38 @@ func TestIndexHashedGroupSelector_OkValShouldWork(t *testing.T) { //------- ComputeValidatorsGroup -func TestIndexHashedGroupSelector_ComputeValidatorsGroup0SizeShouldErr(t *testing.T) { +func TestIndexHashedGroupSelector_NewCoordinatorGroup0SizeShouldErr(t *testing.T) { t.Parallel() - ihgs, _ := sharding.NewIndexHashedNodesCoordinator( + nodesMap := createDummyNodesMap() + ihgs, err := sharding.NewIndexHashedNodesCoordinator( + 0, 1, mock.HasherMock{}, 0, 1, - make(map[uint32][]sharding.Validator), + nodesMap, ) - list := make([]sharding.Validator, 0) - - list, err := ihgs.ComputeValidatorsGroup([]byte("randomness")) - - assert.Nil(t, list) - assert.Equal(t, sharding.ErrSmallEligibleListSize, err) + assert.Nil(t, ihgs) + assert.Equal(t, sharding.ErrInvalidConsensusGroupSize, err) } -func TestIndexHashedGroupSelector_ComputeValidatorsGroupWrongSizeShouldErr(t *testing.T) { +func TestIndexHashedGroupSelector_NewCoordinatorTooFewNodesShouldErr(t *testing.T) { t.Parallel() nodesMap := createDummyNodesMap() - ihgs, _ := sharding.NewIndexHashedNodesCoordinator( + ihgs, err := sharding.NewIndexHashedNodesCoordinator( 10, + 1, mock.HasherMock{}, 0, 1, nodesMap, ) - list, err := ihgs.ComputeValidatorsGroup([]byte("randomness")) - - assert.Nil(t, list) - assert.Equal(t, sharding.ErrSmallEligibleListSize, err) + assert.Nil(t, ihgs) + assert.Equal(t, sharding.ErrSmallShardEligibleListSize, err) } func TestIndexHashedGroupSelector_ComputeValidatorsGroupNilRandomnessShouldErr(t *testing.T) { @@ -157,6 +171,7 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroupNilRandomnessShouldErr(t nodesMap := createDummyNodesMap() ihgs, _ := sharding.NewIndexHashedNodesCoordinator( 2, + 1, mock.HasherMock{}, 0, 1, @@ -181,6 +196,7 @@ func 
TestIndexHashedGroupSelector_ComputeValidatorsGroup1ValidatorShouldReturnSa nodesMap := make(map[uint32][]sharding.Validator) nodesMap[0] = list ihgs, _ := sharding.NewIndexHashedNodesCoordinator( + 1, 1, mock.HasherMock{}, 0, @@ -217,7 +233,13 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest2Validators(t *testi } nodesMap := createDummyNodesMap() - ihgs, _ := sharding.NewIndexHashedNodesCoordinator(2, hasher, 0, 1, nodesMap) + ihgs, _ := sharding.NewIndexHashedNodesCoordinator( + 2, + 1, + hasher, + 0, + 1, + nodesMap) list2, err := ihgs.ComputeValidatorsGroup([]byte(randomness)) @@ -257,7 +279,16 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest2ValidatorsRevertOrd nodesMap := make(map[uint32][]sharding.Validator) nodesMap[0] = list - ihgs, _ := sharding.NewIndexHashedNodesCoordinator(2, hasher, 0, 1, nodesMap) + metaNode, _ := sharding.NewValidator(big.NewInt(1), 1, []byte("pubKeyMeta")) + nodesMap[sharding.MetachainShardId] = []sharding.Validator{metaNode} + ihgs, _ := sharding.NewIndexHashedNodesCoordinator( + 2, + 1, + hasher, + 0, + 1, + nodesMap, + ) list2, err := ihgs.ComputeValidatorsGroup([]byte(randomness)) @@ -289,7 +320,14 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest2ValidatorsSameIndex } nodesMap := createDummyNodesMap() - ihgs, _ := sharding.NewIndexHashedNodesCoordinator(2, hasher, 0, 1, nodesMap) + ihgs, _ := sharding.NewIndexHashedNodesCoordinator( + 2, + 1, + hasher, + 0, + 1, + nodesMap, + ) list2, err := ihgs.ComputeValidatorsGroup([]byte(randomness)) @@ -357,7 +395,16 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest6From10ValidatorsSho nodesMap := make(map[uint32][]sharding.Validator) nodesMap[0] = list - ihgs, _ := sharding.NewIndexHashedNodesCoordinator(6, hasher, 0, 1, nodesMap) + validatorMeta, _ := sharding.NewValidator(big.NewInt(1), 1, []byte("pubKeyMeta")) + nodesMap[sharding.MetachainShardId] = []sharding.Validator{validatorMeta} + ihgs, _ := sharding.NewIndexHashedNodesCoordinator( + 6, + 1, + hasher, + 0, + 1, + nodesMap, + ) list2, err := ihgs.ComputeValidatorsGroup([]byte(randomness)) @@ -370,7 +417,6 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest6From10ValidatorsSho assert.Equal(t, validator0, list2[3]) assert.Equal(t, validator3, list2[4]) assert.Equal(t, validator4, list2[5]) - } func BenchmarkIndexHashedGroupSelector_ComputeValidatorsGroup21of400(b *testing.B) { @@ -387,6 +433,7 @@ func BenchmarkIndexHashedGroupSelector_ComputeValidatorsGroup21of400(b *testing. 
ihgs, _ := sharding.NewIndexHashedNodesCoordinator( consensusGroupSize, + 1, mock.HasherMock{}, 0, 1, diff --git a/sharding/interface.go b/sharding/interface.go index a4400995c34..e42840e9166 100644 --- a/sharding/interface.go +++ b/sharding/interface.go @@ -31,8 +31,6 @@ type NodesCoordinator interface { PublicKeysSelector SetNodesPerShards(nodes map[uint32][]Validator) error ComputeValidatorsGroup(randomness []byte) (validatorsGroup []Validator, err error) - ConsensusGroupSize() int - SetConsensusGroupSize(int) error } // PublicKeysSelector allows retrieval of eligible validators public keys From 9036c17bce9969b59d7e4b738cf9176c21c094dc Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Wed, 31 Jul 2019 19:00:22 +0300 Subject: [PATCH 049/234] data, process, integrationTests: fix merge issues Adapt to integration tests deltas --- data/mock/txTypeHandlerMock.go | 18 +++++ data/mock/unsignedTxHandlerMock.go | 51 +++++++++++++ data/state/peerAccount.go | 4 +- .../smartContract/executingSCCalls_test.go | 2 +- integrationTests/state/stateTrie_test.go | 12 +++- integrationTests/testInitializer.go | 6 +- integrationTests/testProcessorNode.go | 11 ++- process/coordinator/process_test.go | 20 +++--- process/interface.go | 1 - process/smartContract/process.go | 31 -------- process/smartContract/process_test.go | 72 ++++++++----------- process/transaction/process.go | 2 +- process/transaction/process_test.go | 4 +- process/unsigned/feeTxHandler.go | 11 ++- 14 files changed, 147 insertions(+), 98 deletions(-) create mode 100644 data/mock/txTypeHandlerMock.go create mode 100644 data/mock/unsignedTxHandlerMock.go diff --git a/data/mock/txTypeHandlerMock.go b/data/mock/txTypeHandlerMock.go new file mode 100644 index 00000000000..2fcaeaf25d3 --- /dev/null +++ b/data/mock/txTypeHandlerMock.go @@ -0,0 +1,18 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/process" +) + +type TxTypeHandlerMock struct { + ComputeTransactionTypeCalled func(tx data.TransactionHandler) (process.TransactionType, error) +} + +func (th *TxTypeHandlerMock) ComputeTransactionType(tx data.TransactionHandler) (process.TransactionType, error) { + if th.ComputeTransactionTypeCalled == nil { + return process.MoveBalance, nil + } + + return th.ComputeTransactionTypeCalled(tx) +} diff --git a/data/mock/unsignedTxHandlerMock.go b/data/mock/unsignedTxHandlerMock.go new file mode 100644 index 00000000000..9ac5fd55a10 --- /dev/null +++ b/data/mock/unsignedTxHandlerMock.go @@ -0,0 +1,51 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go/data" +) + +type UnsignedTxHandlerMock struct { + CleanProcessedUtxsCalled func() + AddProcessedUTxCalled func(tx data.TransactionHandler) + CreateAllUTxsCalled func() []data.TransactionHandler + VerifyCreatedUTxsCalled func() error + AddTxFeeFromBlockCalled func(tx data.TransactionHandler) +} + +func (ut *UnsignedTxHandlerMock) AddTxFeeFromBlock(tx data.TransactionHandler) { + if ut.AddTxFeeFromBlockCalled == nil { + return + } + + ut.AddTxFeeFromBlockCalled(tx) +} + +func (ut *UnsignedTxHandlerMock) CleanProcessedUTxs() { + if ut.CleanProcessedUtxsCalled == nil { + return + } + + ut.CleanProcessedUtxsCalled() +} + +func (ut *UnsignedTxHandlerMock) AddProcessedUTx(tx data.TransactionHandler) { + if ut.AddProcessedUTxCalled == nil { + return + } + + ut.AddProcessedUTxCalled(tx) +} + +func (ut *UnsignedTxHandlerMock) CreateAllUTxs() []data.TransactionHandler { + if ut.CreateAllUTxsCalled == nil { + return nil + } + return 
ut.CreateAllUTxsCalled() +} + +func (ut *UnsignedTxHandlerMock) VerifyCreatedUTxs() error { + if ut.VerifyCreatedUTxsCalled == nil { + return nil + } + return ut.VerifyCreatedUTxsCalled() +} diff --git a/data/state/peerAccount.go b/data/state/peerAccount.go index 8c0a9b06353..976149dbe79 100644 --- a/data/state/peerAccount.go +++ b/data/state/peerAccount.go @@ -87,7 +87,7 @@ func (a *PeerAccount) AddressContainer() AddressContainer { // SetNonceWithJournal sets the account's nonce, saving the old nonce before changing func (a *PeerAccount) SetNonceWithJournal(nonce uint64) error { - entry, err := NewJournalEntryNonce(a, a.Nonce) + entry, err := NewBaseJournalEntryNonce(a, a.Nonce) if err != nil { return err } @@ -153,7 +153,7 @@ func (a *PeerAccount) SetRootHash(roothash []byte) { // SetRootHashWithJournal sets the account's root hash, saving the old root hash before changing func (a *PeerAccount) SetRootHashWithJournal(rootHash []byte) error { - entry, err := NewBaseJournalEntryRootHash(a, a.RootHash) + entry, err := NewBaseJournalEntryRootHash(a, a.RootHash, a.DataTrie()) if err != nil { return err } diff --git a/integrationTests/multiShard/smartContract/executingSCCalls_test.go b/integrationTests/multiShard/smartContract/executingSCCalls_test.go index 248623a1fbf..4e4f479b3a1 100644 --- a/integrationTests/multiShard/smartContract/executingSCCalls_test.go +++ b/integrationTests/multiShard/smartContract/executingSCCalls_test.go @@ -445,7 +445,7 @@ func processAndTestIntermediateResults(t *testing.T, proposerNodeShardSC *testNo // - Initial balance + withdraw value - fees // TODO: Fees and gas should be taken into consideration when the fees are implemented - now we have extra money // from the gas returned since the gas was not substracted in the first place - finalValue := big.NewInt(0).Add(mintingValue, big.NewInt(int64(withdrawValue+uint64(gasLimit-1*gasPrice)))) + finalValue := big.NewInt(0).Add(mintingValue, big.NewInt(int64(withdrawValue-1))) acc, _ := proposerNodeShardAccount.node.GetAccount(hex.EncodeToString(accountShardAddressBytes)) assert.Equal(t, finalValue, acc.Balance) } diff --git a/integrationTests/state/stateTrie_test.go b/integrationTests/state/stateTrie_test.go index da5e676150b..a82359eee58 100644 --- a/integrationTests/state/stateTrie_test.go +++ b/integrationTests/state/stateTrie_test.go @@ -1144,7 +1144,17 @@ func createAndExecTxs( shardCoordinator := mock2.NewMultiShardsCoordinatorMock(1) addrConv, _ := addressConverters.NewPlainAddressConverter(32, "0x") - txProcessor, _ := transaction.NewTxProcessor(adb, hasher, addrConv, marsh, shardCoordinator, &mock2.SCProcessorMock{}) + txProcessor, _ := transaction.NewTxProcessor( + adb, + hasher, + addrConv, + marsh, + shardCoordinator, + &mock2.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, + ) + var totalTime int64 = 0 for i := 0; i < nrTxs; i++ { diff --git a/integrationTests/testInitializer.go b/integrationTests/testInitializer.go index e60cb420f83..8ebe6c98374 100644 --- a/integrationTests/testInitializer.go +++ b/integrationTests/testInitializer.go @@ -33,7 +33,7 @@ import ( "github.com/ElrondNetwork/elrond-go/storage" "github.com/ElrondNetwork/elrond-go/storage/memorydb" "github.com/ElrondNetwork/elrond-go/storage/storageUnit" - vmcommon "github.com/ElrondNetwork/elrond-vm-common" + "github.com/ElrondNetwork/elrond-vm-common" "github.com/ElrondNetwork/elrond-vm/iele/elrond/node/endpoint" "github.com/btcsuite/btcd/btcec" libp2pCrypto "github.com/libp2p/go-libp2p-core/crypto" @@ 
-169,12 +169,12 @@ func CreateMetaStore(coordinator sharding.Coordinator) dataRetriever.StorageServ } // CreateAccountsDB creates an account state with a valid trie implementation but with a memory storage -func CreateAccountsDB(shardCoordinator sharding.Coordinator) *state.AccountsDB { +func CreateAccountsDB(accountType factory.Type) *state.AccountsDB { hasher := sha256.Sha256{} store := CreateMemUnit() tr, _ := trie.NewTrie(store, TestMarshalizer, hasher) - accountFactory, _ := factory.NewAccountFactoryCreator(shardCoordinator) + accountFactory, _ := factory.NewAccountFactoryCreator(accountType) adb, _ := state.NewAccountsDB(tr, sha256.Sha256{}, TestMarshalizer, accountFactory) return adb diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index 830d4d8c5f1..bccf95a996f 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -42,8 +42,9 @@ import ( "github.com/ElrondNetwork/elrond-go/process/smartContract/hooks" "github.com/ElrondNetwork/elrond-go/process/transaction" "github.com/ElrondNetwork/elrond-go/sharding" - vmcommon "github.com/ElrondNetwork/elrond-vm-common" + "github.com/ElrondNetwork/elrond-vm-common" "github.com/pkg/errors" + factory2 "github.com/ElrondNetwork/elrond-go/data/state/factory" ) // TestHasher represents a Sha256 hasher @@ -125,7 +126,7 @@ func NewTestProcessorNode(maxShards uint32, nodeShardId uint32, txSignPrivKeySha tpn.initCrypto(txSignPrivKeyShardId) tpn.initDataPools() tpn.initStorage() - tpn.AccntState = CreateAccountsDB(tpn.ShardCoordinator) + tpn.AccntState = CreateAccountsDB(factory2.UserAccount) tpn.initChainHandler() tpn.GenesisBlocks = CreateGenesisBlocks(tpn.ShardCoordinator) tpn.initInterceptors() @@ -289,6 +290,7 @@ func (tpn *TestProcessorNode) initInnerProcessors() { TestMarshalizer, TestHasher, TestAddressConverter, + &mock.SpecialAddressHandlerMock{}, tpn.Storage, ) tpn.InterimProcContainer, _ = interimProcFactory.Create() @@ -308,8 +310,11 @@ func (tpn *TestProcessorNode) initInnerProcessors() { TestAddressConverter, tpn.ShardCoordinator, tpn.ScrForwarder, + &mock.UnsignedTxHandlerMock{}, ) + txTypeHandler, _ := coordinator.NewTxTypeHandler(TestAddressConverter, tpn.ShardCoordinator, tpn.AccntState) + tpn.TxProcessor, _ = transaction.NewTxProcessor( tpn.AccntState, TestHasher, @@ -317,6 +322,8 @@ func (tpn *TestProcessorNode) initInnerProcessors() { TestMarshalizer, tpn.ShardCoordinator, tpn.ScProcessor, + &mock.UnsignedTxHandlerMock{}, + txTypeHandler, ) fact, _ := shard.NewPreProcessorsContainerFactory( diff --git a/process/coordinator/process_test.go b/process/coordinator/process_test.go index fe84d1d8251..533b1670019 100644 --- a/process/coordinator/process_test.go +++ b/process/coordinator/process_test.go @@ -2,29 +2,28 @@ package coordinator import ( "bytes" - "errors" - "math/big" "reflect" - "sync" - "sync/atomic" "testing" - "time" - - "github.com/ElrondNetwork/elrond-go/core" - "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/data/smartContractResult" - "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/data/transaction" "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/process/factory" "github.com/ElrondNetwork/elrond-go/process/factory/shard" "github.com/ElrondNetwork/elrond-go/process/mock" "github.com/ElrondNetwork/elrond-go/storage" 
"github.com/ElrondNetwork/elrond-go/storage/memorydb" "github.com/ElrondNetwork/elrond-go/storage/storageUnit" "github.com/stretchr/testify/assert" + "github.com/ElrondNetwork/elrond-go/data" + "math/big" + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/process/factory" + "time" + "sync/atomic" + "sync" + "errors" + "github.com/ElrondNetwork/elrond-go/data/state" ) func initDataPool(testHash []byte) *mock.PoolsHolderStub { @@ -388,6 +387,7 @@ func createInterimProcessorContainer() process.IntermediateProcessorContainer { &mock.MarshalizerMock{}, &mock.HasherMock{}, &mock.AddressConverterMock{}, + &mock.SpecialAddressHandlerMock{}, initStore(), ) container, _ := preFactory.Create() diff --git a/process/interface.go b/process/interface.go index 5963def09cf..f6a3efd1b82 100644 --- a/process/interface.go +++ b/process/interface.go @@ -55,7 +55,6 @@ type TransactionCoordinator interface { // SmartContractProcessor is the main interface for the smart contract caller engine type SmartContractProcessor interface { - ComputeTransactionType(tx *transaction.Transaction) (TransactionType, error) ExecuteSmartContractTransaction(tx *transaction.Transaction, acntSrc, acntDst state.AccountHandler, round uint64) error DeploySmartContract(tx *transaction.Transaction, acntSrc state.AccountHandler, round uint64) error } diff --git a/process/smartContract/process.go b/process/smartContract/process.go index 62046f3b78a..6efe9c62a68 100644 --- a/process/smartContract/process.go +++ b/process/smartContract/process.go @@ -104,37 +104,6 @@ func NewSmartContractProcessor( mapExecState: make(map[uint64]scExecutionState)}, nil } -// ComputeTransactionType calculates the type of the transaction -func (sc *scProcessor) ComputeTransactionType(tx *transaction.Transaction) (process.TransactionType, error) { - err := sc.checkTxValidity(tx) - if err != nil { - return 0, err - } - - isEmptyAddress := sc.isDestAddressEmpty(tx) - if isEmptyAddress { - if len(tx.Data) > 0 { - return process.SCDeployment, nil - } - return 0, process.ErrWrongTransaction - } - - acntDst, err := sc.getAccountFromAddress(tx.RcvAddr) - if err != nil { - return 0, err - } - - if acntDst == nil { - return process.MoveBalance, nil - } - - if !acntDst.IsInterfaceNil() && len(acntDst.GetCode()) > 0 { - return process.SCInvoking, nil - } - - return process.MoveBalance, nil -} - func (sc *scProcessor) checkTxValidity(tx *transaction.Transaction) error { if tx == nil || tx.IsInterfaceNil() { return process.ErrNilTransaction diff --git a/process/smartContract/process_test.go b/process/smartContract/process_test.go index 4fb7f79f4ad..7b953f1391c 100644 --- a/process/smartContract/process_test.go +++ b/process/smartContract/process_test.go @@ -14,6 +14,7 @@ import ( "github.com/ElrondNetwork/elrond-vm-common" "github.com/pkg/errors" "github.com/stretchr/testify/assert" + "github.com/ElrondNetwork/elrond-go/process/coordinator" ) func generateRandomByteSlice(size int) []byte { @@ -251,33 +252,24 @@ func TestNewSmartContractProcessor(t *testing.T) { assert.NotNil(t, sc) assert.Nil(t, err) - - tx := &transaction.Transaction{} - tx.Nonce = 0 - tx.SndAddr = []byte("SRC") - tx.RcvAddr = nil - tx.Value = big.NewInt(45) - - _, err = sc.ComputeTransactionType(tx) - assert.Equal(t, process.ErrWrongTransaction, err) } func TestScProcessor_ComputeTransactionTypeScDeployment(t *testing.T) { t.Parallel() addressConverter := &mock.AddressConverterMock{} - sc, err := NewSmartContractProcessor( - &mock.VMExecutionHandlerStub{}, - 
&mock.ArgumentParserMock{}, - &mock.HasherMock{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{}, - &mock.TemporaryAccountsHandlerMock{}, + + txTypeHandler, err := coordinator.NewTxTypeHandler( addressConverter, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.AccountsStub{ + GetAccountWithJournalCalled: func(addressContainer state.AddressContainer) (handler state.AccountHandler, e error) { + return nil, nil + }, + }, + ) - assert.NotNil(t, sc) + assert.NotNil(t, txTypeHandler) assert.Nil(t, err) tx := &transaction.Transaction{} @@ -287,7 +279,7 @@ func TestScProcessor_ComputeTransactionTypeScDeployment(t *testing.T) { tx.Data = "data" tx.Value = big.NewInt(45) - txType, err := sc.ComputeTransactionType(tx) + txType, err := txTypeHandler.ComputeTransactionType(tx) assert.Nil(t, err) assert.Equal(t, process.SCDeployment, txType) } @@ -306,23 +298,20 @@ func TestScProcessor_ComputeTransactionTypeScInvoking(t *testing.T) { _, acntDst := createAccounts(tx) acntDst.SetCode([]byte("code")) - sc, err := NewSmartContractProcessor( - &mock.VMExecutionHandlerStub{}, - &mock.ArgumentParserMock{}, - &mock.HasherMock{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{GetAccountWithJournalCalled: func(addressContainer state.AddressContainer) (handler state.AccountHandler, e error) { - return acntDst, nil - }}, - &mock.TemporaryAccountsHandlerMock{}, + txTypeHandler, err := coordinator.NewTxTypeHandler( addrConverter, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.AccountsStub{ + GetAccountWithJournalCalled: func(addressContainer state.AddressContainer) (handler state.AccountHandler, e error) { + return acntDst, nil + }, + }, + ) - assert.NotNil(t, sc) + assert.NotNil(t, txTypeHandler) assert.Nil(t, err) - txType, err := sc.ComputeTransactionType(tx) + txType, err := txTypeHandler.ComputeTransactionType(tx) assert.Nil(t, err) assert.Equal(t, process.SCInvoking, txType) } @@ -340,23 +329,20 @@ func TestScProcessor_ComputeTransactionTypeMoveBalance(t *testing.T) { _, acntDst := createAccounts(tx) - sc, err := NewSmartContractProcessor( - &mock.VMExecutionHandlerStub{}, - &mock.ArgumentParserMock{}, - &mock.HasherMock{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{GetAccountWithJournalCalled: func(addressContainer state.AddressContainer) (handler state.AccountHandler, e error) { - return acntDst, nil - }}, - &mock.TemporaryAccountsHandlerMock{}, + txTypeHandler, err := coordinator.NewTxTypeHandler( addrConverter, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.AccountsStub{ + GetAccountWithJournalCalled: func(addressContainer state.AddressContainer) (handler state.AccountHandler, e error) { + return acntDst, nil + }, + }, + ) - assert.NotNil(t, sc) + assert.NotNil(t, txTypeHandler) assert.Nil(t, err) - txType, err := sc.ComputeTransactionType(tx) + txType, err := txTypeHandler.ComputeTransactionType(tx) assert.Nil(t, err) assert.Equal(t, process.MoveBalance, txType) } diff --git a/process/transaction/process.go b/process/transaction/process.go index 499ed4a7ada..fe827e07b5c 100644 --- a/process/transaction/process.go +++ b/process/transaction/process.go @@ -90,7 +90,7 @@ func (txProc *txProcessor) ProcessTransaction(tx *transaction.Transaction, round return err } - txType, err := txProc.scProcessor.ComputeTransactionType(tx) + txType, err := txProc.txTypeHandler.ComputeTransactionType(tx) if err != nil { return err } diff --git a/process/transaction/process_test.go 
b/process/transaction/process_test.go index aca649717dd..0ce3f15457f 100644 --- a/process/transaction/process_test.go +++ b/process/transaction/process_test.go @@ -1053,6 +1053,8 @@ func TestTxProcessor_ProcessTransactionScTxShouldWork(t *testing.T) { tx.SndAddr = []byte("SRC") tx.RcvAddr = generateRandomByteSlice(addrConverter.AddressLen()) tx.Value = big.NewInt(45) + tx.GasPrice = 1 + tx.GasLimit = 1 acntSrc, err := state.NewAccount(mock.NewAddressMock(tx.SndAddr), tracker) assert.Nil(t, err) @@ -1060,7 +1062,7 @@ func TestTxProcessor_ProcessTransactionScTxShouldWork(t *testing.T) { acntDst, err := state.NewAccount(mock.NewAddressMock(tx.RcvAddr), tracker) assert.Nil(t, err) - acntSrc.Balance = big.NewInt(45) + acntSrc.Balance = big.NewInt(46) acntDst.SetCode([]byte{65}) accounts := createAccountStub(tx.SndAddr, tx.RcvAddr, acntSrc, acntDst) diff --git a/process/unsigned/feeTxHandler.go b/process/unsigned/feeTxHandler.go index 20cd00ca581..69eb1925db6 100644 --- a/process/unsigned/feeTxHandler.go +++ b/process/unsigned/feeTxHandler.go @@ -13,10 +13,10 @@ import ( ) // MinGasPrice is the minimal gas price to be paid for any transaction -var MinGasPrice = uint64(1) +var MinGasPrice = uint64(0) // MinTxFee is the minimal fee to be paid for any transaction -var MinTxFee = uint64(1) +var MinTxFee = uint64(0) const communityPercentage = 0.1 // 1 = 100%, 0 = 0% const leaderPercentage = 0.4 // 1 = 100%, 0 = 0% @@ -247,3 +247,10 @@ func (ftxh *feeTxHandler) VerifyCreatedUTxs() error { return nil } + +// CreateMarshalizedData creates the marshalized data for broadcasting purposes +func (ftxh *feeTxHandler) CreateMarshalizedData(txHashes [][]byte) ([][]byte, error) { + // TODO: implement me + + return make([][]byte, 0), nil +} From dba63cf37dad6c65b397a3359bc1ef1c125a29aa Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Thu, 1 Aug 2019 13:29:29 +0300 Subject: [PATCH 050/234] process: fix goimports --- .../broadcast/shardChainMessenger_test.go | 2 +- integrationTests/testProcessorNode.go | 6 +++--- process/coordinator/process_test.go | 19 ++++++++++--------- process/smartContract/process_test.go | 2 +- process/unsigned/feeTxHandler.go | 1 + 5 files changed, 16 insertions(+), 14 deletions(-) diff --git a/consensus/broadcast/shardChainMessenger_test.go b/consensus/broadcast/shardChainMessenger_test.go index 7f9a959ba58..ab21755bd59 100644 --- a/consensus/broadcast/shardChainMessenger_test.go +++ b/consensus/broadcast/shardChainMessenger_test.go @@ -1,7 +1,6 @@ package broadcast_test import ( - "github.com/ElrondNetwork/elrond-go/process/factory" "testing" "time" @@ -9,6 +8,7 @@ import ( "github.com/ElrondNetwork/elrond-go/consensus/mock" "github.com/ElrondNetwork/elrond-go/consensus/spos" "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/process/factory" "github.com/stretchr/testify/assert" ) diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index bccf95a996f..53ca0392b2b 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -19,6 +19,7 @@ import ( dataBlock "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/data/state/addressConverters" + accountfactory "github.com/ElrondNetwork/elrond-go/data/state/factory" dataTransaction "github.com/ElrondNetwork/elrond-go/data/transaction" "github.com/ElrondNetwork/elrond-go/data/typeConverters/uint64ByteSlice" "github.com/ElrondNetwork/elrond-go/dataRetriever" @@ 
-42,9 +43,8 @@ import ( "github.com/ElrondNetwork/elrond-go/process/smartContract/hooks" "github.com/ElrondNetwork/elrond-go/process/transaction" "github.com/ElrondNetwork/elrond-go/sharding" - "github.com/ElrondNetwork/elrond-vm-common" + vmcommon "github.com/ElrondNetwork/elrond-vm-common" "github.com/pkg/errors" - factory2 "github.com/ElrondNetwork/elrond-go/data/state/factory" ) // TestHasher represents a Sha256 hasher @@ -126,7 +126,7 @@ func NewTestProcessorNode(maxShards uint32, nodeShardId uint32, txSignPrivKeySha tpn.initCrypto(txSignPrivKeyShardId) tpn.initDataPools() tpn.initStorage() - tpn.AccntState = CreateAccountsDB(factory2.UserAccount) + tpn.AccntState = CreateAccountsDB(accountfactory.UserAccount) tpn.initChainHandler() tpn.GenesisBlocks = CreateGenesisBlocks(tpn.ShardCoordinator) tpn.initInterceptors() diff --git a/process/coordinator/process_test.go b/process/coordinator/process_test.go index 533b1670019..5b1db17b56e 100644 --- a/process/coordinator/process_test.go +++ b/process/coordinator/process_test.go @@ -2,28 +2,29 @@ package coordinator import ( "bytes" + "errors" + "math/big" "reflect" + "sync" + "sync/atomic" "testing" + "time" + + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/data/smartContractResult" + "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/data/transaction" "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/factory" "github.com/ElrondNetwork/elrond-go/process/factory/shard" "github.com/ElrondNetwork/elrond-go/process/mock" "github.com/ElrondNetwork/elrond-go/storage" "github.com/ElrondNetwork/elrond-go/storage/memorydb" "github.com/ElrondNetwork/elrond-go/storage/storageUnit" "github.com/stretchr/testify/assert" - "github.com/ElrondNetwork/elrond-go/data" - "math/big" - "github.com/ElrondNetwork/elrond-go/core" - "github.com/ElrondNetwork/elrond-go/process/factory" - "time" - "sync/atomic" - "sync" - "errors" - "github.com/ElrondNetwork/elrond-go/data/state" ) func initDataPool(testHash []byte) *mock.PoolsHolderStub { diff --git a/process/smartContract/process_test.go b/process/smartContract/process_test.go index 7b953f1391c..bded551072f 100644 --- a/process/smartContract/process_test.go +++ b/process/smartContract/process_test.go @@ -10,11 +10,11 @@ import ( "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/data/transaction" "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/coordinator" "github.com/ElrondNetwork/elrond-go/process/mock" "github.com/ElrondNetwork/elrond-vm-common" "github.com/pkg/errors" "github.com/stretchr/testify/assert" - "github.com/ElrondNetwork/elrond-go/process/coordinator" ) func generateRandomByteSlice(size int) []byte { diff --git a/process/unsigned/feeTxHandler.go b/process/unsigned/feeTxHandler.go index 69eb1925db6..f0c97cc717b 100644 --- a/process/unsigned/feeTxHandler.go +++ b/process/unsigned/feeTxHandler.go @@ -12,6 +12,7 @@ import ( "sync" ) +// TODO: Set MinGasPrice and MinTxFee to some positive value (TBD) // MinGasPrice is the minimal gas price to be paid for any transaction var MinGasPrice = uint64(0) From fce5e0d848d83f21533a83c63cee0b1fe73091fc Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Thu, 1 Aug 2019 15:13:02 +0300 Subject: [PATCH 051/234] node, sharding, integrationTests: format go 
imports --- ...electorMock.go => nodesCoordinatorMock.go} | 0 .../singleShard/block/testInitializer.go | 2 +- node/defineOptions.go | 1 + node/defineOptions_test.go | 26 +++++++ node/mock/nodesCoordinatorMock.go | 70 +++++++++++++++++++ node/mock/validatorMock.go | 27 +++++++ sharding/indexHashedNodesCoordinator.go | 35 +++++----- 7 files changed, 141 insertions(+), 20 deletions(-) rename consensus/mock/{validatorGroupSelectorMock.go => nodesCoordinatorMock.go} (100%) create mode 100644 node/mock/nodesCoordinatorMock.go create mode 100644 node/mock/validatorMock.go diff --git a/consensus/mock/validatorGroupSelectorMock.go b/consensus/mock/nodesCoordinatorMock.go similarity index 100% rename from consensus/mock/validatorGroupSelectorMock.go rename to consensus/mock/nodesCoordinatorMock.go diff --git a/integrationTests/singleShard/block/testInitializer.go b/integrationTests/singleShard/block/testInitializer.go index 5e12ce87b6e..8b8c331a0bd 100644 --- a/integrationTests/singleShard/block/testInitializer.go +++ b/integrationTests/singleShard/block/testInitializer.go @@ -5,6 +5,7 @@ import ( "crypto/ecdsa" "fmt" "io" + "math/big" "math/rand" "strings" "time" @@ -43,7 +44,6 @@ import ( "github.com/ElrondNetwork/elrond-go/storage/storageUnit" "github.com/btcsuite/btcd/btcec" libp2pCrypto "github.com/libp2p/go-libp2p-core/crypto" - "math/big" ) var r io.Reader diff --git a/node/defineOptions.go b/node/defineOptions.go index 7b1e1e72574..20b85c11b07 100644 --- a/node/defineOptions.go +++ b/node/defineOptions.go @@ -265,6 +265,7 @@ func WithShardCoordinator(shardCoordinator sharding.Coordinator) Option { } } +// WithNodesCoordinator sets up the nodes coordinator func WithNodesCoordinator(nodesCoordinator sharding.NodesCoordinator) Option { return func(n *Node) error { if nodesCoordinator == nil { diff --git a/node/defineOptions_test.go b/node/defineOptions_test.go index 569f38377a7..def777d7d03 100644 --- a/node/defineOptions_test.go +++ b/node/defineOptions_test.go @@ -543,6 +543,32 @@ func TestWithShardCoordinator_ShouldWork(t *testing.T) { assert.Nil(t, err) } +func TestWithNodesCoordinator_NilNodesCoordinatorShouldErr(t *testing.T) { + t.Parallel() + + node, _ := NewNode() + + opt := WithNodesCoordinator(nil) + err := opt(node) + + assert.Nil(t, node.nodesCoordinator) + assert.Equal(t, ErrNilNodesCoordinator, err) +} + +func TestWithNodesCoordinator_ShouldWork(t *testing.T) { + t.Parallel() + + node, _ := NewNode() + + nodesCoordinator := &mock.NodesCoordinatorMock{} + + opt := WithNodesCoordinator(nodesCoordinator) + err := opt(node) + + assert.True(t, node.nodesCoordinator == nodesCoordinator) + assert.Nil(t, err) +} + func TestWithUint64ByteSliceConverter_NilConverterShouldErr(t *testing.T) { t.Parallel() diff --git a/node/mock/nodesCoordinatorMock.go b/node/mock/nodesCoordinatorMock.go new file mode 100644 index 00000000000..3f16e81f374 --- /dev/null +++ b/node/mock/nodesCoordinatorMock.go @@ -0,0 +1,70 @@ +package mock + +import ( + "math/big" + + "github.com/ElrondNetwork/elrond-go/sharding" +) + +type NodesCoordinatorMock struct { + ComputeValidatorsGroupCalled func([]byte) ([]sharding.Validator, error) + GetValidatorsPublicKeysCalled func(randomness []byte) ([]string, error) +} + +func (ncm NodesCoordinatorMock) ComputeValidatorsGroup( + randomness []byte, +) (validatorsGroup []sharding.Validator, err error) { + + if ncm.ComputeValidatorsGroupCalled != nil { + return ncm.ComputeValidatorsGroupCalled(randomness) + } + + list := []sharding.Validator{ + NewValidatorMock(big.NewInt(0), 0, 
[]byte("A")), + NewValidatorMock(big.NewInt(0), 0, []byte("B")), + NewValidatorMock(big.NewInt(0), 0, []byte("C")), + NewValidatorMock(big.NewInt(0), 0, []byte("D")), + NewValidatorMock(big.NewInt(0), 0, []byte("E")), + NewValidatorMock(big.NewInt(0), 0, []byte("F")), + NewValidatorMock(big.NewInt(0), 0, []byte("G")), + NewValidatorMock(big.NewInt(0), 0, []byte("H")), + NewValidatorMock(big.NewInt(0), 0, []byte("I")), + } + + return list, nil +} + +func (ncm NodesCoordinatorMock) GetValidatorsPublicKeys(randomness []byte) ([]string, error) { + if ncm.GetValidatorsPublicKeysCalled != nil { + return ncm.GetValidatorsPublicKeysCalled(randomness) + } + + validators, err := ncm.ComputeValidatorsGroup(randomness) + if err != nil { + return nil, err + } + + pubKeys := make([]string, 0) + + for _, v := range validators { + pubKeys = append(pubKeys, string(v.PubKey())) + } + + return pubKeys, nil +} + +func (ncm NodesCoordinatorMock) ConsensusGroupSize() int { + panic("implement me") +} + +func (ncm NodesCoordinatorMock) SetNodesPerShards(map[uint32][]sharding.Validator) error { + return nil +} + +func (ncm NodesCoordinatorMock) SetConsensusGroupSize(int) error { + panic("implement me") +} + +func (ncm NodesCoordinatorMock) GetSelectedPublicKeys(selection []byte) (publicKeys []string, err error) { + panic("implement me") +} diff --git a/node/mock/validatorMock.go b/node/mock/validatorMock.go new file mode 100644 index 00000000000..cd22a51ce85 --- /dev/null +++ b/node/mock/validatorMock.go @@ -0,0 +1,27 @@ +package mock + +import ( + "math/big" +) + +type ValidatorMock struct { + stake *big.Int + rating int32 + pubKey []byte +} + +func NewValidatorMock(stake *big.Int, rating int32, pubKey []byte) *ValidatorMock { + return &ValidatorMock{stake: stake, rating: rating, pubKey: pubKey} +} + +func (vm *ValidatorMock) Stake() *big.Int { + return vm.stake +} + +func (vm *ValidatorMock) Rating() int32 { + return vm.rating +} + +func (vm *ValidatorMock) PubKey() []byte { + return vm.pubKey +} diff --git a/sharding/indexHashedNodesCoordinator.go b/sharding/indexHashedNodesCoordinator.go index 0f37eca7ed6..9fe53d6c26f 100644 --- a/sharding/indexHashedNodesCoordinator.go +++ b/sharding/indexHashedNodesCoordinator.go @@ -99,20 +99,14 @@ func (ihgs *indexHashedNodesCoordinator) ComputeValidatorsGroup(randomness []byt return nil, ErrNilRandomness } - tempList := make([]Validator, 0) - var consensusGroupSize int - if ihgs == nil { return nil, ErrNilRandomness } - if ihgs.shardId == MetachainShardId { - consensusGroupSize = ihgs.metaConsensusGroupSize - } else { - consensusGroupSize = ihgs.shardConsensusGroupSize - } + tempList := make([]Validator, 0) + cSize := ihgs.consensusGroupSize() - for startIdx := 0; startIdx < consensusGroupSize; startIdx++ { + for startIdx := 0; startIdx < cSize; startIdx++ { proposedIndex := ihgs.computeListIndex(startIdx, string(randomness)) checkedIndex := ihgs.checkIndex(proposedIndex, tempList) @@ -145,19 +139,13 @@ func (ihgs *indexHashedNodesCoordinator) GetSelectedPublicKeys(selection []byte) selectionLen := uint16(len(selection) * 8) // 8 selection bits in each byte shardEligibleLen := uint16(len(ihgs.nodesMap[ihgs.shardId])) invalidSelection := selectionLen < shardEligibleLen - var consensusGroupSize int if invalidSelection { return nil, ErrEligibleSelectionMismatch } - if ihgs.shardId == MetachainShardId { - consensusGroupSize = ihgs.shardConsensusGroupSize - } else { - consensusGroupSize = ihgs.metaConsensusGroupSize - } - - publicKeys = make([]string, consensusGroupSize) + 
cSize := ihgs.consensusGroupSize() + publicKeys = make([]string, cSize) cnt := 0 for i := uint16(0); i < shardEligibleLen; i++ { @@ -170,12 +158,12 @@ func (ihgs *indexHashedNodesCoordinator) GetSelectedPublicKeys(selection []byte) publicKeys[cnt] = string(ihgs.nodesMap[ihgs.shardId][i].PubKey()) cnt++ - if cnt > consensusGroupSize { + if cnt > cSize { return nil, ErrEligibleTooManySelections } } - if cnt < consensusGroupSize { + if cnt < cSize { return nil, ErrEligibleTooFewSelections } @@ -230,3 +218,12 @@ func (ihgs *indexHashedNodesCoordinator) validatorIsInList(v Validator, list []V return false } + +// consensusGroupSize returns the consensus group size for the node's shard +func (ihgs *indexHashedNodesCoordinator) consensusGroupSize() int { + if ihgs.shardId == MetachainShardId { + return ihgs.metaConsensusGroupSize + } + + return ihgs.shardConsensusGroupSize +} From 455bf9b83b771a3e1a944cc610a56daa209f9431 Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Fri, 2 Aug 2019 09:05:20 +0300 Subject: [PATCH 052/234] integrationTests: adaptations after merge --- integrationTests/mock/nodesCoordinatorMock.go | 70 +++++++++++++++++++ integrationTests/mock/validatorMock.go | 27 +++++++ .../interceptedBulkUnsignedTx_test.go | 10 ++- .../transaction/interceptedResolvedTx_test.go | 2 +- .../interceptedResolvedUnsignedTx_test.go | 16 ++++- integrationTests/testProcessorNode.go | 9 ++- .../metablock/shardHeaderInterceptor_test.go | 5 -- 7 files changed, 127 insertions(+), 12 deletions(-) create mode 100644 integrationTests/mock/nodesCoordinatorMock.go create mode 100644 integrationTests/mock/validatorMock.go diff --git a/integrationTests/mock/nodesCoordinatorMock.go b/integrationTests/mock/nodesCoordinatorMock.go new file mode 100644 index 00000000000..3f16e81f374 --- /dev/null +++ b/integrationTests/mock/nodesCoordinatorMock.go @@ -0,0 +1,70 @@ +package mock + +import ( + "math/big" + + "github.com/ElrondNetwork/elrond-go/sharding" +) + +type NodesCoordinatorMock struct { + ComputeValidatorsGroupCalled func([]byte) ([]sharding.Validator, error) + GetValidatorsPublicKeysCalled func(randomness []byte) ([]string, error) +} + +func (ncm NodesCoordinatorMock) ComputeValidatorsGroup( + randomness []byte, +) (validatorsGroup []sharding.Validator, err error) { + + if ncm.ComputeValidatorsGroupCalled != nil { + return ncm.ComputeValidatorsGroupCalled(randomness) + } + + list := []sharding.Validator{ + NewValidatorMock(big.NewInt(0), 0, []byte("A")), + NewValidatorMock(big.NewInt(0), 0, []byte("B")), + NewValidatorMock(big.NewInt(0), 0, []byte("C")), + NewValidatorMock(big.NewInt(0), 0, []byte("D")), + NewValidatorMock(big.NewInt(0), 0, []byte("E")), + NewValidatorMock(big.NewInt(0), 0, []byte("F")), + NewValidatorMock(big.NewInt(0), 0, []byte("G")), + NewValidatorMock(big.NewInt(0), 0, []byte("H")), + NewValidatorMock(big.NewInt(0), 0, []byte("I")), + } + + return list, nil +} + +func (ncm NodesCoordinatorMock) GetValidatorsPublicKeys(randomness []byte) ([]string, error) { + if ncm.GetValidatorsPublicKeysCalled != nil { + return ncm.GetValidatorsPublicKeysCalled(randomness) + } + + validators, err := ncm.ComputeValidatorsGroup(randomness) + if err != nil { + return nil, err + } + + pubKeys := make([]string, 0) + + for _, v := range validators { + pubKeys = append(pubKeys, string(v.PubKey())) + } + + return pubKeys, nil +} + +func (ncm NodesCoordinatorMock) ConsensusGroupSize() int { + panic("implement me") +} + +func (ncm NodesCoordinatorMock) SetNodesPerShards(map[uint32][]sharding.Validator) error { + 
return nil +} + +func (ncm NodesCoordinatorMock) SetConsensusGroupSize(int) error { + panic("implement me") +} + +func (ncm NodesCoordinatorMock) GetSelectedPublicKeys(selection []byte) (publicKeys []string, err error) { + panic("implement me") +} diff --git a/integrationTests/mock/validatorMock.go b/integrationTests/mock/validatorMock.go new file mode 100644 index 00000000000..cd22a51ce85 --- /dev/null +++ b/integrationTests/mock/validatorMock.go @@ -0,0 +1,27 @@ +package mock + +import ( + "math/big" +) + +type ValidatorMock struct { + stake *big.Int + rating int32 + pubKey []byte +} + +func NewValidatorMock(stake *big.Int, rating int32, pubKey []byte) *ValidatorMock { + return &ValidatorMock{stake: stake, rating: rating, pubKey: pubKey} +} + +func (vm *ValidatorMock) Stake() *big.Int { + return vm.stake +} + +func (vm *ValidatorMock) Rating() int32 { + return vm.rating +} + +func (vm *ValidatorMock) PubKey() []byte { + return vm.pubKey +} diff --git a/integrationTests/singleShard/transaction/interceptedBulkUnsignedTx_test.go b/integrationTests/singleShard/transaction/interceptedBulkUnsignedTx_test.go index 11390acc2ec..47ca49d6e5c 100644 --- a/integrationTests/singleShard/transaction/interceptedBulkUnsignedTx_test.go +++ b/integrationTests/singleShard/transaction/interceptedBulkUnsignedTx_test.go @@ -35,7 +35,15 @@ func TestNode_GenerateSendInterceptBulkUnsignedTransactionsWithMessenger(t *test addrConverter, _ := addressConverters.NewPlainAddressConverter(32, "0x") accntAdapter := createAccountsDB() shardCoordinator := &sharding.OneShardCoordinator{} - n, mes, sk, _ := createNetNode(dPool, accntAdapter, shardCoordinator) + nodesCoordinator, _ := sharding.NewIndexHashedNodesCoordinator( + 1, + 1, + testHasher, + 0, + 1, + make(map[uint32][]sharding.Validator), + ) + n, mes, sk, _ := createNetNode(dPool, accntAdapter, shardCoordinator, nodesCoordinator) _ = n.Start() defer func() { diff --git a/integrationTests/singleShard/transaction/interceptedResolvedTx_test.go b/integrationTests/singleShard/transaction/interceptedResolvedTx_test.go index 4baf2519055..ce6dbf08a56 100644 --- a/integrationTests/singleShard/transaction/interceptedResolvedTx_test.go +++ b/integrationTests/singleShard/transaction/interceptedResolvedTx_test.go @@ -27,7 +27,7 @@ func TestNode_RequestInterceptTransactionWithMessenger(t *testing.T) { nodesCoordinator, _ := sharding.NewIndexHashedNodesCoordinator( 1, 1, - hasher, + testHasher, 0, 1, make(map[uint32][]sharding.Validator), diff --git a/integrationTests/singleShard/transaction/interceptedResolvedUnsignedTx_test.go b/integrationTests/singleShard/transaction/interceptedResolvedUnsignedTx_test.go index c549e5c6727..b5c51e27125 100644 --- a/integrationTests/singleShard/transaction/interceptedResolvedUnsignedTx_test.go +++ b/integrationTests/singleShard/transaction/interceptedResolvedUnsignedTx_test.go @@ -22,18 +22,30 @@ func TestNode_RequestInterceptUnsignedTransactionWithMessenger(t *testing.T) { dPoolRequester := createTestDataPool() dPoolResolver := createTestDataPool() shardCoordinator := &sharding.OneShardCoordinator{} + nodesCoordinator, _ := sharding.NewIndexHashedNodesCoordinator( + 1, + 1, + testHasher, + 0, + 1, + make(map[uint32][]sharding.Validator), + ) fmt.Println("Requester:") nRequester, mesRequester, sk1, resolversFinder := createNetNode( dPoolRequester, createAccountsDB(), - shardCoordinator) + shardCoordinator, + nodesCoordinator, + ) fmt.Println("Resolver:") nResolver, mesResolver, _, _ := createNetNode( dPoolResolver, createAccountsDB(), - 
shardCoordinator) + shardCoordinator, + nodesCoordinator, + ) _ = nRequester.Start() _ = nResolver.Start() diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index 53ca0392b2b..730c60478f1 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -43,7 +43,7 @@ import ( "github.com/ElrondNetwork/elrond-go/process/smartContract/hooks" "github.com/ElrondNetwork/elrond-go/process/transaction" "github.com/ElrondNetwork/elrond-go/sharding" - vmcommon "github.com/ElrondNetwork/elrond-vm-common" + "github.com/ElrondNetwork/elrond-vm-common" "github.com/pkg/errors" ) @@ -66,6 +66,7 @@ var TestUint64Converter = uint64ByteSlice.NewBigEndianConverter() // with all its fields exported type TestProcessorNode struct { ShardCoordinator sharding.Coordinator + NodesCoordinator sharding.NodesCoordinator Messenger p2p.Messenger SingleSigner crypto.SingleSigner @@ -116,11 +117,13 @@ type TestProcessorNode struct { // NewTestProcessorNode returns a new TestProcessorNode instance func NewTestProcessorNode(maxShards uint32, nodeShardId uint32, txSignPrivKeyShardId uint32, initialNodeAddr string) *TestProcessorNode { shardCoordinator, _ := sharding.NewMultiShardCoordinator(maxShards, nodeShardId) + nodesCoordinator := mock.NodesCoordinatorMock{} messenger := CreateMessengerWithKadDht(context.Background(), initialNodeAddr) tpn := &TestProcessorNode{ ShardCoordinator: shardCoordinator, Messenger: messenger, + NodesCoordinator: nodesCoordinator, } tpn.initCrypto(txSignPrivKeyShardId) @@ -202,13 +205,13 @@ func (tpn *TestProcessorNode) initInterceptors() { if tpn.ShardCoordinator.SelfId() == sharding.MetachainShardId { interceptorContainerFactory, _ := metaProcess.NewInterceptorsContainerFactory( tpn.ShardCoordinator, + tpn.NodesCoordinator, tpn.Messenger, tpn.Storage, TestMarshalizer, TestHasher, TestMultiSig, tpn.MetaDataPool, - &mock.ChronologyValidatorMock{}, ) tpn.InterceptorsContainer, err = interceptorContainerFactory.Create() @@ -218,6 +221,7 @@ func (tpn *TestProcessorNode) initInterceptors() { } else { interceptorContainerFactory, _ := shard.NewInterceptorsContainerFactory( tpn.ShardCoordinator, + tpn.NodesCoordinator, tpn.Messenger, tpn.Storage, TestMarshalizer, @@ -227,7 +231,6 @@ func (tpn *TestProcessorNode) initInterceptors() { TestMultiSig, tpn.ShardDataPool, TestAddressConverter, - &mock.ChronologyValidatorMock{}, ) tpn.InterceptorsContainer, err = interceptorContainerFactory.Create() diff --git a/process/metablock/shardHeaderInterceptor_test.go b/process/metablock/shardHeaderInterceptor_test.go index c1747a8e181..d27bbc31473 100644 --- a/process/metablock/shardHeaderInterceptor_test.go +++ b/process/metablock/shardHeaderInterceptor_test.go @@ -170,11 +170,6 @@ func TestShardHeaderInterceptor_ProcessReceivedMessageTestHdrNonces(t *testing.T testedNonce := uint64(67) headers := &mock.CacherStub{} multisigner := mock.NewMultiSigner() - chronologyValidator := &mock.ChronologyValidatorStub{ - ValidateReceivedBlockCalled: func(shardID uint32, epoch uint32, nonce uint64, round uint32) error { - return nil - }, - } storer := &mock.StorerStub{} storer.HasCalled = func(key []byte) error { return errors.New("key not found") From 41ec714ab15a51bbf41b3e1c948061b65c94a06e Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Fri, 2 Aug 2019 14:40:40 +0300 Subject: [PATCH 053/234] integrationTests: adapt refactored integrationTests to nodeCoordinator --- integrationTests/mock/nodesCoordinatorMock.go | 70 ------------------ 
integrationTests/mock/validatorMock.go | 27 ------- .../block/executingMiniblocksSc_test.go | 74 ++++++++++++++++--- .../block/executingMiniblocksSc_test.go | 9 ++- integrationTests/testInitializer.go | 59 +++++++++++++++ integrationTests/testProcessorNode.go | 29 +++++++- 6 files changed, 159 insertions(+), 109 deletions(-) delete mode 100644 integrationTests/mock/nodesCoordinatorMock.go delete mode 100644 integrationTests/mock/validatorMock.go diff --git a/integrationTests/mock/nodesCoordinatorMock.go b/integrationTests/mock/nodesCoordinatorMock.go deleted file mode 100644 index 3f16e81f374..00000000000 --- a/integrationTests/mock/nodesCoordinatorMock.go +++ /dev/null @@ -1,70 +0,0 @@ -package mock - -import ( - "math/big" - - "github.com/ElrondNetwork/elrond-go/sharding" -) - -type NodesCoordinatorMock struct { - ComputeValidatorsGroupCalled func([]byte) ([]sharding.Validator, error) - GetValidatorsPublicKeysCalled func(randomness []byte) ([]string, error) -} - -func (ncm NodesCoordinatorMock) ComputeValidatorsGroup( - randomness []byte, -) (validatorsGroup []sharding.Validator, err error) { - - if ncm.ComputeValidatorsGroupCalled != nil { - return ncm.ComputeValidatorsGroupCalled(randomness) - } - - list := []sharding.Validator{ - NewValidatorMock(big.NewInt(0), 0, []byte("A")), - NewValidatorMock(big.NewInt(0), 0, []byte("B")), - NewValidatorMock(big.NewInt(0), 0, []byte("C")), - NewValidatorMock(big.NewInt(0), 0, []byte("D")), - NewValidatorMock(big.NewInt(0), 0, []byte("E")), - NewValidatorMock(big.NewInt(0), 0, []byte("F")), - NewValidatorMock(big.NewInt(0), 0, []byte("G")), - NewValidatorMock(big.NewInt(0), 0, []byte("H")), - NewValidatorMock(big.NewInt(0), 0, []byte("I")), - } - - return list, nil -} - -func (ncm NodesCoordinatorMock) GetValidatorsPublicKeys(randomness []byte) ([]string, error) { - if ncm.GetValidatorsPublicKeysCalled != nil { - return ncm.GetValidatorsPublicKeysCalled(randomness) - } - - validators, err := ncm.ComputeValidatorsGroup(randomness) - if err != nil { - return nil, err - } - - pubKeys := make([]string, 0) - - for _, v := range validators { - pubKeys = append(pubKeys, string(v.PubKey())) - } - - return pubKeys, nil -} - -func (ncm NodesCoordinatorMock) ConsensusGroupSize() int { - panic("implement me") -} - -func (ncm NodesCoordinatorMock) SetNodesPerShards(map[uint32][]sharding.Validator) error { - return nil -} - -func (ncm NodesCoordinatorMock) SetConsensusGroupSize(int) error { - panic("implement me") -} - -func (ncm NodesCoordinatorMock) GetSelectedPublicKeys(selection []byte) (publicKeys []string, err error) { - panic("implement me") -} diff --git a/integrationTests/mock/validatorMock.go b/integrationTests/mock/validatorMock.go deleted file mode 100644 index cd22a51ce85..00000000000 --- a/integrationTests/mock/validatorMock.go +++ /dev/null @@ -1,27 +0,0 @@ -package mock - -import ( - "math/big" -) - -type ValidatorMock struct { - stake *big.Int - rating int32 - pubKey []byte -} - -func NewValidatorMock(stake *big.Int, rating int32, pubKey []byte) *ValidatorMock { - return &ValidatorMock{stake: stake, rating: rating, pubKey: pubKey} -} - -func (vm *ValidatorMock) Stake() *big.Int { - return vm.stake -} - -func (vm *ValidatorMock) Rating() int32 { - return vm.rating -} - -func (vm *ValidatorMock) PubKey() []byte { - return vm.pubKey -} diff --git a/integrationTests/multiShard/block/executingMiniblocksSc_test.go b/integrationTests/multiShard/block/executingMiniblocksSc_test.go index 41991f418f8..99a5775a892 100644 --- 
a/integrationTests/multiShard/block/executingMiniblocksSc_test.go +++ b/integrationTests/multiShard/block/executingMiniblocksSc_test.go @@ -38,12 +38,31 @@ func TestProcessWithScTxsTopUpAndWithdrawOnlyProposers(t *testing.T) { _ = advertiser.Bootstrap() advertiserAddr := integrationTests.GetConnectableAddress(advertiser) - nodeShard0 := integrationTests.NewTestProcessorNode(maxShards, 0, 0, advertiserAddr) - nodeShard1 := integrationTests.NewTestProcessorNode(maxShards, 1, 1, advertiserAddr) + validatorsKeysMap := integrationTests.CreateValidatorKeys(1, 1, int(maxShards)) + nodeShard0 := integrationTests.NewTestProcessorNode( + maxShards, + 0, + 0, + advertiserAddr, + validatorsKeysMap, + ) + nodeShard1 := integrationTests.NewTestProcessorNode( + maxShards, + 1, + 1, + advertiserAddr, + validatorsKeysMap, + ) hardCodedSk, _ := hex.DecodeString("5561d28b0d89fa425bbbf9e49a018b5d1e4a462c03d2efce60faf9ddece2af06") hardCodedScResultingAddress, _ := hex.DecodeString("000000000000000000005fed9c659422cd8429ce92f8973bba2a9fb51e0eb3a1") nodeShard1.LoadTxSignSkBytes(hardCodedSk) - nodeMeta := integrationTests.NewTestProcessorNode(maxShards, sharding.MetachainShardId, 0, advertiserAddr) + nodeMeta := integrationTests.NewTestProcessorNode( + maxShards, + sharding.MetachainShardId, + 0, + advertiserAddr, + validatorsKeysMap, + ) nodes := []*integrationTests.TestProcessorNode{nodeShard0, nodeShard1, nodeMeta} idxNodeShard0 := 0 @@ -135,17 +154,54 @@ func TestProcessWithScTxsJoinAndRewardTwoNodesInShard(t *testing.T) { _ = advertiser.Bootstrap() advertiserAddr := integrationTests.GetConnectableAddress(advertiser) - nodeProposerShard0 := integrationTests.NewTestProcessorNode(maxShards, 0, 0, advertiserAddr) - nodeValidatorShard0 := integrationTests.NewTestProcessorNode(maxShards, 0, 0, advertiserAddr) + validatorsKeysMap := integrationTests.CreateValidatorKeys(2, 2, int(maxShards)) + nodeProposerShard0 := integrationTests.NewTestProcessorNode( + maxShards, + 0, + 0, + advertiserAddr, + validatorsKeysMap, + ) + nodeValidatorShard0 := integrationTests.NewTestProcessorNode( + maxShards, + 0, + 0, + advertiserAddr, + validatorsKeysMap, + ) - nodeProposerShard1 := integrationTests.NewTestProcessorNode(maxShards, 1, 1, advertiserAddr) + nodeProposerShard1 := integrationTests.NewTestProcessorNode( + maxShards, + 1, + 1, + advertiserAddr, + validatorsKeysMap, + ) hardCodedSk, _ := hex.DecodeString("5561d28b0d89fa425bbbf9e49a018b5d1e4a462c03d2efce60faf9ddece2af06") hardCodedScResultingAddress, _ := hex.DecodeString("000000000000000000005fed9c659422cd8429ce92f8973bba2a9fb51e0eb3a1") nodeProposerShard1.LoadTxSignSkBytes(hardCodedSk) - nodeValidatorShard1 := integrationTests.NewTestProcessorNode(maxShards, 1, 1, advertiserAddr) + nodeValidatorShard1 := integrationTests.NewTestProcessorNode( + maxShards, + 1, + 1, + advertiserAddr, + validatorsKeysMap, + ) - nodeProposerMeta := integrationTests.NewTestProcessorNode(maxShards, sharding.MetachainShardId, 0, advertiserAddr) - nodeValidatorMeta := integrationTests.NewTestProcessorNode(maxShards, sharding.MetachainShardId, 0, advertiserAddr) + nodeProposerMeta := integrationTests.NewTestProcessorNode( + maxShards, + sharding.MetachainShardId, + 0, + advertiserAddr, + validatorsKeysMap, + ) + nodeValidatorMeta := integrationTests.NewTestProcessorNode( + maxShards, + sharding.MetachainShardId, + 0, + advertiserAddr, + validatorsKeysMap, + ) nodes := []*integrationTests.TestProcessorNode{ nodeProposerShard0, diff --git 
a/integrationTests/singleShard/block/executingMiniblocksSc_test.go b/integrationTests/singleShard/block/executingMiniblocksSc_test.go index 61359c9529c..c183d2704f5 100644 --- a/integrationTests/singleShard/block/executingMiniblocksSc_test.go +++ b/integrationTests/singleShard/block/executingMiniblocksSc_test.go @@ -36,9 +36,16 @@ func TestShouldProcessWithScTxsJoinAndRewardTheOwner(t *testing.T) { _ = advertiser.Bootstrap() advertiserAddr := integrationTests.GetConnectableAddress(advertiser) + validatorsKeysMap := integrationTests.CreateValidatorKeys(numOfNodes, 1, int(maxShards)) nodes := make([]*integrationTests.TestProcessorNode, numOfNodes) for i := 0; i < numOfNodes; i++ { - nodes[i] = integrationTests.NewTestProcessorNode(maxShards, 0, 0, advertiserAddr) + nodes[i] = integrationTests.NewTestProcessorNode( + maxShards, + 0, + 0, + advertiserAddr, + validatorsKeysMap, + ) } idxProposer := 0 diff --git a/integrationTests/testInitializer.go b/integrationTests/testInitializer.go index 8ebe6c98374..263d6f6e821 100644 --- a/integrationTests/testInitializer.go +++ b/integrationTests/testInitializer.go @@ -37,6 +37,8 @@ import ( "github.com/ElrondNetwork/elrond-vm/iele/elrond/node/endpoint" "github.com/btcsuite/btcd/btcec" libp2pCrypto "github.com/libp2p/go-libp2p-core/crypto" + "github.com/ElrondNetwork/elrond-go/crypto/signing/kyber" + "github.com/ElrondNetwork/elrond-go/crypto/signing" ) // GetConnectableAddress returns a non circuit, non windows default connectable address for provided messenger @@ -334,3 +336,60 @@ func mintAddressesFromSameShard(nodes []*TestProcessorNode, targetNodeIdx int, v MintAddress(targetNode.AccntState, n.PkTxSignBytes, value) } } + +// CreateValidatorKeys +func CreateValidatorKeys(nodesPerShard int, nbMetaNodes int, nbShards int) map[uint32][]*TestKeyPair { + suite := kyber.NewBlakeSHA256Ed25519() + keyGen := signing.NewKeyGenerator(suite) + + keysMap := make(map[uint32][]*TestKeyPair) + keyPairs := make([]*TestKeyPair, nodesPerShard) + for shardId := 0; shardId < nbShards; shardId++ { + for n := 0; n < nodesPerShard; n++ { + kp := &TestKeyPair{} + kp.sk, kp.pk = keyGen.GeneratePair() + keyPairs[n] = kp + } + keysMap[uint32(shardId)] = keyPairs + } + + keyPairs = make([]*TestKeyPair, nbMetaNodes) + for n := 0; n < nbMetaNodes; n++ { + kp := &TestKeyPair{} + kp.sk, kp.pk = keyGen.GeneratePair() + keyPairs[n] = kp + } + keysMap[sharding.MetachainShardId] = keyPairs + + return keysMap +} + +func pubKeysMapFromKeysMap(keyPairMap map[uint32][]*TestKeyPair) map[uint32][]string { + keysMap := make(map[uint32][]string, 0) + + for shardId, pairList := range keyPairMap { + shardKeys := make([]string, len(pairList)) + for i, pair := range pairList { + b, _ := pair.pk.ToByteArray() + shardKeys[i] = string(b) + } + keysMap[shardId] = shardKeys + } + + return keysMap +} + +func genValidatorsFromPubKeys(pubKeysMap map[uint32][]string) map[uint32][]sharding.Validator { + validatorsMap := make(map[uint32][]sharding.Validator) + + for shardId, shardNodesPks := range pubKeysMap { + shardValidators := make([]sharding.Validator, 0) + for i := 0; i < len(shardNodesPks); i++ { + v, _ := sharding.NewValidator(big.NewInt(0), 1, []byte(shardNodesPks[i])) + shardValidators = append(shardValidators, v) + } + validatorsMap[shardId] = shardValidators + } + + return validatorsMap +} diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index 730c60478f1..b86e86e378b 100644 --- a/integrationTests/testProcessorNode.go +++ 
b/integrationTests/testProcessorNode.go @@ -62,6 +62,12 @@ var TestMultiSig = mock.NewMultiSigner(1) // TestUint64Converter represents an uint64 to byte slice converter var TestUint64Converter = uint64ByteSlice.NewBigEndianConverter() +// TestKeyPair holds a pair of private/public keys +type TestKeyPair struct { + sk crypto.PrivateKey + pk crypto.PublicKey +} + // TestProcessorNode represents a container type of class used in integration tests // with all its fields exported type TestProcessorNode struct { @@ -115,9 +121,28 @@ type TestProcessorNode struct { } // NewTestProcessorNode returns a new TestProcessorNode instance -func NewTestProcessorNode(maxShards uint32, nodeShardId uint32, txSignPrivKeyShardId uint32, initialNodeAddr string) *TestProcessorNode { +func NewTestProcessorNode( + maxShards uint32, + nodeShardId uint32, + txSignPrivKeyShardId uint32, + initialNodeAddr string, + validatorsKeys map[uint32][]*TestKeyPair, +) *TestProcessorNode { + + pubKeysMap := pubKeysMapFromKeysMap(validatorsKeys) + validatorsMap := genValidatorsFromPubKeys(pubKeysMap) + + shardConsensusSize := len(validatorsMap[0]) + metaConsensusSize := len(validatorsMap[sharding.MetachainShardId]) shardCoordinator, _ := sharding.NewMultiShardCoordinator(maxShards, nodeShardId) - nodesCoordinator := mock.NodesCoordinatorMock{} + nodesCoordinator, _ := sharding.NewIndexHashedNodesCoordinator( + shardConsensusSize, + metaConsensusSize, + TestHasher, + nodeShardId, + maxShards, + validatorsMap, + ) messenger := CreateMessengerWithKadDht(context.Background(), initialNodeAddr) tpn := &TestProcessorNode{ From 59f9cffc62a34b7b71714d10708eba9335e159f1 Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Mon, 5 Aug 2019 10:32:52 +0300 Subject: [PATCH 054/234] integrationTests: create nodes coordinato mock --- integrationTests/mock/nodesCoordinatorMock.go | 58 +++++++++++++++++++ .../block/executingMiniblocksSc_test.go | 12 ---- .../block/executingMiniblocksSc_test.go | 2 - integrationTests/testProcessorNode.go | 17 +----- 4 files changed, 60 insertions(+), 29 deletions(-) create mode 100644 integrationTests/mock/nodesCoordinatorMock.go diff --git a/integrationTests/mock/nodesCoordinatorMock.go b/integrationTests/mock/nodesCoordinatorMock.go new file mode 100644 index 00000000000..328c50102af --- /dev/null +++ b/integrationTests/mock/nodesCoordinatorMock.go @@ -0,0 +1,58 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go/sharding" +) + +type NodesCoordinatorMock struct { + ComputeValidatorsGroupCalled func([]byte) ([]sharding.Validator, error) + GetValidatorsPublicKeysCalled func(randomness []byte) ([]string, error) +} + +func (ncm NodesCoordinatorMock) ComputeValidatorsGroup( + randomness []byte, +) (validatorsGroup []sharding.Validator, err error) { + + if ncm.ComputeValidatorsGroupCalled != nil { + return ncm.ComputeValidatorsGroupCalled(randomness) + } + + list := []sharding.Validator{} + + return list, nil +} + +func (ncm NodesCoordinatorMock) GetValidatorsPublicKeys(randomness []byte) ([]string, error) { + if ncm.GetValidatorsPublicKeysCalled != nil { + return ncm.GetValidatorsPublicKeysCalled(randomness) + } + + validators, err := ncm.ComputeValidatorsGroup(randomness) + if err != nil { + return nil, err + } + + pubKeys := make([]string, 0) + + for _, v := range validators { + pubKeys = append(pubKeys, string(v.PubKey())) + } + + return pubKeys, nil +} + +func (ncm NodesCoordinatorMock) ConsensusGroupSize() int { + panic("implement me") +} + +func (ncm NodesCoordinatorMock) 
SetNodesPerShards(map[uint32][]sharding.Validator) error { + return nil +} + +func (ncm NodesCoordinatorMock) SetConsensusGroupSize(int) error { + panic("implement me") +} + +func (ncm NodesCoordinatorMock) GetSelectedPublicKeys(selection []byte) (publicKeys []string, err error) { + panic("implement me") +} diff --git a/integrationTests/multiShard/block/executingMiniblocksSc_test.go b/integrationTests/multiShard/block/executingMiniblocksSc_test.go index 99a5775a892..9dd8f711872 100644 --- a/integrationTests/multiShard/block/executingMiniblocksSc_test.go +++ b/integrationTests/multiShard/block/executingMiniblocksSc_test.go @@ -38,20 +38,17 @@ func TestProcessWithScTxsTopUpAndWithdrawOnlyProposers(t *testing.T) { _ = advertiser.Bootstrap() advertiserAddr := integrationTests.GetConnectableAddress(advertiser) - validatorsKeysMap := integrationTests.CreateValidatorKeys(1, 1, int(maxShards)) nodeShard0 := integrationTests.NewTestProcessorNode( maxShards, 0, 0, advertiserAddr, - validatorsKeysMap, ) nodeShard1 := integrationTests.NewTestProcessorNode( maxShards, 1, 1, advertiserAddr, - validatorsKeysMap, ) hardCodedSk, _ := hex.DecodeString("5561d28b0d89fa425bbbf9e49a018b5d1e4a462c03d2efce60faf9ddece2af06") hardCodedScResultingAddress, _ := hex.DecodeString("000000000000000000005fed9c659422cd8429ce92f8973bba2a9fb51e0eb3a1") @@ -61,7 +58,6 @@ func TestProcessWithScTxsTopUpAndWithdrawOnlyProposers(t *testing.T) { sharding.MetachainShardId, 0, advertiserAddr, - validatorsKeysMap, ) nodes := []*integrationTests.TestProcessorNode{nodeShard0, nodeShard1, nodeMeta} @@ -153,21 +149,17 @@ func TestProcessWithScTxsJoinAndRewardTwoNodesInShard(t *testing.T) { advertiser := integrationTests.CreateMessengerWithKadDht(context.Background(), "") _ = advertiser.Bootstrap() advertiserAddr := integrationTests.GetConnectableAddress(advertiser) - - validatorsKeysMap := integrationTests.CreateValidatorKeys(2, 2, int(maxShards)) nodeProposerShard0 := integrationTests.NewTestProcessorNode( maxShards, 0, 0, advertiserAddr, - validatorsKeysMap, ) nodeValidatorShard0 := integrationTests.NewTestProcessorNode( maxShards, 0, 0, advertiserAddr, - validatorsKeysMap, ) nodeProposerShard1 := integrationTests.NewTestProcessorNode( @@ -175,7 +167,6 @@ func TestProcessWithScTxsJoinAndRewardTwoNodesInShard(t *testing.T) { 1, 1, advertiserAddr, - validatorsKeysMap, ) hardCodedSk, _ := hex.DecodeString("5561d28b0d89fa425bbbf9e49a018b5d1e4a462c03d2efce60faf9ddece2af06") hardCodedScResultingAddress, _ := hex.DecodeString("000000000000000000005fed9c659422cd8429ce92f8973bba2a9fb51e0eb3a1") @@ -185,7 +176,6 @@ func TestProcessWithScTxsJoinAndRewardTwoNodesInShard(t *testing.T) { 1, 1, advertiserAddr, - validatorsKeysMap, ) nodeProposerMeta := integrationTests.NewTestProcessorNode( @@ -193,14 +183,12 @@ func TestProcessWithScTxsJoinAndRewardTwoNodesInShard(t *testing.T) { sharding.MetachainShardId, 0, advertiserAddr, - validatorsKeysMap, ) nodeValidatorMeta := integrationTests.NewTestProcessorNode( maxShards, sharding.MetachainShardId, 0, advertiserAddr, - validatorsKeysMap, ) nodes := []*integrationTests.TestProcessorNode{ diff --git a/integrationTests/singleShard/block/executingMiniblocksSc_test.go b/integrationTests/singleShard/block/executingMiniblocksSc_test.go index c183d2704f5..3938a349cd3 100644 --- a/integrationTests/singleShard/block/executingMiniblocksSc_test.go +++ b/integrationTests/singleShard/block/executingMiniblocksSc_test.go @@ -36,7 +36,6 @@ func TestShouldProcessWithScTxsJoinAndRewardTheOwner(t *testing.T) { _ = 
advertiser.Bootstrap() advertiserAddr := integrationTests.GetConnectableAddress(advertiser) - validatorsKeysMap := integrationTests.CreateValidatorKeys(numOfNodes, 1, int(maxShards)) nodes := make([]*integrationTests.TestProcessorNode, numOfNodes) for i := 0; i < numOfNodes; i++ { nodes[i] = integrationTests.NewTestProcessorNode( @@ -44,7 +43,6 @@ func TestShouldProcessWithScTxsJoinAndRewardTheOwner(t *testing.T) { 0, 0, advertiserAddr, - validatorsKeysMap, ) } diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index b86e86e378b..71181121cd4 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -126,23 +126,10 @@ func NewTestProcessorNode( nodeShardId uint32, txSignPrivKeyShardId uint32, initialNodeAddr string, - validatorsKeys map[uint32][]*TestKeyPair, ) *TestProcessorNode { - pubKeysMap := pubKeysMapFromKeysMap(validatorsKeys) - validatorsMap := genValidatorsFromPubKeys(pubKeysMap) - - shardConsensusSize := len(validatorsMap[0]) - metaConsensusSize := len(validatorsMap[sharding.MetachainShardId]) shardCoordinator, _ := sharding.NewMultiShardCoordinator(maxShards, nodeShardId) - nodesCoordinator, _ := sharding.NewIndexHashedNodesCoordinator( - shardConsensusSize, - metaConsensusSize, - TestHasher, - nodeShardId, - maxShards, - validatorsMap, - ) + nodesCoordinator := &mock.NodesCoordinatorMock{} messenger := CreateMessengerWithKadDht(context.Background(), initialNodeAddr) tpn := &TestProcessorNode{ @@ -563,7 +550,7 @@ func (tpn *TestProcessorNode) ProposeBlock(round uint64) (data.BodyHandler, data blockHeader.SetRound(round) blockHeader.SetNonce(uint64(round)) - blockHeader.SetPubKeysBitmap(make([]byte, 0)) + blockHeader.SetPubKeysBitmap([]byte{1}) sig, _ := TestMultiSig.AggregateSigs(nil) blockHeader.SetSignature(sig) currHdr := tpn.BlockChain.GetCurrentBlockHeader() From 638819a1fd0502096eae6de242e4750f466e0a9c Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Mon, 5 Aug 2019 11:55:48 +0300 Subject: [PATCH 055/234] integrationTests: sort imports --- integrationTests/testInitializer.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/integrationTests/testInitializer.go b/integrationTests/testInitializer.go index 263d6f6e821..25b52911b0d 100644 --- a/integrationTests/testInitializer.go +++ b/integrationTests/testInitializer.go @@ -12,6 +12,8 @@ import ( "sync/atomic" "time" + "github.com/ElrondNetwork/elrond-go/crypto/signing" + "github.com/ElrondNetwork/elrond-go/crypto/signing/kyber" "github.com/ElrondNetwork/elrond-go/data" dataBlock "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/data/blockchain" @@ -33,12 +35,10 @@ import ( "github.com/ElrondNetwork/elrond-go/storage" "github.com/ElrondNetwork/elrond-go/storage/memorydb" "github.com/ElrondNetwork/elrond-go/storage/storageUnit" - "github.com/ElrondNetwork/elrond-vm-common" + vmcommon "github.com/ElrondNetwork/elrond-vm-common" "github.com/ElrondNetwork/elrond-vm/iele/elrond/node/endpoint" "github.com/btcsuite/btcd/btcec" libp2pCrypto "github.com/libp2p/go-libp2p-core/crypto" - "github.com/ElrondNetwork/elrond-go/crypto/signing/kyber" - "github.com/ElrondNetwork/elrond-go/crypto/signing" ) // GetConnectableAddress returns a non circuit, non windows default connectable address for provided messenger From c4079c2a5ad64b2c9c7657cb966f8228669e12dd Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Mon, 12 Aug 2019 15:12:17 +0300 Subject: [PATCH 056/234] data: rewards transaction format --- 
data/rewardTx/capnp/schema.capnp | 19 ++ data/rewardTx/capnp/schema.capnp.go | 271 ++++++++++++++++++++++++++++ data/rewardTx/rewardTx.go | 119 ++++++++++++ data/rewardTx/rewardTx_test.go | 68 +++++++ 4 files changed, 477 insertions(+) create mode 100644 data/rewardTx/capnp/schema.capnp create mode 100644 data/rewardTx/capnp/schema.capnp.go create mode 100644 data/rewardTx/rewardTx.go create mode 100644 data/rewardTx/rewardTx_test.go diff --git a/data/rewardTx/capnp/schema.capnp b/data/rewardTx/capnp/schema.capnp new file mode 100644 index 00000000000..8b963360616 --- /dev/null +++ b/data/rewardTx/capnp/schema.capnp @@ -0,0 +1,19 @@ +@0xa6e50837d4563fc2; +using Go = import "/go.capnp"; +$Go.package("capnp"); +$Go.import("_"); + +struct RewardTxCapn { + round @0: UInt64; + epoch @1: UInt32; + value @2: Data; + rcvAddr @3: Data; + shardId @4: UInt32; +} + +##compile with: + +## +## +## capnpc -I$GOPATH/src/github.com/glycerine/go-capnproto -ogo $GOPATH/src/github.com/ElrondNetwork/elrond-go/data/rewardTx/capnp/schema.capnp + diff --git a/data/rewardTx/capnp/schema.capnp.go b/data/rewardTx/capnp/schema.capnp.go new file mode 100644 index 00000000000..f9e5247b348 --- /dev/null +++ b/data/rewardTx/capnp/schema.capnp.go @@ -0,0 +1,271 @@ +package capnp + +// AUTO GENERATED - DO NOT EDIT + +import ( + "bufio" + "bytes" + "encoding/json" + C "github.com/glycerine/go-capnproto" + "io" +) + +type RewardTxCapn C.Struct + +func NewRewardTxCapn(s *C.Segment) RewardTxCapn { return RewardTxCapn(s.NewStruct(16, 2)) } +func NewRootRewardTxCapn(s *C.Segment) RewardTxCapn { return RewardTxCapn(s.NewRootStruct(16, 2)) } +func AutoNewRewardTxCapn(s *C.Segment) RewardTxCapn { return RewardTxCapn(s.NewStructAR(16, 2)) } +func ReadRootRewardTxCapn(s *C.Segment) RewardTxCapn { return RewardTxCapn(s.Root(0).ToStruct()) } +func (s RewardTxCapn) Round() uint64 { return C.Struct(s).Get64(0) } +func (s RewardTxCapn) SetRound(v uint64) { C.Struct(s).Set64(0, v) } +func (s RewardTxCapn) Epoch() uint32 { return C.Struct(s).Get32(8) } +func (s RewardTxCapn) SetEpoch(v uint32) { C.Struct(s).Set32(8, v) } +func (s RewardTxCapn) Value() []byte { return C.Struct(s).GetObject(0).ToData() } +func (s RewardTxCapn) SetValue(v []byte) { C.Struct(s).SetObject(0, s.Segment.NewData(v)) } +func (s RewardTxCapn) RcvAddr() []byte { return C.Struct(s).GetObject(1).ToData() } +func (s RewardTxCapn) SetRcvAddr(v []byte) { C.Struct(s).SetObject(1, s.Segment.NewData(v)) } +func (s RewardTxCapn) ShardId() uint32 { return C.Struct(s).Get32(12) } +func (s RewardTxCapn) SetShardId(v uint32) { C.Struct(s).Set32(12, v) } +func (s RewardTxCapn) WriteJSON(w io.Writer) error { + b := bufio.NewWriter(w) + var err error + var buf []byte + _ = buf + err = b.WriteByte('{') + if err != nil { + return err + } + _, err = b.WriteString("\"round\":") + if err != nil { + return err + } + { + s := s.Round() + buf, err = json.Marshal(s) + if err != nil { + return err + } + _, err = b.Write(buf) + if err != nil { + return err + } + } + err = b.WriteByte(',') + if err != nil { + return err + } + _, err = b.WriteString("\"epoch\":") + if err != nil { + return err + } + { + s := s.Epoch() + buf, err = json.Marshal(s) + if err != nil { + return err + } + _, err = b.Write(buf) + if err != nil { + return err + } + } + err = b.WriteByte(',') + if err != nil { + return err + } + _, err = b.WriteString("\"value\":") + if err != nil { + return err + } + { + s := s.Value() + buf, err = json.Marshal(s) + if err != nil { + return err + } + _, err = b.Write(buf) + if err != nil 
{ + return err + } + } + err = b.WriteByte(',') + if err != nil { + return err + } + _, err = b.WriteString("\"rcvAddr\":") + if err != nil { + return err + } + { + s := s.RcvAddr() + buf, err = json.Marshal(s) + if err != nil { + return err + } + _, err = b.Write(buf) + if err != nil { + return err + } + } + err = b.WriteByte(',') + if err != nil { + return err + } + _, err = b.WriteString("\"shardId\":") + if err != nil { + return err + } + { + s := s.ShardId() + buf, err = json.Marshal(s) + if err != nil { + return err + } + _, err = b.Write(buf) + if err != nil { + return err + } + } + err = b.WriteByte('}') + if err != nil { + return err + } + err = b.Flush() + return err +} +func (s RewardTxCapn) MarshalJSON() ([]byte, error) { + b := bytes.Buffer{} + err := s.WriteJSON(&b) + return b.Bytes(), err +} +func (s RewardTxCapn) WriteCapLit(w io.Writer) error { + b := bufio.NewWriter(w) + var err error + var buf []byte + _ = buf + err = b.WriteByte('(') + if err != nil { + return err + } + _, err = b.WriteString("round = ") + if err != nil { + return err + } + { + s := s.Round() + buf, err = json.Marshal(s) + if err != nil { + return err + } + _, err = b.Write(buf) + if err != nil { + return err + } + } + _, err = b.WriteString(", ") + if err != nil { + return err + } + _, err = b.WriteString("epoch = ") + if err != nil { + return err + } + { + s := s.Epoch() + buf, err = json.Marshal(s) + if err != nil { + return err + } + _, err = b.Write(buf) + if err != nil { + return err + } + } + _, err = b.WriteString(", ") + if err != nil { + return err + } + _, err = b.WriteString("value = ") + if err != nil { + return err + } + { + s := s.Value() + buf, err = json.Marshal(s) + if err != nil { + return err + } + _, err = b.Write(buf) + if err != nil { + return err + } + } + _, err = b.WriteString(", ") + if err != nil { + return err + } + _, err = b.WriteString("rcvAddr = ") + if err != nil { + return err + } + { + s := s.RcvAddr() + buf, err = json.Marshal(s) + if err != nil { + return err + } + _, err = b.Write(buf) + if err != nil { + return err + } + } + _, err = b.WriteString(", ") + if err != nil { + return err + } + _, err = b.WriteString("shardId = ") + if err != nil { + return err + } + { + s := s.ShardId() + buf, err = json.Marshal(s) + if err != nil { + return err + } + _, err = b.Write(buf) + if err != nil { + return err + } + } + err = b.WriteByte(')') + if err != nil { + return err + } + err = b.Flush() + return err +} +func (s RewardTxCapn) MarshalCapLit() ([]byte, error) { + b := bytes.Buffer{} + err := s.WriteCapLit(&b) + return b.Bytes(), err +} + +type RewardTxCapn_List C.PointerList + +func NewRewardTxCapnList(s *C.Segment, sz int) RewardTxCapn_List { + return RewardTxCapn_List(s.NewCompositeList(16, 2, sz)) +} +func (s RewardTxCapn_List) Len() int { return C.PointerList(s).Len() } +func (s RewardTxCapn_List) At(i int) RewardTxCapn { + return RewardTxCapn(C.PointerList(s).At(i).ToStruct()) +} +func (s RewardTxCapn_List) ToArray() []RewardTxCapn { + n := s.Len() + a := make([]RewardTxCapn, n) + for i := 0; i < n; i++ { + a[i] = s.At(i) + } + return a +} +func (s RewardTxCapn_List) Set(i int, item RewardTxCapn) { C.PointerList(s).Set(i, C.Object(item)) } diff --git a/data/rewardTx/rewardTx.go b/data/rewardTx/rewardTx.go new file mode 100644 index 00000000000..0bee4c200c1 --- /dev/null +++ b/data/rewardTx/rewardTx.go @@ -0,0 +1,119 @@ +package rewardTx + +import ( + "io" + "math/big" + + "github.com/ElrondNetwork/elrond-go/data/rewardTx/capnp" + "github.com/glycerine/go-capnproto" 
+) + +// RewardTx holds the data for a reward transaction +type RewardTx struct { + Round uint64 `capid:"1" json:"round"` + Epoch uint32 `capid:"2" json:"epoch"` + Value *big.Int `capid:"3" json:"value"` + RcvAddr []byte `capid:"4" json:"receiver"` + ShardId uint32 `capid:"5" json:"shardId"` +} + +// Save saves the serialized data of a RewardTx into a stream through Capnp protocol +func (scr *RewardTx) Save(w io.Writer) error { + seg := capn.NewBuffer(nil) + RewardTxGoToCapn(seg, scr) + _, err := seg.WriteTo(w) + return err +} + +// Load loads the data from the stream into a RewardTx object through Capnp protocol +func (scr *RewardTx) Load(r io.Reader) error { + capMsg, err := capn.ReadFromStream(r, nil) + if err != nil { + return err + } + + z := capnp.ReadRootRewardTxCapn(capMsg) + RewardTxCapnToGo(z, scr) + return nil +} + +// RewardTxCapnToGo is a helper function to copy fields from a RewardTxCapn object to a RewardTx object +func RewardTxCapnToGo(src capnp.RewardTxCapn, dest *RewardTx) *RewardTx { + if dest == nil { + dest = &RewardTx{} + } + + if dest.Value == nil { + dest.Value = big.NewInt(0) + } + + dest.Epoch = src.Epoch() + dest.Round = src.Round() + err := dest.Value.GobDecode(src.Value()) + + if err != nil { + return nil + } + + dest.RcvAddr = src.RcvAddr() + dest.ShardId = src.ShardId() + + return dest +} + +// RewardTxGoToCapn is a helper function to copy fields from a RewardTx object to a RewardTxCapn object +func RewardTxGoToCapn(seg *capn.Segment, src *RewardTx) capnp.RewardTxCapn { + dest := capnp.AutoNewRewardTxCapn(seg) + + value, _ := src.Value.GobEncode() + dest.SetEpoch(src.Epoch) + dest.SetRound(src.Round) + dest.SetValue(value) + dest.SetRcvAddr(src.RcvAddr) + dest.SetShardId(src.ShardId) + + return dest +} + +// IsInterfaceNil verifies if underlying object is nil +func (scr *RewardTx) IsInterfaceNil() bool { + return scr == nil +} + +// GetValue returns the value of the reward transaction +func (scr *RewardTx) GetValue() *big.Int { + return scr.Value +} + +// GetData returns the data of the reward transaction +func (scr *RewardTx) GetData() string { + return "" +} + +// GetRecvAddress returns the receiver address from the reward transaction +func (scr *RewardTx) GetRecvAddress() []byte { + return scr.RcvAddr +} + +// GetSndAddress returns the sender address from the reward transaction +func (scr *RewardTx) GetSndAddress() []byte { + return nil +} + +// SetValue sets the value of the reward transaction +func (scr *RewardTx) SetValue(value *big.Int) { + scr.Value = value +} + +// SetData sets the data of the reward transaction +func (scr *RewardTx) SetData(data string) { +} + +// SetRecvAddress sets the receiver address of the reward transaction +func (scr *RewardTx) SetRecvAddress(addr []byte) { + scr.RcvAddr = addr +} + +// SetSndAddress sets the sender address of the reward transaction +func (scr *RewardTx) SetSndAddress(addr []byte) { +} diff --git a/data/rewardTx/rewardTx_test.go b/data/rewardTx/rewardTx_test.go new file mode 100644 index 00000000000..80930abac26 --- /dev/null +++ b/data/rewardTx/rewardTx_test.go @@ -0,0 +1,68 @@ +package rewardTx_test + +import ( + "bytes" + "math/big" + "testing" + + "github.com/ElrondNetwork/elrond-go/data/rewardTx" + "github.com/stretchr/testify/assert" +) + +func TestRewardTx_SaveLoad(t *testing.T) { + smrS := rewardTx.RewardTx{ + Round: uint64(1), + Epoch: uint32(1), + Value: big.NewInt(1), + RcvAddr: []byte("receiver_address"), + ShardId: 10, + } + + var b bytes.Buffer + err := smrS.Save(&b) + assert.Nil(t, err) + + 
loadSMR := rewardTx.RewardTx{} + err = loadSMR.Load(&b) + assert.Nil(t, err) + + assert.Equal(t, smrS, loadSMR) +} + +func TestRewardTx_GetRecvAddr(t *testing.T) { + t.Parallel() + + data := []byte("data") + scr := &rewardTx.RewardTx{RcvAddr: data} + + assert.Equal(t, data, scr.RcvAddr) +} + +func TestRewardTx_GetValue(t *testing.T) { + t.Parallel() + + value := big.NewInt(10) + scr := &rewardTx.RewardTx{Value: value} + + assert.Equal(t, value, scr.Value) +} + +func TestRewardTx_SetRecvAddr(t *testing.T) { + t.Parallel() + + data := []byte("data") + scr := &rewardTx.RewardTx{} + scr.SetRecvAddress(data) + + assert.Equal(t, data, scr.RcvAddr) +} + +func TestRewardTx_SetValue(t *testing.T) { + t.Parallel() + + value := big.NewInt(10) + scr := &rewardTx.RewardTx{} + scr.SetValue(value) + + assert.Equal(t, value, scr.Value) +} From d00046b2e208d0eb23e99b21d9a97603072c0aa5 Mon Sep 17 00:00:00 2001 From: Sebastian Marian <36901555+SebastianMarian@users.noreply.github.com> Date: Mon, 12 Aug 2019 17:57:07 +0300 Subject: [PATCH 057/234] Feat/en 2674 add account pk in nodesSetup.json (#354) * Implementation done * Refactored after code review --- cmd/node/config/nodesSetup.json | 65 ++++-- cmd/node/factory/structs.go | 4 +- cmd/node/main.go | 9 +- consensus/mock/nodesCoordinatorMock.go | 22 +- consensus/mock/validatorMock.go | 15 +- integrationTests/consensus/testInitializer.go | 6 +- integrationTests/mock/nodesCoordinatorMock.go | 4 + .../multiShard/block/testInitializer.go | 2 +- .../multiShard/metablock/testInitializer.go | 2 +- .../smartContract/testInitilalizer.go | 2 +- .../singleShard/block/testInitializer.go | 2 +- integrationTests/testInitializer.go | 2 +- node/mock/nodesCoordinatorMock.go | 22 +- node/mock/validatorMock.go | 15 +- .../headerInterceptorBase_test.go | 6 +- .../interceptors/headerInterceptor_test.go | 12 +- process/mock/nodesCoordinatorMock.go | 41 +++- sharding/errors.go | 9 + sharding/export_test.go | 4 +- sharding/indexHashedNodesCoordinator.go | 17 ++ sharding/indexHashedNodesCoordinator_test.go | 135 +++++++++-- sharding/interface.go | 2 + sharding/mock/invalidNodesSetupMock.json | 6 +- sharding/mock/nodesCoordinatorMock.go | 22 +- sharding/mock/nodesSetupMock.json | 15 +- sharding/mock/validatorMock.go | 15 +- sharding/nodesSetup.go | 89 ++++++-- sharding/nodesSetup_test.go | 213 ++++++++++-------- sharding/validator.go | 25 +- sharding/validator_test.go | 29 ++- 30 files changed, 570 insertions(+), 242 deletions(-) diff --git a/cmd/node/config/nodesSetup.json b/cmd/node/config/nodesSetup.json index 1f0ee05446a..a655f12fdfb 100644 --- a/cmd/node/config/nodesSetup.json +++ b/cmd/node/config/nodesSetup.json @@ -8,67 +8,88 @@ "metaChainMinNodes": 1, "initialNodes": [ { - "pubkey": "5e91c426c5c8f5f805f86de1e0653e2ec33853772e583b88e9f0f201089d03d8570759c3c3ab610ce573493c33ba0adf954c8939dba5d5ef7f2be4e87145d8153fc5b4fb91cecb8d9b1f62e080743fbf69c8c3096bf07980bb82cb450ba9b902673373d5b671ea73620cc5bc4d36f7a0f5ca3684d4c8aa5c1b425ab2a8673140" + "pubkey": "5e91c426c5c8f5f805f86de1e0653e2ec33853772e583b88e9f0f201089d03d8570759c3c3ab610ce573493c33ba0adf954c8939dba5d5ef7f2be4e87145d8153fc5b4fb91cecb8d9b1f62e080743fbf69c8c3096bf07980bb82cb450ba9b902673373d5b671ea73620cc5bc4d36f7a0f5ca3684d4c8aa5c1b425ab2a8673140", + "address": "d4105de8e44aee9d4be670401cec546e5df381028e805012386a05acf76518d9" }, { - "pubkey": 
"73972bf46dca59fba211c58f11b530f8e9d6392c499655ce760abc6458fd9c6b54b9676ee4b95aa32f6c254c9aad2f63a6195cd65d837a4320d7b8e915ba3a7123c8f4983b201035573c0752bb54e9021eb383b40d302447b62ea7a3790c89c47f5ab81d183f414e87611a31ff635ad22e969495356d5bc44eec7917aaad4c5e" + "pubkey": "73972bf46dca59fba211c58f11b530f8e9d6392c499655ce760abc6458fd9c6b54b9676ee4b95aa32f6c254c9aad2f63a6195cd65d837a4320d7b8e915ba3a7123c8f4983b201035573c0752bb54e9021eb383b40d302447b62ea7a3790c89c47f5ab81d183f414e87611a31ff635ad22e969495356d5bc44eec7917aaad4c5e", + "address": "d11e60011ffc1b7ebb1fd4c92c2821ecef8bed5c518d76a24640153a462cdc1e" }, { - "pubkey": "7391ccce066ab5674304b10220643bc64829afa626a165f1e7a6618e260fa68f8e79018ac5964f7a1b8dd419645049042e34ebe7f2772def71e6176ce9daf50a57c17ee2a7445b908fe47e8f978380fcc2654a19925bf73db2402b09dde515148081f8ca7c331fbedec689de1b7bfce6bf106e4433557c29752c12d0a009f47a" + "pubkey": "7391ccce066ab5674304b10220643bc64829afa626a165f1e7a6618e260fa68f8e79018ac5964f7a1b8dd419645049042e34ebe7f2772def71e6176ce9daf50a57c17ee2a7445b908fe47e8f978380fcc2654a19925bf73db2402b09dde515148081f8ca7c331fbedec689de1b7bfce6bf106e4433557c29752c12d0a009f47a", + "address": "0f36a982b79d3c1fda9b82a646a2b423cb3e7223cffbae73a4e3d2c1ea62ee5e" }, { - "pubkey": "3efb714c90dd9442c939429687311a7d24e57005d2c6c80782092175b31786994b12f30e4689231e146647dc85be3f80dd458df813d602f11785793f4a8cd40901b48a64b8ebfb204496e48cadc48ad3aa422e511d8c9e6359f60d7067e55bfb134a658fad6d5a5d8fe051d770d74d82e11edcd7cc48b696e41f7244305b8895" + "pubkey": "3efb714c90dd9442c939429687311a7d24e57005d2c6c80782092175b31786994b12f30e4689231e146647dc85be3f80dd458df813d602f11785793f4a8cd40901b48a64b8ebfb204496e48cadc48ad3aa422e511d8c9e6359f60d7067e55bfb134a658fad6d5a5d8fe051d770d74d82e11edcd7cc48b696e41f7244305b8895", + "address": "8c93db70abe14a6aa8c4ca7b722b67f4342b4251c0f3731b12b5f75885a9b9b6" }, { - "pubkey": "5498d09d5cc1ef68e07b4fbd059ef3309ddfdaf26470514f80fd02cb9789a5772db6515e014efc9f49c8350be25b28c2938155e01e2270071265fef242574da512ef326d66a3113c6b697891e1390c18678bc2af7398863e18d002dab69fdd77819adca791e9528ae272466cd9f09d048fbac16ddb492ca30da9dc69662b1a58" + "pubkey": "5498d09d5cc1ef68e07b4fbd059ef3309ddfdaf26470514f80fd02cb9789a5772db6515e014efc9f49c8350be25b28c2938155e01e2270071265fef242574da512ef326d66a3113c6b697891e1390c18678bc2af7398863e18d002dab69fdd77819adca791e9528ae272466cd9f09d048fbac16ddb492ca30da9dc69662b1a58", + "address": "afb051dc3a1dfb029866730243c2cbc51d8b8ef15951e4da3929f9c8391f307a" }, { - "pubkey": "671a8df542bf8e3e6ddaa9a8ace6bf34b55f86aab4887fde28a2eb0b3dea53cf3b290fe9d5689c8c3dd99b91ce2da0df0636208022816d23f766756ea81cb46b5907f93c5b3071fec8fc88553dfd732f560537c66fc8507f750890abcf23e9900326939a163f4ffdaf1ee6109b7e86babee510613478857211149e80f33bd338" + "pubkey": "671a8df542bf8e3e6ddaa9a8ace6bf34b55f86aab4887fde28a2eb0b3dea53cf3b290fe9d5689c8c3dd99b91ce2da0df0636208022816d23f766756ea81cb46b5907f93c5b3071fec8fc88553dfd732f560537c66fc8507f750890abcf23e9900326939a163f4ffdaf1ee6109b7e86babee510613478857211149e80f33bd338", + "address": "86fe0a4a9bf7dbed6784b8cfbfd5a80d927be30b4debff67e60e1fd05cd2359b" }, { - "pubkey": "7feee0aa8ee11a61f4e91b71481928db7998a8e58deef181ffb013fa3e3c51a7375155c36deb9d09e97edd61dac26b1239c53c2adb50fe2608d467e8669fed9946465500e093442d399b30c74ebb38d1e979d435a5a2226b33e08f5050cc73b4799722a258dcf7e9d7a838014e06dc98ea691f976c0d319d7206b47e30549a37" + "pubkey": 
"7feee0aa8ee11a61f4e91b71481928db7998a8e58deef181ffb013fa3e3c51a7375155c36deb9d09e97edd61dac26b1239c53c2adb50fe2608d467e8669fed9946465500e093442d399b30c74ebb38d1e979d435a5a2226b33e08f5050cc73b4799722a258dcf7e9d7a838014e06dc98ea691f976c0d319d7206b47e30549a37", + "address": "fdc635bc2bf1477609bea5ba90365a99d4bbb023b2eaffb5c20642a2f2458dfa" }, { - "pubkey": "47cac956e48e385bd811fcfdb1a06bcf26bc09d4f4b4fbb2c64391c2bb6ab32975b6b7b4eb508c925ad6febff7031bd5ffbb3d7e7e02db94f25cbf50af4aee2201a42a404947f4ad6628b1482afadb4fbce34116961cd8e0edf0cdb017d37a7516177059bab03e70ce0ad445554c2f02cd00b183d4c2d4d37793441a0d36f867" + "pubkey": "47cac956e48e385bd811fcfdb1a06bcf26bc09d4f4b4fbb2c64391c2bb6ab32975b6b7b4eb508c925ad6febff7031bd5ffbb3d7e7e02db94f25cbf50af4aee2201a42a404947f4ad6628b1482afadb4fbce34116961cd8e0edf0cdb017d37a7516177059bab03e70ce0ad445554c2f02cd00b183d4c2d4d37793441a0d36f867", + "address": "5bdf4c81489bea69ba29cd3eea2670c1bb6cb5d922fa8cb6e17bca71dfdd49f0" }, { - "pubkey": "13fef1141f6f5c94b03b8597fecbaf800dc4d6128a5ffaa4465ee5036b4e471a292cbc3eea42ceeb1fe5be0e473c1d250a09451200610960564f464a11e3da1a75fdc13a3b108a0a30917726f99832bfe13874e07c5ea82d5a4b23249812b0e22dd81e29600d19a80e933123df3ac8d750192e136e007e80ac7a7a92c953f673" + "pubkey": "13fef1141f6f5c94b03b8597fecbaf800dc4d6128a5ffaa4465ee5036b4e471a292cbc3eea42ceeb1fe5be0e473c1d250a09451200610960564f464a11e3da1a75fdc13a3b108a0a30917726f99832bfe13874e07c5ea82d5a4b23249812b0e22dd81e29600d19a80e933123df3ac8d750192e136e007e80ac7a7a92c953f673", + "address": "22c2e3721a6256a5891ba612ad55343dceb6655388176f981ab2885ed756d6fd" }, { - "pubkey": "1e04f75417887f43a05b5cd2da0d31c0e451931cd2d145f80a08e9c85e3736ea499fa27ece987013a403e6a2595ef12d6d3c6634b6c72e438f96850b7336941c65642820c8dfa38fa8aa1813954832d4fdc42f87622bc5e1f9c51cbc45259cd84af3e89ec7452b38804cfa5260f7d7b97dbbc63e6c3b820d8768e01876af0846" + "pubkey": "1e04f75417887f43a05b5cd2da0d31c0e451931cd2d145f80a08e9c85e3736ea499fa27ece987013a403e6a2595ef12d6d3c6634b6c72e438f96850b7336941c65642820c8dfa38fa8aa1813954832d4fdc42f87622bc5e1f9c51cbc45259cd84af3e89ec7452b38804cfa5260f7d7b97dbbc63e6c3b820d8768e01876af0846", + "address": "f9c28a8369df5ff3f8589a0aaad93d2d8f94f5ad70d898d422c964fdd6a87d0b" }, { - "pubkey": "5466c7ed09d157bdd8b17389d84ca9fd1423eb347e40126840b5736bd3fb0aa52c2452cf7fa9f2f7b9cc53d414c482227036c056452fb8829bb78dd9849a0ed845e875412cba5f044d969ed819a186aa9841e77dae2f7a1c6c25bf73942bf0cd58e3d2d4f2b9117974e3d6b0743c1565d72c41b69ebbfce47bbcf8d642651d8d" + "pubkey": "5466c7ed09d157bdd8b17389d84ca9fd1423eb347e40126840b5736bd3fb0aa52c2452cf7fa9f2f7b9cc53d414c482227036c056452fb8829bb78dd9849a0ed845e875412cba5f044d969ed819a186aa9841e77dae2f7a1c6c25bf73942bf0cd58e3d2d4f2b9117974e3d6b0743c1565d72c41b69ebbfce47bbcf8d642651d8d", + "address": "69e34e6a9e6aeb051f46e15cae1fe7d0f8641b6bcd9ff23ab228c78b1e4418af" }, { - "pubkey": "713a6438056175e7b274e5dd8bffd34f5a266cd1554b837678552557940a7de46cc90d4139bb55d80f81adc1039b0bc723eed51eb3bc225b4cfcd5a91ccbbc373eba65495a57702293ac999bb7a4b6ca0135f67378b69a723e23cf9c45513b0387f6cb286d6e6d0ffaf2bdfcf0e6a28e3559402d830f70a2ed835304261b4321" + "pubkey": "713a6438056175e7b274e5dd8bffd34f5a266cd1554b837678552557940a7de46cc90d4139bb55d80f81adc1039b0bc723eed51eb3bc225b4cfcd5a91ccbbc373eba65495a57702293ac999bb7a4b6ca0135f67378b69a723e23cf9c45513b0387f6cb286d6e6d0ffaf2bdfcf0e6a28e3559402d830f70a2ed835304261b4321", + "address": "d453e66ea50b05ec3c102cdaabbcee172136f53db82ba434ca170a53483d4ad1" }, { - "pubkey": 
"1f4d1c336ca9758e08311a0b136f6ee6ad20bc8d9e276e508931892343ff8a0e056a96d598aff2f335b4cd98e1ba0902a22f36b86f8d104c0815a96a301df7c606e1c44413f019e0f175f4c6721587ddf620c98713927a7695b002d8bf36b7c04466c51ad43dd170e468bb7edd20b601cf13c1b53cc5384c07f9c61bf220910e" + "pubkey": "1f4d1c336ca9758e08311a0b136f6ee6ad20bc8d9e276e508931892343ff8a0e056a96d598aff2f335b4cd98e1ba0902a22f36b86f8d104c0815a96a301df7c606e1c44413f019e0f175f4c6721587ddf620c98713927a7695b002d8bf36b7c04466c51ad43dd170e468bb7edd20b601cf13c1b53cc5384c07f9c61bf220910e", + "address": "04e61f7bf892ca638451f6efeccf069d7fb5a5c82303aa27e6d28725da8ae1df" }, { - "pubkey": "2112a7a4468403b38d9d352fcf9fc1d1a20ddfbe4c1190a59a526a9460e6791f201589d5714adf4c390e156e204d21b2f2327d64255f4b94ff7dbe1acee47fe5352cece033a9e6e339a15ba094e73e0fbb2da49b29416b1017d61bd52884e0b22aab88a70047c64849d134c6af9fba69bbb2950a8fae3225aa7f462984efad3f" + "pubkey": "2112a7a4468403b38d9d352fcf9fc1d1a20ddfbe4c1190a59a526a9460e6791f201589d5714adf4c390e156e204d21b2f2327d64255f4b94ff7dbe1acee47fe5352cece033a9e6e339a15ba094e73e0fbb2da49b29416b1017d61bd52884e0b22aab88a70047c64849d134c6af9fba69bbb2950a8fae3225aa7f462984efad3f", + "address": "97d0f43b88e104aa9b0cc98c5cea96f5468a59d3986d2d187b19319a5911b7ff" }, { - "pubkey": "484f2fa2dab11d0f6276467090d5b33c077d13b61ee57834f481feec52423c3e8d83f4957153cad0e3baea68e6eb6e2cb26da69751c43024818cd4f0778219ac6637ddcb08f07528f9670e6f6da4ced010d7b3a2d3fdcf28b3455ef5644a7b7b170b5ebfc6b6d66d9e37fd58a7ecce98b047c01212fd7547bd4fb9f1f99372f4" + "pubkey": "484f2fa2dab11d0f6276467090d5b33c077d13b61ee57834f481feec52423c3e8d83f4957153cad0e3baea68e6eb6e2cb26da69751c43024818cd4f0778219ac6637ddcb08f07528f9670e6f6da4ced010d7b3a2d3fdcf28b3455ef5644a7b7b170b5ebfc6b6d66d9e37fd58a7ecce98b047c01212fd7547bd4fb9f1f99372f4", + "address": "8e660d69a8d99e9cb15323c0c8db36f1f432231a1b9a74da8ffa44a2b9abc7fe" }, { - "pubkey": "3bd6d27ae320fc07e19efb93b890fd8c869429fa891f97f93cdcb581fc3a085d162522eb79e6ae19f838d2cbabc3a497751c952e618976cfb763b807d3877036028ccc52f506b6ae2b92a82cf07de343af79790de61568e4f80eaa1934a67faa07dc140b0f02b39f510be929c2a7d097a7e0d0e828a5ed7d0e18a91d42543beb" + "pubkey": "3bd6d27ae320fc07e19efb93b890fd8c869429fa891f97f93cdcb581fc3a085d162522eb79e6ae19f838d2cbabc3a497751c952e618976cfb763b807d3877036028ccc52f506b6ae2b92a82cf07de343af79790de61568e4f80eaa1934a67faa07dc140b0f02b39f510be929c2a7d097a7e0d0e828a5ed7d0e18a91d42543beb", + "address": "a901ae67ca50d4af01f813da27613f124137be835a5d6902697ec719b2df704f" }, { - "pubkey": "7a2e2aabf1c030677921ce3d31fbeaa9eb4fdddfb97bd5714e351165f10d76b775ec01908e934711c4a2ab6c39be450fb5dd4390c30695563b6e679fa8a0e360561840c2dc3e39281077b5be7b1946806b92041cc0259be754ecd9e6a12a44bd301e1d380c3ae096acfae70e479b2d33b9be2cc993d03bb5517cd74584db3fca" + "pubkey": "7a2e2aabf1c030677921ce3d31fbeaa9eb4fdddfb97bd5714e351165f10d76b775ec01908e934711c4a2ab6c39be450fb5dd4390c30695563b6e679fa8a0e360561840c2dc3e39281077b5be7b1946806b92041cc0259be754ecd9e6a12a44bd301e1d380c3ae096acfae70e479b2d33b9be2cc993d03bb5517cd74584db3fca", + "address": "6b0dcc478115c270f2a6c6a9809c04b61eff8a5877b837d86810396fdb50feda" }, { - "pubkey": "306d6a4e09b88e5147fb475361db2f7b27ce4f2cae78a2dc7ced564a75043e5f84a9830eaa23137ac01ef8e4763fb6870bb62cf184596df8f15f41c535b2f6430a78957c29a9934533bf5df6014961879df399044d1cab57442ef36ef743ee02571495cc7a8f1dd9d573721131677759c532e62f946c9c969b5668862e817db6" + "pubkey": 
"306d6a4e09b88e5147fb475361db2f7b27ce4f2cae78a2dc7ced564a75043e5f84a9830eaa23137ac01ef8e4763fb6870bb62cf184596df8f15f41c535b2f6430a78957c29a9934533bf5df6014961879df399044d1cab57442ef36ef743ee02571495cc7a8f1dd9d573721131677759c532e62f946c9c969b5668862e817db6", + "address": "c53b7e4463091a999e002c75ed55c79e1f4c64e91ca8ba1b72d984dea9c0e477" }, { - "pubkey": "34404c84cf05c649a6f9c2bb3af33753ef0d186ba2363d5ed2892a4cf39f3f361f563dc66e5623a27a54c24edd417fa20c0f6361016652159b3a22d7c1ff5ef511ed0b04ee3ed101b2627ef64c5e6ee8b17c8a2db95ded5a9f7edf33520612c5269795ba1aec09bd178d185fe7e4d4360fdb3e51b484114fcb2cd9499fbc84a2" + "pubkey": "34404c84cf05c649a6f9c2bb3af33753ef0d186ba2363d5ed2892a4cf39f3f361f563dc66e5623a27a54c24edd417fa20c0f6361016652159b3a22d7c1ff5ef511ed0b04ee3ed101b2627ef64c5e6ee8b17c8a2db95ded5a9f7edf33520612c5269795ba1aec09bd178d185fe7e4d4360fdb3e51b484114fcb2cd9499fbc84a2", + "address": "18e6af48dad7fd4902991efb019e741e0f2a7a192c8678b1da3f4cf42c164519" }, { - "pubkey": "4bc468602245263f7366d7745c0d064aa311fbeb569751796e0d01878fc8723f45a67bfd1070fc8f90bc6ebb9f4e0c5024fda12e97ccaa52ea9f4e82673f29aa45e569a63ea929b4eb80cf421cb4e2b6f6a3b5d5216de2644bd6dcba4fa8a5cf7ab3ebadaeafcd6db8fc77f4168f2fa158f394916a9204dbc5760471ea8085bb" + "pubkey": "4bc468602245263f7366d7745c0d064aa311fbeb569751796e0d01878fc8723f45a67bfd1070fc8f90bc6ebb9f4e0c5024fda12e97ccaa52ea9f4e82673f29aa45e569a63ea929b4eb80cf421cb4e2b6f6a3b5d5216de2644bd6dcba4fa8a5cf7ab3ebadaeafcd6db8fc77f4168f2fa158f394916a9204dbc5760471ea8085bb", + "address": "95fe2d76c72ada51156aed96d083c993d637d7a772fb48efeb8bc3f3cedc7237" }, { - "pubkey": "85aa805512065ca85706a6ffe6e21ef635cb22ab862ab19a02a9572e6d14ad85794b2952a6e00cd87f43c657f006dc1dde45e04cddab85b2b5f20e70cb11f2045e7f94fe901353f8b75c0577f92e00b25e72a4790c7b391f33c0066fb38b2e66586706c06e159d342ecebd7f9bdfe83f3d3c7f395a7879096514d74c5d4e88aa" + "pubkey": "85aa805512065ca85706a6ffe6e21ef635cb22ab862ab19a02a9572e6d14ad85794b2952a6e00cd87f43c657f006dc1dde45e04cddab85b2b5f20e70cb11f2045e7f94fe901353f8b75c0577f92e00b25e72a4790c7b391f33c0066fb38b2e66586706c06e159d342ecebd7f9bdfe83f3d3c7f395a7879096514d74c5d4e88aa", + "address": "d6ad6476141dd798dc7b009b92b8c2d50a8caff8452a459548aa5ccb6c11b6c3" } ] -} \ No newline at end of file +} diff --git a/cmd/node/factory/structs.go b/cmd/node/factory/structs.go index 72c35206953..8ab6f965037 100644 --- a/cmd/node/factory/structs.go +++ b/cmd/node/factory/structs.go @@ -344,12 +344,12 @@ func CryptoComponentsFactory(args *cryptoComponentsFactoryArgs) (*Crypto, error) return nil, errors.New("could not create multisig hasher: " + err.Error()) } - currentShardPubKeys, err := args.nodesConfig.InitialNodesPubKeysForShard(args.shardCoordinator.SelfId()) + currentShardNodesPubKeys, err := args.nodesConfig.InitialNodesPubKeysForShard(args.shardCoordinator.SelfId()) if err != nil { return nil, errors.New("could not start creation of multiSigner: " + err.Error()) } - multiSigner, err := createMultiSigner(args.config, multisigHasher, currentShardPubKeys, args.privKey, args.keyGen) + multiSigner, err := createMultiSigner(args.config, multisigHasher, currentShardNodesPubKeys, args.privKey, args.keyGen) if err != nil { return nil, err } diff --git a/cmd/node/main.go b/cmd/node/main.go index 910c3308856..72fddc65944 100644 --- a/cmd/node/main.go +++ b/cmd/node/main.go @@ -744,14 +744,13 @@ func createNodesCoordinator( nbShards := nodesConfig.NumberOfShards() shardConsensusGroupSize := int(nodesConfig.MetaChainConsensusGroupSize) metaConsensusGroupSize := 
int(nodesConfig.ConsensusGroupSize) - initNodesPubKeys := nodesConfig.InitialNodesPubKeys() + initNodesInfo := nodesConfig.InitialNodesInfo() initValidators := make(map[uint32][]sharding.Validator) - for shardId, pubKeyList := range initNodesPubKeys { + for shardId, nodeInfoList := range initNodesInfo { validators := make([]sharding.Validator, 0) - for _, pubKey := range pubKeyList { - // TODO: the stake needs to be associated to the staking account - validator, err := sharding.NewValidator(big.NewInt(0), 0, []byte(pubKey)) + for _, nodeInfo := range nodeInfoList { + validator, err := sharding.NewValidator(big.NewInt(0), 0, nodeInfo.PubKey(), nodeInfo.Address()) if err != nil { return nil, err } diff --git a/consensus/mock/nodesCoordinatorMock.go b/consensus/mock/nodesCoordinatorMock.go index 3f16e81f374..d932396c81e 100644 --- a/consensus/mock/nodesCoordinatorMock.go +++ b/consensus/mock/nodesCoordinatorMock.go @@ -20,15 +20,15 @@ func (ncm NodesCoordinatorMock) ComputeValidatorsGroup( } list := []sharding.Validator{ - NewValidatorMock(big.NewInt(0), 0, []byte("A")), - NewValidatorMock(big.NewInt(0), 0, []byte("B")), - NewValidatorMock(big.NewInt(0), 0, []byte("C")), - NewValidatorMock(big.NewInt(0), 0, []byte("D")), - NewValidatorMock(big.NewInt(0), 0, []byte("E")), - NewValidatorMock(big.NewInt(0), 0, []byte("F")), - NewValidatorMock(big.NewInt(0), 0, []byte("G")), - NewValidatorMock(big.NewInt(0), 0, []byte("H")), - NewValidatorMock(big.NewInt(0), 0, []byte("I")), + NewValidatorMock(big.NewInt(0), 0, []byte("A"), []byte("AA")), + NewValidatorMock(big.NewInt(0), 0, []byte("B"), []byte("BB")), + NewValidatorMock(big.NewInt(0), 0, []byte("C"), []byte("CC")), + NewValidatorMock(big.NewInt(0), 0, []byte("D"), []byte("DD")), + NewValidatorMock(big.NewInt(0), 0, []byte("E"), []byte("EE")), + NewValidatorMock(big.NewInt(0), 0, []byte("F"), []byte("FF")), + NewValidatorMock(big.NewInt(0), 0, []byte("G"), []byte("GG")), + NewValidatorMock(big.NewInt(0), 0, []byte("H"), []byte("HH")), + NewValidatorMock(big.NewInt(0), 0, []byte("I"), []byte("II")), } return list, nil @@ -68,3 +68,7 @@ func (ncm NodesCoordinatorMock) SetConsensusGroupSize(int) error { func (ncm NodesCoordinatorMock) GetSelectedPublicKeys(selection []byte) (publicKeys []string, err error) { panic("implement me") } + +func (ncm NodesCoordinatorMock) GetValidatorWithPublicKey(publicKey []byte) (sharding.Validator, uint32, error) { + panic("implement me") +} diff --git a/consensus/mock/validatorMock.go b/consensus/mock/validatorMock.go index cd22a51ce85..e4f9bf01af8 100644 --- a/consensus/mock/validatorMock.go +++ b/consensus/mock/validatorMock.go @@ -5,13 +5,14 @@ import ( ) type ValidatorMock struct { - stake *big.Int - rating int32 - pubKey []byte + stake *big.Int + rating int32 + pubKey []byte + address []byte } -func NewValidatorMock(stake *big.Int, rating int32, pubKey []byte) *ValidatorMock { - return &ValidatorMock{stake: stake, rating: rating, pubKey: pubKey} +func NewValidatorMock(stake *big.Int, rating int32, pubKey []byte, address []byte) *ValidatorMock { + return &ValidatorMock{stake: stake, rating: rating, pubKey: pubKey, address: address} } func (vm *ValidatorMock) Stake() *big.Int { @@ -25,3 +26,7 @@ func (vm *ValidatorMock) Rating() int32 { func (vm *ValidatorMock) PubKey() []byte { return vm.pubKey } + +func (vm *ValidatorMock) Address() []byte { + return vm.address +} diff --git a/integrationTests/consensus/testInitializer.go b/integrationTests/consensus/testInitializer.go index 919502cbea9..a90ae8c5aca 100644 --- 
a/integrationTests/consensus/testInitializer.go +++ b/integrationTests/consensus/testInitializer.go @@ -10,6 +10,8 @@ import ( "sync" "time" + "math/big" + "github.com/ElrondNetwork/elrond-go/consensus/round" "github.com/ElrondNetwork/elrond-go/crypto" "github.com/ElrondNetwork/elrond-go/crypto/signing" @@ -44,7 +46,6 @@ import ( "github.com/ElrondNetwork/elrond-go/storage/storageUnit" "github.com/btcsuite/btcd/btcec" libp2pCrypto "github.com/libp2p/go-libp2p-core/crypto" - "math/big" ) const blsConsensusType = "bls" @@ -91,7 +92,8 @@ func genValidatorsFromPubKeys(pubKeysMap map[uint32][]string) map[uint32][]shard for shardId, shardNodesPks := range pubKeysMap { shardValidators := make([]sharding.Validator, 0) for i := 0; i < len(shardNodesPks); i++ { - v, _ := sharding.NewValidator(big.NewInt(0), 1, []byte(shardNodesPks[i])) + address := fmt.Sprintf("addr_%d_%d", shardId, i) + v, _ := sharding.NewValidator(big.NewInt(0), 1, []byte(shardNodesPks[i]), []byte(address)) shardValidators = append(shardValidators, v) } validatorsMap[shardId] = shardValidators diff --git a/integrationTests/mock/nodesCoordinatorMock.go b/integrationTests/mock/nodesCoordinatorMock.go index 328c50102af..8e08aa158e8 100644 --- a/integrationTests/mock/nodesCoordinatorMock.go +++ b/integrationTests/mock/nodesCoordinatorMock.go @@ -56,3 +56,7 @@ func (ncm NodesCoordinatorMock) SetConsensusGroupSize(int) error { func (ncm NodesCoordinatorMock) GetSelectedPublicKeys(selection []byte) (publicKeys []string, err error) { panic("implement me") } + +func (ncm NodesCoordinatorMock) GetValidatorWithPublicKey(publicKey []byte) (sharding.Validator, uint32, error) { + panic("implement me") +} diff --git a/integrationTests/multiShard/block/testInitializer.go b/integrationTests/multiShard/block/testInitializer.go index e5963a81cd0..f9b676188d4 100644 --- a/integrationTests/multiShard/block/testInitializer.go +++ b/integrationTests/multiShard/block/testInitializer.go @@ -112,7 +112,7 @@ func genValidatorsFromPubKeys(pubKeysMap map[uint32][]string) map[uint32][]shard for shardId, shardNodesPks := range pubKeysMap { shardValidators := make([]sharding.Validator, 0) for i := 0; i < len(shardNodesPks); i++ { - v, _ := sharding.NewValidator(big.NewInt(0), 1, []byte(shardNodesPks[i])) + v, _ := sharding.NewValidator(big.NewInt(0), 1, []byte(shardNodesPks[i]), []byte(shardNodesPks[i])) shardValidators = append(shardValidators, v) } validatorsMap[shardId] = shardValidators diff --git a/integrationTests/multiShard/metablock/testInitializer.go b/integrationTests/multiShard/metablock/testInitializer.go index 38f49319d58..8eb08c97227 100644 --- a/integrationTests/multiShard/metablock/testInitializer.go +++ b/integrationTests/multiShard/metablock/testInitializer.go @@ -100,7 +100,7 @@ func genValidatorsFromPubKeys(pubKeysMap map[uint32][]string) map[uint32][]shard for shardId, shardNodesPks := range pubKeysMap { shardValidators := make([]sharding.Validator, 0) for i := 0; i < len(shardNodesPks); i++ { - v, _ := sharding.NewValidator(big.NewInt(0), 1, []byte(shardNodesPks[i])) + v, _ := sharding.NewValidator(big.NewInt(0), 1, []byte(shardNodesPks[i]), []byte(shardNodesPks[i])) shardValidators = append(shardValidators, v) } validatorsMap[shardId] = shardValidators diff --git a/integrationTests/multiShard/smartContract/testInitilalizer.go b/integrationTests/multiShard/smartContract/testInitilalizer.go index 24f3f1035ac..e3259c66b41 100644 --- a/integrationTests/multiShard/smartContract/testInitilalizer.go +++ 
b/integrationTests/multiShard/smartContract/testInitilalizer.go @@ -119,7 +119,7 @@ func genValidatorsFromPubKeys(pubKeysMap map[uint32][]string) map[uint32][]shard for shardId, shardNodesPks := range pubKeysMap { shardValidators := make([]sharding.Validator, 0) for i := 0; i < len(shardNodesPks); i++ { - v, _ := sharding.NewValidator(big.NewInt(0), 1, []byte(shardNodesPks[i])) + v, _ := sharding.NewValidator(big.NewInt(0), 1, []byte(shardNodesPks[i]), []byte(shardNodesPks[i])) shardValidators = append(shardValidators, v) } validatorsMap[shardId] = shardValidators diff --git a/integrationTests/singleShard/block/testInitializer.go b/integrationTests/singleShard/block/testInitializer.go index 8b8c331a0bd..18af79daa1d 100644 --- a/integrationTests/singleShard/block/testInitializer.go +++ b/integrationTests/singleShard/block/testInitializer.go @@ -65,7 +65,7 @@ func genValidatorsFromPubKeys(pubKeysMap map[uint32][]string) map[uint32][]shard for shardId, shardNodesPks := range pubKeysMap { shardValidators := make([]sharding.Validator, 0) for i := 0; i < len(shardNodesPks); i++ { - v, _ := sharding.NewValidator(big.NewInt(0), 1, []byte(shardNodesPks[i])) + v, _ := sharding.NewValidator(big.NewInt(0), 1, []byte(shardNodesPks[i]), []byte(shardNodesPks[i])) shardValidators = append(shardValidators, v) } validatorsMap[shardId] = shardValidators diff --git a/integrationTests/testInitializer.go b/integrationTests/testInitializer.go index 25b52911b0d..9c10caa67ba 100644 --- a/integrationTests/testInitializer.go +++ b/integrationTests/testInitializer.go @@ -385,7 +385,7 @@ func genValidatorsFromPubKeys(pubKeysMap map[uint32][]string) map[uint32][]shard for shardId, shardNodesPks := range pubKeysMap { shardValidators := make([]sharding.Validator, 0) for i := 0; i < len(shardNodesPks); i++ { - v, _ := sharding.NewValidator(big.NewInt(0), 1, []byte(shardNodesPks[i])) + v, _ := sharding.NewValidator(big.NewInt(0), 1, []byte(shardNodesPks[i]), []byte(shardNodesPks[i])) shardValidators = append(shardValidators, v) } validatorsMap[shardId] = shardValidators diff --git a/node/mock/nodesCoordinatorMock.go b/node/mock/nodesCoordinatorMock.go index 3f16e81f374..d932396c81e 100644 --- a/node/mock/nodesCoordinatorMock.go +++ b/node/mock/nodesCoordinatorMock.go @@ -20,15 +20,15 @@ func (ncm NodesCoordinatorMock) ComputeValidatorsGroup( } list := []sharding.Validator{ - NewValidatorMock(big.NewInt(0), 0, []byte("A")), - NewValidatorMock(big.NewInt(0), 0, []byte("B")), - NewValidatorMock(big.NewInt(0), 0, []byte("C")), - NewValidatorMock(big.NewInt(0), 0, []byte("D")), - NewValidatorMock(big.NewInt(0), 0, []byte("E")), - NewValidatorMock(big.NewInt(0), 0, []byte("F")), - NewValidatorMock(big.NewInt(0), 0, []byte("G")), - NewValidatorMock(big.NewInt(0), 0, []byte("H")), - NewValidatorMock(big.NewInt(0), 0, []byte("I")), + NewValidatorMock(big.NewInt(0), 0, []byte("A"), []byte("AA")), + NewValidatorMock(big.NewInt(0), 0, []byte("B"), []byte("BB")), + NewValidatorMock(big.NewInt(0), 0, []byte("C"), []byte("CC")), + NewValidatorMock(big.NewInt(0), 0, []byte("D"), []byte("DD")), + NewValidatorMock(big.NewInt(0), 0, []byte("E"), []byte("EE")), + NewValidatorMock(big.NewInt(0), 0, []byte("F"), []byte("FF")), + NewValidatorMock(big.NewInt(0), 0, []byte("G"), []byte("GG")), + NewValidatorMock(big.NewInt(0), 0, []byte("H"), []byte("HH")), + NewValidatorMock(big.NewInt(0), 0, []byte("I"), []byte("II")), } return list, nil @@ -68,3 +68,7 @@ func (ncm NodesCoordinatorMock) SetConsensusGroupSize(int) error { func (ncm 
NodesCoordinatorMock) GetSelectedPublicKeys(selection []byte) (publicKeys []string, err error) { panic("implement me") } + +func (ncm NodesCoordinatorMock) GetValidatorWithPublicKey(publicKey []byte) (sharding.Validator, uint32, error) { + panic("implement me") +} diff --git a/node/mock/validatorMock.go b/node/mock/validatorMock.go index cd22a51ce85..e4f9bf01af8 100644 --- a/node/mock/validatorMock.go +++ b/node/mock/validatorMock.go @@ -5,13 +5,14 @@ import ( ) type ValidatorMock struct { - stake *big.Int - rating int32 - pubKey []byte + stake *big.Int + rating int32 + pubKey []byte + address []byte } -func NewValidatorMock(stake *big.Int, rating int32, pubKey []byte) *ValidatorMock { - return &ValidatorMock{stake: stake, rating: rating, pubKey: pubKey} +func NewValidatorMock(stake *big.Int, rating int32, pubKey []byte, address []byte) *ValidatorMock { + return &ValidatorMock{stake: stake, rating: rating, pubKey: pubKey, address: address} } func (vm *ValidatorMock) Stake() *big.Int { @@ -25,3 +26,7 @@ func (vm *ValidatorMock) Rating() int32 { func (vm *ValidatorMock) PubKey() []byte { return vm.pubKey } + +func (vm *ValidatorMock) Address() []byte { + return vm.address +} diff --git a/process/block/interceptors/headerInterceptorBase_test.go b/process/block/interceptors/headerInterceptorBase_test.go index ece315a1f57..83592dd7b00 100644 --- a/process/block/interceptors/headerInterceptorBase_test.go +++ b/process/block/interceptors/headerInterceptorBase_test.go @@ -240,13 +240,15 @@ func createNodesCoordinator() sharding.NodesCoordinator { shardValidators := make([]sharding.Validator, 0) for i := 0; i < 16; i++ { pubKeyStr := fmt.Sprintf("pk_shard0_%d", i) - v, _ := sharding.NewValidator(big.NewInt(0), 1, []byte(pubKeyStr)) + addrStr := fmt.Sprintf("addr_shard0_%d", i) + v, _ := sharding.NewValidator(big.NewInt(0), 1, []byte(pubKeyStr), []byte(addrStr)) shardValidators = append(shardValidators, v) } //metachain pubKeyBytes := []byte("pk_meta") - v, _ := sharding.NewValidator(big.NewInt(0), 1, pubKeyBytes) + addrBytes := []byte("addr_meta") + v, _ := sharding.NewValidator(big.NewInt(0), 1, pubKeyBytes, addrBytes) validators[0] = shardValidators validators[sharding.MetachainShardId] = []sharding.Validator{v} diff --git a/process/block/interceptors/headerInterceptor_test.go b/process/block/interceptors/headerInterceptor_test.go index 2d165a07a07..9cebefc2fa6 100644 --- a/process/block/interceptors/headerInterceptor_test.go +++ b/process/block/interceptors/headerInterceptor_test.go @@ -28,7 +28,8 @@ func generateValidatorsMap(shardSize, metachainSize, nbShards uint32) map[uint32 shardNodes := make([]sharding.Validator, 0) for valIdx := uint32(0); valIdx < shardSize; valIdx++ { pk := fmt.Sprintf("pubKey_sh%d_node%d", shard, valIdx) - v, _ := sharding.NewValidator(big.NewInt(0), 1, []byte(pk)) + addr := fmt.Sprintf("address_sh%d_node%d", shard, valIdx) + v, _ := sharding.NewValidator(big.NewInt(0), 1, []byte(pk), []byte(addr)) shardNodes = append(shardNodes, v) } nodes[shard] = shardNodes @@ -37,7 +38,8 @@ func generateValidatorsMap(shardSize, metachainSize, nbShards uint32) map[uint32 metaNodes := make([]sharding.Validator, 0) for mValIdx := uint32(0); mValIdx < metachainSize; mValIdx++ { pk := fmt.Sprintf("pubKey_meta_node%d", mValIdx) - v, _ := sharding.NewValidator(big.NewInt(0), 1, []byte(pk)) + addr := fmt.Sprintf("address_meta_node%d", mValIdx) + v, _ := sharding.NewValidator(big.NewInt(0), 1, []byte(pk), []byte(addr)) metaNodes = append(metaNodes, v) } nodes[sharding.MetachainShardId] = 
metaNodes @@ -331,10 +333,10 @@ func TestHeaderInterceptor_ProcessReceivedMessageNotForCurrentShardShouldNotAdd( shardCoordinator.SetNoShards(5) nodesCoordinator := &mock.NodesCoordinatorMock{ - NbShards: 5, + NbShards: 5, ShardConsensusSize: 1, - MetaConsensusSize: 1, - ShardId: 2, + MetaConsensusSize: 1, + ShardId: 2, } nodes := generateValidatorsMap(3, 3, 5) diff --git a/process/mock/nodesCoordinatorMock.go b/process/mock/nodesCoordinatorMock.go index 26cbd7d33fe..a3570e16ff9 100644 --- a/process/mock/nodesCoordinatorMock.go +++ b/process/mock/nodesCoordinatorMock.go @@ -1,20 +1,23 @@ package mock import ( + "bytes" + "github.com/ElrondNetwork/elrond-go/sharding" ) // NodesCoordinator defines the behaviour of a struct able to do validator group selection type NodesCoordinatorMock struct { - Validators map[uint32][]sharding.Validator - ShardConsensusSize uint32 - MetaConsensusSize uint32 - ShardId uint32 - NbShards uint32 - GetSelectedPublicKeysCalled func(selection []byte) (publicKeys []string, err error) - GetValidatorsPublicKeysCalled func(randomness []byte) ([]string, error) - LoadNodesPerShardsCalled func(nodes map[uint32][]sharding.Validator) error - ComputeValidatorsGroupCalled func(randomness []byte) (validatorsGroup []sharding.Validator, err error) + Validators map[uint32][]sharding.Validator + ShardConsensusSize uint32 + MetaConsensusSize uint32 + ShardId uint32 + NbShards uint32 + GetSelectedPublicKeysCalled func(selection []byte) (publicKeys []string, err error) + GetValidatorsPublicKeysCalled func(randomness []byte) ([]string, error) + LoadNodesPerShardsCalled func(nodes map[uint32][]sharding.Validator) error + ComputeValidatorsGroupCalled func(randomness []byte) (validatorsGroup []sharding.Validator, err error) + GetValidatorWithPublicKeyCalled func(publicKey []byte) (validator sharding.Validator, shardId uint32, err error) } func NewNodesCoordinatorMock() *NodesCoordinatorMock { @@ -103,3 +106,23 @@ func (ncm *NodesCoordinatorMock) ComputeValidatorsGroup(randomess []byte) ([]sha return validatorsGroup, nil } + +func (ncm *NodesCoordinatorMock) GetValidatorWithPublicKey(publicKey []byte) (sharding.Validator, uint32, error) { + if ncm.GetValidatorWithPublicKeyCalled != nil { + return ncm.GetValidatorWithPublicKeyCalled(publicKey) + } + + if publicKey == nil { + return nil, 0, sharding.ErrNilPubKey + } + + for shardId, shardEligible := range ncm.Validators { + for i := 0; i < len(shardEligible); i++ { + if bytes.Equal(publicKey, shardEligible[i].PubKey()) { + return shardEligible[i], shardId, nil + } + } + } + + return nil, 0, sharding.ErrValidatorNotFound +} diff --git a/sharding/errors.go b/sharding/errors.go index 09af0881fce..71820dfa26a 100644 --- a/sharding/errors.go +++ b/sharding/errors.go @@ -31,6 +31,9 @@ var ErrNilAddressConverter = errors.New("trying to set nil address converter") // ErrCouldNotParsePubKey signals that a given public key could not be parsed var ErrCouldNotParsePubKey = errors.New("could not parse node's public key") +// ErrCouldNotParseAddress signals that a given address could not be parsed +var ErrCouldNotParseAddress = errors.New("could not parse node's address") + // ErrNegativeOrZeroConsensusGroupSize signals that an invalid consensus group size has been provided var ErrNegativeOrZeroConsensusGroupSize = errors.New("negative or zero consensus group size") @@ -72,3 +75,9 @@ var ErrNilStake = errors.New("nil stake") // ErrNegativeStake signals that the stake is negative var ErrNegativeStake = errors.New("negative stake") + +// ErrNilAddress 
signals that the address is nil
+var ErrNilAddress = errors.New("nil address")
+
+// ErrValidatorNotFound signals that the validator has not been found
+var ErrValidatorNotFound = errors.New("validator not found")
diff --git a/sharding/export_test.go b/sharding/export_test.go
index 85575748413..cf6427cb891 100644
--- a/sharding/export_test.go
+++ b/sharding/export_test.go
@@ -24,8 +24,8 @@ func (ns *NodesSetup) ProcessMetaChainAssigment() {
 	ns.processMetaChainAssigment()
 }
 
-func (ns *NodesSetup) CreateInitialNodesPubKeys() {
-	ns.createInitialNodesPubKeys()
+func (ns *NodesSetup) CreateInitialNodesInfo() {
+	ns.createInitialNodesInfo()
 }
 
 func CommunicationIdentifierBetweenShards(shardId1 uint32, shardId2 uint32) string {
diff --git a/sharding/indexHashedNodesCoordinator.go b/sharding/indexHashedNodesCoordinator.go
index 9fe53d6c26f..bb716828470 100644
--- a/sharding/indexHashedNodesCoordinator.go
+++ b/sharding/indexHashedNodesCoordinator.go
@@ -116,6 +116,23 @@ func (ihgs *indexHashedNodesCoordinator) ComputeValidatorsGroup(randomness []byt
 	return tempList, nil
 }
 
+// GetValidatorWithPublicKey gets the validator with the given public key
+func (ihgs *indexHashedNodesCoordinator) GetValidatorWithPublicKey(publicKey []byte) (Validator, uint32, error) {
+	if publicKey == nil {
+		return nil, 0, ErrNilPubKey
+	}
+
+	for shardId, shardEligible := range ihgs.nodesMap {
+		for i := 0; i < len(shardEligible); i++ {
+			if bytes.Equal(publicKey, shardEligible[i].PubKey()) {
+				return shardEligible[i], shardId, nil
+			}
+		}
+	}
+
+	return nil, 0, ErrValidatorNotFound
+}
+
 // GetValidatorsPublicKeys calculates the validators group for a specific randomness,
 // returning their public keys
 func (ihgs *indexHashedNodesCoordinator) GetValidatorsPublicKeys(randomness []byte) ([]string, error) {
diff --git a/sharding/indexHashedNodesCoordinator_test.go b/sharding/indexHashedNodesCoordinator_test.go
index fd12cb7b976..612e9e9f7c6 100644
--- a/sharding/indexHashedNodesCoordinator_test.go
+++ b/sharding/indexHashedNodesCoordinator_test.go
@@ -24,13 +24,13 @@ func uint64ToBytes(value uint64) []byte {
 
 func createDummyNodesMap() map[uint32][]sharding.Validator {
 	list := []sharding.Validator{
-		mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0")),
-		mock.NewValidatorMock(big.NewInt(2), 3, []byte("pk1")),
+		mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0"), []byte("addr0")),
+		mock.NewValidatorMock(big.NewInt(2), 3, []byte("pk1"), []byte("addr1")),
 	}
 
 	listMeta := []sharding.Validator{
-		mock.NewValidatorMock(big.NewInt(1), 1, []byte("pkMeta1")),
-		mock.NewValidatorMock(big.NewInt(1), 2, []byte("pkMeta2")),
+		mock.NewValidatorMock(big.NewInt(1), 1, []byte("pkMeta1"), []byte("addrMeta1")),
+		mock.NewValidatorMock(big.NewInt(1), 2, []byte("pkMeta2"), []byte("addrMeta2")),
 	}
 
 	nodesMap := make(map[uint32][]sharding.Validator)
@@ -190,7 +190,7 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroup1ValidatorShouldReturnSa
 	t.Parallel()
 
 	list := []sharding.Validator{
-		mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0")),
+		mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0"), []byte("addr0")),
 	}
 
 	nodesMap := make(map[uint32][]sharding.Validator)
@@ -269,8 +269,8 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest2ValidatorsRevertOrd
 		return nil
 	}
 
-	validator0 := mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0"))
-	validator1 := mock.NewValidatorMock(big.NewInt(2), 3, []byte("pk1"))
+	validator0 := mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0"), []byte("addr0"))
+	validator1 :=
mock.NewValidatorMock(big.NewInt(2), 3, []byte("pk1"), []byte("addr1")) list := []sharding.Validator{ validator0, @@ -279,7 +279,7 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest2ValidatorsRevertOrd nodesMap := make(map[uint32][]sharding.Validator) nodesMap[0] = list - metaNode, _ := sharding.NewValidator(big.NewInt(1), 1, []byte("pubKeyMeta")) + metaNode, _ := sharding.NewValidator(big.NewInt(1), 1, []byte("pubKeyMeta"), []byte("addressMeta")) nodesMap[sharding.MetachainShardId] = []sharding.Validator{metaNode} ihgs, _ := sharding.NewIndexHashedNodesCoordinator( 2, @@ -369,16 +369,16 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest6From10ValidatorsSho return convertBigIntToBytes(val) } - validator0 := mock.NewValidatorMock(big.NewInt(1), 1, []byte("pk0")) - validator1 := mock.NewValidatorMock(big.NewInt(2), 2, []byte("pk1")) - validator2 := mock.NewValidatorMock(big.NewInt(3), 3, []byte("pk2")) - validator3 := mock.NewValidatorMock(big.NewInt(4), 4, []byte("pk3")) - validator4 := mock.NewValidatorMock(big.NewInt(5), 5, []byte("pk4")) - validator5 := mock.NewValidatorMock(big.NewInt(6), 6, []byte("pk5")) - validator6 := mock.NewValidatorMock(big.NewInt(7), 7, []byte("pk6")) - validator7 := mock.NewValidatorMock(big.NewInt(8), 8, []byte("pk7")) - validator8 := mock.NewValidatorMock(big.NewInt(9), 9, []byte("pk8")) - validator9 := mock.NewValidatorMock(big.NewInt(10), 10, []byte("pk9")) + validator0 := mock.NewValidatorMock(big.NewInt(1), 1, []byte("pk0"), []byte("addr0")) + validator1 := mock.NewValidatorMock(big.NewInt(2), 2, []byte("pk1"), []byte("addr1")) + validator2 := mock.NewValidatorMock(big.NewInt(3), 3, []byte("pk2"), []byte("addr2")) + validator3 := mock.NewValidatorMock(big.NewInt(4), 4, []byte("pk3"), []byte("addr3")) + validator4 := mock.NewValidatorMock(big.NewInt(5), 5, []byte("pk4"), []byte("addr4")) + validator5 := mock.NewValidatorMock(big.NewInt(6), 6, []byte("pk5"), []byte("addr5")) + validator6 := mock.NewValidatorMock(big.NewInt(7), 7, []byte("pk6"), []byte("addr6")) + validator7 := mock.NewValidatorMock(big.NewInt(8), 8, []byte("pk7"), []byte("addr7")) + validator8 := mock.NewValidatorMock(big.NewInt(9), 9, []byte("pk8"), []byte("addr8")) + validator9 := mock.NewValidatorMock(big.NewInt(10), 10, []byte("pk9"), []byte("addr9")) list := []sharding.Validator{ validator0, @@ -395,7 +395,7 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest6From10ValidatorsSho nodesMap := make(map[uint32][]sharding.Validator) nodesMap[0] = list - validatorMeta, _ := sharding.NewValidator(big.NewInt(1), 1, []byte("pubKeyMeta")) + validatorMeta, _ := sharding.NewValidator(big.NewInt(1), 1, []byte("pubKeyMeta"), []byte("addressMeta")) nodesMap[sharding.MetachainShardId] = []sharding.Validator{validatorMeta} ihgs, _ := sharding.NewIndexHashedNodesCoordinator( 6, @@ -425,7 +425,7 @@ func BenchmarkIndexHashedGroupSelector_ComputeValidatorsGroup21of400(b *testing. //generate 400 validators for i := 0; i < 400; i++ { - list = append(list, mock.NewValidatorMock(big.NewInt(0), 0, []byte("pk"+strconv.Itoa(i)))) + list = append(list, mock.NewValidatorMock(big.NewInt(0), 0, []byte("pk"+strconv.Itoa(i)), []byte("addr"+strconv.Itoa(i)))) } nodesMap := make(map[uint32][]sharding.Validator) @@ -449,3 +449,98 @@ func BenchmarkIndexHashedGroupSelector_ComputeValidatorsGroup21of400(b *testing. 
assert.Equal(b, consensusGroupSize, len(list2)) } } + +func TestIndexHashedGroupSelector_GetValidatorWithPublicKeyShouldReturnErrNilPubKey(t *testing.T) { + t.Parallel() + + list := []sharding.Validator{ + mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0"), []byte("addr0")), + } + + nodesMap := make(map[uint32][]sharding.Validator) + nodesMap[0] = list + ihgs, _ := sharding.NewIndexHashedNodesCoordinator( + 1, + 1, + mock.HasherMock{}, + 0, + 1, + nodesMap, + ) + + _, _, err := ihgs.GetValidatorWithPublicKey(nil) + + assert.Equal(t, sharding.ErrNilPubKey, err) +} + +func TestIndexHashedGroupSelector_GetValidatorWithPublicKeyShouldReturnErrValidatorNotFound(t *testing.T) { + t.Parallel() + + list := []sharding.Validator{ + mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0"), []byte("addr0")), + } + + nodesMap := make(map[uint32][]sharding.Validator) + nodesMap[0] = list + ihgs, _ := sharding.NewIndexHashedNodesCoordinator( + 1, + 1, + mock.HasherMock{}, + 0, + 1, + nodesMap, + ) + + _, _, err := ihgs.GetValidatorWithPublicKey([]byte("pk1")) + + assert.Equal(t, sharding.ErrValidatorNotFound, err) +} + +func TestIndexHashedGroupSelector_GetValidatorWithPublicKeyShouldWork(t *testing.T) { + t.Parallel() + + list_meta := []sharding.Validator{ + mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0_meta"), []byte("addr0_meta")), + mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk1_meta"), []byte("addr1_meta")), + mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk2_meta"), []byte("addr2_meta")), + } + list_shard0 := []sharding.Validator{ + mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0_shard0"), []byte("addr0_shard0")), + mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk1_shard0"), []byte("addr1_shard0")), + mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk2_shard0"), []byte("addr2_shard0")), + } + list_shard1 := []sharding.Validator{ + mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0_shard1"), []byte("addr0_shard1")), + mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk1_shard1"), []byte("addr1_shard1")), + mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk2_shard1"), []byte("addr2_shard1")), + } + + nodesMap := make(map[uint32][]sharding.Validator) + nodesMap[sharding.MetachainShardId] = list_meta + nodesMap[0] = list_shard0 + nodesMap[1] = list_shard1 + + ihgs, _ := sharding.NewIndexHashedNodesCoordinator( + 1, + 1, + mock.HasherMock{}, + 0, + 2, + nodesMap, + ) + + validator, shardId, err := ihgs.GetValidatorWithPublicKey([]byte("pk0_meta")) + assert.Nil(t, err) + assert.Equal(t, sharding.MetachainShardId, shardId) + assert.Equal(t, []byte("addr0_meta"), validator.Address()) + + validator, shardId, err = ihgs.GetValidatorWithPublicKey([]byte("pk1_shard0")) + assert.Nil(t, err) + assert.Equal(t, uint32(0), shardId) + assert.Equal(t, []byte("addr1_shard0"), validator.Address()) + + validator, shardId, err = ihgs.GetValidatorWithPublicKey([]byte("pk2_shard1")) + assert.Nil(t, err) + assert.Equal(t, uint32(1), shardId) + assert.Equal(t, []byte("addr2_shard1"), validator.Address()) +} diff --git a/sharding/interface.go b/sharding/interface.go index e42840e9166..8905a57ad2a 100644 --- a/sharding/interface.go +++ b/sharding/interface.go @@ -24,6 +24,7 @@ type Validator interface { Stake() *big.Int Rating() int32 PubKey() []byte + Address() []byte } // NodesCoordinator defines the behaviour of a struct able to do validator group selection @@ -31,6 +32,7 @@ type NodesCoordinator interface { PublicKeysSelector SetNodesPerShards(nodes map[uint32][]Validator) error 
ComputeValidatorsGroup(randomness []byte) (validatorsGroup []Validator, err error) + GetValidatorWithPublicKey(publicKey []byte) (validator Validator, shardId uint32, err error) } // PublicKeysSelector allows retrieval of eligible validators public keys diff --git a/sharding/mock/invalidNodesSetupMock.json b/sharding/mock/invalidNodesSetupMock.json index 1da83d4d669..67458949a71 100644 --- a/sharding/mock/invalidNodesSetupMock.json +++ b/sharding/mock/invalidNodesSetupMock.json @@ -8,10 +8,12 @@ "metaChainMinNodes" : 1, "initialNodes": [ { - "pubkey": "41378f754e2c7b2745208c3ed21b151d297acdc84c3aca00b9e292cf28ec2d444771070157ea7760ed83c26f4fed387d0077e00b563a95825dac2cbc349fc0025ccf774e37b0a98ad9724d30e90f8c29b4091ccb738ed9ffc0573df776ee9ea30b3c038b55e532760ea4a8f152f2a52848020e5cee1cc537f2c2323399723081" + "pubkey": "41378f754e2c7b2745208c3ed21b151d297acdc84c3aca00b9e292cf28ec2d444771070157ea7760ed83c26f4fed387d0077e00b563a95825dac2cbc349fc0025ccf774e37b0a98ad9724d30e90f8c29b4091ccb738ed9ffc0573df776ee9ea30b3c038b55e532760ea4a8f152f2a52848020e5cee1cc537f2c2323399723081", + "address": "9e95a4e46da335a96845b4316251fc1bb197e1b8136d96ecc62bf6604eca9e49" }, { - "pubkey": "52f3bf5c01771f601ec2137e267319ab6716ef6ff5dfddaea48b42d955f631167f2ce19296a202bb8fd174f4e94f8c85f619df85a7f9f8de0f3768e5e6d8c48187b767deccf9829be246aa331aa86d182eb8fa28ea8a3e45d357ed1647a9be020a5569d686253a6f89e9123c7f21f302e82f67d3e3cd69cf267b9910a663ef32" + "pubkey": "52f3bf5c01771f601ec2137e267319ab6716ef6ff5dfddaea48b42d955f631167f2ce19296a202bb8fd174f4e94f8c85f619df85a7f9f8de0f3768e5e6d8c48187b767deccf9829be246aa331aa86d182eb8fa28ea8a3e45d357ed1647a9be020a5569d686253a6f89e9123c7f21f302e82f67d3e3cd69cf267b9910a663ef32", + "address": "7a330039e77ca06bc127319fd707cc4911a80db489a39fcfb746283a05f61836" } ] } diff --git a/sharding/mock/nodesCoordinatorMock.go b/sharding/mock/nodesCoordinatorMock.go index b72e78be5a1..9ad5515d724 100644 --- a/sharding/mock/nodesCoordinatorMock.go +++ b/sharding/mock/nodesCoordinatorMock.go @@ -17,15 +17,15 @@ func (ncm NodesCoordinatorMock) ComputeValidatorsGroup(randomness []byte) (valid } list := []sharding.Validator{ - NewValidatorMock(big.NewInt(0), 0, []byte("A")), - NewValidatorMock(big.NewInt(0), 0, []byte("B")), - NewValidatorMock(big.NewInt(0), 0, []byte("C")), - NewValidatorMock(big.NewInt(0), 0, []byte("D")), - NewValidatorMock(big.NewInt(0), 0, []byte("E")), - NewValidatorMock(big.NewInt(0), 0, []byte("F")), - NewValidatorMock(big.NewInt(0), 0, []byte("G")), - NewValidatorMock(big.NewInt(0), 0, []byte("H")), - NewValidatorMock(big.NewInt(0), 0, []byte("I")), + NewValidatorMock(big.NewInt(0), 0, []byte("A"), []byte("AA")), + NewValidatorMock(big.NewInt(0), 0, []byte("B"), []byte("BB")), + NewValidatorMock(big.NewInt(0), 0, []byte("C"), []byte("CC")), + NewValidatorMock(big.NewInt(0), 0, []byte("D"), []byte("DD")), + NewValidatorMock(big.NewInt(0), 0, []byte("E"), []byte("EE")), + NewValidatorMock(big.NewInt(0), 0, []byte("F"), []byte("FF")), + NewValidatorMock(big.NewInt(0), 0, []byte("G"), []byte("GG")), + NewValidatorMock(big.NewInt(0), 0, []byte("H"), []byte("HH")), + NewValidatorMock(big.NewInt(0), 0, []byte("I"), []byte("II")), } return list, nil @@ -65,3 +65,7 @@ func (ncm NodesCoordinatorMock) SetConsensusGroupSize(int) error { func (ncm NodesCoordinatorMock) GetSelectedPublicKeys(selection []byte) (publicKeys []string, err error) { panic("implement me") } + +func (ncm NodesCoordinatorMock) GetValidatorWithPublicKey(publicKey []byte) (sharding.Validator, uint32, error) { + 
panic("implement me") +} diff --git a/sharding/mock/nodesSetupMock.json b/sharding/mock/nodesSetupMock.json index 78110c8a6b6..17cf384c5b4 100644 --- a/sharding/mock/nodesSetupMock.json +++ b/sharding/mock/nodesSetupMock.json @@ -8,19 +8,24 @@ "metaChainMinNodes" : 1, "initialNodes": [ { - "pubkey": "41378f754e2c7b2745208c3ed21b151d297acdc84c3aca00b9e292cf28ec2d444771070157ea7760ed83c26f4fed387d0077e00b563a95825dac2cbc349fc0025ccf774e37b0a98ad9724d30e90f8c29b4091ccb738ed9ffc0573df776ee9ea30b3c038b55e532760ea4a8f152f2a52848020e5cee1cc537f2c2323399723081" + "pubkey": "41378f754e2c7b2745208c3ed21b151d297acdc84c3aca00b9e292cf28ec2d444771070157ea7760ed83c26f4fed387d0077e00b563a95825dac2cbc349fc0025ccf774e37b0a98ad9724d30e90f8c29b4091ccb738ed9ffc0573df776ee9ea30b3c038b55e532760ea4a8f152f2a52848020e5cee1cc537f2c2323399723081", + "address": "9e95a4e46da335a96845b4316251fc1bb197e1b8136d96ecc62bf6604eca9e49" }, { - "pubkey": "52f3bf5c01771f601ec2137e267319ab6716ef6ff5dfddaea48b42d955f631167f2ce19296a202bb8fd174f4e94f8c85f619df85a7f9f8de0f3768e5e6d8c48187b767deccf9829be246aa331aa86d182eb8fa28ea8a3e45d357ed1647a9be020a5569d686253a6f89e9123c7f21f302e82f67d3e3cd69cf267b9910a663ef32" + "pubkey": "52f3bf5c01771f601ec2137e267319ab6716ef6ff5dfddaea48b42d955f631167f2ce19296a202bb8fd174f4e94f8c85f619df85a7f9f8de0f3768e5e6d8c48187b767deccf9829be246aa331aa86d182eb8fa28ea8a3e45d357ed1647a9be020a5569d686253a6f89e9123c7f21f302e82f67d3e3cd69cf267b9910a663ef32", + "address": "7a330039e77ca06bc127319fd707cc4911a80db489a39fcfb746283a05f61836" }, { - "pubkey": "5e91c426c5c8f5f805f86de1e0653e2ec33853772e583b88e9f0f201089d03d8570759c3c3ab610ce573493c33ba0adf954c8939dba5d5ef7f2be4e87145d8153fc5b4fb91cecb8d9b1f62e080743fbf69c8c3096bf07980bb82cb450ba9b902673373d5b671ea73620cc5bc4d36f7a0f5ca3684d4c8aa5c1b425ab2a8673140" + "pubkey": "5e91c426c5c8f5f805f86de1e0653e2ec33853772e583b88e9f0f201089d03d8570759c3c3ab610ce573493c33ba0adf954c8939dba5d5ef7f2be4e87145d8153fc5b4fb91cecb8d9b1f62e080743fbf69c8c3096bf07980bb82cb450ba9b902673373d5b671ea73620cc5bc4d36f7a0f5ca3684d4c8aa5c1b425ab2a8673140", + "address": "131e2e717f2d33bdf7850c12b03dfe41ea8a5e76fdd6d4f23aebe558603e746f" }, { - "pubkey": "73972bf46dca59fba211c58f11b530f8e9d6392c499655ce760abc6458fd9c6b54b9676ee4b95aa32f6c254c9aad2f63a6195cd65d837a4320d7b8e915ba3a7123c8f4983b201035573c0752bb54e9021eb383b40d302447b62ea7a3790c89c47f5ab81d183f414e87611a31ff635ad22e969495356d5bc44eec7917aaad4c5e" + "pubkey": "73972bf46dca59fba211c58f11b530f8e9d6392c499655ce760abc6458fd9c6b54b9676ee4b95aa32f6c254c9aad2f63a6195cd65d837a4320d7b8e915ba3a7123c8f4983b201035573c0752bb54e9021eb383b40d302447b62ea7a3790c89c47f5ab81d183f414e87611a31ff635ad22e969495356d5bc44eec7917aaad4c5e", + "address": "4c9e66b605882c1099088f26659692f084e41dc0dedfaedf6a6409af21c02aac" }, { - "pubkey": "7391ccce066ab5674304b10220643bc64829afa626a165f1e7a6618e260fa68f8e79018ac5964f7a1b8dd419645049042e34ebe7f2772def71e6176ce9daf50a57c17ee2a7445b908fe47e8f978380fcc2654a19925bf73db2402b09dde515148081f8ca7c331fbedec689de1b7bfce6bf106e4433557c29752c12d0a009f47a" + "pubkey": "7391ccce066ab5674304b10220643bc64829afa626a165f1e7a6618e260fa68f8e79018ac5964f7a1b8dd419645049042e34ebe7f2772def71e6176ce9daf50a57c17ee2a7445b908fe47e8f978380fcc2654a19925bf73db2402b09dde515148081f8ca7c331fbedec689de1b7bfce6bf106e4433557c29752c12d0a009f47a", + "address": "90a66900634b206d20627fbaec432ebfbabeaf30b9e338af63191435e2e37022" } ] } diff --git a/sharding/mock/validatorMock.go b/sharding/mock/validatorMock.go index cd22a51ce85..e4f9bf01af8 100644 --- 
a/sharding/mock/validatorMock.go
+++ b/sharding/mock/validatorMock.go
@@ -5,13 +5,14 @@ import (
 )
 
 type ValidatorMock struct {
-	stake  *big.Int
-	rating int32
-	pubKey []byte
+	stake   *big.Int
+	rating  int32
+	pubKey  []byte
+	address []byte
 }
 
-func NewValidatorMock(stake *big.Int, rating int32, pubKey []byte) *ValidatorMock {
-	return &ValidatorMock{stake: stake, rating: rating, pubKey: pubKey}
+func NewValidatorMock(stake *big.Int, rating int32, pubKey []byte, address []byte) *ValidatorMock {
+	return &ValidatorMock{stake: stake, rating: rating, pubKey: pubKey, address: address}
 }
 
 func (vm *ValidatorMock) Stake() *big.Int {
@@ -25,3 +26,7 @@ func (vm *ValidatorMock) Rating() int32 {
 func (vm *ValidatorMock) PubKey() []byte {
 	return vm.pubKey
 }
+
+func (vm *ValidatorMock) Address() []byte {
+	return vm.address
+}
diff --git a/sharding/nodesSetup.go b/sharding/nodesSetup.go
index 3e28c91137c..a7b9454f18d 100644
--- a/sharding/nodesSetup.go
+++ b/sharding/nodesSetup.go
@@ -9,9 +9,31 @@ import (
 
 // InitialNode holds data from json
 type InitialNode struct {
-	PubKey        string `json:"pubkey"`
+	PubKey  string `json:"pubkey"`
+	Address string `json:"address"`
+	NodeInfo
+}
+
+// NodeInfo holds node info
+type NodeInfo struct {
 	assignedShard uint32
 	pubKey        []byte
+	address       []byte
+}
+
+// AssignedShard gets the node assigned shard
+func (ni *NodeInfo) AssignedShard() uint32 {
+	return ni.assignedShard
+}
+
+// Address gets the node address
+func (ni *NodeInfo) Address() []byte {
+	return ni.address
+}
+
+// PubKey gets the node public key
+func (ni *NodeInfo) PubKey() []byte {
+	return ni.pubKey
 }
 
 // NodesSetup hold data for decoded data from json file
@@ -30,7 +52,7 @@ type NodesSetup struct {
 	nrOfShards         uint32
 	nrOfNodes          uint32
 	nrOfMetaChainNodes uint32
-	allNodesPubKeys    map[uint32][]string
+	allNodesInfo       map[uint32][]*NodeInfo
 }
 
 // NewNodesSetup creates a new decoded nodes structure from json config file
@@ -56,7 +78,7 @@ func NewNodesSetup(nodesFilePath string, numOfNodes uint64) (*NodesSetup, error)
 	}
 
 	nodes.processShardAssignment()
-	nodes.createInitialNodesPubKeys()
+	nodes.createInitialNodesInfo()
 
 	return nodes, nil
 }
@@ -68,6 +90,7 @@ func (ns *NodesSetup) processConfig() error {
 	ns.nrOfMetaChainNodes = 0
 
 	for i := 0; i < len(ns.InitialNodes); i++ {
 		ns.InitialNodes[i].pubKey, err = hex.DecodeString(ns.InitialNodes[i].PubKey)
+		ns.InitialNodes[i].address, err = hex.DecodeString(ns.InitialNodes[i].Address)
 		// decoder treats empty string as correct, it is not allowed to have empty string as public key
 		if ns.InitialNodes[i].PubKey == "" || err != nil {
@@ -75,6 +98,12 @@
 			return ErrCouldNotParsePubKey
 		}
 
+		// decoder treats empty string as correct, it is not allowed to have empty string as address
+		if ns.InitialNodes[i].Address == "" || err != nil {
+			ns.InitialNodes[i].address = nil
+			return ErrCouldNotParseAddress
+		}
+
 		ns.nrOfNodes++
 	}
 
@@ -139,35 +168,69 @@ func (ns *NodesSetup) processShardAssignment() {
 	}
 }
 
-func (ns *NodesSetup) createInitialNodesPubKeys() {
+func (ns *NodesSetup) createInitialNodesInfo() {
 	nrOfShardAndMeta := ns.nrOfShards
 	if ns.MetaChainActive {
 		nrOfShardAndMeta += 1
 	}
 
-	ns.allNodesPubKeys = make(map[uint32][]string, nrOfShardAndMeta)
+	ns.allNodesInfo = make(map[uint32][]*NodeInfo, nrOfShardAndMeta)
 	for _, in := range ns.InitialNodes {
-		if in.pubKey != nil {
-			ns.allNodesPubKeys[in.assignedShard] = append(ns.allNodesPubKeys[in.assignedShard], string(in.pubKey))
+		if in.pubKey != nil && in.address != nil {
+
ns.allNodesInfo[in.assignedShard] = append(ns.allNodesInfo[in.assignedShard], + &NodeInfo{in.assignedShard, in.pubKey, in.address}) } } } -// InitialNodesPubKeys - gets initial public keys +// InitialNodesPubKeys - gets initial nodes public keys func (ns *NodesSetup) InitialNodesPubKeys() map[uint32][]string { - return ns.allNodesPubKeys + allNodesPubKeys := make(map[uint32][]string, 0) + for shardId, nodesInfo := range ns.allNodesInfo { + pubKeys := make([]string, len(nodesInfo)) + for i := 0; i < len(nodesInfo); i++ { + pubKeys[i] = string(nodesInfo[i].pubKey) + } + + allNodesPubKeys[shardId] = pubKeys + } + + return allNodesPubKeys +} + +// InitialNodesInfo - gets initial nodes info +func (ns *NodesSetup) InitialNodesInfo() map[uint32][]*NodeInfo { + return ns.allNodesInfo } -// InitialNodesPubKeysForShard - gets initial public keys +// InitialNodesPubKeysForShard - gets initial nodes public keys for shard func (ns *NodesSetup) InitialNodesPubKeysForShard(shardId uint32) ([]string, error) { - if ns.allNodesPubKeys[shardId] == nil { + if ns.allNodesInfo[shardId] == nil { + return nil, ErrShardIdOutOfRange + } + if len(ns.allNodesInfo[shardId]) == 0 { + return nil, ErrNoPubKeys + } + + nodesInfo := ns.allNodesInfo[shardId] + pubKeys := make([]string, len(nodesInfo)) + for i := 0; i < len(nodesInfo); i++ { + pubKeys[i] = string(nodesInfo[i].pubKey) + } + + return pubKeys, nil +} + +// InitialNodesInfoForShard - gets initial nodes info for shard +func (ns *NodesSetup) InitialNodesInfoForShard(shardId uint32) ([]*NodeInfo, error) { + if ns.allNodesInfo[shardId] == nil { return nil, ErrShardIdOutOfRange } - if len(ns.allNodesPubKeys[shardId]) == 0 { + if len(ns.allNodesInfo[shardId]) == 0 { return nil, ErrNoPubKeys } - return ns.allNodesPubKeys[shardId], nil + return ns.allNodesInfo[shardId], nil } // NumberOfShards returns the calculated number of shards diff --git a/sharding/nodesSetup_test.go b/sharding/nodesSetup_test.go index 4c327c4194b..fa4ed91cca6 100644 --- a/sharding/nodesSetup_test.go +++ b/sharding/nodesSetup_test.go @@ -8,13 +8,35 @@ import ( "github.com/stretchr/testify/assert" ) +var ( + PubKeys = []string{ + "41378f754e2c7b2745208c3ed21b151d297acdc84c3aca00b9e292cf28ec2d444771070157ea7760ed83c26f4fed387d0077e00b563a95825dac2cbc349fc0025ccf774e37b0a98ad9724d30e90f8c29b4091ccb738ed9ffc0573df776ee9ea30b3c038b55e532760ea4a8f152f2a52848020e5cee1cc537f2c2323399723081", + "52f3bf5c01771f601ec2137e267319ab6716ef6ff5dfddaea48b42d955f631167f2ce19296a202bb8fd174f4e94f8c85f619df85a7f9f8de0f3768e5e6d8c48187b767deccf9829be246aa331aa86d182eb8fa28ea8a3e45d357ed1647a9be020a5569d686253a6f89e9123c7f21f302e82f67d3e3cd69cf267b9910a663ef32", + "5e91c426c5c8f5f805f86de1e0653e2ec33853772e583b88e9f0f201089d03d8570759c3c3ab610ce573493c33ba0adf954c8939dba5d5ef7f2be4e87145d8153fc5b4fb91cecb8d9b1f62e080743fbf69c8c3096bf07980bb82cb450ba9b902673373d5b671ea73620cc5bc4d36f7a0f5ca3684d4c8aa5c1b425ab2a8673140", + "73972bf46dca59fba211c58f11b530f8e9d6392c499655ce760abc6458fd9c6b54b9676ee4b95aa32f6c254c9aad2f63a6195cd65d837a4320d7b8e915ba3a7123c8f4983b201035573c0752bb54e9021eb383b40d302447b62ea7a3790c89c47f5ab81d183f414e87611a31ff635ad22e969495356d5bc44eec7917aaad4c5e", + "7391ccce066ab5674304b10220643bc64829afa626a165f1e7a6618e260fa68f8e79018ac5964f7a1b8dd419645049042e34ebe7f2772def71e6176ce9daf50a57c17ee2a7445b908fe47e8f978380fcc2654a19925bf73db2402b09dde515148081f8ca7c331fbedec689de1b7bfce6bf106e4433557c29752c12d0a009f47a", + 
"24dea9b5c79174c558c38316b2df25b956c53f0d0128b7427d219834867cc1b0868b7faff0205fe23e5ffdf276acfad6423890c782c7be7b98a31d501e4276a015a54d9849109322130fc9a9cb61d183318d50fcde44fabcbf600051c7cb950304b05e82f90f2ac4647016f39439608cd64ccc82fe6e996289bb2150e4e3ab08", + } + + Address = []string{ + "9e95a4e46da335a96845b4316251fc1bb197e1b8136d96ecc62bf6604eca9e49", + "7a330039e77ca06bc127319fd707cc4911a80db489a39fcfb746283a05f61836", + "131e2e717f2d33bdf7850c12b03dfe41ea8a5e76fdd6d4f23aebe558603e746f", + "4c9e66b605882c1099088f26659692f084e41dc0dedfaedf6a6409af21c02aac", + "90a66900634b206d20627fbaec432ebfbabeaf30b9e338af63191435e2e37022", + "63f702e061385324a25dc4f1bcfc7e4f4692bcd80de71bd4dd7d6e2f67f92481", + } +) + func createNodesSetupOneShardOneNode() *sharding.NodesSetup { + noOfInitialNodes := 1 ns := &sharding.NodesSetup{} ns.ConsensusGroupSize = 1 ns.MinNodesPerShard = 1 - ns.InitialNodes = make([]*sharding.InitialNode, 1) + ns.InitialNodes = make([]*sharding.InitialNode, noOfInitialNodes) ns.InitialNodes[0] = &sharding.InitialNode{} - ns.InitialNodes[0].PubKey = "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7419" + ns.InitialNodes[0].PubKey = PubKeys[0] + ns.InitialNodes[0].Address = Address[0] err := ns.ProcessConfig() if err != nil { @@ -26,25 +48,23 @@ func createNodesSetupOneShardOneNode() *sharding.NodesSetup { } ns.ProcessShardAssignment() - ns.CreateInitialNodesPubKeys() + ns.CreateInitialNodesInfo() return ns } func createNodesSetupTwoShardTwoNodes() *sharding.NodesSetup { + noOfInitialNodes := 4 ns := &sharding.NodesSetup{} ns.ConsensusGroupSize = 1 ns.MinNodesPerShard = 2 - ns.InitialNodes = make([]*sharding.InitialNode, 4) - ns.InitialNodes[0] = &sharding.InitialNode{} - ns.InitialNodes[1] = &sharding.InitialNode{} - ns.InitialNodes[2] = &sharding.InitialNode{} - ns.InitialNodes[3] = &sharding.InitialNode{} + ns.InitialNodes = make([]*sharding.InitialNode, noOfInitialNodes) - ns.InitialNodes[0].PubKey = "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7419" - ns.InitialNodes[1].PubKey = "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7418" - ns.InitialNodes[2].PubKey = "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7417" - ns.InitialNodes[3].PubKey = "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7416" + for i := 0; i < noOfInitialNodes; i++ { + ns.InitialNodes[i] = &sharding.InitialNode{} + ns.InitialNodes[i].PubKey = PubKeys[i] + ns.InitialNodes[i].Address = Address[i] + } err := ns.ProcessConfig() if err != nil { @@ -56,27 +76,23 @@ func createNodesSetupTwoShardTwoNodes() *sharding.NodesSetup { } ns.ProcessShardAssignment() - ns.CreateInitialNodesPubKeys() + ns.CreateInitialNodesInfo() return ns } func createNodesSetupTwoShard5Nodes() *sharding.NodesSetup { + noOfInitialNodes := 5 ns := &sharding.NodesSetup{} ns.ConsensusGroupSize = 1 ns.MinNodesPerShard = 2 - ns.InitialNodes = make([]*sharding.InitialNode, 5) - ns.InitialNodes[0] = &sharding.InitialNode{} - ns.InitialNodes[1] = &sharding.InitialNode{} - ns.InitialNodes[2] = &sharding.InitialNode{} - ns.InitialNodes[3] = &sharding.InitialNode{} - ns.InitialNodes[4] = &sharding.InitialNode{} + ns.InitialNodes = make([]*sharding.InitialNode, noOfInitialNodes) - ns.InitialNodes[0].PubKey = "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7419" - ns.InitialNodes[1].PubKey = "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7418" - ns.InitialNodes[2].PubKey = "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7417" - 
ns.InitialNodes[3].PubKey = "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7416" - ns.InitialNodes[4].PubKey = "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7411" + for i := 0; i < noOfInitialNodes; i++ { + ns.InitialNodes[i] = &sharding.InitialNode{} + ns.InitialNodes[i].PubKey = PubKeys[i] + ns.InitialNodes[i].Address = Address[i] + } err := ns.ProcessConfig() if err != nil { @@ -88,32 +104,26 @@ func createNodesSetupTwoShard5Nodes() *sharding.NodesSetup { } ns.ProcessShardAssignment() - ns.CreateInitialNodesPubKeys() + ns.CreateInitialNodesInfo() return ns } func createNodesSetupTwoShard6NodesMeta() *sharding.NodesSetup { + noOfInitialNodes := 6 ns := &sharding.NodesSetup{} ns.ConsensusGroupSize = 1 ns.MinNodesPerShard = 2 ns.MetaChainActive = true ns.MetaChainMinNodes = 2 ns.MetaChainConsensusGroupSize = 2 - ns.InitialNodes = make([]*sharding.InitialNode, 6) - ns.InitialNodes[0] = &sharding.InitialNode{} - ns.InitialNodes[1] = &sharding.InitialNode{} - ns.InitialNodes[2] = &sharding.InitialNode{} - ns.InitialNodes[3] = &sharding.InitialNode{} - ns.InitialNodes[4] = &sharding.InitialNode{} - ns.InitialNodes[5] = &sharding.InitialNode{} - - ns.InitialNodes[0].PubKey = "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7419" - ns.InitialNodes[1].PubKey = "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7418" - ns.InitialNodes[2].PubKey = "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7417" - ns.InitialNodes[3].PubKey = "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7416" - ns.InitialNodes[4].PubKey = "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7411" - ns.InitialNodes[5].PubKey = "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7410" + ns.InitialNodes = make([]*sharding.InitialNode, noOfInitialNodes) + + for i := 0; i < noOfInitialNodes; i++ { + ns.InitialNodes[i] = &sharding.InitialNode{} + ns.InitialNodes[i].PubKey = PubKeys[i] + ns.InitialNodes[i].Address = Address[i] + } err := ns.ProcessConfig() if err != nil { @@ -125,7 +135,7 @@ func createNodesSetupTwoShard6NodesMeta() *sharding.NodesSetup { } ns.ProcessShardAssignment() - ns.CreateInitialNodesPubKeys() + ns.CreateInitialNodesInfo() return ns } @@ -162,20 +172,23 @@ func TestNodesSetup_NewNodesShouldTrimInitialNodesList(t *testing.T) { func TestNodesSetup_InitialNodesPubKeysFromNil(t *testing.T) { ns := sharding.NodesSetup{} - inPubKeys := ns.InitialNodesPubKeys() + inPubKeys := ns.InitialNodesInfo() assert.NotNil(t, ns) assert.Nil(t, inPubKeys) } func TestNodesSetup_ProcessConfigNodesWithIncompleteDataShouldErr(t *testing.T) { + noOfInitialNodes := 2 ns := sharding.NodesSetup{} - ns.InitialNodes = make([]*sharding.InitialNode, 2) + ns.InitialNodes = make([]*sharding.InitialNode, noOfInitialNodes) + ns.InitialNodes[0] = &sharding.InitialNode{} ns.InitialNodes[1] = &sharding.InitialNode{} - ns.InitialNodes[0].PubKey = "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7419" + ns.InitialNodes[0].PubKey = PubKeys[0] + ns.InitialNodes[0].Address = Address[0] err := ns.ProcessConfig() @@ -184,17 +197,19 @@ func TestNodesSetup_ProcessConfigNodesWithIncompleteDataShouldErr(t *testing.T) } func TestNodesSetup_ProcessConfigInvalidConsensusGroupSizeShouldErr(t *testing.T) { + noOfInitialNodes := 2 ns := sharding.NodesSetup{ ConsensusGroupSize: 0, MinNodesPerShard: 0, } - ns.InitialNodes = make([]*sharding.InitialNode, 2) - ns.InitialNodes[0] = &sharding.InitialNode{} - ns.InitialNodes[1] = &sharding.InitialNode{} + 
ns.InitialNodes = make([]*sharding.InitialNode, noOfInitialNodes) - ns.InitialNodes[0].PubKey = "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7419" - ns.InitialNodes[1].PubKey = "3336b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7418" + for i := 0; i < noOfInitialNodes; i++ { + ns.InitialNodes[i] = &sharding.InitialNode{} + ns.InitialNodes[i].PubKey = PubKeys[i] + ns.InitialNodes[i].Address = Address[i] + } err := ns.ProcessConfig() @@ -203,6 +218,7 @@ func TestNodesSetup_ProcessConfigInvalidConsensusGroupSizeShouldErr(t *testing.T } func TestNodesSetup_ProcessConfigInvalidMetaConsensusGroupSizeShouldErr(t *testing.T) { + noOfInitialNodes := 2 ns := sharding.NodesSetup{ ConsensusGroupSize: 1, MinNodesPerShard: 1, @@ -211,12 +227,13 @@ func TestNodesSetup_ProcessConfigInvalidMetaConsensusGroupSizeShouldErr(t *testi MetaChainActive: true, } - ns.InitialNodes = make([]*sharding.InitialNode, 2) - ns.InitialNodes[0] = &sharding.InitialNode{} - ns.InitialNodes[1] = &sharding.InitialNode{} + ns.InitialNodes = make([]*sharding.InitialNode, noOfInitialNodes) - ns.InitialNodes[0].PubKey = "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7419" - ns.InitialNodes[1].PubKey = "3336b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7418" + for i := 0; i < noOfInitialNodes; i++ { + ns.InitialNodes[i] = &sharding.InitialNode{} + ns.InitialNodes[i].PubKey = PubKeys[i] + ns.InitialNodes[i].Address = Address[i] + } err := ns.ProcessConfig() @@ -225,17 +242,19 @@ func TestNodesSetup_ProcessConfigInvalidMetaConsensusGroupSizeShouldErr(t *testi } func TestNodesSetup_ProcessConfigInvalidConsensusGroupSizeLargerThanNumOfNodesShouldErr(t *testing.T) { + noOfInitialNodes := 2 ns := sharding.NodesSetup{ ConsensusGroupSize: 2, MinNodesPerShard: 0, } - ns.InitialNodes = make([]*sharding.InitialNode, 2) - ns.InitialNodes[0] = &sharding.InitialNode{} - ns.InitialNodes[1] = &sharding.InitialNode{} + ns.InitialNodes = make([]*sharding.InitialNode, noOfInitialNodes) - ns.InitialNodes[0].PubKey = "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7419" - ns.InitialNodes[1].PubKey = "3336b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7418" + for i := 0; i < noOfInitialNodes; i++ { + ns.InitialNodes[i] = &sharding.InitialNode{} + ns.InitialNodes[i].PubKey = PubKeys[i] + ns.InitialNodes[i].Address = Address[i] + } err := ns.ProcessConfig() @@ -244,6 +263,7 @@ func TestNodesSetup_ProcessConfigInvalidConsensusGroupSizeLargerThanNumOfNodesSh } func TestNodesSetup_ProcessConfigInvalidMetaConsensusGroupSizeLargerThanNumOfNodesShouldErr(t *testing.T) { + noOfInitialNodes := 2 ns := sharding.NodesSetup{ ConsensusGroupSize: 1, MinNodesPerShard: 1, @@ -253,11 +273,12 @@ func TestNodesSetup_ProcessConfigInvalidMetaConsensusGroupSizeLargerThanNumOfNod } ns.InitialNodes = make([]*sharding.InitialNode, 2) - ns.InitialNodes[0] = &sharding.InitialNode{} - ns.InitialNodes[1] = &sharding.InitialNode{} - ns.InitialNodes[0].PubKey = "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7419" - ns.InitialNodes[1].PubKey = "3336b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7418" + for i := 0; i < noOfInitialNodes; i++ { + ns.InitialNodes[i] = &sharding.InitialNode{} + ns.InitialNodes[i].PubKey = PubKeys[i] + ns.InitialNodes[i].Address = Address[i] + } err := ns.ProcessConfig() @@ -266,17 +287,19 @@ func TestNodesSetup_ProcessConfigInvalidMetaConsensusGroupSizeLargerThanNumOfNod } func TestNodesSetup_ProcessConfigInvalidMinNodesPerShardShouldErr(t *testing.T) { + 
noOfInitialNodes := 2 ns := sharding.NodesSetup{ ConsensusGroupSize: 2, MinNodesPerShard: 0, } - ns.InitialNodes = make([]*sharding.InitialNode, 2) - ns.InitialNodes[0] = &sharding.InitialNode{} - ns.InitialNodes[1] = &sharding.InitialNode{} + ns.InitialNodes = make([]*sharding.InitialNode, noOfInitialNodes) - ns.InitialNodes[0].PubKey = "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7419" - ns.InitialNodes[1].PubKey = "3336b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7418" + for i := 0; i < noOfInitialNodes; i++ { + ns.InitialNodes[i] = &sharding.InitialNode{} + ns.InitialNodes[i].PubKey = PubKeys[i] + ns.InitialNodes[i].Address = Address[i] + } err := ns.ProcessConfig() @@ -285,6 +308,7 @@ func TestNodesSetup_ProcessConfigInvalidMinNodesPerShardShouldErr(t *testing.T) } func TestNodesSetup_ProcessConfigInvalidMetaMinNodesPerShardShouldErr(t *testing.T) { + noOfInitialNodes := 1 ns := sharding.NodesSetup{ ConsensusGroupSize: 1, MinNodesPerShard: 1, @@ -293,12 +317,13 @@ func TestNodesSetup_ProcessConfigInvalidMetaMinNodesPerShardShouldErr(t *testing MetaChainActive: true, } - ns.InitialNodes = make([]*sharding.InitialNode, 2) - ns.InitialNodes[0] = &sharding.InitialNode{} - ns.InitialNodes[1] = &sharding.InitialNode{} + ns.InitialNodes = make([]*sharding.InitialNode, noOfInitialNodes) - ns.InitialNodes[0].PubKey = "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7419" - ns.InitialNodes[1].PubKey = "3336b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7418" + for i := 0; i < noOfInitialNodes; i++ { + ns.InitialNodes[i] = &sharding.InitialNode{} + ns.InitialNodes[i].PubKey = PubKeys[i] + ns.InitialNodes[i].Address = Address[i] + } err := ns.ProcessConfig() @@ -307,17 +332,19 @@ func TestNodesSetup_ProcessConfigInvalidMetaMinNodesPerShardShouldErr(t *testing } func TestNodesSetup_ProcessConfigInvalidNumOfNodesSmallerThanMinNodesPerShardShouldErr(t *testing.T) { + noOfInitialNodes := 2 ns := sharding.NodesSetup{ ConsensusGroupSize: 2, MinNodesPerShard: 3, } - ns.InitialNodes = make([]*sharding.InitialNode, 2) - ns.InitialNodes[0] = &sharding.InitialNode{} - ns.InitialNodes[1] = &sharding.InitialNode{} + ns.InitialNodes = make([]*sharding.InitialNode, noOfInitialNodes) - ns.InitialNodes[0].PubKey = "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7419" - ns.InitialNodes[1].PubKey = "3336b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7418" + for i := 0; i < noOfInitialNodes; i++ { + ns.InitialNodes[i] = &sharding.InitialNode{} + ns.InitialNodes[i].PubKey = PubKeys[i] + ns.InitialNodes[i].Address = Address[i] + } err := ns.ProcessConfig() @@ -326,6 +353,7 @@ func TestNodesSetup_ProcessConfigInvalidNumOfNodesSmallerThanMinNodesPerShardSho } func TestNodesSetup_ProcessConfigInvalidMetaNumOfNodesSmallerThanMinNodesPerShardShouldErr(t *testing.T) { + noOfInitialNodes := 3 ns := sharding.NodesSetup{ ConsensusGroupSize: 1, MinNodesPerShard: 1, @@ -334,14 +362,13 @@ func TestNodesSetup_ProcessConfigInvalidMetaNumOfNodesSmallerThanMinNodesPerShar MetaChainMinNodes: 3, } - ns.InitialNodes = make([]*sharding.InitialNode, 3) - ns.InitialNodes[0] = &sharding.InitialNode{} - ns.InitialNodes[1] = &sharding.InitialNode{} - ns.InitialNodes[2] = &sharding.InitialNode{} + ns.InitialNodes = make([]*sharding.InitialNode, noOfInitialNodes) - ns.InitialNodes[0].PubKey = "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7419" - ns.InitialNodes[1].PubKey = "3336b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7418" - 
ns.InitialNodes[2].PubKey = "3336b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7417" + for i := 0; i < noOfInitialNodes; i++ { + ns.InitialNodes[i] = &sharding.InitialNode{} + ns.InitialNodes[i].PubKey = PubKeys[i] + ns.InitialNodes[i].Address = Address[i] + } err := ns.ProcessConfig() @@ -351,7 +378,7 @@ func TestNodesSetup_ProcessConfigInvalidMetaNumOfNodesSmallerThanMinNodesPerShar func TestNodesSetup_InitialNodesPubKeysForShardNil(t *testing.T) { ns := sharding.NodesSetup{} - inPK, err := ns.InitialNodesPubKeysForShard(0) + inPK, err := ns.InitialNodesInfoForShard(0) assert.NotNil(t, ns) assert.Nil(t, inPK) @@ -360,7 +387,7 @@ func TestNodesSetup_InitialNodesPubKeysForShardNil(t *testing.T) { func TestNodesSetup_InitialNodesPubKeysForShardWrongShard(t *testing.T) { ns := createNodesSetupOneShardOneNode() - inPK, err := ns.InitialNodesPubKeysForShard(1) + inPK, err := ns.InitialNodesInfoForShard(1) assert.NotNil(t, ns) assert.Nil(t, inPK) @@ -369,7 +396,7 @@ func TestNodesSetup_InitialNodesPubKeysForShardWrongShard(t *testing.T) { func TestNodesSetup_InitialNodesPubKeysForShardGood(t *testing.T) { ns := createNodesSetupTwoShardTwoNodes() - inPK, err := ns.InitialNodesPubKeysForShard(1) + inPK, err := ns.InitialNodesInfoForShard(1) assert.NotNil(t, ns) assert.Equal(t, len(inPK), 2) @@ -379,7 +406,7 @@ func TestNodesSetup_InitialNodesPubKeysForShardGood(t *testing.T) { func TestNodesSetup_InitialNodesPubKeysForShardWrongMeta(t *testing.T) { ns := createNodesSetupTwoShardTwoNodes() metaId := sharding.MetachainShardId - inPK, err := ns.InitialNodesPubKeysForShard(metaId) + inPK, err := ns.InitialNodesInfoForShard(metaId) assert.NotNil(t, ns) assert.Nil(t, inPK) @@ -389,7 +416,7 @@ func TestNodesSetup_InitialNodesPubKeysForShardWrongMeta(t *testing.T) { func TestNodesSetup_InitialNodesPubKeysForShardGoodMeta(t *testing.T) { ns := createNodesSetupTwoShard6NodesMeta() metaId := sharding.MetachainShardId - inPK, err := ns.InitialNodesPubKeysForShard(metaId) + inPK, err := ns.InitialNodesInfoForShard(metaId) assert.NotNil(t, ns) assert.Equal(t, len(inPK), 2) @@ -399,7 +426,7 @@ func TestNodesSetup_InitialNodesPubKeysForShardGoodMeta(t *testing.T) { func TestNodesSetup_PublicKeyNotGood(t *testing.T) { ns := createNodesSetupTwoShard6NodesMeta() - _, err := ns.GetShardIDForPubKey([]byte("5126b6505a73e59a994caa8f956f8c335d4399229de42102bb4814ca261c7419")) + _, err := ns.GetShardIDForPubKey([]byte(PubKeys[0])) assert.NotNil(t, ns) assert.NotNil(t, err) @@ -407,7 +434,7 @@ func TestNodesSetup_PublicKeyNotGood(t *testing.T) { func TestNodesSetup_PublicKeyGood(t *testing.T) { ns := createNodesSetupTwoShard5Nodes() - publicKey, err := hex.DecodeString("5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7417") + publicKey, err := hex.DecodeString(PubKeys[2]) selfId, err := ns.GetShardIDForPubKey(publicKey) @@ -418,7 +445,7 @@ func TestNodesSetup_PublicKeyGood(t *testing.T) { func TestNodesSetup_ShardPublicKeyGoodMeta(t *testing.T) { ns := createNodesSetupTwoShard6NodesMeta() - publicKey, err := hex.DecodeString("5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7417") + publicKey, err := hex.DecodeString(PubKeys[2]) selfId, err := ns.GetShardIDForPubKey(publicKey) @@ -430,7 +457,7 @@ func TestNodesSetup_ShardPublicKeyGoodMeta(t *testing.T) { func TestNodesSetup_MetaPublicKeyGoodMeta(t *testing.T) { ns := createNodesSetupTwoShard6NodesMeta() metaId := sharding.MetachainShardId - publicKey, err := 
hex.DecodeString("5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7418") + publicKey, err := hex.DecodeString(PubKeys[0]) selfId, err := ns.GetShardIDForPubKey(publicKey) diff --git a/sharding/validator.go b/sharding/validator.go index e6d4a65913f..8190a96d6fb 100644 --- a/sharding/validator.go +++ b/sharding/validator.go @@ -5,13 +5,14 @@ import ( ) type validator struct { - stake *big.Int - rating int32 - pubKey []byte + stake *big.Int + rating int32 + pubKey []byte + address []byte } // NewValidator creates a new instance of a validator -func NewValidator(stake *big.Int, rating int32, pubKey []byte) (*validator, error) { +func NewValidator(stake *big.Int, rating int32, pubKey []byte, address []byte) (*validator, error) { if stake == nil { return nil, ErrNilStake } @@ -24,10 +25,15 @@ func NewValidator(stake *big.Int, rating int32, pubKey []byte) (*validator, erro return nil, ErrNilPubKey } + if address == nil { + return nil, ErrNilAddress + } + return &validator{ - stake: stake, - rating: rating, - pubKey: pubKey, + stake: stake, + rating: rating, + pubKey: pubKey, + address: address, }, nil } @@ -45,3 +51,8 @@ func (v *validator) Rating() int32 { func (v *validator) PubKey() []byte { return v.pubKey } + +// Address returns the validator's address +func (v *validator) Address() []byte { + return v.address +} diff --git a/sharding/validator_test.go b/sharding/validator_test.go index a0db5f5e122..c0f3953e005 100644 --- a/sharding/validator_test.go +++ b/sharding/validator_test.go @@ -11,7 +11,7 @@ import ( func TestValidator_NewValidatorShouldFailOnNilStake(t *testing.T) { t.Parallel() - validator, err := sharding.NewValidator(nil, 0, []byte("pk1")) + validator, err := sharding.NewValidator(nil, 0, []byte("pk1"), []byte("addr1")) assert.Nil(t, validator) assert.Equal(t, sharding.ErrNilStake, err) @@ -20,7 +20,7 @@ func TestValidator_NewValidatorShouldFailOnNilStake(t *testing.T) { func TestValidator_NewValidatorShouldFailOnNegativeStake(t *testing.T) { t.Parallel() - validator, err := sharding.NewValidator(big.NewInt(-1), 0, []byte("pk1")) + validator, err := sharding.NewValidator(big.NewInt(-1), 0, []byte("pk1"), []byte("addr1")) assert.Nil(t, validator) assert.Equal(t, sharding.ErrNegativeStake, err) @@ -29,16 +29,25 @@ func TestValidator_NewValidatorShouldFailOnNegativeStake(t *testing.T) { func TestValidator_NewValidatorShouldFailOnNilPublickKey(t *testing.T) { t.Parallel() - validator, err := sharding.NewValidator(big.NewInt(0), 0, nil) + validator, err := sharding.NewValidator(big.NewInt(0), 0, nil, []byte("addr1")) assert.Nil(t, validator) assert.Equal(t, sharding.ErrNilPubKey, err) } +func TestValidator_NewValidatorShouldFailOnNilAddress(t *testing.T) { + t.Parallel() + + validator, err := sharding.NewValidator(big.NewInt(0), 0, []byte("pk1"), nil) + + assert.Nil(t, validator) + assert.Equal(t, sharding.ErrNilAddress, err) +} + func TestValidator_NewValidatorShouldWork(t *testing.T) { t.Parallel() - validator, err := sharding.NewValidator(big.NewInt(0), 0, []byte("pk1")) + validator, err := sharding.NewValidator(big.NewInt(0), 0, []byte("pk1"), []byte("addr1")) assert.NotNil(t, validator) assert.Nil(t, err) @@ -47,7 +56,7 @@ func TestValidator_NewValidatorShouldWork(t *testing.T) { func TestValidator_StakeShouldWork(t *testing.T) { t.Parallel() - validator, _ := sharding.NewValidator(big.NewInt(1), 0, []byte("pk1")) + validator, _ := sharding.NewValidator(big.NewInt(1), 0, []byte("pk1"), []byte("addr1")) assert.Equal(t, big.NewInt(1), validator.Stake()) } @@ -55,7 
+64,15 @@ func TestValidator_StakeShouldWork(t *testing.T) { func TestValidator_PubKeyShouldWork(t *testing.T) { t.Parallel() - validator, _ := sharding.NewValidator(big.NewInt(0), 0, []byte("pk1")) + validator, _ := sharding.NewValidator(big.NewInt(0), 0, []byte("pk1"), []byte("addr1")) assert.Equal(t, []byte("pk1"), validator.PubKey()) } + +func TestValidator_AddressShouldWork(t *testing.T) { + t.Parallel() + + validator, _ := sharding.NewValidator(big.NewInt(0), 0, []byte("pk1"), []byte("addr1")) + + assert.Equal(t, []byte("addr1"), validator.Address()) +} From 56132648c1e4fb511d1173ee708799be0a5b6766 Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Fri, 16 Aug 2019 10:17:32 +0300 Subject: [PATCH 058/234] cmd, consensus, process, sharding, node, integrationTests: fix block signature verifications for other chains several fixes for fee transactions --- cmd/node/factory/structs.go | 12 +++- cmd/node/main.go | 5 +- consensus/mock/nodesCoordinatorMock.go | 18 ++--- .../spos/commonSubround/subroundStartRound.go | 8 ++- .../commonSubround/subroundStartRound_test.go | 14 +++- consensus/spos/consensusState.go | 10 ++- consensus/spos/consensusState_test.go | 14 ++-- integrationTests/mock/nodesCoordinatorMock.go | 22 +++--- .../block/interceptedRequestHdr_test.go | 19 ++--- node/mock/nodesCoordinatorMock.go | 22 +++--- node/nodeTesting.go | 12 ++-- process/block/interceptedBlockHeader.go | 4 +- process/block/interceptedMetaBlockHeader.go | 2 +- process/errors.go | 6 ++ process/mock/nodesCoordinatorMock.go | 32 +++++---- process/transaction/process.go | 4 ++ process/unsigned/feeTxHandler.go | 6 +- sharding/indexHashedNodesCoordinator.go | 70 ++++++++++++------- sharding/indexHashedNodesCoordinator_test.go | 59 +++++++++++----- sharding/interface.go | 6 +- 20 files changed, 226 insertions(+), 119 deletions(-) diff --git a/cmd/node/factory/structs.go b/cmd/node/factory/structs.go index 8ab6f965037..97df87d7df3 100644 --- a/cmd/node/factory/structs.go +++ b/cmd/node/factory/structs.go @@ -1307,16 +1307,24 @@ func newShardBlockProcessorAndTracker( return nil, nil, err } + //TODO: replace with valid address + communityAddress, _ := hex.DecodeString("1bedf9f1db526aa98eb61f251e6eb29df64c0a4d96261b6fe9d4df1bc2cf5420") + burnAddress := communityAddress + leaderAddress := communityAddress + // TODO: construct this correctly on the PR specialAddressHolder, err := address.NewSpecialAddressHolder( - []byte("elrond"), - []byte("own"), + communityAddress, + burnAddress, state.AddressConverter, shardCoordinator) if err != nil { return nil, nil, err } + // TODO: remove when valid leader address is set in each round + specialAddressHolder.SetLeaderAddress(leaderAddress) + interimProcFactory, err := shard.NewIntermediateProcessorsContainerFactory( shardCoordinator, core.Marshalizer, diff --git a/cmd/node/main.go b/cmd/node/main.go index 72fddc65944..4ea30df6361 100644 --- a/cmd/node/main.go +++ b/cmd/node/main.go @@ -912,10 +912,7 @@ func createNode( if err != nil { return nil, err } - err = nd.StartHeartbeat(config.Heartbeat) - if err != nil { - return nil, err - } + err = nd.CreateShardGenesisBlock() if err != nil { return nil, err diff --git a/consensus/mock/nodesCoordinatorMock.go b/consensus/mock/nodesCoordinatorMock.go index d932396c81e..420b4bedab8 100644 --- a/consensus/mock/nodesCoordinatorMock.go +++ b/consensus/mock/nodesCoordinatorMock.go @@ -7,16 +7,18 @@ import ( ) type NodesCoordinatorMock struct { - ComputeValidatorsGroupCalled func([]byte) ([]sharding.Validator, error) - GetValidatorsPublicKeysCalled 
func(randomness []byte) ([]string, error) + ComputeValidatorsGroupCalled func(randomness []byte, round uint64, shardId uint32) ([]sharding.Validator, error) + GetValidatorsPublicKeysCalled func(randomness []byte, round uint64, shardId uint32) ([]string, error) } func (ncm NodesCoordinatorMock) ComputeValidatorsGroup( randomness []byte, + round uint64, + shardId uint32, ) (validatorsGroup []sharding.Validator, err error) { if ncm.ComputeValidatorsGroupCalled != nil { - return ncm.ComputeValidatorsGroupCalled(randomness) + return ncm.ComputeValidatorsGroupCalled(randomness, round, shardId) } list := []sharding.Validator{ @@ -34,12 +36,12 @@ func (ncm NodesCoordinatorMock) ComputeValidatorsGroup( return list, nil } -func (ncm NodesCoordinatorMock) GetValidatorsPublicKeys(randomness []byte) ([]string, error) { +func (ncm NodesCoordinatorMock) GetValidatorsPublicKeys(randomness []byte, round uint64, shardId uint32) ([]string, error) { if ncm.GetValidatorsPublicKeysCalled != nil { - return ncm.GetValidatorsPublicKeysCalled(randomness) + return ncm.GetValidatorsPublicKeysCalled(randomness, round, shardId) } - validators, err := ncm.ComputeValidatorsGroup(randomness) + validators, err := ncm.ComputeValidatorsGroup(randomness, round, shardId) if err != nil { return nil, err } @@ -53,7 +55,7 @@ func (ncm NodesCoordinatorMock) GetValidatorsPublicKeys(randomness []byte) ([]st return pubKeys, nil } -func (ncm NodesCoordinatorMock) ConsensusGroupSize() int { +func (ncm NodesCoordinatorMock) ConsensusGroupSize(shardId uint32) int { panic("implement me") } @@ -65,7 +67,7 @@ func (ncm NodesCoordinatorMock) SetConsensusGroupSize(int) error { panic("implement me") } -func (ncm NodesCoordinatorMock) GetSelectedPublicKeys(selection []byte) (publicKeys []string, err error) { +func (ncm NodesCoordinatorMock) GetSelectedPublicKeys(selection []byte, shardId uint32) (publicKeys []string, err error) { panic("implement me") } diff --git a/consensus/spos/commonSubround/subroundStartRound.go b/consensus/spos/commonSubround/subroundStartRound.go index 56050104770..f9f915b10f8 100644 --- a/consensus/spos/commonSubround/subroundStartRound.go +++ b/consensus/spos/commonSubround/subroundStartRound.go @@ -181,11 +181,13 @@ func (sr *SubroundStartRound) generateNextConsensusGroup(roundIndex int64) error } } - randomSource := fmt.Sprintf("%d-%s", roundIndex, core.ToB64(currentHeader.GetRandSeed())) + randomSource := currentHeader.GetRandSeed() - log.Info(fmt.Sprintf("random source used to determine the next consensus group is: %s\n", randomSource)) + log.Info(fmt.Sprintf("random source used to determine the next consensus group is: %s\n", core.ToB64(randomSource))) - nextConsensusGroup, err := sr.GetNextConsensusGroup(randomSource, sr.NodesCoordinator()) + shardId := sr.ShardCoordinator().SelfId() + + nextConsensusGroup, err := sr.GetNextConsensusGroup(randomSource, uint64(sr.RoundIndex), shardId, sr.NodesCoordinator()) if err != nil { return err } diff --git a/consensus/spos/commonSubround/subroundStartRound_test.go b/consensus/spos/commonSubround/subroundStartRound_test.go index 7a9defc8bf3..80c75c1645a 100644 --- a/consensus/spos/commonSubround/subroundStartRound_test.go +++ b/consensus/spos/commonSubround/subroundStartRound_test.go @@ -325,7 +325,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldReturnFalseWhenGenerateNextCon validatorGroupSelector := mock.NodesCoordinatorMock{} err := errors.New("error") - validatorGroupSelector.ComputeValidatorsGroupCalled = func(bytes []byte) ([]sharding.Validator, error) { + 
validatorGroupSelector.ComputeValidatorsGroupCalled = func(bytes []byte, round uint64, shardId uint32) ([]sharding.Validator, error) { return nil, err } container := mock.InitConsensusCore() @@ -341,7 +341,11 @@ func TestSubroundStartRound_InitCurrentRoundShouldReturnFalseWhenGetLeaderErr(t t.Parallel() validatorGroupSelector := mock.NodesCoordinatorMock{} - validatorGroupSelector.ComputeValidatorsGroupCalled = func(bytes []byte) ([]sharding.Validator, error) { + validatorGroupSelector.ComputeValidatorsGroupCalled = func( + bytes []byte, + round uint64, + shardId uint32, + ) ([]sharding.Validator, error) { return make([]sharding.Validator, 0), nil } @@ -430,7 +434,11 @@ func TestSubroundStartRound_GenerateNextConsensusGroupShouldReturnErr(t *testing validatorGroupSelector := mock.NodesCoordinatorMock{} err := errors.New("error") - validatorGroupSelector.ComputeValidatorsGroupCalled = func(bytes []byte) ([]sharding.Validator, error) { + validatorGroupSelector.ComputeValidatorsGroupCalled = func( + bytes []byte, + round uint64, + shardId uint32, + ) ([]sharding.Validator, error) { return nil, err } container := mock.InitConsensusCore() diff --git a/consensus/spos/consensusState.go b/consensus/spos/consensusState.go index 3e69fe5c584..c88db5e3dd9 100644 --- a/consensus/spos/consensusState.go +++ b/consensus/spos/consensusState.go @@ -95,9 +95,15 @@ func (cns *ConsensusState) GetLeader() (string, error) { // GetNextConsensusGroup gets the new consensus group for the current round based on current eligible list and a random // source for the new selection -func (cns *ConsensusState) GetNextConsensusGroup(randomSource string, vgs sharding.NodesCoordinator) ([]string, +func (cns *ConsensusState) GetNextConsensusGroup( + randomSource []byte, + round uint64, + shardId uint32, + nodesCoordinator sharding.NodesCoordinator, +) ([]string, error) { - validatorsGroup, err := vgs.ComputeValidatorsGroup([]byte(randomSource)) + + validatorsGroup, err := nodesCoordinator.ComputeValidatorsGroup(randomSource, round, shardId) if err != nil { return nil, err diff --git a/consensus/spos/consensusState_test.go b/consensus/spos/consensusState_test.go index d9f5a8c0613..04dc63cfc66 100644 --- a/consensus/spos/consensusState_test.go +++ b/consensus/spos/consensusState_test.go @@ -138,13 +138,17 @@ func TestConsensusState_GetNextConsensusGroupShouldFailWhenComputeValidatorsGrou cns := internalInitConsensusState() - vgs := mock.NodesCoordinatorMock{} + nodesCoordinator := mock.NodesCoordinatorMock{} err := errors.New("error") - vgs.ComputeValidatorsGroupCalled = func(randomness []byte) ([]sharding.Validator, error) { + nodesCoordinator.ComputeValidatorsGroupCalled = func( + randomness []byte, + round uint64, + shardId uint32, + ) ([]sharding.Validator, error) { return nil, err } - _, err2 := cns.GetNextConsensusGroup("", vgs) + _, err2 := cns.GetNextConsensusGroup([]byte(""), 0, 0, nodesCoordinator) assert.Equal(t, err, err2) } @@ -153,9 +157,9 @@ func TestConsensusState_GetNextConsensusGroupShouldWork(t *testing.T) { cns := internalInitConsensusState() - vgs := mock.NodesCoordinatorMock{} + nodesCoordinator := mock.NodesCoordinatorMock{} - nextConsensusGroup, err := cns.GetNextConsensusGroup("", vgs) + nextConsensusGroup, err := cns.GetNextConsensusGroup(nil, 0, 0, nodesCoordinator) assert.Nil(t, err) assert.NotNil(t, nextConsensusGroup) } diff --git a/integrationTests/mock/nodesCoordinatorMock.go b/integrationTests/mock/nodesCoordinatorMock.go index 8e08aa158e8..24adb024fbc 100644 --- 
a/integrationTests/mock/nodesCoordinatorMock.go +++ b/integrationTests/mock/nodesCoordinatorMock.go @@ -5,16 +5,18 @@ import ( ) type NodesCoordinatorMock struct { - ComputeValidatorsGroupCalled func([]byte) ([]sharding.Validator, error) - GetValidatorsPublicKeysCalled func(randomness []byte) ([]string, error) + ComputeValidatorsGroupCalled func(randomness []byte, round uint64, shardId uint32) ([]sharding.Validator, error) + GetValidatorsPublicKeysCalled func(randomness []byte, round uint64, shardId uint32) ([]string, error) } func (ncm NodesCoordinatorMock) ComputeValidatorsGroup( randomness []byte, + round uint64, + shardId uint32, ) (validatorsGroup []sharding.Validator, err error) { if ncm.ComputeValidatorsGroupCalled != nil { - return ncm.ComputeValidatorsGroupCalled(randomness) + return ncm.ComputeValidatorsGroupCalled(randomness, round, shardId) } list := []sharding.Validator{} @@ -22,12 +24,16 @@ func (ncm NodesCoordinatorMock) ComputeValidatorsGroup( return list, nil } -func (ncm NodesCoordinatorMock) GetValidatorsPublicKeys(randomness []byte) ([]string, error) { +func (ncm NodesCoordinatorMock) GetValidatorsPublicKeys( + randomness []byte, + round uint64, + shardId uint32, +) ([]string, error) { if ncm.GetValidatorsPublicKeysCalled != nil { - return ncm.GetValidatorsPublicKeysCalled(randomness) + return ncm.GetValidatorsPublicKeysCalled(randomness, round, shardId) } - validators, err := ncm.ComputeValidatorsGroup(randomness) + validators, err := ncm.ComputeValidatorsGroup(randomness, round, shardId) if err != nil { return nil, err } @@ -41,7 +47,7 @@ func (ncm NodesCoordinatorMock) GetValidatorsPublicKeys(randomness []byte) ([]st return pubKeys, nil } -func (ncm NodesCoordinatorMock) ConsensusGroupSize() int { +func (ncm NodesCoordinatorMock) ConsensusGroupSize(shardId uint32) int { panic("implement me") } @@ -53,7 +59,7 @@ func (ncm NodesCoordinatorMock) SetConsensusGroupSize(int) error { panic("implement me") } -func (ncm NodesCoordinatorMock) GetSelectedPublicKeys(selection []byte) (publicKeys []string, err error) { +func (ncm NodesCoordinatorMock) GetSelectedPublicKeys(selection []byte, shardId uint32) (publicKeys []string, err error) { panic("implement me") } diff --git a/integrationTests/singleShard/block/interceptedRequestHdr_test.go b/integrationTests/singleShard/block/interceptedRequestHdr_test.go index de48aa4988f..ced68694426 100644 --- a/integrationTests/singleShard/block/interceptedRequestHdr_test.go +++ b/integrationTests/singleShard/block/interceptedRequestHdr_test.go @@ -39,7 +39,7 @@ func TestNode_GenerateSendInterceptHeaderByNonceWithNetMessenger(t *testing.T) { validatorsMap := genValidatorsFromPubKeys(keysMap) nodesCoordinator1, _ := sharding.NewIndexHashedNodesCoordinator( - 1, + 2, 1, hasher, 0, @@ -47,7 +47,7 @@ func TestNode_GenerateSendInterceptHeaderByNonceWithNetMessenger(t *testing.T) { validatorsMap, ) nodesCoordinator2, _ := sharding.NewIndexHashedNodesCoordinator( - 1, + 2, 1, hasher, 0, @@ -56,7 +56,7 @@ func TestNode_GenerateSendInterceptHeaderByNonceWithNetMessenger(t *testing.T) { ) fmt.Println("Requester:") - nRequester, mesRequester, multiSigner, resolversFinder := createNetNode( + nRequester, mesRequester, multiSigner1, resolversFinder := createNetNode( dPoolRequester, storeRequester, createAccountsDB(), @@ -67,7 +67,7 @@ func TestNode_GenerateSendInterceptHeaderByNonceWithNetMessenger(t *testing.T) { ) fmt.Println("Resolver:") - nResolver, mesResolver, _, _ := createNetNode( + nResolver, mesResolver, multiSigner2, _ := createNetNode( 
dPoolResolver, storeResolver, createAccountsDB(), @@ -122,10 +122,13 @@ func TestNode_GenerateSendInterceptHeaderByNonceWithNetMessenger(t *testing.T) { hdrBuff, _ := marshalizer.Marshal(&hdr1) hdrHash := hasher.Compute(string(hdrBuff)) - msig, _ := multiSigner.Create(keysMap[0], 0) - bitmap := []byte{1} - _, _ = msig.CreateSignatureShare(hdrHash, bitmap) - aggSig, _ := msig.AggregateSigs(bitmap) + msig0, _ := multiSigner1.Create(keysMap[0], 0) + msig1, _ := multiSigner2.Create(keysMap[0], 1) + bitmap := []byte{3} + _, _ = msig0.CreateSignatureShare(hdrHash, bitmap) + sig1, _ := msig1.CreateSignatureShare(hdrHash, bitmap) + _ = msig0.StoreSignatureShare(1, sig1) + aggSig, _ := msig0.AggregateSigs(bitmap) hdr1.PubKeysBitmap = bitmap hdr1.Signature = aggSig diff --git a/node/mock/nodesCoordinatorMock.go b/node/mock/nodesCoordinatorMock.go index d932396c81e..b3d1f307dea 100644 --- a/node/mock/nodesCoordinatorMock.go +++ b/node/mock/nodesCoordinatorMock.go @@ -7,16 +7,18 @@ import ( ) type NodesCoordinatorMock struct { - ComputeValidatorsGroupCalled func([]byte) ([]sharding.Validator, error) - GetValidatorsPublicKeysCalled func(randomness []byte) ([]string, error) + ComputeValidatorsGroupCalled func(randomness []byte, round uint64, shardId uint32) ([]sharding.Validator, error) + GetValidatorsPublicKeysCalled func(randomness []byte, round uint64, shardId uint32) ([]string, error) } func (ncm NodesCoordinatorMock) ComputeValidatorsGroup( randomness []byte, + round uint64, + shardId uint32, ) (validatorsGroup []sharding.Validator, err error) { if ncm.ComputeValidatorsGroupCalled != nil { - return ncm.ComputeValidatorsGroupCalled(randomness) + return ncm.ComputeValidatorsGroupCalled(randomness, round, shardId) } list := []sharding.Validator{ @@ -34,12 +36,16 @@ func (ncm NodesCoordinatorMock) ComputeValidatorsGroup( return list, nil } -func (ncm NodesCoordinatorMock) GetValidatorsPublicKeys(randomness []byte) ([]string, error) { +func (ncm NodesCoordinatorMock) GetValidatorsPublicKeys( + randomness []byte, + round uint64, + shardId uint32, +) ([]string, error) { if ncm.GetValidatorsPublicKeysCalled != nil { - return ncm.GetValidatorsPublicKeysCalled(randomness) + return ncm.GetValidatorsPublicKeysCalled(randomness, round, shardId) } - validators, err := ncm.ComputeValidatorsGroup(randomness) + validators, err := ncm.ComputeValidatorsGroup(randomness, round, shardId) if err != nil { return nil, err } @@ -53,7 +59,7 @@ func (ncm NodesCoordinatorMock) GetValidatorsPublicKeys(randomness []byte) ([]st return pubKeys, nil } -func (ncm NodesCoordinatorMock) ConsensusGroupSize() int { +func (ncm NodesCoordinatorMock) ConsensusGroupSize(shardId uint32) int { panic("implement me") } @@ -65,7 +71,7 @@ func (ncm NodesCoordinatorMock) SetConsensusGroupSize(int) error { panic("implement me") } -func (ncm NodesCoordinatorMock) GetSelectedPublicKeys(selection []byte) (publicKeys []string, err error) { +func (ncm NodesCoordinatorMock) GetSelectedPublicKeys(selection []byte, shardId uint32) (publicKeys []string, err error) { panic("implement me") } diff --git a/node/nodeTesting.go b/node/nodeTesting.go index d4795a7556d..df0e5596aa8 100644 --- a/node/nodeTesting.go +++ b/node/nodeTesting.go @@ -263,11 +263,13 @@ func (n *Node) generateAndSignSingleTx( } tx := transaction.Transaction{ - Nonce: nonce, - Value: value, - RcvAddr: rcvAddrBytes, - SndAddr: sndAddrBytes, - Data: data, + Nonce: nonce, + Value: value, + GasLimit: 100, + GasPrice: 10, + RcvAddr: rcvAddrBytes, + SndAddr: sndAddrBytes, + Data: data, } 
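+	// the hard-coded GasLimit and GasPrice above give every generated test
+	// transaction a non-zero fee, so the fee transaction handling exercised by
+	// these tests has something to create and verify (the SC-call integration
+	// tests deduct GasLimit*GasPrice from the sender balance)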
marshalizedTx, err := n.marshalizer.Marshal(&tx) diff --git a/process/block/interceptedBlockHeader.go b/process/block/interceptedBlockHeader.go index 7940ca88fbb..0aaa4a76c80 100644 --- a/process/block/interceptedBlockHeader.go +++ b/process/block/interceptedBlockHeader.go @@ -53,7 +53,7 @@ func (inHdr *InterceptedHeader) Shard() uint32 { return inHdr.ShardId } -// GetShardHeader returns the Header pointer that holds the data +// GetHeader returns the Header pointer that holds the data func (inHdr *InterceptedHeader) GetHeader() *block.Header { return inHdr.Header } @@ -129,7 +129,7 @@ func (inHdr *InterceptedHeader) VerifySig() error { } - consensusPubKeys, err := inHdr.nodesCoordinator.GetValidatorsPublicKeys(randSeed) + consensusPubKeys, err := inHdr.nodesCoordinator.GetValidatorsPublicKeys(randSeed, inHdr.Round, inHdr.ShardId) if err != nil { return err } diff --git a/process/block/interceptedMetaBlockHeader.go b/process/block/interceptedMetaBlockHeader.go index b033cf4c16b..4cd094e3443 100644 --- a/process/block/interceptedMetaBlockHeader.go +++ b/process/block/interceptedMetaBlockHeader.go @@ -121,7 +121,7 @@ func (imh *InterceptedMetaHeader) VerifySig() error { } - consensusPubKeys, err := imh.nodesCoordinator.GetValidatorsPublicKeys(randSeed) + consensusPubKeys, err := imh.nodesCoordinator.GetValidatorsPublicKeys(randSeed, imh.Round, imh.GetShardID()) if err != nil { return err } diff --git a/process/errors.go b/process/errors.go index 98ff9d47de8..11c0cffbed2 100644 --- a/process/errors.go +++ b/process/errors.go @@ -421,6 +421,12 @@ var ErrNilUnsignedTxHandler = errors.New("nil unsigned tx handler") // ErrTxsFeesDoesNotMatch signals that txs fees do not match var ErrTxsFeesDoesNotMatch = errors.New("calculated tx fees with block tx fee does not match") +// ErrTxsFeesNotFound signals that the tx fee not found +var ErrTxsFeesNotFound = errors.New("tx fees not found") + +// ErrTotalTxsFeesDoNotMatch signals that the total tx fee do not match +var ErrTotalTxsFeesDoNotMatch = errors.New("total tx fees do not match") + // ErrNilTxTypeHandler signals that tx type handler is nil var ErrNilTxTypeHandler = errors.New("nil tx type handler") diff --git a/process/mock/nodesCoordinatorMock.go b/process/mock/nodesCoordinatorMock.go index a3570e16ff9..324cd59e858 100644 --- a/process/mock/nodesCoordinatorMock.go +++ b/process/mock/nodesCoordinatorMock.go @@ -13,10 +13,10 @@ type NodesCoordinatorMock struct { MetaConsensusSize uint32 ShardId uint32 NbShards uint32 - GetSelectedPublicKeysCalled func(selection []byte) (publicKeys []string, err error) - GetValidatorsPublicKeysCalled func(randomness []byte) ([]string, error) + GetSelectedPublicKeysCalled func(selection []byte, shardId uint32) (publicKeys []string, err error) + GetValidatorsPublicKeysCalled func(randomness []byte, round uint64, shardId uint32) ([]string, error) LoadNodesPerShardsCalled func(nodes map[uint32][]sharding.Validator) error - ComputeValidatorsGroupCalled func(randomness []byte) (validatorsGroup []sharding.Validator, err error) + ComputeValidatorsGroupCalled func(randomness []byte, round uint64, shardId uint32) (validatorsGroup []sharding.Validator, err error) GetValidatorWithPublicKeyCalled func(publicKey []byte) (validator sharding.Validator, shardId uint32, err error) } @@ -30,9 +30,9 @@ func NewNodesCoordinatorMock() *NodesCoordinatorMock { } } -func (ncm *NodesCoordinatorMock) GetSelectedPublicKeys(selection []byte) (publicKeys []string, err error) { +func (ncm *NodesCoordinatorMock) GetSelectedPublicKeys(selection 
[]byte, shardId uint32) (publicKeys []string, err error) { if ncm.GetSelectedPublicKeysCalled != nil { - return ncm.GetSelectedPublicKeysCalled(selection) + return ncm.GetSelectedPublicKeysCalled(selection, shardId) } if len(ncm.Validators) == 0 { @@ -41,19 +41,23 @@ func (ncm *NodesCoordinatorMock) GetSelectedPublicKeys(selection []byte) (public pubKeys := make([]string, 0) - for _, v := range ncm.Validators[ncm.ShardId] { + for _, v := range ncm.Validators[shardId] { pubKeys = append(pubKeys, string(v.PubKey())) } return pubKeys, nil } -func (ncm *NodesCoordinatorMock) GetValidatorsPublicKeys(randomness []byte) ([]string, error) { +func (ncm *NodesCoordinatorMock) GetValidatorsPublicKeys( + randomness []byte, + round uint64, + shardId uint32, +) ([]string, error) { if ncm.GetValidatorsPublicKeysCalled != nil { - return ncm.GetValidatorsPublicKeysCalled(randomness) + return ncm.GetValidatorsPublicKeysCalled(randomness, round, shardId) } - validators, err := ncm.ComputeValidatorsGroup(randomness) + validators, err := ncm.ComputeValidatorsGroup(randomness, round, shardId) if err != nil { return nil, err } @@ -81,11 +85,15 @@ func (ncm *NodesCoordinatorMock) SetNodesPerShards(nodes map[uint32][]sharding.V return nil } -func (ncm *NodesCoordinatorMock) ComputeValidatorsGroup(randomess []byte) ([]sharding.Validator, error) { +func (ncm *NodesCoordinatorMock) ComputeValidatorsGroup( + randomess []byte, + round uint64, + shardId uint32, +) ([]sharding.Validator, error) { var consensusSize uint32 if ncm.ComputeValidatorsGroupCalled != nil { - return ncm.ComputeValidatorsGroupCalled(randomess) + return ncm.ComputeValidatorsGroupCalled(randomess, round, shardId) } if ncm.ShardId == sharding.MetachainShardId { @@ -101,7 +109,7 @@ func (ncm *NodesCoordinatorMock) ComputeValidatorsGroup(randomess []byte) ([]sha validatorsGroup := make([]sharding.Validator, 0) for i := uint32(0); i < consensusSize; i++ { - validatorsGroup = append(validatorsGroup, ncm.Validators[ncm.ShardId][i]) + validatorsGroup = append(validatorsGroup, ncm.Validators[shardId][i]) } return validatorsGroup, nil diff --git a/process/transaction/process.go b/process/transaction/process.go index fe827e07b5c..0731b674961 100644 --- a/process/transaction/process.go +++ b/process/transaction/process.go @@ -213,6 +213,10 @@ func (txProc *txProcessor) processMoveBalance( } } + if currFeeTx == nil || currFeeTx.IsInterfaceNil() { + return nil + } + txProc.txFeeHandler.AddProcessedUTx(currFeeTx) return nil diff --git a/process/unsigned/feeTxHandler.go b/process/unsigned/feeTxHandler.go index f0c97cc717b..5dd89db7b15 100644 --- a/process/unsigned/feeTxHandler.go +++ b/process/unsigned/feeTxHandler.go @@ -128,7 +128,7 @@ func (ftxh *feeTxHandler) CleanProcessedUTxs() { func (ftxh *feeTxHandler) AddTxFeeFromBlock(tx data.TransactionHandler) { currFeeTx, ok := tx.(*feeTx.FeeTx) if !ok { - log.Debug(process.ErrWrongTypeAssertion.Error()) + log.Error(process.ErrWrongTypeAssertion.Error()) return } @@ -235,7 +235,7 @@ func (ftxh *feeTxHandler) VerifyCreatedUTxs() error { txFromBlock, ok := ftxh.feeTxsFromBlock[string(value.GetRecvAddress())] if !ok { - return process.ErrTxsFeesDoesNotMatch + return process.ErrTxsFeesNotFound } if txFromBlock.Value.Cmp(value.GetValue()) != 0 { return process.ErrTxsFeesDoesNotMatch @@ -243,7 +243,7 @@ func (ftxh *feeTxHandler) VerifyCreatedUTxs() error { } if totalCalculatedFees.Cmp(totalFeesFromBlock) != 0 { - return process.ErrTxsFeesDoesNotMatch + return process.ErrTotalTxsFeesDoNotMatch } return nil diff --git 
a/sharding/indexHashedNodesCoordinator.go b/sharding/indexHashedNodesCoordinator.go index bb716828470..34dcec5ae50 100644 --- a/sharding/indexHashedNodesCoordinator.go +++ b/sharding/indexHashedNodesCoordinator.go @@ -3,6 +3,8 @@ package sharding import ( "bytes" "encoding/binary" + "fmt" + "github.com/ElrondNetwork/elrond-go/core" "math/big" "github.com/ElrondNetwork/elrond-go/hashing" @@ -13,7 +15,6 @@ type indexHashedNodesCoordinator struct { shardId uint32 hasher hashing.Hasher nodesMap map[uint32][]Validator - expandedEligibleList []Validator shardConsensusGroupSize int metaConsensusGroupSize int } @@ -48,7 +49,6 @@ func NewIndexHashedNodesCoordinator( shardId: shardId, hasher: hasher, nodesMap: make(map[uint32][]Validator), - expandedEligibleList: make([]Validator, 0), shardConsensusGroupSize: shardConsensusGroupSize, metaConsensusGroupSize: metaConsensusGroupSize, } @@ -80,7 +80,6 @@ func (ihgs *indexHashedNodesCoordinator) SetNodesPerShards(nodes map[uint32][]Va } ihgs.nodesMap = nodes - ihgs.expandedEligibleList = ihgs.expandEligibleList() return nil } @@ -88,29 +87,41 @@ func (ihgs *indexHashedNodesCoordinator) SetNodesPerShards(nodes map[uint32][]Va // ComputeValidatorsGroup will generate a list of validators based on the the eligible list, // consensus group size and a randomness source // Steps: -// 1. generate expanded eligible list by multiplying entries from eligible list according to stake and rating -> TODO +// 1. generate expanded eligible list by multiplying entries from shards' eligible list according to stake and rating -> TODO // 2. for each value in [0, consensusGroupSize), compute proposedindex = Hash( [index as string] CONCAT randomness) % len(eligible list) // 3. if proposed index is already in the temp validator list, then proposedIndex++ (and then % len(eligible list) as to not // exceed the maximum index value permitted by the validator list), and then recheck against temp validator list until // the item at the new proposed index is not found in the list. This new proposed index will be called checked index // 4. the item at the checked index is appended in the temp validator list -func (ihgs *indexHashedNodesCoordinator) ComputeValidatorsGroup(randomness []byte) (validatorsGroup []Validator, err error) { +func (ihgs *indexHashedNodesCoordinator) ComputeValidatorsGroup( + randomness []byte, + round uint64, + shardId uint32, +) (validatorsGroup []Validator, err error) { if randomness == nil { return nil, ErrNilRandomness } + if shardId >= ihgs.nbShards && shardId != MetachainShardId { + return nil, ErrInvalidShardId + } + if ihgs == nil { return nil, ErrNilRandomness } tempList := make([]Validator, 0) - cSize := ihgs.consensusGroupSize() + cSize := ihgs.consensusGroupSize(shardId) + randomness = []byte(fmt.Sprintf("%d-%s", round, core.ToB64(randomness))) - for startIdx := 0; startIdx < cSize; startIdx++ { - proposedIndex := ihgs.computeListIndex(startIdx, string(randomness)) + // TODO: pre-compute eligible list and update only on rating change. 
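+	// the selection source is salted with the round ("<round>-<encoded randSeed>"),
+	// so two rounds reusing the same rand seed will, with high probability, draw
+	// different consensus groups; the loop below applies steps 2-4 from the
+	// comment above on the requested shard's expanded eligible list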
+ expandedList := ihgs.expandEligibleList(shardId) + lenExpandedList := len(expandedList) - checkedIndex := ihgs.checkIndex(proposedIndex, tempList) - tempList = append(tempList, ihgs.expandedEligibleList[checkedIndex]) + for startIdx := 0; startIdx < cSize; startIdx++ { + proposedIndex := ihgs.computeListIndex(startIdx, lenExpandedList, string(randomness)) + checkedIndex := ihgs.checkIndex(proposedIndex, expandedList, tempList) + tempList = append(tempList, expandedList[checkedIndex]) } return tempList, nil @@ -135,8 +146,8 @@ func (ihgs *indexHashedNodesCoordinator) GetValidatorWithPublicKey(publicKey []b // GetValidatorsPublicKeys calculates the validators group for a specific randomness, // returning their public keys -func (ihgs *indexHashedNodesCoordinator) GetValidatorsPublicKeys(randomness []byte) ([]string, error) { - consensusNodes, err := ihgs.ComputeValidatorsGroup(randomness) +func (ihgs *indexHashedNodesCoordinator) GetValidatorsPublicKeys(randomness []byte, round uint64, shardId uint32) ([]string, error) { + consensusNodes, err := ihgs.ComputeValidatorsGroup(randomness, round, shardId) if err != nil { return nil, err } @@ -152,16 +163,20 @@ func (ihgs *indexHashedNodesCoordinator) GetValidatorsPublicKeys(randomness []by // GetSelectedPublicKeys returns the stringified public keys of the marked validators in the selection bitmap // TODO: This function needs to be revised when the requirements are clarified -func (ihgs *indexHashedNodesCoordinator) GetSelectedPublicKeys(selection []byte) (publicKeys []string, err error) { +func (ihgs *indexHashedNodesCoordinator) GetSelectedPublicKeys(selection []byte, shardId uint32) (publicKeys []string, err error) { + if shardId >= ihgs.nbShards && shardId != MetachainShardId { + return nil, ErrInvalidShardId + } + selectionLen := uint16(len(selection) * 8) // 8 selection bits in each byte - shardEligibleLen := uint16(len(ihgs.nodesMap[ihgs.shardId])) + shardEligibleLen := uint16(len(ihgs.nodesMap[shardId])) invalidSelection := selectionLen < shardEligibleLen if invalidSelection { return nil, ErrEligibleSelectionMismatch } - cSize := ihgs.consensusGroupSize() + cSize := ihgs.consensusGroupSize(shardId) publicKeys = make([]string, cSize) cnt := 0 @@ -172,7 +187,7 @@ func (ihgs *indexHashedNodesCoordinator) GetSelectedPublicKeys(selection []byte) continue } - publicKeys[cnt] = string(ihgs.nodesMap[ihgs.shardId][i].PubKey()) + publicKeys[cnt] = string(ihgs.nodesMap[shardId][i].PubKey()) cnt++ if cnt > cSize { @@ -187,13 +202,13 @@ func (ihgs *indexHashedNodesCoordinator) GetSelectedPublicKeys(selection []byte) return publicKeys, nil } -func (ihgs *indexHashedNodesCoordinator) expandEligibleList() []Validator { +func (ihgs *indexHashedNodesCoordinator) expandEligibleList(shardId uint32) []Validator { //TODO implement an expand eligible list variant - return ihgs.nodesMap[ihgs.shardId] + return ihgs.nodesMap[shardId] } // computeListIndex computes a proposed index from expanded eligible list -func (ihgs *indexHashedNodesCoordinator) computeListIndex(currentIndex int, randomSource string) int { +func (ihgs *indexHashedNodesCoordinator) computeListIndex(currentIndex int, lenList int, randomSource string) int { buffCurrentIndex := make([]byte, 8) binary.BigEndian.PutUint64(buffCurrentIndex, uint64(currentIndex)) @@ -201,7 +216,7 @@ func (ihgs *indexHashedNodesCoordinator) computeListIndex(currentIndex int, rand computedLargeIndex := big.NewInt(0) computedLargeIndex.SetBytes(indexHash) - lenExpandedEligibleList := 
big.NewInt(int64(len(ihgs.expandedEligibleList))) + lenExpandedEligibleList := big.NewInt(int64(lenList)) // computedListIndex = computedLargeIndex % len(expandedEligibleList) computedListIndex := big.NewInt(0).Mod(computedLargeIndex, lenExpandedEligibleList).Int64() @@ -210,14 +225,18 @@ func (ihgs *indexHashedNodesCoordinator) computeListIndex(currentIndex int, rand } // checkIndex returns a checked index starting from a proposed index -func (ihgs *indexHashedNodesCoordinator) checkIndex(proposedIndex int, selectedList []Validator) int { +func (ihgs *indexHashedNodesCoordinator) checkIndex( + proposedIndex int, + eligibleList []Validator, + selectedList []Validator, +) int { for { - v := ihgs.expandedEligibleList[proposedIndex] + v := eligibleList[proposedIndex] if ihgs.validatorIsInList(v, selectedList) { proposedIndex++ - proposedIndex = proposedIndex % len(ihgs.expandedEligibleList) + proposedIndex = proposedIndex % len(eligibleList) continue } @@ -236,9 +255,8 @@ func (ihgs *indexHashedNodesCoordinator) validatorIsInList(v Validator, list []V return false } -// consensusGroupSize returns the consensus group size for the node's shard -func (ihgs *indexHashedNodesCoordinator) consensusGroupSize() int { - if ihgs.shardId == MetachainShardId { +func (ihgs *indexHashedNodesCoordinator) consensusGroupSize(shardId uint32) int { + if shardId == MetachainShardId { return ihgs.metaConsensusGroupSize } diff --git a/sharding/indexHashedNodesCoordinator_test.go b/sharding/indexHashedNodesCoordinator_test.go index 612e9e9f7c6..4f8745b8af6 100644 --- a/sharding/indexHashedNodesCoordinator_test.go +++ b/sharding/indexHashedNodesCoordinator_test.go @@ -2,6 +2,8 @@ package sharding_test import ( "encoding/binary" + "fmt" + "github.com/ElrondNetwork/elrond-go/core" "math/big" "strconv" "testing" @@ -40,6 +42,10 @@ func createDummyNodesMap() map[uint32][]sharding.Validator { return nodesMap } +func genRandSource(round uint64, randomness string) string { + return fmt.Sprintf("%d-%s", round, core.ToB64([]byte(randomness))) +} + //------- NewIndexHashedNodesCoordinator func TestNewIndexHashedGroupSelector_NilHasherShouldErr(t *testing.T) { @@ -178,12 +184,31 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroupNilRandomnessShouldErr(t nodesMap, ) - list2, err := ihgs.ComputeValidatorsGroup(nil) + list2, err := ihgs.ComputeValidatorsGroup(nil, 0, 0) assert.Nil(t, list2) assert.Equal(t, sharding.ErrNilRandomness, err) } +func TestIndexHashedGroupSelector_ComputeValidatorsGroupInvalidShardIdShouldErr(t *testing.T) { + t.Parallel() + + nodesMap := createDummyNodesMap() + ihgs, _ := sharding.NewIndexHashedNodesCoordinator( + 2, + 1, + mock.HasherMock{}, + 0, + 1, + nodesMap, + ) + + list2, err := ihgs.ComputeValidatorsGroup([]byte("radomness"), 0, 5) + + assert.Nil(t, list2) + assert.Equal(t, sharding.ErrInvalidShardId, err) +} + //------- functionality tests func TestIndexHashedGroupSelector_ComputeValidatorsGroup1ValidatorShouldReturnSame(t *testing.T) { @@ -204,7 +229,7 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroup1ValidatorShouldReturnSa nodesMap, ) - list2, err := ihgs.ComputeValidatorsGroup([]byte("randomness")) + list2, err := ihgs.ComputeValidatorsGroup([]byte("randomness"), 0, 0) assert.Nil(t, err) assert.Equal(t, list, list2) @@ -241,7 +266,7 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest2Validators(t *testi 1, nodesMap) - list2, err := ihgs.ComputeValidatorsGroup([]byte(randomness)) + list2, err := ihgs.ComputeValidatorsGroup([]byte(randomness), 0, 0) 
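+	// round 0 and shard 0 are now passed explicitly; the expected group below is
+	// still shard 0's eligible list (nodesMap[0])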
assert.Nil(t, err) assert.Equal(t, nodesMap[0], list2) @@ -253,16 +278,17 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest2ValidatorsRevertOrd hasher := &mock.HasherStub{} randomness := "randomness" + randSource := genRandSource(0, randomness) //this will return the list in reverse order: //element 0 will be the second //element 1 will be the first hasher.ComputeCalled = func(s string) []byte { - if string(uint64ToBytes(0))+randomness == s { + if string(uint64ToBytes(0))+randSource == s { return convertBigIntToBytes(big.NewInt(1)) } - if string(uint64ToBytes(1))+randomness == s { + if string(uint64ToBytes(1))+randSource == s { return convertBigIntToBytes(big.NewInt(0)) } @@ -290,7 +316,7 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest2ValidatorsRevertOrd nodesMap, ) - list2, err := ihgs.ComputeValidatorsGroup([]byte(randomness)) + list2, err := ihgs.ComputeValidatorsGroup([]byte(randomness), 0, 0) assert.Nil(t, err) assert.Equal(t, validator0, list2[1]) @@ -329,7 +355,7 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest2ValidatorsSameIndex nodesMap, ) - list2, err := ihgs.ComputeValidatorsGroup([]byte(randomness)) + list2, err := ihgs.ComputeValidatorsGroup([]byte(randomness), 0, 0) assert.Nil(t, err) assert.Equal(t, nodesMap[0], list2) @@ -341,6 +367,7 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest6From10ValidatorsSho hasher := &mock.HasherStub{} randomness := "randomness" + randomnessWithRound := genRandSource(0, randomness) //script: // for index 0, hasher will return 11 which will translate to 1, so 1 is the first element @@ -350,14 +377,14 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest6From10ValidatorsSho // for index 4, hasher will return 0 which will translate to 0, 0 is already picked, 1 is already picked, 2 is already picked, // 3 is the 4-th element // for index 5, hasher will return 9 which will translate to 9, so 9, 0, 1, 2, 3 are already picked, 4 is the 5-th element - script := make(map[string]*big.Int) - script[string(uint64ToBytes(0))+randomness] = big.NewInt(11) //will translate to 1, add 1 - script[string(uint64ToBytes(1))+randomness] = big.NewInt(1) //will translate to 1, add 2 - script[string(uint64ToBytes(2))+randomness] = big.NewInt(9) //will translate to 9, add 9 - script[string(uint64ToBytes(3))+randomness] = big.NewInt(9) //will translate to 9, add 0 - script[string(uint64ToBytes(4))+randomness] = big.NewInt(0) //will translate to 0, add 3 - script[string(uint64ToBytes(5))+randomness] = big.NewInt(9) //will translate to 9, add 4 + + script[string(uint64ToBytes(0))+randomnessWithRound] = big.NewInt(11) //will translate to 1, add 1 + script[string(uint64ToBytes(1))+randomnessWithRound] = big.NewInt(1) //will translate to 1, add 2 + script[string(uint64ToBytes(2))+randomnessWithRound] = big.NewInt(9) //will translate to 9, add 9 + script[string(uint64ToBytes(3))+randomnessWithRound] = big.NewInt(9) //will translate to 9, add 0 + script[string(uint64ToBytes(4))+randomnessWithRound] = big.NewInt(0) //will translate to 0, add 3 + script[string(uint64ToBytes(5))+randomnessWithRound] = big.NewInt(9) //will translate to 9, add 4 hasher.ComputeCalled = func(s string) []byte { val, ok := script[s] @@ -406,7 +433,7 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest6From10ValidatorsSho nodesMap, ) - list2, err := ihgs.ComputeValidatorsGroup([]byte(randomness)) + list2, err := ihgs.ComputeValidatorsGroup([]byte(randomness), 0, 0) assert.Nil(t, err) assert.Equal(t, 6, len(list2)) @@ -444,7 
+471,7 @@ func BenchmarkIndexHashedGroupSelector_ComputeValidatorsGroup21of400(b *testing. for i := 0; i < b.N; i++ { randomness := strconv.Itoa(i) - list2, _ := ihgs.ComputeValidatorsGroup([]byte(randomness)) + list2, _ := ihgs.ComputeValidatorsGroup([]byte(randomness), 0, 0) assert.Equal(b, consensusGroupSize, len(list2)) } diff --git a/sharding/interface.go b/sharding/interface.go index 8905a57ad2a..41ca3e34442 100644 --- a/sharding/interface.go +++ b/sharding/interface.go @@ -31,12 +31,12 @@ type Validator interface { type NodesCoordinator interface { PublicKeysSelector SetNodesPerShards(nodes map[uint32][]Validator) error - ComputeValidatorsGroup(randomness []byte) (validatorsGroup []Validator, err error) + ComputeValidatorsGroup(randomness []byte, round uint64, shardId uint32) (validatorsGroup []Validator, err error) GetValidatorWithPublicKey(publicKey []byte) (validator Validator, shardId uint32, err error) } // PublicKeysSelector allows retrieval of eligible validators public keys type PublicKeysSelector interface { - GetSelectedPublicKeys(selection []byte) (publicKeys []string, err error) - GetValidatorsPublicKeys(randomness []byte) ([]string, error) + GetSelectedPublicKeys(selection []byte, shardId uint32) (publicKeys []string, err error) + GetValidatorsPublicKeys(randomness []byte, round uint64, shardId uint32) ([]string, error) } From da325c71a318710ec68b7f95305f27405625d04e Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Fri, 16 Aug 2019 10:29:18 +0300 Subject: [PATCH 059/234] process: update fee trasaction verified errors in unit test --- process/unsigned/feeTxHandler_test.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/process/unsigned/feeTxHandler_test.go b/process/unsigned/feeTxHandler_test.go index 8422784605f..dc918403543 100644 --- a/process/unsigned/feeTxHandler_test.go +++ b/process/unsigned/feeTxHandler_test.go @@ -192,13 +192,13 @@ func TestFeeTxHandler_VerifyCreatedUTxs(t *testing.T) { th.AddProcessedUTx(&feeTx.FeeTx{Value: currTxFee}) err = th.VerifyCreatedUTxs() - assert.Equal(t, process.ErrTxsFeesDoesNotMatch, err) + assert.Equal(t, process.ErrTxsFeesNotFound, err) badValue := big.NewInt(100) th.AddTxFeeFromBlock(&feeTx.FeeTx{Value: badValue}) err = th.VerifyCreatedUTxs() - assert.Equal(t, process.ErrTxsFeesDoesNotMatch, err) + assert.Equal(t, process.ErrTotalTxsFeesDoNotMatch, err) th.CleanProcessedUTxs() @@ -208,7 +208,7 @@ func TestFeeTxHandler_VerifyCreatedUTxs(t *testing.T) { th.AddTxFeeFromBlock(&feeTx.FeeTx{Value: halfCurrTxFee}) err = th.VerifyCreatedUTxs() - assert.Equal(t, process.ErrTxsFeesDoesNotMatch, err) + assert.Equal(t, process.ErrTxsFeesNotFound, err) th.CleanProcessedUTxs() @@ -264,13 +264,13 @@ func TestFeeTxHandler_VerifyInterMiniBlocks(t *testing.T) { th.AddProcessedUTx(&feeTx.FeeTx{Value: currTxFee}) err = th.VerifyInterMiniBlocks(nil) - assert.Equal(t, process.ErrTxsFeesDoesNotMatch, err) + assert.Equal(t, process.ErrTxsFeesNotFound, err) badValue := big.NewInt(100) th.AddTxFeeFromBlock(&feeTx.FeeTx{Value: badValue}) err = th.VerifyInterMiniBlocks(nil) - assert.Equal(t, process.ErrTxsFeesDoesNotMatch, err) + assert.Equal(t, process.ErrTotalTxsFeesDoNotMatch, err) th.CleanProcessedUTxs() @@ -280,7 +280,7 @@ func TestFeeTxHandler_VerifyInterMiniBlocks(t *testing.T) { th.AddTxFeeFromBlock(&feeTx.FeeTx{Value: halfCurrTxFee}) err = th.VerifyInterMiniBlocks(nil) - assert.Equal(t, process.ErrTxsFeesDoesNotMatch, err) + assert.Equal(t, process.ErrTxsFeesNotFound, err) th.CleanProcessedUTxs() From 
01466c56fa9b87fa20bc570830413314dbfe8bfb Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Fri, 16 Aug 2019 14:25:34 +0300 Subject: [PATCH 060/234] consensus, sharding: fix review comments - rename variables, sort imports --- .../spos/commonSubround/subroundStartRound.go | 13 ++++++++++--- sharding/indexHashedNodesCoordinator.go | 14 +++++++------- 2 files changed, 17 insertions(+), 10 deletions(-) diff --git a/consensus/spos/commonSubround/subroundStartRound.go b/consensus/spos/commonSubround/subroundStartRound.go index f9f915b10f8..e66b86a29d3 100644 --- a/consensus/spos/commonSubround/subroundStartRound.go +++ b/consensus/spos/commonSubround/subroundStartRound.go @@ -181,13 +181,20 @@ func (sr *SubroundStartRound) generateNextConsensusGroup(roundIndex int64) error } } - randomSource := currentHeader.GetRandSeed() + randomSeed := currentHeader.GetRandSeed() - log.Info(fmt.Sprintf("random source used to determine the next consensus group is: %s\n", core.ToB64(randomSource))) + log.Info(fmt.Sprintf("random source used to determine the next consensus group is: %s\n", + core.ToB64(randomSeed)), + ) shardId := sr.ShardCoordinator().SelfId() - nextConsensusGroup, err := sr.GetNextConsensusGroup(randomSource, uint64(sr.RoundIndex), shardId, sr.NodesCoordinator()) + nextConsensusGroup, err := sr.GetNextConsensusGroup( + randomSeed, + uint64(sr.RoundIndex), + shardId, + sr.NodesCoordinator(), + ) if err != nil { return err } diff --git a/sharding/indexHashedNodesCoordinator.go b/sharding/indexHashedNodesCoordinator.go index 34dcec5ae50..58fc4dded67 100644 --- a/sharding/indexHashedNodesCoordinator.go +++ b/sharding/indexHashedNodesCoordinator.go @@ -4,9 +4,9 @@ import ( "bytes" "encoding/binary" "fmt" - "github.com/ElrondNetwork/elrond-go/core" "math/big" + "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/hashing" ) @@ -111,14 +111,14 @@ func (ihgs *indexHashedNodesCoordinator) ComputeValidatorsGroup( } tempList := make([]Validator, 0) - cSize := ihgs.consensusGroupSize(shardId) + consensusSize := ihgs.consensusGroupSize(shardId) randomness = []byte(fmt.Sprintf("%d-%s", round, core.ToB64(randomness))) // TODO: pre-compute eligible list and update only on rating change. 
expandedList := ihgs.expandEligibleList(shardId) lenExpandedList := len(expandedList) - for startIdx := 0; startIdx < cSize; startIdx++ { + for startIdx := 0; startIdx < consensusSize; startIdx++ { proposedIndex := ihgs.computeListIndex(startIdx, lenExpandedList, string(randomness)) checkedIndex := ihgs.checkIndex(proposedIndex, expandedList, tempList) tempList = append(tempList, expandedList[checkedIndex]) @@ -176,8 +176,8 @@ func (ihgs *indexHashedNodesCoordinator) GetSelectedPublicKeys(selection []byte, return nil, ErrEligibleSelectionMismatch } - cSize := ihgs.consensusGroupSize(shardId) - publicKeys = make([]string, cSize) + consensusSize := ihgs.consensusGroupSize(shardId) + publicKeys = make([]string, consensusSize) cnt := 0 for i := uint16(0); i < shardEligibleLen; i++ { @@ -190,12 +190,12 @@ func (ihgs *indexHashedNodesCoordinator) GetSelectedPublicKeys(selection []byte, publicKeys[cnt] = string(ihgs.nodesMap[shardId][i].PubKey()) cnt++ - if cnt > cSize { + if cnt > consensusSize { return nil, ErrEligibleTooManySelections } } - if cnt < cSize { + if cnt < consensusSize { return nil, ErrEligibleTooFewSelections } From 9514ca06796e527b79cef56a034867ec29addc0a Mon Sep 17 00:00:00 2001 From: Radu Chis Date: Fri, 16 Aug 2019 18:05:05 +0300 Subject: [PATCH 061/234] Merge Economics Development --- cmd/node/factory/structs.go | 6 +- cmd/node/main.go | 2 +- .../frontend/wallet/txInterception_test.go | 35 +---- .../block/executingMiniblocksSc_test.go | 36 ++++- .../block/executingMiniblocks_test.go | 3 +- .../smartContract/executingSCCalls_test.go | 53 ++++--- integrationTests/node/getAccount_test.go | 4 +- integrationTests/state/genesisState_test.go | 2 +- .../state/stateExecTransaction_test.go | 7 +- integrationTests/state/stateTrie_test.go | 27 ++-- integrationTests/testInitializer.go | 142 ++++++++++++++---- integrationTests/testProcessorNode.go | 44 +++++- integrationTests/testWalletAccount.go | 15 ++ .../block/interceptors/headerInterceptor.go | 39 +++-- process/coordinator/process.go | 32 ++-- process/transaction/process.go | 1 - 16 files changed, 287 insertions(+), 161 deletions(-) diff --git a/cmd/node/factory/structs.go b/cmd/node/factory/structs.go index 687a5696a90..133bd4617a1 100644 --- a/cmd/node/factory/structs.go +++ b/cmd/node/factory/structs.go @@ -1266,6 +1266,7 @@ func newBlockProcessorAndTracker( state *State, forkDetector process.ForkDetector, shardsGenesisBlocks map[uint32]data.HeaderHandler, + nodesConfig *sharding.NodesSetup, coreServiceContainer serviceContainer.Core, ) (process.BlockProcessor, process.BlocksTracker, error) { @@ -1306,6 +1307,7 @@ func newShardBlockProcessorAndTracker( state *State, forkDetector process.ForkDetector, shardsGenesisBlocks map[uint32]data.HeaderHandler, + nodesConfig *sharding.NodesSetup, coreServiceContainer serviceContainer.Core, ) (process.BlockProcessor, process.BlocksTracker, error) { argsParser, err := smartContract.NewAtArgumentParser() @@ -1365,10 +1367,6 @@ func newShardBlockProcessorAndTracker( return nil, nil, process.ErrWrongTypeAssertion } - //TODO replace this with a vm factory - cryptoHook := hooks.NewVMCryptoHook() - ieleVM := endpoint.NewElrondIeleVM(vmAccountsDB, cryptoHook, endpoint.ElrondTestnet) - scProcessor, err := smartContract.NewSmartContractProcessor( vmContainer, argsParser, diff --git a/cmd/node/main.go b/cmd/node/main.go index 3f6b3fa9aee..28f0a5a9d25 100644 --- a/cmd/node/main.go +++ b/cmd/node/main.go @@ -1165,7 +1165,7 @@ func createNode( if err != nil { return nil, err } - err = 
nd.StartHeartbeat(config.Heartbeat) + err = nd.StartHeartbeat(config.Heartbeat, version, config.GeneralSettings.NodeDisplayName) if err != nil { return nil, err } diff --git a/integrationTests/frontend/wallet/txInterception_test.go b/integrationTests/frontend/wallet/txInterception_test.go index 67677f1c0a4..1b76b5f9507 100644 --- a/integrationTests/frontend/wallet/txInterception_test.go +++ b/integrationTests/frontend/wallet/txInterception_test.go @@ -6,9 +6,6 @@ import ( "testing" "time" - "github.com/ElrondNetwork/elrond-go/core/mock" - "github.com/ElrondNetwork/elrond-go/data/state" - "github.com/ElrondNetwork/elrond-go/data/state/addressConverters" "github.com/ElrondNetwork/elrond-go/data/transaction" "github.com/ElrondNetwork/elrond-go/integrationTests" "github.com/stretchr/testify/assert" @@ -101,31 +98,6 @@ func testInterceptedTxFromFrontendGeneratedParams( t.Skip("this is not a short test") } - dPool := createTestDataPool() - startingNonce := uint64(0) - - addrConverter, _ := addressConverters.NewPlainAddressConverter(32, "0x") - accntAdapter := createAccountsDB() - - shardCoordinator := &sharding.OneShardCoordinator{} - nodesCoordinator, _ := sharding.NewIndexHashedNodesCoordinator( - 1, - 1, - mock.HasherMock{}, - 0, - 1, - make(map[uint32][]sharding.Validator), - ) - - n, _, sk, _ := createNetNode(dPool, accntAdapter, shardCoordinator, nodesCoordinator) - - //set the account's nonce to startingNonce - nodePubKeyBytes, _ := sk.GeneratePublic().ToByteArray() - nodeAddress, _ := addrConverter.CreateAddressFromPublicKeyBytes(nodePubKeyBytes) - nodeAccount, _ := accntAdapter.GetAccountWithJournal(nodeAddress) - _ = nodeAccount.(*state.Account).SetNonceWithJournal(startingNonce) - _, _ = accntAdapter.Commit() - chDone := make(chan struct{}) maxShards := uint32(1) @@ -133,7 +105,12 @@ func testInterceptedTxFromFrontendGeneratedParams( txSignPrivKeyShardId := uint32(0) initialNodeAddr := "nodeAddr" - node := integrationTests.NewTestProcessorNode(maxShards, nodeShardId, txSignPrivKeyShardId, initialNodeAddr) + node := integrationTests.NewTestProcessorNode( + maxShards, + nodeShardId, + txSignPrivKeyShardId, + initialNodeAddr, + ) txHexHash := "" diff --git a/integrationTests/multiShard/block/executingMiniblocksSc_test.go b/integrationTests/multiShard/block/executingMiniblocksSc_test.go index 29f544f765f..b2f85635446 100644 --- a/integrationTests/multiShard/block/executingMiniblocksSc_test.go +++ b/integrationTests/multiShard/block/executingMiniblocksSc_test.go @@ -36,17 +36,45 @@ func TestProcessWithScTxsTopUpAndWithdrawOnlyProposers(t *testing.T) { _ = advertiser.Bootstrap() advertiserAddr := integrationTests.GetConnectableAddress(advertiser) - nodeShard0 := integrationTests.NewTestProcessorNode( - maxShards, + numMetaChainNodes := 1 + cp := integrationTests.CreateCryptoParams(1, numMetaChainNodes, maxShards) + keysMap := integrationTests.PubKeysMapFromKeysMap(cp.Keys) + validatorsMap := integrationTests.GenValidatorsFromPubKeys(keysMap) + + nodesCoordinator0, _ := sharding.NewIndexHashedNodesCoordinator( + 1, + 1, + integrationTests.TestHasher, 0, + maxShards, + validatorsMap, + ) + + nodeShard0 := integrationTests.NewTestProcessorNodeWithCustomNodesCoordinator( + maxShards, 0, advertiserAddr, + nodesCoordinator0, + cp, + 0, ) - nodeShard1 := integrationTests.NewTestProcessorNode( - maxShards, + + nodesCoordinator1, _ := sharding.NewIndexHashedNodesCoordinator( + 1, 1, + integrationTests.TestHasher, + 1, + maxShards, + validatorsMap, + ) + + nodeShard1 := 
integrationTests.NewTestProcessorNodeWithCustomNodesCoordinator( + maxShards, 1, advertiserAddr, + nodesCoordinator1, + cp, + 0, ) hardCodedSk, _ := hex.DecodeString("5561d28b0d89fa425bbbf9e49a018b5d1e4a462c03d2efce60faf9ddece2af06") hardCodedScResultingAddress, _ := hex.DecodeString("000000000000000000005fed9c659422cd8429ce92f8973bba2a9fb51e0eb3a1") diff --git a/integrationTests/multiShard/block/executingMiniblocks_test.go b/integrationTests/multiShard/block/executingMiniblocks_test.go index 3b4e9f422f8..89623ad5d09 100644 --- a/integrationTests/multiShard/block/executingMiniblocks_test.go +++ b/integrationTests/multiShard/block/executingMiniblocks_test.go @@ -74,7 +74,8 @@ func TestShouldProcessBlocksInMultiShardArchitecture(t *testing.T) { } fmt.Println("Generating transactions...") - integrationTests.GenerateAndDisseminateTxs(proposerNode, sendersPrivateKeys, receiversPrivateKeys, valToTransferPerTx) + integrationTests.GenerateAndDisseminateTxs(proposerNode, sendersPrivateKeys, receiversPrivateKeys, + valToTransferPerTx, gasPricePerTx, gasLimitPerTx) fmt.Println("Delaying for disseminating transactions...") time.Sleep(time.Second * 5) diff --git a/integrationTests/multiShard/smartContract/executingSCCalls_test.go b/integrationTests/multiShard/smartContract/executingSCCalls_test.go index 5cc73d072ab..ec3fa973adf 100644 --- a/integrationTests/multiShard/smartContract/executingSCCalls_test.go +++ b/integrationTests/multiShard/smartContract/executingSCCalls_test.go @@ -104,12 +104,14 @@ func TestProcessSCCallsInMultiShardArchitecture_FirstShard(t *testing.T) { advertiser, nodes := createScCallsNodes() defer func() { _ = advertiser.Close() - for _, n := range nodes { - _ = n.node.Stop() + for _, nodeList := range nodes { + for _, n := range nodeList { + _ = n.node.Stop() + } } }() - proposerNodeShard1 := nodes[0] + proposerNodeShard1 := nodes[0][0] // delay for bootstrapping and topic announcement fmt.Println("Delaying for node bootstrap and topic announcement...") @@ -118,7 +120,7 @@ func TestProcessSCCallsInMultiShardArchitecture_FirstShard(t *testing.T) { senderAddressBytes := []byte("12345678901234567890123456789012") // Minting sender account - createMintingForSenders(nodes, senderShard, [][]byte{senderAddressBytes}, senderMintingValue) + createMintingForSenders(nodes[0], senderShard, [][]byte{senderAddressBytes}, senderMintingValue) // should deploy smart contract -> we process a block containing only the sc deployment tx deploySmartContract(t, proposerNodeShard1, generalRoundNumber, senderAddressBytes, senderNonce) @@ -178,20 +180,22 @@ func TestProcessSCCallsInMultiShardArchitecture_FirstShardReceivesCallFromSecond generalRoundNumber := uint64(1) senderShard := uint32(0) receiverShard := uint32(1) - senderNonce := uint64(0) + senderNonce := uint64(1) mintingValue := big.NewInt(100000000) - receiverNonce := uint64(0) + receiverNonce := uint64(1) advertiser, nodes := createScCallsNodes() defer func() { _ = advertiser.Close() - for _, n := range nodes { - _ = n.node.Stop() + for _, nodeList := range nodes { + for _, n := range nodeList { + _ = n.node.Stop() + } } }() - proposerNodeShard1 := nodes[0] - proposerNodeShard2 := nodes[1] + proposerNodeShard1 := nodes[0][0] + proposerNodeShard2 := nodes[1][0] // delay for bootstrapping and topic announcement fmt.Println("Delaying for node bootstrap and topic announcement...") @@ -212,7 +216,6 @@ func TestProcessSCCallsInMultiShardArchitecture_FirstShardReceivesCallFromSecond expectedValue := big.NewInt(0) expectedValue.Sub(mintingValue, 
big.NewInt(opGas*1)) assert.Equal(t, expectedValue, acc.Balance) - senderNonce++ assert.Equal(t, senderNonce, acc.Nonce) @@ -244,8 +247,6 @@ func TestProcessSCCallsInMultiShardArchitecture_FirstShardReceivesCallFromSecond afterFee := big.NewInt(0).Sub(mintingValue, big.NewInt(0).SetUint64(contractCallTx.GasLimit*contractCallTx.GasPrice)) assert.Equal(t, afterFee, acc.Balance) - assert.Equal(t, receiverNonce, acc.Nonce) - receiverNonce++ assert.Equal(t, receiverNonce, acc.Nonce) @@ -261,7 +262,7 @@ func TestProcessSCCallsInMultiShardArchitecture_FirstShardReceivesCallFromSecond storedVal, _ := scAccount.DataTrieTracker().RetrieveValue([]byte("a")) storedValBI := big.NewInt(0).SetBytes(storedVal) - assert.Equal(t, big.NewInt(int64(initialValueForInternalVariable + addValue)), storedValBI) + assert.Equal(t, big.NewInt(int64(initialValueForInternalVariable+addValue)), storedValBI) } // Test within a network of two shards the following situation @@ -278,20 +279,22 @@ func TestProcessSCCallsInMultiShardArchitecture_FirstShardReceivesCallFromSecond generalRoundNumber := uint64(1) scShard := uint32(0) accShard := uint32(1) - accNonce := uint64(0) + accNonce := uint64(1) mintingValue := big.NewInt(100000000) - scNonce := uint64(0) + scNonce := uint64(1) advertiser, nodes := createScCallsNodes() defer func() { _ = advertiser.Close() - for _, n := range nodes { - _ = n.node.Stop() + for _, nodeList := range nodes { + for _, n := range nodeList { + _ = n.node.Stop() + } } }() - proposerNodeShardSC := nodes[0] - proposerNodeShardAccount := nodes[1] + proposerNodeShardSC := nodes[0][0] + proposerNodeShardAccount := nodes[1][0] // delay for bootstrapping and topic announcement fmt.Println("Delaying for node bootstrap and topic announcement...") @@ -301,8 +304,8 @@ func TestProcessSCCallsInMultiShardArchitecture_FirstShardReceivesCallFromSecond accountShardAddressBytes := []byte("12345678901234567890123456789011") // Minting sender account - createMintingForSenders(nodes, scShard, [][]byte{scAccountAddressBytes}, mintingValue) - createMintingForSenders(nodes, accShard, [][]byte{accountShardAddressBytes}, mintingValue) + createMintingForSenders(nodes[0], scShard, [][]byte{scAccountAddressBytes}, mintingValue) + createMintingForSenders(nodes[1], accShard, [][]byte{accountShardAddressBytes}, mintingValue) // should deploy smart contract -> we process a block containing only the sc deployment tx deploySmartContract(t, proposerNodeShardSC, generalRoundNumber, scAccountAddressBytes, accNonce) @@ -312,16 +315,16 @@ func TestProcessSCCallsInMultiShardArchitecture_FirstShardReceivesCallFromSecond expectedValue := big.NewInt(0) expectedValue.Sub(mintingValue, big.NewInt(opGas*1)) assert.Equal(t, expectedValue, acc.Balance) - accNonce++ assert.Equal(t, accNonce, acc.Nonce) + accNonce++ generalRoundNumber++ // setting the sc deployment address (printed by the transaction processer) scDeploymentAdddress, _ := hex.DecodeString("ca26d3e6152af91949295cc89f419413e08aa04ba2d5e1ed2b199b2ca8aabc2a") // Update the SC account balance so we can call withdraw function - createMintingForSenders(nodes, scShard, [][]byte{scDeploymentAdddress}, mintingValue) + createMintingForSenders(nodes[0], scShard, [][]byte{scDeploymentAdddress}, mintingValue) // Now that the SC is deployed, we test a call from an account located in the second shard withdrawValue := uint64(100) @@ -336,7 +339,6 @@ func TestProcessSCCallsInMultiShardArchitecture_FirstShardReceivesCallFromSecond ) // The account shard should process this tx as MoveBalance - 
scNonce++ processAndTestSmartContractCallInSender( t, contractCallTx, @@ -346,6 +348,7 @@ func TestProcessSCCallsInMultiShardArchitecture_FirstShardReceivesCallFromSecond mintingValue, scNonce, ) + scNonce++ generalRoundNumber++ // After second shard processed the transaction, tx should get into the first shard where the SC resides diff --git a/integrationTests/node/getAccount_test.go b/integrationTests/node/getAccount_test.go index f190a56760f..748969753dd 100644 --- a/integrationTests/node/getAccount_test.go +++ b/integrationTests/node/getAccount_test.go @@ -13,7 +13,7 @@ import ( func TestNode_GetAccountAccountDoesNotExistsShouldRetEmpty(t *testing.T) { t.Parallel() - accDB, _, _ := integrationTests.CreateAccountsDB(nil) + accDB := integrationTests.CreateAccountsDB(0) n, _ := node.NewNode( node.WithAccountsAdapter(accDB), @@ -32,7 +32,7 @@ func TestNode_GetAccountAccountDoesNotExistsShouldRetEmpty(t *testing.T) { func TestNode_GetAccountAccountExistsShouldReturn(t *testing.T) { t.Parallel() - accDB, _, _ := integrationTests.CreateAccountsDB(nil) + accDB := integrationTests.CreateAccountsDB(0) addressHex := integrationTests.CreateRandomHexString(64) addressBytes, _ := hex.DecodeString(addressHex) diff --git a/integrationTests/state/genesisState_test.go b/integrationTests/state/genesisState_test.go index 34c8ba78aba..2b85fec5f5c 100644 --- a/integrationTests/state/genesisState_test.go +++ b/integrationTests/state/genesisState_test.go @@ -299,7 +299,7 @@ func printTestDebugLines( } func getRootHashByRunningInitialBalances(initialBalances []*sharding.InitialBalance) ([]byte, state.AccountsAdapter) { - adb, _, _ := integrationTests.CreateAccountsDB(nil) + adb := integrationTests.CreateAccountsDB(0) uniformIndexes := make([]int, len(initialBalances)) for i := 0; i < len(initialBalances); i++ { diff --git a/integrationTests/state/stateExecTransaction_test.go b/integrationTests/state/stateExecTransaction_test.go index a171d651596..4e2a9a3737e 100644 --- a/integrationTests/state/stateExecTransaction_test.go +++ b/integrationTests/state/stateExecTransaction_test.go @@ -3,6 +3,9 @@ package state import ( "encoding/base64" "fmt" + "github.com/ElrondNetwork/elrond-go/data/state/addressConverters" + "github.com/ElrondNetwork/elrond-go/hashing/sha256" + "github.com/ElrondNetwork/elrond-go/marshal" "math/big" "testing" @@ -122,7 +125,7 @@ func TestExecTransaction_SelfTransactionWithRevertShouldWork(t *testing.T) { func TestExecTransaction_MoreTransactionsWithRevertShouldWork(t *testing.T) { t.Parallel() - accnts, _, _ := integrationTests.CreateAccountsDB(nil) + accnts := integrationTests.CreateAccountsDB(0) nonce := uint64(6) initialBalance := int64(100000) @@ -217,7 +220,7 @@ func testExecTransactionsMoreTxWithRevert( func TestExecTransaction_MoreTransactionsMoreIterationsWithRevertShouldWork(t *testing.T) { t.Parallel() - accnts, _, _ := integrationTests.CreateAccountsDB(nil) + accnts := integrationTests.CreateAccountsDB(0) nonce := uint64(6) initialBalance := int64(100000) diff --git a/integrationTests/state/stateTrie_test.go b/integrationTests/state/stateTrie_test.go index 325799355f5..42d71498786 100644 --- a/integrationTests/state/stateTrie_test.go +++ b/integrationTests/state/stateTrie_test.go @@ -152,7 +152,7 @@ func TestAccountsDB_GetJournalizedAccountReturnNotFoundAccntShouldWork(t *testin func TestAccountsDB_GetExistingAccountConcurrentlyShouldWork(t *testing.T) { t.Parallel() - adb, _, _ := integrationTests.CreateAccountsDB(nil) + adb := integrationTests.CreateAccountsDB(0) wg := 
sync.WaitGroup{} wg.Add(2000) @@ -283,7 +283,7 @@ func TestAccountsDB_CommitTwoOkAccountsWithRecreationFromStorageShouldWork(t *te //verifies that commit saves the new tries and that can be loaded back t.Parallel() - adb, _, mu := integrationTests.CreateAccountsDB(nil) + adb := integrationTests.CreateAccountsDB(0) adr1 := integrationTests.CreateRandomAddress() adr2 := integrationTests.CreateRandomAddress() @@ -316,7 +316,8 @@ func TestAccountsDB_CommitTwoOkAccountsWithRecreationFromStorageShouldWork(t *te assert.Nil(t, err) fmt.Printf("Data committed! Root: %v\n", base64.StdEncoding.EncodeToString(rootHash)) - tr, _ := trie.NewTrie(mu, integrationTests.TestMarshalizer, integrationTests.TestHasher) + store := integrationTests.CreateMemUnit() + tr, _ := trie.NewTrie(store, integrationTests.TestMarshalizer, integrationTests.TestHasher) adb, _ = state.NewAccountsDB(tr, integrationTests.TestHasher, integrationTests.TestMarshalizer, factory.NewAccountCreator()) //reloading a new trie to test if data is inside @@ -348,7 +349,7 @@ func TestAccountsDB_CommitAnEmptyStateShouldWork(t *testing.T) { } }() - adb, _, _ := integrationTests.CreateAccountsDB(nil) + adb := integrationTests.CreateAccountsDB(0) hash, err := adb.Commit() @@ -418,7 +419,7 @@ func TestAccountsDB_RevertNonceStepByStepAccountDataShouldWork(t *testing.T) { adr2 := integrationTests.CreateRandomAddress() //Step 1. create accounts objects - adb, _, _ := integrationTests.CreateAccountsDB(nil) + adb := integrationTests.CreateAccountsDB(0) rootHash, err := adb.RootHash() assert.Nil(t, err) hrEmpty := base64.StdEncoding.EncodeToString(rootHash) @@ -489,7 +490,7 @@ func TestAccountsDB_RevertBalanceStepByStepAccountDataShouldWork(t *testing.T) { adr2 := integrationTests.CreateRandomAddress() //Step 1. create accounts objects - adb, _, _ := integrationTests.CreateAccountsDB(nil) + adb := integrationTests.CreateAccountsDB(0) rootHash, err := adb.RootHash() assert.Nil(t, err) hrEmpty := base64.StdEncoding.EncodeToString(rootHash) @@ -561,7 +562,7 @@ func TestAccountsDB_RevertCodeStepByStepAccountDataShouldWork(t *testing.T) { adr2 := integrationTests.CreateRandomAddress() //Step 1. create accounts objects - adb, _, _ := integrationTests.CreateAccountsDB(nil) + adb := integrationTests.CreateAccountsDB(0) rootHash, err := adb.RootHash() assert.Nil(t, err) hrEmpty := base64.StdEncoding.EncodeToString(rootHash) @@ -627,7 +628,7 @@ func TestAccountsDB_RevertDataStepByStepAccountDataShouldWork(t *testing.T) { adr2 := integrationTests.CreateRandomAddress() //Step 1. create accounts objects - adb, _, _ := integrationTests.CreateAccountsDB(nil) + adb := integrationTests.CreateAccountsDB(0) rootHash, err := adb.RootHash() assert.Nil(t, err) hrEmpty := base64.StdEncoding.EncodeToString(rootHash) @@ -702,7 +703,7 @@ func TestAccountsDB_RevertDataStepByStepWithCommitsAccountDataShouldWork(t *test adr2 := integrationTests.CreateRandomAddress() //Step 1. create accounts objects - adb, _, _ := integrationTests.CreateAccountsDB(nil) + adb := integrationTests.CreateAccountsDB(0) rootHash, err := adb.RootHash() assert.Nil(t, err) hrEmpty := base64.StdEncoding.EncodeToString(rootHash) @@ -797,7 +798,7 @@ func TestAccountsDB_ExecBalanceTxExecution(t *testing.T) { adrDest := integrationTests.CreateRandomAddress() //Step 1. 
create accounts objects - adb, _, _ := integrationTests.CreateAccountsDB(nil) + adb := integrationTests.CreateAccountsDB(0) acntSrc, err := adb.GetAccountWithJournal(adrSrc) assert.Nil(t, err) @@ -850,7 +851,7 @@ func TestAccountsDB_ExecALotOfBalanceTxOK(t *testing.T) { adrDest := integrationTests.CreateRandomAddress() //Step 1. create accounts objects - adb, _, _ := integrationTests.CreateAccountsDB(nil) + adb := integrationTests.CreateAccountsDB(0) acntSrc, err := adb.GetAccountWithJournal(adrSrc) assert.Nil(t, err) @@ -883,7 +884,7 @@ func TestAccountsDB_ExecALotOfBalanceTxOKorNOK(t *testing.T) { adrDest := integrationTests.CreateRandomAddress() //Step 1. create accounts objects - adb, _, _ := integrationTests.CreateAccountsDB(nil) + adb := integrationTests.CreateAccountsDB(0) acntSrc, err := adb.GetAccountWithJournal(adrSrc) assert.Nil(t, err) @@ -1048,7 +1049,7 @@ func BenchmarkTxExecution(b *testing.B) { adrDest := integrationTests.CreateRandomAddress() //Step 1. create accounts objects - adb, _, _ := integrationTests.CreateAccountsDB(nil) + adb := integrationTests.CreateAccountsDB(0) acntSrc, err := adb.GetAccountWithJournal(adrSrc) assert.Nil(b, err) diff --git a/integrationTests/testInitializer.go b/integrationTests/testInitializer.go index a914d35c8bb..536c0dbdcfe 100644 --- a/integrationTests/testInitializer.go +++ b/integrationTests/testInitializer.go @@ -8,6 +8,7 @@ import ( "encoding/binary" "encoding/hex" "fmt" + "github.com/ElrondNetwork/elrond-go/hashing/sha256" "math/big" "strings" "sync" @@ -189,20 +190,15 @@ func CreateMetaStore(coordinator sharding.Coordinator) dataRetriever.StorageServ } // CreateAccountsDB creates an account state with a valid trie implementation but with a memory storage -func CreateAccountsDB(shardCoordinator sharding.Coordinator) (*state.AccountsDB, data.Trie, storage.Storer) { - - var accountFactory state.AccountFactory - if shardCoordinator == nil { - accountFactory = factory.NewAccountCreator() - } else { - accountFactory, _ = factory.NewAccountFactoryCreator(shardCoordinator) - } - +func CreateAccountsDB(accountType factory.Type) *state.AccountsDB { + hasher := sha256.Sha256{} store := CreateMemUnit() - tr, _ := trie.NewTrie(store, TestMarshalizer, TestHasher) - adb, _ := state.NewAccountsDB(tr, TestHasher, TestMarshalizer, accountFactory) - return adb, tr, store + tr, _ := trie.NewTrie(store, TestMarshalizer, hasher) + accountFactory, _ := factory.NewAccountFactoryCreator(accountType) + adb, _ := state.NewAccountsDB(tr, sha256.Sha256{}, TestMarshalizer, accountFactory) + + return adb } // CreateShardChain creates a blockchain implementation used by the shard nodes @@ -366,7 +362,7 @@ func CreateRandomHexString(chars int) string { // GenerateAddressJournalAccountAccountsDB returns an account, the accounts address, and the accounts database func GenerateAddressJournalAccountAccountsDB() (state.AddressContainer, state.AccountHandler, *state.AccountsDB) { adr := CreateRandomAddress() - adb, _, _ := CreateAccountsDB(nil) + adb := CreateAccountsDB(factory.UserAccount) account, _ := state.NewAccount(adr, adb) return adr, account, adb @@ -421,7 +417,8 @@ func AdbEmulateBalanceTxExecution(acntSrc, acntDest *state.Account, value *big.I // CreateSimpleTxProcessor returns a transaction processor func CreateSimpleTxProcessor(accnts state.AccountsAdapter) process.TransactionProcessor { shardCoordinator := mock.NewMultiShardsCoordinatorMock(1) - txProcessor, _ := txProc.NewTxProcessor(accnts, TestHasher, TestAddressConverter, TestMarshalizer, 
shardCoordinator, &mock.SCProcessorMock{}) + txProcessor, _ := txProc.NewTxProcessor(accnts, TestHasher, TestAddressConverter, TestMarshalizer, + shardCoordinator, &mock.SCProcessorMock{}, &mock.UnsignedTxHandlerMock{}, &mock.TxTypeHandlerMock{}) return txProcessor } @@ -656,18 +653,54 @@ func CreateNodes( //first node generated will have is pk belonging to firstSkShardId nodes := make([]*TestProcessorNode, numOfShards*nodesPerShard+numMetaChainNodes) + cp := CreateCryptoParams(nodesPerShard, numMetaChainNodes, uint32(numOfShards)) + keysMap := PubKeysMapFromKeysMap(cp.Keys) + validatorsMap := GenValidatorsFromPubKeys(keysMap) + idx := 0 for shardId := 0; shardId < numOfShards; shardId++ { + nodesCoordinator, _ := sharding.NewIndexHashedNodesCoordinator( + 1, + 1, + TestHasher, + uint32(shardId), + uint32(numOfShards), + validatorsMap, + ) + for j := 0; j < nodesPerShard; j++ { - n := NewTestProcessorNode(uint32(numOfShards), uint32(shardId), uint32(shardId), serviceID) + n := NewTestProcessorNodeWithCustomNodesCoordinator( + uint32(numOfShards), + uint32(shardId), + serviceID, + nodesCoordinator, + cp, + j, + ) nodes[idx] = n idx++ } } + nodesCoordinatorMeta, _ := sharding.NewIndexHashedNodesCoordinator( + 1, + 1, + TestHasher, + uint32(sharding.MetachainShardId), + uint32(numOfShards), + validatorsMap, + ) for i := 0; i < numMetaChainNodes; i++ { - metaNode := NewTestProcessorNode(uint32(numOfShards), sharding.MetachainShardId, 0, serviceID) + metaNode := NewTestProcessorNodeWithCustomNodesCoordinator( + uint32(numOfShards), + sharding.MetachainShardId, + serviceID, + nodesCoordinatorMeta, + cp, + i, + ) + idx := i + numOfShards*nodesPerShard nodes[idx] = metaNode } @@ -700,6 +733,8 @@ func GenerateAndDisseminateTxs( senders []crypto.PrivateKey, receiversPrivateKeys map[uint32][]crypto.PrivateKey, valToTransfer *big.Int, + gasPrice uint64, + gasLimit uint64, ) { for i := 0; i < len(senders); i++ { @@ -707,16 +742,7 @@ func GenerateAndDisseminateTxs( incrementalNonce := uint64(0) for _, recvPrivateKeys := range receiversPrivateKeys { receiverKey := recvPrivateKeys[i] - tx := generateTx( - senderKey, - n.OwnAccount.SingleSigner, - &txArgs{ - nonce: incrementalNonce, - value: valToTransfer, - rcvAddr: skToPk(receiverKey), - sndAddr: skToPk(senderKey), - }, - ) + tx := generateTransferTx(incrementalNonce, senderKey, receiverKey, valToTransfer, gasPrice, gasLimit) _, _ = n.SendTransaction(tx) incrementalNonce++ } @@ -733,6 +759,31 @@ type txArgs struct { gasLimit int } +func generateTransferTx( + nonce uint64, + sender crypto.PrivateKey, + receiver crypto.PrivateKey, + valToTransfer *big.Int, + gasPrice uint64, + gasLimit uint64, +) *transaction.Transaction { + + tx := transaction.Transaction{ + Nonce: nonce, + Value: valToTransfer, + RcvAddr: skToPk(receiver), + SndAddr: skToPk(sender), + Data: "", + GasLimit: gasLimit, + GasPrice: gasPrice, + } + txBuff, _ := TestMarshalizer.Marshal(&tx) + signer := &singlesig.SchnorrSigner{} + tx.Signature, _ = signer.Sign(sender, txBuff) + + return &tx +} + func generateTx( skSign crypto.PrivateKey, signer crypto.SingleSigner, @@ -974,7 +1025,7 @@ func generateValidTx( _, pkRecv, _ := GenerateSkAndPkInShard(shardCoordinator, receiverShardId) pkRecvBuff, _ := pkRecv.ToByteArray() - accnts, _, _ := CreateAccountsDB(shardCoordinator) + accnts := CreateAccountsDB(factory.UserAccount) addrSender, _ := TestAddressConverter.CreateAddressFromPublicKeyBytes(pkSenderBuff) _, _ = accnts.GetAccountWithJournal(addrSender) _, _ = accnts.Commit() @@ -1111,7 +1162,7 @@ 
func CreateValidatorKeys(nodesPerShard int, nbMetaNodes int, nbShards int) map[u return keysMap } -func pubKeysMapFromKeysMap(keyPairMap map[uint32][]*TestKeyPair) map[uint32][]string { +func PubKeysMapFromKeysMap(keyPairMap map[uint32][]*TestKeyPair) map[uint32][]string { keysMap := make(map[uint32][]string, 0) for shardId, pairList := range keyPairMap { @@ -1126,7 +1177,7 @@ func pubKeysMapFromKeysMap(keyPairMap map[uint32][]*TestKeyPair) map[uint32][]st return keysMap } -func genValidatorsFromPubKeys(pubKeysMap map[uint32][]string) map[uint32][]sharding.Validator { +func GenValidatorsFromPubKeys(pubKeysMap map[uint32][]string) map[uint32][]sharding.Validator { validatorsMap := make(map[uint32][]sharding.Validator) for shardId, shardNodesPks := range pubKeysMap { @@ -1140,3 +1191,36 @@ func genValidatorsFromPubKeys(pubKeysMap map[uint32][]string) map[uint32][]shard return validatorsMap } + +func CreateCryptoParams(nodesPerShard int, nbMetaNodes int, nbShards uint32) *CryptoParams { + suite := kyber.NewBlakeSHA256Ed25519() + singleSigner := &singlesig.SchnorrSigner{} + keyGen := signing.NewKeyGenerator(suite) + + keysMap := make(map[uint32][]*TestKeyPair) + keyPairs := make([]*TestKeyPair, nodesPerShard) + for shardId := uint32(0); shardId < nbShards; shardId++ { + for n := 0; n < nodesPerShard; n++ { + kp := &TestKeyPair{} + kp.sk, kp.pk = keyGen.GeneratePair() + keyPairs[n] = kp + } + keysMap[uint32(shardId)] = keyPairs + } + + keyPairs = make([]*TestKeyPair, nbMetaNodes) + for n := 0; n < nbMetaNodes; n++ { + kp := &TestKeyPair{} + kp.sk, kp.pk = keyGen.GeneratePair() + keyPairs[n] = kp + } + keysMap[sharding.MetachainShardId] = keyPairs + + params := &CryptoParams{ + Keys: keysMap, + KeyGen: keyGen, + SingleSigner: singleSigner, + } + + return params +} diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index f439931ef77..2ea2e5a4639 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -4,6 +4,7 @@ import ( "context" "encoding/hex" "fmt" + "github.com/ElrondNetwork/elrond-go/crypto" "sync/atomic" "time" @@ -15,7 +16,6 @@ import ( dataBlock "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/data/state/addressConverters" - accountfactory "github.com/ElrondNetwork/elrond-go/data/state/factory" dataTransaction "github.com/ElrondNetwork/elrond-go/data/transaction" "github.com/ElrondNetwork/elrond-go/data/typeConverters/uint64ByteSlice" "github.com/ElrondNetwork/elrond-go/dataRetriever" @@ -58,12 +58,19 @@ var TestMultiSig = mock.NewMultiSigner(1) // TestUint64Converter represents an uint64 to byte slice converter var TestUint64Converter = uint64ByteSlice.NewBigEndianConverter() -// TestKeyPair holds a pair of private/public keys +// TestKeyPair holds a pair of private/public Keys type TestKeyPair struct { sk crypto.PrivateKey pk crypto.PublicKey } +//CryptoParams holds crypto parametres +type CryptoParams struct { + KeyGen crypto.KeyGenerator + Keys map[uint32][]*TestKeyPair + SingleSigner crypto.SingleSigner +} + // TestProcessorNode represents a container type of class used in integration tests // with all its fields exported type TestProcessorNode struct { @@ -136,6 +143,35 @@ func NewTestProcessorNode( return tpn } +// NewTestProcessorNodeWithCustomNodesCoordinator returns a new TestProcessorNode instance with custom NodesCoordinator +func NewTestProcessorNodeWithCustomNodesCoordinator( + maxShards uint32, + nodeShardId 
uint32, + initialNodeAddr string, + nodesCoordinator sharding.NodesCoordinator, + cp *CryptoParams, + keyIndex int, +) *TestProcessorNode { + + shardCoordinator, _ := sharding.NewMultiShardCoordinator(maxShards, nodeShardId) + + messenger := CreateMessengerWithKadDht(context.Background(), initialNodeAddr) + tpn := &TestProcessorNode{ + ShardCoordinator: shardCoordinator, + Messenger: messenger, + NodesCoordinator: nodesCoordinator, + } + + sk := cp.Keys[nodeShardId][keyIndex].sk + pk := cp.Keys[nodeShardId][keyIndex].pk + + tpn.OwnAccount = CreateTestWalletAccountWithSkPk(sk, pk, cp.KeyGen) + tpn.initDataPools() + tpn.initTestNode() + + return tpn +} + // NewTestProcessorNodeWithCustomDataPool returns a new TestProcessorNode instance with the given data pool func NewTestProcessorNodeWithCustomDataPool(maxShards uint32, nodeShardId uint32, txSignPrivKeyShardId uint32, initialNodeAddr string, dPool dataRetriever.PoolsHolder) *TestProcessorNode { shardCoordinator, _ := sharding.NewMultiShardCoordinator(maxShards, nodeShardId) @@ -159,7 +195,7 @@ func NewTestProcessorNodeWithCustomDataPool(maxShards uint32, nodeShardId uint32 func (tpn *TestProcessorNode) initTestNode() { tpn.initStorage() - tpn.AccntState, _, _ = CreateAccountsDB(tpn.ShardCoordinator) + tpn.AccntState = CreateAccountsDB(0) tpn.initChainHandler() tpn.GenesisBlocks = CreateGenesisBlocks(tpn.ShardCoordinator) tpn.initInterceptors() @@ -541,7 +577,7 @@ func (tpn *TestProcessorNode) ProposeBlock(round uint64, nonce uint64) (data.Bod blockHeader.SetRound(round) blockHeader.SetNonce(nonce) - blockHeader.SetPubKeysBitmap(make([]byte, 0)) + blockHeader.SetPubKeysBitmap([]byte{1}) sig, _ := TestMultiSig.AggregateSigs(nil) blockHeader.SetSignature(sig) currHdr := tpn.BlockChain.GetCurrentBlockHeader() diff --git a/integrationTests/testWalletAccount.go b/integrationTests/testWalletAccount.go index 6984fa40248..4acd810dc71 100644 --- a/integrationTests/testWalletAccount.go +++ b/integrationTests/testWalletAccount.go @@ -31,6 +31,21 @@ func CreateTestWalletAccount(coordinator sharding.Coordinator, shardId uint32) * return testWalletAccount } +// CreateTestWalletAccount creates an wallett account in a selected shard +func CreateTestWalletAccountWithSkPk(sk crypto.PrivateKey, pk crypto.PublicKey, keyGen crypto.KeyGenerator) *TestWalletAccount { + testWalletAccount := &TestWalletAccount{} + + testWalletAccount.SingleSigner = &singlesig.SchnorrSigner{} + + testWalletAccount.SkTxSign = sk + testWalletAccount.PkTxSign = pk + testWalletAccount.PkTxSignBytes, _ = pk.ToByteArray() + testWalletAccount.KeygenTxSign = keyGen + testWalletAccount.Address, _ = TestAddressConverter.CreateAddressFromPublicKeyBytes(testWalletAccount.PkTxSignBytes) + + return testWalletAccount +} + // initCrypto initializes the crypto for the account func (twa *TestWalletAccount) initCrypto(coordinator sharding.Coordinator, shardId uint32) { twa.SingleSigner = &singlesig.SchnorrSigner{} diff --git a/process/block/interceptors/headerInterceptor.go b/process/block/interceptors/headerInterceptor.go index c365586c598..d69958137d1 100644 --- a/process/block/interceptors/headerInterceptor.go +++ b/process/block/interceptors/headerInterceptor.go @@ -15,15 +15,14 @@ import ( // HeaderInterceptor represents an interceptor used for block headers type HeaderInterceptor struct { - marshalizer marshal.Marshalizer - storer storage.Storer - multiSigVerifier crypto.MultiSigVerifier - hasher hashing.Hasher - chronologyValidator process.ChronologyValidator - headers storage.Cacher - 
headersNonces dataRetriever.Uint64SyncMapCacher - headerValidator process.HeaderValidator - shardCoordinator sharding.Coordinator + marshalizer marshal.Marshalizer + storer storage.Storer + multiSigVerifier crypto.MultiSigVerifier + hasher hashing.Hasher + headers storage.Cacher + headersNonces dataRetriever.Uint64SyncMapCacher + headerValidator process.HeaderValidator + shardCoordinator sharding.Coordinator nodesCoordinator sharding.NodesCoordinator } @@ -61,23 +60,19 @@ func NewHeaderInterceptor( if shardCoordinator == nil { return nil, process.ErrNilShardCoordinator } - if chronologyValidator == nil { - return nil, process.ErrNilChronologyValidator - } if nodesCoordinator == nil { return nil, process.ErrNilNodesCoordinator } hdrInterceptor := &HeaderInterceptor{ - marshalizer: marshalizer, - multiSigVerifier: multiSigVerifier, - hasher: hasher, - shardCoordinator: shardCoordinator, - chronologyValidator: chronologyValidator, - headers: headers, - headersNonces: headersNonces, - headerValidator: headerValidator, - nodesCoordinator: nodesCoordinator + marshalizer: marshalizer, + multiSigVerifier: multiSigVerifier, + hasher: hasher, + shardCoordinator: shardCoordinator, + headers: headers, + headersNonces: headersNonces, + headerValidator: headerValidator, + nodesCoordinator: nodesCoordinator, } return hdrInterceptor, nil @@ -93,7 +88,7 @@ func (hi *HeaderInterceptor) ParseReceivedMessage(message p2p.MessageP2P) (*bloc return nil, process.ErrNilDataToProcess } - hdrIntercepted := block.NewInterceptedHeader(hi.multiSigVerifier, hi.chronologyValidator) + hdrIntercepted := block.NewInterceptedHeader(hi.multiSigVerifier, hi.nodesCoordinator, hi.marshalizer, hi.hasher) err := hi.marshalizer.Unmarshal(hdrIntercepted, message.Data()) if err != nil { return nil, err diff --git a/process/coordinator/process.go b/process/coordinator/process.go index c1e9511e70e..846a347c976 100644 --- a/process/coordinator/process.go +++ b/process/coordinator/process.go @@ -528,40 +528,26 @@ func (tc *transactionCoordinator) addTxFeeToMatchingMiniBlocks(miniBlocks *block func (tc *transactionCoordinator) processAddedInterimTransactions() block.MiniBlockSlice { miniBlocks := make(block.MiniBlockSlice, 0) + // processing has to be done in order, as the order of different type of transactions over the same account is strict // processing has to be done in order, as the order of different type of transactions over the same account is strict for _, blockType := range tc.keysInterimProcs { + if blockType == block.TxFeeBlock { + // this has to be processed last + continue + } + interimProc := tc.getInterimProcessor(blockType) if interimProc == nil { // this will never be reached as keysInterimProcs are the actual keys from the interimMap continue } - resMutex := sync.Mutex{} - // TODO: think if it is good in parallel or it is needed in sequences - wg := sync.WaitGroup{} - wg.Add(len(tc.interimProcessors)) - - for key, interimProc := range tc.interimProcessors { - if key == block.TxFeeBlock { - // this has to be processed last - wg.Done() - continue + currMbs := interimProc.CreateAllInterMiniBlocks() + for _, value := range currMbs { + miniBlocks = append(miniBlocks, value) } - - go func(intermediateProcessor process.IntermediateTransactionHandler) { - currMbs := intermediateProcessor.CreateAllInterMiniBlocks() - resMutex.Lock() - for _, value := range currMbs { - miniBlocks = append(miniBlocks, value) - } - resMutex.Unlock() - wg.Done() - }(interimProc) } - wg.Wait() - tc.mutInterimProcessors.RUnlock() - return miniBlocks 
} diff --git a/process/transaction/process.go b/process/transaction/process.go index 33c9e5ff8bb..5f1565aee27 100644 --- a/process/transaction/process.go +++ b/process/transaction/process.go @@ -100,7 +100,6 @@ func (txProc *txProcessor) ProcessTransaction(tx *transaction.Transaction, round return err } - txType, err := txProc.scProcessor.ComputeTransactionType(tx) txType, err := txProc.txTypeHandler.ComputeTransactionType(tx) if err != nil { return err From 5d4373cdf25b53cf4682862a0202e8645acc5da0 Mon Sep 17 00:00:00 2001 From: Radu Chis Date: Mon, 19 Aug 2019 16:51:26 +0300 Subject: [PATCH 062/234] Merged Economics and Development --- integrationTests/mock/nodesCoordinatorMock.go | 8 --- .../block/executingMiniblocksSc_test.go | 50 ++------------ .../block/executingMiniblocks_test.go | 6 +- .../smartContract/executingSCCalls_test.go | 13 ++-- integrationTests/node/getAccount_test.go | 4 +- .../block/executingMiniblocks_test.go | 2 +- .../block/interceptedRequestHdr_test.go | 12 ++-- integrationTests/state/genesisState_test.go | 2 +- .../state/stateExecTransaction_test.go | 67 +++---------------- integrationTests/state/stateTrie_test.go | 27 ++++---- integrationTests/testInitializer.go | 48 ++----------- integrationTests/testProcessorNode.go | 5 +- node/heartbeat/hearbeatMessageInfo_test.go | 6 +- .../interceptors/headerInterceptor_test.go | 63 ++++++++--------- .../metachainHeaderInterceptor_test.go | 8 +-- process/smartContract/process.go | 4 ++ process/smartContract/process_test.go | 2 +- 17 files changed, 103 insertions(+), 224 deletions(-) diff --git a/integrationTests/mock/nodesCoordinatorMock.go b/integrationTests/mock/nodesCoordinatorMock.go index 8e08aa158e8..ccb3f0e275c 100644 --- a/integrationTests/mock/nodesCoordinatorMock.go +++ b/integrationTests/mock/nodesCoordinatorMock.go @@ -41,18 +41,10 @@ func (ncm NodesCoordinatorMock) GetValidatorsPublicKeys(randomness []byte) ([]st return pubKeys, nil } -func (ncm NodesCoordinatorMock) ConsensusGroupSize() int { - panic("implement me") -} - func (ncm NodesCoordinatorMock) SetNodesPerShards(map[uint32][]sharding.Validator) error { return nil } -func (ncm NodesCoordinatorMock) SetConsensusGroupSize(int) error { - panic("implement me") -} - func (ncm NodesCoordinatorMock) GetSelectedPublicKeys(selection []byte) (publicKeys []string, err error) { panic("implement me") } diff --git a/integrationTests/multiShard/block/executingMiniblocksSc_test.go b/integrationTests/multiShard/block/executingMiniblocksSc_test.go index b2f85635446..af2990ddf1f 100644 --- a/integrationTests/multiShard/block/executingMiniblocksSc_test.go +++ b/integrationTests/multiShard/block/executingMiniblocksSc_test.go @@ -36,57 +36,15 @@ func TestProcessWithScTxsTopUpAndWithdrawOnlyProposers(t *testing.T) { _ = advertiser.Bootstrap() advertiserAddr := integrationTests.GetConnectableAddress(advertiser) - numMetaChainNodes := 1 - cp := integrationTests.CreateCryptoParams(1, numMetaChainNodes, maxShards) - keysMap := integrationTests.PubKeysMapFromKeysMap(cp.Keys) - validatorsMap := integrationTests.GenValidatorsFromPubKeys(keysMap) - - nodesCoordinator0, _ := sharding.NewIndexHashedNodesCoordinator( - 1, - 1, - integrationTests.TestHasher, - 0, - maxShards, - validatorsMap, - ) - - nodeShard0 := integrationTests.NewTestProcessorNodeWithCustomNodesCoordinator( - maxShards, - 0, - advertiserAddr, - nodesCoordinator0, - cp, - 0, - ) - - nodesCoordinator1, _ := sharding.NewIndexHashedNodesCoordinator( - 1, - 1, - integrationTests.TestHasher, - 1, - maxShards, - validatorsMap, 
- ) - - nodeShard1 := integrationTests.NewTestProcessorNodeWithCustomNodesCoordinator( - maxShards, - 1, - advertiserAddr, - nodesCoordinator1, - cp, - 0, - ) + nodeShard0 := integrationTests.NewTestProcessorNode(maxShards, 0, 0, advertiserAddr) + nodeShard1 := integrationTests.NewTestProcessorNode(maxShards, 1, 1, advertiserAddr) hardCodedSk, _ := hex.DecodeString("5561d28b0d89fa425bbbf9e49a018b5d1e4a462c03d2efce60faf9ddece2af06") hardCodedScResultingAddress, _ := hex.DecodeString("000000000000000000005fed9c659422cd8429ce92f8973bba2a9fb51e0eb3a1") nodeShard1.LoadTxSignSkBytes(hardCodedSk) - nodeMeta := integrationTests.NewTestProcessorNode( - maxShards, - sharding.MetachainShardId, - 0, - advertiserAddr, - ) + nodeMeta := integrationTests.NewTestProcessorNode(maxShards, sharding.MetachainShardId, 0, advertiserAddr) nodes := []*integrationTests.TestProcessorNode{nodeShard0, nodeShard1, nodeMeta} + idxNodeShard0 := 0 idxNodeShard1 := 1 idxNodeMeta := 2 diff --git a/integrationTests/multiShard/block/executingMiniblocks_test.go b/integrationTests/multiShard/block/executingMiniblocks_test.go index 89623ad5d09..92fbcc1a11e 100644 --- a/integrationTests/multiShard/block/executingMiniblocks_test.go +++ b/integrationTests/multiShard/block/executingMiniblocks_test.go @@ -89,6 +89,10 @@ func TestShouldProcessBlocksInMultiShardArchitecture(t *testing.T) { round, nonce = integrationTests.ProposeAndSyncOneBlock(t, nodes, idxProposers, round, nonce) } + gasPricePerTxBigInt := big.NewInt(int64(gasPricePerTx)) + gasLimitPerTxBigInt := big.NewInt(int64(gasLimitPerTx)) + gasValue := big.NewInt(0).Mul(gasPricePerTxBigInt, gasLimitPerTxBigInt) + totalValuePerTx := big.NewInt(0).Add(gasValue, valToTransferPerTx) fmt.Println("Test nodes from proposer shard to have the correct balances...") for _, n := range nodes { isNodeInSenderShard := n.ShardCoordinator.SelfId() == senderShard @@ -98,7 +102,7 @@ func TestShouldProcessBlocksInMultiShardArchitecture(t *testing.T) { //test sender balances for _, sk := range sendersPrivateKeys { - valTransferred := big.NewInt(0).Mul(valToTransferPerTx, big.NewInt(int64(len(receiversPrivateKeys)))) + valTransferred := big.NewInt(0).Mul(totalValuePerTx, big.NewInt(int64(len(receiversPrivateKeys)))) valRemaining := big.NewInt(0).Sub(valMinting, valTransferred) integrationTests.TestPrivateKeyHasBalance(t, n, sk, valRemaining) } diff --git a/integrationTests/multiShard/smartContract/executingSCCalls_test.go b/integrationTests/multiShard/smartContract/executingSCCalls_test.go index ec3fa973adf..dd34bdf7e1d 100644 --- a/integrationTests/multiShard/smartContract/executingSCCalls_test.go +++ b/integrationTests/multiShard/smartContract/executingSCCalls_test.go @@ -180,9 +180,9 @@ func TestProcessSCCallsInMultiShardArchitecture_FirstShardReceivesCallFromSecond generalRoundNumber := uint64(1) senderShard := uint32(0) receiverShard := uint32(1) - senderNonce := uint64(1) + senderNonce := uint64(0) mintingValue := big.NewInt(100000000) - receiverNonce := uint64(1) + receiverNonce := uint64(0) advertiser, nodes := createScCallsNodes() defer func() { @@ -279,9 +279,9 @@ func TestProcessSCCallsInMultiShardArchitecture_FirstShardReceivesCallFromSecond generalRoundNumber := uint64(1) scShard := uint32(0) accShard := uint32(1) - accNonce := uint64(1) + accNonce := uint64(0) mintingValue := big.NewInt(100000000) - scNonce := uint64(1) + scNonce := uint64(0) advertiser, nodes := createScCallsNodes() defer func() { @@ -315,9 +315,9 @@ func 
TestProcessSCCallsInMultiShardArchitecture_FirstShardReceivesCallFromSecond expectedValue := big.NewInt(0) expectedValue.Sub(mintingValue, big.NewInt(opGas*1)) assert.Equal(t, expectedValue, acc.Balance) + accNonce++ assert.Equal(t, accNonce, acc.Nonce) - accNonce++ generalRoundNumber++ // setting the sc deployment address (printed by the transaction processer) @@ -338,6 +338,7 @@ func TestProcessSCCallsInMultiShardArchitecture_FirstShardReceivesCallFromSecond withdrawValue, ) + scNonce++ // The account shard should process this tx as MoveBalance processAndTestSmartContractCallInSender( t, @@ -348,7 +349,7 @@ func TestProcessSCCallsInMultiShardArchitecture_FirstShardReceivesCallFromSecond mintingValue, scNonce, ) - scNonce++ + generalRoundNumber++ // After second shard processed the transaction, tx should get into the first shard where the SC resides diff --git a/integrationTests/node/getAccount_test.go b/integrationTests/node/getAccount_test.go index 748969753dd..4cb51e19e6f 100644 --- a/integrationTests/node/getAccount_test.go +++ b/integrationTests/node/getAccount_test.go @@ -13,7 +13,7 @@ import ( func TestNode_GetAccountAccountDoesNotExistsShouldRetEmpty(t *testing.T) { t.Parallel() - accDB := integrationTests.CreateAccountsDB(0) + accDB, _, _ := integrationTests.CreateAccountsDB(0) n, _ := node.NewNode( node.WithAccountsAdapter(accDB), @@ -32,7 +32,7 @@ func TestNode_GetAccountAccountDoesNotExistsShouldRetEmpty(t *testing.T) { func TestNode_GetAccountAccountExistsShouldReturn(t *testing.T) { t.Parallel() - accDB := integrationTests.CreateAccountsDB(0) + accDB, _, _ := integrationTests.CreateAccountsDB(0) addressHex := integrationTests.CreateRandomHexString(64) addressBytes, _ := hex.DecodeString(addressHex) diff --git a/integrationTests/singleShard/block/executingMiniblocks_test.go b/integrationTests/singleShard/block/executingMiniblocks_test.go index 4b1b339574f..1e2b9bd728b 100644 --- a/integrationTests/singleShard/block/executingMiniblocks_test.go +++ b/integrationTests/singleShard/block/executingMiniblocks_test.go @@ -58,7 +58,7 @@ func TestShardShouldNotProposeAndExecuteTwoBlocksInSameRound(t *testing.T) { integrationTests.SyncBlock(t, nodes, []int{idxProposer}, nonce) - time.Sleep(stepDelay) + time.Sleep(20 * stepDelay) checkCurrentBlockHeight(t, nodes, nonce) diff --git a/integrationTests/singleShard/block/interceptedRequestHdr_test.go b/integrationTests/singleShard/block/interceptedRequestHdr_test.go index 73f4ed9ba32..5d9d5716920 100644 --- a/integrationTests/singleShard/block/interceptedRequestHdr_test.go +++ b/integrationTests/singleShard/block/interceptedRequestHdr_test.go @@ -70,6 +70,8 @@ func TestNode_GenerateSendInterceptHeaderByNonceWithNetMessenger(t *testing.T) { hdr2 := block.Header{ Nonce: 0, + PubKeysBitmap: []byte{255, 0}, + Signature: []byte("signature"), PrevHash: []byte("prev hash"), TimeStamp: uint64(time.Now().Unix()), Round: 1, @@ -115,9 +117,11 @@ func TestNode_GenerateSendInterceptHeaderByNonceWithNetMessenger(t *testing.T) { wg.Done() } - if reflect.DeepEqual(hdrStored, &hdr2) && hdr2.Signature != nil { - fmt.Printf("Recieved header with hash %v\n", base64.StdEncoding.EncodeToString(key)) - wg.Done() + if reflect.DeepEqual(hdrStored, &hdr2) { + if hdr2.Signature != nil { + fmt.Printf("Recieved header with hash %v\n", base64.StdEncoding.EncodeToString(key)) + wg.Done() + } } }) @@ -132,7 +136,7 @@ func TestNode_GenerateSendInterceptHeaderByNonceWithNetMessenger(t *testing.T) { select { case <-chanDone: - case <-time.After(time.Second * 10): + case 
<-time.After(time.Second * 1000): assert.Fail(t, "timeout") } } diff --git a/integrationTests/state/genesisState_test.go b/integrationTests/state/genesisState_test.go index 2b85fec5f5c..55760a98e8a 100644 --- a/integrationTests/state/genesisState_test.go +++ b/integrationTests/state/genesisState_test.go @@ -299,7 +299,7 @@ func printTestDebugLines( } func getRootHashByRunningInitialBalances(initialBalances []*sharding.InitialBalance) ([]byte, state.AccountsAdapter) { - adb := integrationTests.CreateAccountsDB(0) + adb, _, _ := integrationTests.CreateAccountsDB(0) uniformIndexes := make([]int, len(initialBalances)) for i := 0; i < len(initialBalances); i++ { diff --git a/integrationTests/state/stateExecTransaction_test.go b/integrationTests/state/stateExecTransaction_test.go index 4e2a9a3737e..b903b2f5553 100644 --- a/integrationTests/state/stateExecTransaction_test.go +++ b/integrationTests/state/stateExecTransaction_test.go @@ -3,9 +3,6 @@ package state import ( "encoding/base64" "fmt" - "github.com/ElrondNetwork/elrond-go/data/state/addressConverters" - "github.com/ElrondNetwork/elrond-go/hashing/sha256" - "github.com/ElrondNetwork/elrond-go/marshal" "math/big" "testing" @@ -22,26 +19,8 @@ func TestExecTransaction_SelfTransactionShouldWork(t *testing.T) { t.Skip("this is not a short test") } - accnts, _ := adbCreateAccountsDBWithStorage() - - pubKeyBuff := createDummyHexAddress(64) - - hasher := sha256.Sha256{} - marshalizer := &marshal.JsonMarshalizer{} - shardCoordinator := mock.NewMultiShardsCoordinatorMock(1) - addrConv, _ := addressConverters.NewPlainAddressConverter(32, "0x") - - txProcessor, _ := transaction.NewTxProcessor( - accnts, - hasher, - addrConv, - marshalizer, - shardCoordinator, - &mock.SCProcessorMock{}, - &mock.UnsignedTxHandlerMock{}, - &mock.TxTypeHandlerMock{}, - ) - + accnts, _, _ := integrationTests.CreateAccountsDB(0) + txProcessor := integrationTests.CreateSimpleTxProcessor(accnts) nonce := uint64(6) balance := big.NewInt(10000) @@ -50,7 +29,7 @@ func TestExecTransaction_SelfTransactionShouldWork(t *testing.T) { hashCreated, _ := accnts.Commit() //Step 2. create a tx moving 1 from pubKeyBuff to pubKeyBuff - tx := &transaction2.Transaction{ + tx := &transaction.Transaction{ Nonce: nonce, Value: big.NewInt(1), GasLimit: 2, @@ -75,25 +54,8 @@ func TestExecTransaction_SelfTransactionShouldWork(t *testing.T) { func TestExecTransaction_SelfTransactionWithRevertShouldWork(t *testing.T) { t.Parallel() - accnts, _ := adbCreateAccountsDBWithStorage() - - pubKeyBuff := createDummyHexAddress(64) - - hasher := sha256.Sha256{} - marshalizer := &marshal.JsonMarshalizer{} - shardCoordinator := mock.NewMultiShardsCoordinatorMock(1) - addrConv, _ := addressConverters.NewPlainAddressConverter(32, "0x") - - txProcessor, _ := transaction.NewTxProcessor( - accnts, - hasher, - addrConv, - marshalizer, - shardCoordinator, - &mock.SCProcessorMock{}, - &mock.UnsignedTxHandlerMock{}, - &mock.TxTypeHandlerMock{}, - ) + accnts, _, _ := integrationTests.CreateAccountsDB(0) + txProcessor := integrationTests.CreateSimpleTxProcessor(accnts) nonce := uint64(6) balance := big.NewInt(10000) @@ -103,7 +65,7 @@ func TestExecTransaction_SelfTransactionWithRevertShouldWork(t *testing.T) { _, _ = accnts.Commit() //Step 2. 
create a tx moving 1 from pubKeyBuff to pubKeyBuff - tx := &transaction2.Transaction{ + tx := &transaction.Transaction{ Nonce: nonce, Value: big.NewInt(1), SndAddr: address.Bytes(), @@ -125,7 +87,7 @@ func TestExecTransaction_SelfTransactionWithRevertShouldWork(t *testing.T) { func TestExecTransaction_MoreTransactionsWithRevertShouldWork(t *testing.T) { t.Parallel() - accnts := integrationTests.CreateAccountsDB(0) + accnts, _, _ := integrationTests.CreateAccountsDB(0) nonce := uint64(6) initialBalance := int64(100000) @@ -150,16 +112,7 @@ func testExecTransactionsMoreTxWithRevert( initialBalance int64, ) { - txProcessor, _ := transaction.NewTxProcessor( - accnts, - hasher, - addrConv, - marshalizer, - shardCoordinator, - &mock.SCProcessorMock{}, - &mock.UnsignedTxHandlerMock{}, - &mock.TxTypeHandlerMock{}, - ) + txProcessor := integrationTests.CreateSimpleTxProcessor(accnts) txToGenerate := 15000 gasPrice := uint64(2) @@ -167,7 +120,7 @@ func testExecTransactionsMoreTxWithRevert( value := uint64(1) //Step 1. execute a lot moving transactions from pubKeyBuff to another pubKeyBuff for i := 0; i < txToGenerate; i++ { - tx := &transaction2.Transaction{ + tx := &transaction.Transaction{ Nonce: initialNonce + uint64(i), Value: big.NewInt(int64(value)), GasPrice: gasPrice, @@ -220,7 +173,7 @@ func testExecTransactionsMoreTxWithRevert( func TestExecTransaction_MoreTransactionsMoreIterationsWithRevertShouldWork(t *testing.T) { t.Parallel() - accnts := integrationTests.CreateAccountsDB(0) + accnts, _, _ := integrationTests.CreateAccountsDB(0) nonce := uint64(6) initialBalance := int64(100000) diff --git a/integrationTests/state/stateTrie_test.go b/integrationTests/state/stateTrie_test.go index 42d71498786..33cdef8868b 100644 --- a/integrationTests/state/stateTrie_test.go +++ b/integrationTests/state/stateTrie_test.go @@ -152,7 +152,7 @@ func TestAccountsDB_GetJournalizedAccountReturnNotFoundAccntShouldWork(t *testin func TestAccountsDB_GetExistingAccountConcurrentlyShouldWork(t *testing.T) { t.Parallel() - adb := integrationTests.CreateAccountsDB(0) + adb, _, _ := integrationTests.CreateAccountsDB(0) wg := sync.WaitGroup{} wg.Add(2000) @@ -283,7 +283,7 @@ func TestAccountsDB_CommitTwoOkAccountsWithRecreationFromStorageShouldWork(t *te //verifies that commit saves the new tries and that can be loaded back t.Parallel() - adb := integrationTests.CreateAccountsDB(0) + adb, _, mu := integrationTests.CreateAccountsDB(0) adr1 := integrationTests.CreateRandomAddress() adr2 := integrationTests.CreateRandomAddress() @@ -316,8 +316,7 @@ func TestAccountsDB_CommitTwoOkAccountsWithRecreationFromStorageShouldWork(t *te assert.Nil(t, err) fmt.Printf("Data committed! Root: %v\n", base64.StdEncoding.EncodeToString(rootHash)) - store := integrationTests.CreateMemUnit() - tr, _ := trie.NewTrie(store, integrationTests.TestMarshalizer, integrationTests.TestHasher) + tr, _ := trie.NewTrie(mu, integrationTests.TestMarshalizer, integrationTests.TestHasher) adb, _ = state.NewAccountsDB(tr, integrationTests.TestHasher, integrationTests.TestMarshalizer, factory.NewAccountCreator()) //reloading a new trie to test if data is inside @@ -349,7 +348,7 @@ func TestAccountsDB_CommitAnEmptyStateShouldWork(t *testing.T) { } }() - adb := integrationTests.CreateAccountsDB(0) + adb, _, _ := integrationTests.CreateAccountsDB(0) hash, err := adb.Commit() @@ -419,7 +418,7 @@ func TestAccountsDB_RevertNonceStepByStepAccountDataShouldWork(t *testing.T) { adr2 := integrationTests.CreateRandomAddress() //Step 1. 
create accounts objects - adb := integrationTests.CreateAccountsDB(0) + adb, _, _ := integrationTests.CreateAccountsDB(0) rootHash, err := adb.RootHash() assert.Nil(t, err) hrEmpty := base64.StdEncoding.EncodeToString(rootHash) @@ -490,7 +489,7 @@ func TestAccountsDB_RevertBalanceStepByStepAccountDataShouldWork(t *testing.T) { adr2 := integrationTests.CreateRandomAddress() //Step 1. create accounts objects - adb := integrationTests.CreateAccountsDB(0) + adb, _, _ := integrationTests.CreateAccountsDB(0) rootHash, err := adb.RootHash() assert.Nil(t, err) hrEmpty := base64.StdEncoding.EncodeToString(rootHash) @@ -562,7 +561,7 @@ func TestAccountsDB_RevertCodeStepByStepAccountDataShouldWork(t *testing.T) { adr2 := integrationTests.CreateRandomAddress() //Step 1. create accounts objects - adb := integrationTests.CreateAccountsDB(0) + adb, _, _ := integrationTests.CreateAccountsDB(0) rootHash, err := adb.RootHash() assert.Nil(t, err) hrEmpty := base64.StdEncoding.EncodeToString(rootHash) @@ -628,7 +627,7 @@ func TestAccountsDB_RevertDataStepByStepAccountDataShouldWork(t *testing.T) { adr2 := integrationTests.CreateRandomAddress() //Step 1. create accounts objects - adb := integrationTests.CreateAccountsDB(0) + adb, _, _ := integrationTests.CreateAccountsDB(0) rootHash, err := adb.RootHash() assert.Nil(t, err) hrEmpty := base64.StdEncoding.EncodeToString(rootHash) @@ -703,7 +702,7 @@ func TestAccountsDB_RevertDataStepByStepWithCommitsAccountDataShouldWork(t *test adr2 := integrationTests.CreateRandomAddress() //Step 1. create accounts objects - adb := integrationTests.CreateAccountsDB(0) + adb, _, _ := integrationTests.CreateAccountsDB(0) rootHash, err := adb.RootHash() assert.Nil(t, err) hrEmpty := base64.StdEncoding.EncodeToString(rootHash) @@ -798,7 +797,7 @@ func TestAccountsDB_ExecBalanceTxExecution(t *testing.T) { adrDest := integrationTests.CreateRandomAddress() //Step 1. create accounts objects - adb := integrationTests.CreateAccountsDB(0) + adb, _, _ := integrationTests.CreateAccountsDB(0) acntSrc, err := adb.GetAccountWithJournal(adrSrc) assert.Nil(t, err) @@ -851,7 +850,7 @@ func TestAccountsDB_ExecALotOfBalanceTxOK(t *testing.T) { adrDest := integrationTests.CreateRandomAddress() //Step 1. create accounts objects - adb := integrationTests.CreateAccountsDB(0) + adb, _, _ := integrationTests.CreateAccountsDB(0) acntSrc, err := adb.GetAccountWithJournal(adrSrc) assert.Nil(t, err) @@ -884,7 +883,7 @@ func TestAccountsDB_ExecALotOfBalanceTxOKorNOK(t *testing.T) { adrDest := integrationTests.CreateRandomAddress() //Step 1. create accounts objects - adb := integrationTests.CreateAccountsDB(0) + adb, _, _ := integrationTests.CreateAccountsDB(0) acntSrc, err := adb.GetAccountWithJournal(adrSrc) assert.Nil(t, err) @@ -1049,7 +1048,7 @@ func BenchmarkTxExecution(b *testing.B) { adrDest := integrationTests.CreateRandomAddress() //Step 1. 
create accounts objects - adb := integrationTests.CreateAccountsDB(0) + adb, _, _ := integrationTests.CreateAccountsDB(0) acntSrc, err := adb.GetAccountWithJournal(adrSrc) assert.Nil(b, err) diff --git a/integrationTests/testInitializer.go b/integrationTests/testInitializer.go index 536c0dbdcfe..92604ad15e1 100644 --- a/integrationTests/testInitializer.go +++ b/integrationTests/testInitializer.go @@ -190,7 +190,7 @@ func CreateMetaStore(coordinator sharding.Coordinator) dataRetriever.StorageServ } // CreateAccountsDB creates an account state with a valid trie implementation but with a memory storage -func CreateAccountsDB(accountType factory.Type) *state.AccountsDB { +func CreateAccountsDB(accountType factory.Type) (*state.AccountsDB, data.Trie, storage.Storer) { hasher := sha256.Sha256{} store := CreateMemUnit() @@ -198,7 +198,7 @@ func CreateAccountsDB(accountType factory.Type) *state.AccountsDB { accountFactory, _ := factory.NewAccountFactoryCreator(accountType) adb, _ := state.NewAccountsDB(tr, sha256.Sha256{}, TestMarshalizer, accountFactory) - return adb + return adb, tr, store } // CreateShardChain creates a blockchain implementation used by the shard nodes @@ -362,7 +362,7 @@ func CreateRandomHexString(chars int) string { // GenerateAddressJournalAccountAccountsDB returns an account, the accounts address, and the accounts database func GenerateAddressJournalAccountAccountsDB() (state.AddressContainer, state.AccountHandler, *state.AccountsDB) { adr := CreateRandomAddress() - adb := CreateAccountsDB(factory.UserAccount) + adb, _, _ := CreateAccountsDB(factory.UserAccount) account, _ := state.NewAccount(adr, adb) return adr, account, adb @@ -653,54 +653,18 @@ func CreateNodes( //first node generated will have is pk belonging to firstSkShardId nodes := make([]*TestProcessorNode, numOfShards*nodesPerShard+numMetaChainNodes) - cp := CreateCryptoParams(nodesPerShard, numMetaChainNodes, uint32(numOfShards)) - keysMap := PubKeysMapFromKeysMap(cp.Keys) - validatorsMap := GenValidatorsFromPubKeys(keysMap) - idx := 0 for shardId := 0; shardId < numOfShards; shardId++ { - nodesCoordinator, _ := sharding.NewIndexHashedNodesCoordinator( - 1, - 1, - TestHasher, - uint32(shardId), - uint32(numOfShards), - validatorsMap, - ) - for j := 0; j < nodesPerShard; j++ { - n := NewTestProcessorNodeWithCustomNodesCoordinator( - uint32(numOfShards), - uint32(shardId), - serviceID, - nodesCoordinator, - cp, - j, - ) + n := NewTestProcessorNode(uint32(numOfShards), uint32(shardId), uint32(shardId), serviceID) nodes[idx] = n idx++ } } - nodesCoordinatorMeta, _ := sharding.NewIndexHashedNodesCoordinator( - 1, - 1, - TestHasher, - uint32(sharding.MetachainShardId), - uint32(numOfShards), - validatorsMap, - ) for i := 0; i < numMetaChainNodes; i++ { - metaNode := NewTestProcessorNodeWithCustomNodesCoordinator( - uint32(numOfShards), - sharding.MetachainShardId, - serviceID, - nodesCoordinatorMeta, - cp, - i, - ) - + metaNode := NewTestProcessorNode(uint32(numOfShards), sharding.MetachainShardId, 0, serviceID) idx := i + numOfShards*nodesPerShard nodes[idx] = metaNode } @@ -1025,7 +989,7 @@ func generateValidTx( _, pkRecv, _ := GenerateSkAndPkInShard(shardCoordinator, receiverShardId) pkRecvBuff, _ := pkRecv.ToByteArray() - accnts := CreateAccountsDB(factory.UserAccount) + accnts, _, _ := CreateAccountsDB(factory.UserAccount) addrSender, _ := TestAddressConverter.CreateAddressFromPublicKeyBytes(pkSenderBuff) _, _ = accnts.GetAccountWithJournal(addrSender) _, _ = accnts.Commit() diff --git 
a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index 2ea2e5a4639..9c62a297ce1 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -177,9 +177,12 @@ func NewTestProcessorNodeWithCustomDataPool(maxShards uint32, nodeShardId uint32 shardCoordinator, _ := sharding.NewMultiShardCoordinator(maxShards, nodeShardId) messenger := CreateMessengerWithKadDht(context.Background(), initialNodeAddr) + nodesCoordinator := &mock.NodesCoordinatorMock{} + tpn := &TestProcessorNode{ ShardCoordinator: shardCoordinator, Messenger: messenger, + NodesCoordinator: nodesCoordinator, } tpn.OwnAccount = CreateTestWalletAccount(shardCoordinator, txSignPrivKeyShardId) @@ -195,7 +198,7 @@ func NewTestProcessorNodeWithCustomDataPool(maxShards uint32, nodeShardId uint32 func (tpn *TestProcessorNode) initTestNode() { tpn.initStorage() - tpn.AccntState = CreateAccountsDB(0) + tpn.AccntState, _, _ = CreateAccountsDB(0) tpn.initChainHandler() tpn.GenesisBlocks = CreateGenesisBlocks(tpn.ShardCoordinator) tpn.initInterceptors() diff --git a/node/heartbeat/hearbeatMessageInfo_test.go b/node/heartbeat/hearbeatMessageInfo_test.go index 103374b067b..686a61943bd 100644 --- a/node/heartbeat/hearbeatMessageInfo_test.go +++ b/node/heartbeat/hearbeatMessageInfo_test.go @@ -1,6 +1,7 @@ package heartbeat import ( + "sync" "testing" "time" @@ -72,10 +73,12 @@ func TestHeartbeatMessageInfo_HeartbeatShouldUpdateUpTime(t *testing.T) { hbmi, _ := newHeartbeatMessageInfo(time.Duration(10), false) incrementalTime := int64(0) + mut := sync.Mutex{} hbmi.getTimeHandler = func() time.Time { + mut.Lock() tReturned := time.Unix(0, incrementalTime) incrementalTime += 1 - + mut.Unlock() return tReturned } @@ -83,6 +86,7 @@ func TestHeartbeatMessageInfo_HeartbeatShouldUpdateUpTime(t *testing.T) { // send heartbeat twice in order to calculate the duration between thm hbmi.HeartbeatReceived(uint32(1), "v0.1", "undefined") + time.Sleep(10 * time.Millisecond) hbmi.HeartbeatReceived(uint32(2), "v0.1", "undefined") assert.True(t, hbmi.totalUpTime.Duration > time.Duration(0)) diff --git a/process/block/interceptors/headerInterceptor_test.go b/process/block/interceptors/headerInterceptor_test.go index 14574f43e2f..1740a69def5 100644 --- a/process/block/interceptors/headerInterceptor_test.go +++ b/process/block/interceptors/headerInterceptor_test.go @@ -128,7 +128,7 @@ func TestNewHeaderInterceptor_NilHeaderHandlerValidatorShouldErr(t *testing.T) { mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), - &mock.ChronologyValidatorStub{}, + &mock.NodesCoordinatorMock{}, ) assert.Equal(t, process.ErrNilHeaderHandlerValidator, err) @@ -150,7 +150,7 @@ func TestNewHeaderInterceptor_NilMultiSignerShouldErr(t *testing.T) { nil, mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), - &mock.ChronologyValidatorStub{}, + &mock.NodesCoordinatorMock{}, ) assert.Equal(t, process.ErrNilMultiSigVerifier, err) @@ -172,7 +172,7 @@ func TestNewHeaderInterceptor_NilHasherShouldErr(t *testing.T) { mock.NewMultiSigner(), nil, mock.NewOneShardCoordinatorMock(), - &mock.ChronologyValidatorStub{}, + &mock.NodesCoordinatorMock{}, ) assert.Equal(t, process.ErrNilHasher, err) @@ -194,7 +194,7 @@ func TestNewHeaderInterceptor_NilShardCoordinatorShouldErr(t *testing.T) { mock.NewMultiSigner(), mock.HasherMock{}, nil, - &mock.ChronologyValidatorStub{}, + &mock.NodesCoordinatorMock{}, ) assert.Equal(t, process.ErrNilShardCoordinator, err) @@ -219,7 +219,7 @@ func 
TestNewHeaderInterceptor_NilChronologyValidatorShouldErr(t *testing.T) { nil, ) - assert.Equal(t, process.ErrNilChronologyValidator, err) + assert.Equal(t, process.ErrNilNodesCoordinator, err) assert.Nil(t, hi) } @@ -263,7 +263,7 @@ func TestHeaderInterceptor_ParseReceivedMessageNilMessageShouldErr(t *testing.T) mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), - &mock.ChronologyValidatorStub{}, + &mock.NodesCoordinatorMock{}, ) hdr, err := hi.ParseReceivedMessage(nil) @@ -287,7 +287,7 @@ func TestHeaderInterceptor_ParseReceivedMessageNilDataToProcessShouldErr(t *test mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), - &mock.ChronologyValidatorStub{}, + &mock.NodesCoordinatorMock{}, ) msg := &mock.P2PMessageMock{} @@ -318,7 +318,7 @@ func TestHeaderInterceptor_ParseReceivedMessageMarshalizerErrorsAtUnmarshalingSh mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), - &mock.ChronologyValidatorStub{}, + &mock.NodesCoordinatorMock{}, ) msg := &mock.P2PMessageMock{ @@ -338,11 +338,9 @@ func TestHeaderInterceptor_ParseReceivedMessageSanityCheckFailedShouldErr(t *tes multisigner := mock.NewMultiSigner() headers := &mock.CacherStub{} headersNonces := &mock.Uint64SyncMapCacherStub{} - chronologyValidator := &mock.ChronologyValidatorStub{ - ValidateReceivedBlockCalled: func(shardID uint32, epoch uint32, nonce uint64, round uint64) error { - return nil - }, - } + + nodesCoordinator := &mock.NodesCoordinatorMock{} + hasher := mock.HasherMock{} hi, _ := interceptors.NewHeaderInterceptor( marshalizer, @@ -350,12 +348,12 @@ func TestHeaderInterceptor_ParseReceivedMessageSanityCheckFailedShouldErr(t *tes headersNonces, headerValidator, multisigner, - mock.HasherMock{}, + hasher, mock.NewOneShardCoordinatorMock(), - chronologyValidator, + nodesCoordinator, ) - hdr := block.NewInterceptedHeader(multisigner, chronologyValidator) + hdr := block.NewInterceptedHeader(multisigner, nodesCoordinator, marshalizer, hasher) buff, _ := marshalizer.Marshal(hdr) msg := &mock.P2PMessageMock{ DataField: buff, @@ -381,11 +379,9 @@ func TestHeaderInterceptor_ParseReceivedMessageValsOkShouldWork(t *testing.T) { multisigner := mock.NewMultiSigner() headers := &mock.CacherStub{} headersNonces := &mock.Uint64SyncMapCacherStub{} - chronologyValidator := &mock.ChronologyValidatorStub{ - ValidateReceivedBlockCalled: func(shardID uint32, epoch uint32, nonce uint64, round uint64) error { - return nil - }, - } + + nodesCoordinator := &mock.NodesCoordinatorMock{} + hasher := mock.HasherMock{} hi, _ := interceptors.NewHeaderInterceptor( marshalizer, @@ -393,16 +389,16 @@ func TestHeaderInterceptor_ParseReceivedMessageValsOkShouldWork(t *testing.T) { headersNonces, headerValidator, multisigner, - mock.HasherMock{}, + hasher, mock.NewOneShardCoordinatorMock(), - chronologyValidator, + nodesCoordinator, ) - hdr := block.NewInterceptedHeader(multisigner, chronologyValidator) + hdr := block.NewInterceptedHeader(multisigner, nodesCoordinator, marshalizer, hasher) hdr.Nonce = testedNonce hdr.ShardId = 0 hdr.PrevHash = make([]byte, 0) - hdr.PubKeysBitmap = make([]byte, 0) + hdr.PubKeysBitmap = []byte{1} hdr.BlockBodyType = dataBlock.TxBlock hdr.Signature = make([]byte, 0) hdr.SetHash([]byte("aaa")) @@ -536,11 +532,6 @@ func TestHeaderInterceptor_ProcessReceivedMessageTestHdrNonces(t *testing.T) { testedNonce := uint64(67) headers := &mock.CacherStub{} multisigner := mock.NewMultiSigner() - chronologyValidator := &mock.ChronologyValidatorStub{ - 
ValidateReceivedBlockCalled: func(shardID uint32, epoch uint32, nonce uint64, round uint64) error { - return nil - }, - } headerValidator := &mock.HeaderValidatorStub{ IsHeaderValidForProcessingCalled: func(headerHandler data.HeaderHandler) bool { @@ -549,6 +540,8 @@ func TestHeaderInterceptor_ProcessReceivedMessageTestHdrNonces(t *testing.T) { } hdrsNonces := &mock.Uint64SyncMapCacherStub{} + nodesCoordinator := &mock.NodesCoordinatorMock{} + hasher := mock.HasherMock{} hi, _ := interceptors.NewHeaderInterceptor( marshalizer, @@ -556,16 +549,16 @@ func TestHeaderInterceptor_ProcessReceivedMessageTestHdrNonces(t *testing.T) { hdrsNonces, headerValidator, multisigner, - mock.HasherMock{}, - mock.NewMultiShardsCoordinatorMock(2), - chronologyValidator, + hasher, + mock.NewOneShardCoordinatorMock(), + nodesCoordinator, ) - hdr := block.NewInterceptedHeader(multisigner, chronologyValidator) + hdr := block.NewInterceptedHeader(multisigner, nodesCoordinator, marshalizer, hasher) hdr.Nonce = testedNonce hdr.ShardId = 0 hdr.PrevHash = make([]byte, 0) - hdr.PubKeysBitmap = make([]byte, 0) + hdr.PubKeysBitmap = []byte{1} hdr.BlockBodyType = dataBlock.TxBlock hdr.Signature = make([]byte, 0) hdr.SetHash([]byte("aaa")) diff --git a/process/block/interceptors/metachainHeaderInterceptor_test.go b/process/block/interceptors/metachainHeaderInterceptor_test.go index d026b933469..7803d21b516 100644 --- a/process/block/interceptors/metachainHeaderInterceptor_test.go +++ b/process/block/interceptors/metachainHeaderInterceptor_test.go @@ -166,13 +166,13 @@ func TestNewMetachainHeaderInterceptor_NilNodesCoordinatorShouldErr(t *testing.T t.Parallel() metachainHeaders := &mock.CacherStub{} - metachainStorer := &mock.StorerStub{} + headerValidator := &mock.HeaderValidatorStub{} mhi, err := interceptors.NewMetachainHeaderInterceptor( &mock.MarshalizerMock{}, metachainHeaders, &mock.Uint64SyncMapCacherStub{}, - metachainStorer, + headerValidator, mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), @@ -322,7 +322,7 @@ func TestMetachainHeaderInterceptor_ProcessReceivedMessageValsOkShouldWork(t *te }, } multisigner := mock.NewMultiSigner() - nodesCoordinator := createNodesCoordinator() + nodesCoordinator := &mock.NodesCoordinatorMock{} mhi, _ := interceptors.NewMetachainHeaderInterceptor( marshalizer, @@ -406,7 +406,7 @@ func TestMetachainHeaderInterceptor_ProcessReceivedMessageIsNotValidShouldNotAdd }, } - nodesCoordinator := createNodesCoordinator() + nodesCoordinator := &mock.NodesCoordinatorMock{} mhi, _ := interceptors.NewMetachainHeaderInterceptor( marshalizer, diff --git a/process/smartContract/process.go b/process/smartContract/process.go index 260cb91cf5d..2f62c259686 100644 --- a/process/smartContract/process.go +++ b/process/smartContract/process.go @@ -352,6 +352,10 @@ func (sc *scProcessor) processSCPayment(tx *transaction.Transaction, acntSnd sta return process.ErrWrongTypeAssertion } + if stAcc.Balance.Cmp(cost) < 0 { + return process.ErrInsufficientFunds + } + totalCost := big.NewInt(0) err = stAcc.SetBalanceWithJournal(totalCost.Sub(stAcc.Balance, cost)) if err != nil { diff --git a/process/smartContract/process_test.go b/process/smartContract/process_test.go index 5225dfad751..262dfa36667 100644 --- a/process/smartContract/process_test.go +++ b/process/smartContract/process_test.go @@ -1426,7 +1426,7 @@ func TestScProcessor_ProcessSCPaymentNotEnoughBalance(t *testing.T) { t.Parallel() sc, err := NewSmartContractProcessor( - &mock.VMExecutionHandlerStub{}, + 
&mock.VMContainerMock{}, &mock.ArgumentParserMock{}, &mock.HasherMock{}, &mock.MarshalizerMock{}, From ef336fe3946617cda5de8922503b534c96292891 Mon Sep 17 00:00:00 2001 From: Radu Chis Date: Mon, 19 Aug 2019 17:44:09 +0300 Subject: [PATCH 063/234] Merged Economics and Development --- .../singleShard/block/executingMiniblocks_test.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/integrationTests/singleShard/block/executingMiniblocks_test.go b/integrationTests/singleShard/block/executingMiniblocks_test.go index 1e2b9bd728b..673d103cd8d 100644 --- a/integrationTests/singleShard/block/executingMiniblocks_test.go +++ b/integrationTests/singleShard/block/executingMiniblocks_test.go @@ -3,6 +3,7 @@ package block import ( "context" "fmt" + "runtime" "testing" "time" @@ -58,7 +59,7 @@ func TestShardShouldNotProposeAndExecuteTwoBlocksInSameRound(t *testing.T) { integrationTests.SyncBlock(t, nodes, []int{idxProposer}, nonce) - time.Sleep(20 * stepDelay) + time.Sleep(stepDelay) checkCurrentBlockHeight(t, nodes, nonce) @@ -72,6 +73,7 @@ func TestShardShouldNotProposeAndExecuteTwoBlocksInSameRound(t *testing.T) { mockTestingT := &testing.T{} integrationTests.SyncBlock(mockTestingT, nodes, []int{idxProposer}, nonce) + runtime.Gosched() time.Sleep(stepDelay) checkCurrentBlockHeight(t, nodes, nonce-1) From 401471fff8834ee1e0d5e7437ae883c6a30dd9f5 Mon Sep 17 00:00:00 2001 From: Radu Chis Date: Mon, 19 Aug 2019 17:44:51 +0300 Subject: [PATCH 064/234] Merged Economics and Development --- integrationTests/singleShard/block/executingMiniblocks_test.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/integrationTests/singleShard/block/executingMiniblocks_test.go b/integrationTests/singleShard/block/executingMiniblocks_test.go index 673d103cd8d..4b1b339574f 100644 --- a/integrationTests/singleShard/block/executingMiniblocks_test.go +++ b/integrationTests/singleShard/block/executingMiniblocks_test.go @@ -3,7 +3,6 @@ package block import ( "context" "fmt" - "runtime" "testing" "time" @@ -73,7 +72,6 @@ func TestShardShouldNotProposeAndExecuteTwoBlocksInSameRound(t *testing.T) { mockTestingT := &testing.T{} integrationTests.SyncBlock(mockTestingT, nodes, []int{idxProposer}, nonce) - runtime.Gosched() time.Sleep(stepDelay) checkCurrentBlockHeight(t, nodes, nonce-1) From c3ad81374158ef622880c6447667167a73c08c01 Mon Sep 17 00:00:00 2001 From: Radu Chis Date: Mon, 19 Aug 2019 17:47:46 +0300 Subject: [PATCH 065/234] Merged Economics and Development --- .../singleShard/block/interceptedRequestHdr_test.go | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/integrationTests/singleShard/block/interceptedRequestHdr_test.go b/integrationTests/singleShard/block/interceptedRequestHdr_test.go index d749ef69311..deda4f75ba8 100644 --- a/integrationTests/singleShard/block/interceptedRequestHdr_test.go +++ b/integrationTests/singleShard/block/interceptedRequestHdr_test.go @@ -117,11 +117,9 @@ func TestNode_GenerateSendInterceptHeaderByNonceWithNetMessenger(t *testing.T) { wg.Done() } - if reflect.DeepEqual(hdrStored, &hdr2) { - if hdr2.Signature != nil { - fmt.Printf("Recieved header with hash %v\n", base64.StdEncoding.EncodeToString(key)) - wg.Done() - } + if reflect.DeepEqual(hdrStored, &hdr2) && hdr2.Signature != nil { + fmt.Printf("Recieved header with hash %v\n", base64.StdEncoding.EncodeToString(key)) + wg.Done() } }) From 1e2dcb17b9ea74bfb6178c9e197773a55b649feb Mon Sep 17 00:00:00 2001 From: Radu Chis Date: Tue, 20 Aug 2019 10:32:11 +0300 Subject: [PATCH 066/234] Merged Economics 
and Development --- .../multiShard/block/executingMiniblocks_test.go | 10 ++++++++-- integrationTests/testInitializer.go | 16 ++++++++++++---- integrationTests/testProcessorNode.go | 2 +- node/heartbeat/hearbeatMessageInfo_test.go | 4 ---- 4 files changed, 21 insertions(+), 11 deletions(-) diff --git a/integrationTests/multiShard/block/executingMiniblocks_test.go b/integrationTests/multiShard/block/executingMiniblocks_test.go index 92fbcc1a11e..030e7622e48 100644 --- a/integrationTests/multiShard/block/executingMiniblocks_test.go +++ b/integrationTests/multiShard/block/executingMiniblocks_test.go @@ -74,8 +74,14 @@ func TestShouldProcessBlocksInMultiShardArchitecture(t *testing.T) { } fmt.Println("Generating transactions...") - integrationTests.GenerateAndDisseminateTxs(proposerNode, sendersPrivateKeys, receiversPrivateKeys, - valToTransferPerTx, gasPricePerTx, gasLimitPerTx) + integrationTests.GenerateAndDisseminateTxs( + proposerNode, + sendersPrivateKeys, + receiversPrivateKeys, + valToTransferPerTx, + gasPricePerTx, + gasLimitPerTx, + ) fmt.Println("Delaying for disseminating transactions...") time.Sleep(time.Second * 5) diff --git a/integrationTests/testInitializer.go b/integrationTests/testInitializer.go index 92604ad15e1..24ffe895742 100644 --- a/integrationTests/testInitializer.go +++ b/integrationTests/testInitializer.go @@ -417,8 +417,16 @@ func AdbEmulateBalanceTxExecution(acntSrc, acntDest *state.Account, value *big.I // CreateSimpleTxProcessor returns a transaction processor func CreateSimpleTxProcessor(accnts state.AccountsAdapter) process.TransactionProcessor { shardCoordinator := mock.NewMultiShardsCoordinatorMock(1) - txProcessor, _ := txProc.NewTxProcessor(accnts, TestHasher, TestAddressConverter, TestMarshalizer, - shardCoordinator, &mock.SCProcessorMock{}, &mock.UnsignedTxHandlerMock{}, &mock.TxTypeHandlerMock{}) + txProcessor, _ := txProc.NewTxProcessor( + accnts, + TestHasher, + TestAddressConverter, + TestMarshalizer, + shardCoordinator, + &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, + ) return txProcessor } @@ -654,9 +662,9 @@ func CreateNodes( nodes := make([]*TestProcessorNode, numOfShards*nodesPerShard+numMetaChainNodes) idx := 0 - for shardId := 0; shardId < numOfShards; shardId++ { + for shardId := uint32(0); shardId < uint32(numOfShards); shardId++ { for j := 0; j < nodesPerShard; j++ { - n := NewTestProcessorNode(uint32(numOfShards), uint32(shardId), uint32(shardId), serviceID) + n := NewTestProcessorNode(uint32(numOfShards), shardId, shardId, serviceID) nodes[idx] = n idx++ diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index 9c62a297ce1..51620291b99 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -4,7 +4,6 @@ import ( "context" "encoding/hex" "fmt" - "github.com/ElrondNetwork/elrond-go/crypto" "sync/atomic" "time" @@ -12,6 +11,7 @@ import ( "github.com/ElrondNetwork/elrond-go/consensus/spos/sposFactory" "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/core/partitioning" + "github.com/ElrondNetwork/elrond-go/crypto" "github.com/ElrondNetwork/elrond-go/data" dataBlock "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/data/state" diff --git a/node/heartbeat/hearbeatMessageInfo_test.go b/node/heartbeat/hearbeatMessageInfo_test.go index 686a61943bd..a0d337ad06d 100644 --- a/node/heartbeat/hearbeatMessageInfo_test.go +++ b/node/heartbeat/hearbeatMessageInfo_test.go @@ 
-1,7 +1,6 @@ package heartbeat import ( - "sync" "testing" "time" @@ -73,12 +72,9 @@ func TestHeartbeatMessageInfo_HeartbeatShouldUpdateUpTime(t *testing.T) { hbmi, _ := newHeartbeatMessageInfo(time.Duration(10), false) incrementalTime := int64(0) - mut := sync.Mutex{} hbmi.getTimeHandler = func() time.Time { - mut.Lock() tReturned := time.Unix(0, incrementalTime) incrementalTime += 1 - mut.Unlock() return tReturned } From 9898ab23445a4d712703c4ad57e143d1d0b71174 Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Wed, 21 Aug 2019 19:27:31 +0300 Subject: [PATCH 067/234] integrationTests: add integration tests for signature verification on interceptors --- cmd/node/main.go | 6 +- integrationTests/mock/keyMock.go | 64 ++++++ .../interceptedHeadersSigVerification_test.go | 158 +++++++++++++++ integrationTests/testInitializer.go | 38 +--- integrationTests/testProcessorNode.go | 55 +++-- .../testProcessorNodeWithMultisigner.go | 189 ++++++++++++++++++ integrationTests/testWalletAccount.go | 15 -- 7 files changed, 443 insertions(+), 82 deletions(-) create mode 100644 integrationTests/mock/keyMock.go create mode 100644 integrationTests/multiShard/block/interceptedHeadersSigVerification_test.go create mode 100644 integrationTests/testProcessorNodeWithMultisigner.go diff --git a/cmd/node/main.go b/cmd/node/main.go index 447806b802c..c539979cbd7 100644 --- a/cmd/node/main.go +++ b/cmd/node/main.go @@ -1154,14 +1154,12 @@ func createNode( if err != nil { return nil, errors.New("error creating node: " + err.Error()) } + err = nd.CreateShardedStores() if err != nil { return nil, err } - err = nd.StartHeartbeat(config.Heartbeat, version, config.GeneralSettings.NodeDisplayName) - if err != nil { - return nil, err - } + err = nd.CreateShardGenesisBlock() if err != nil { return nil, err diff --git a/integrationTests/mock/keyMock.go b/integrationTests/mock/keyMock.go new file mode 100644 index 00000000000..eea03d6b37a --- /dev/null +++ b/integrationTests/mock/keyMock.go @@ -0,0 +1,64 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go/crypto" +) + +type PublicKeyMock struct { +} + +type PrivateKeyMock struct { +} + +type KeyGenMock struct { +} + +//------- PublicKeyMock + +func (sspk *PublicKeyMock) ToByteArray() ([]byte, error) { + return []byte("pubKey"), nil +} + +func (sspk *PublicKeyMock) Suite() crypto.Suite { + return nil +} + +func (sspk *PublicKeyMock) Point() crypto.Point { + return nil +} + +//------- PrivateKeyMock + +func (sk *PrivateKeyMock) ToByteArray() ([]byte, error) { + return []byte("privKey"), nil +} + +func (sk *PrivateKeyMock) GeneratePublic() crypto.PublicKey { + return &PublicKeyMock{} +} + +func (sk *PrivateKeyMock) Suite() crypto.Suite { + return nil +} + +func (sk *PrivateKeyMock) Scalar() crypto.Scalar { + return nil +} + +//------KeyGenMock + +func (keyGen *KeyGenMock) GeneratePair() (crypto.PrivateKey, crypto.PublicKey) { + return &PrivateKeyMock{}, &PublicKeyMock{} +} + +func (keyGen *KeyGenMock) PrivateKeyFromByteArray(b []byte) (crypto.PrivateKey, error) { + return &PrivateKeyMock{}, nil +} + +func (keyGen *KeyGenMock) PublicKeyFromByteArray(b []byte) (crypto.PublicKey, error) { + return &PublicKeyMock{}, nil +} + +func (keyGen *KeyGenMock) Suite() crypto.Suite { + return nil +} diff --git a/integrationTests/multiShard/block/interceptedHeadersSigVerification_test.go b/integrationTests/multiShard/block/interceptedHeadersSigVerification_test.go new file mode 100644 index 00000000000..7b8f08ddf28 --- /dev/null +++ 
b/integrationTests/multiShard/block/interceptedHeadersSigVerification_test.go @@ -0,0 +1,158 @@ +package block + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go/integrationTests" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/stretchr/testify/assert" +) + +func TestInterceptedShardBlockHeaderVerifiedWithCorrectConsensusGroup(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + nodesPerShard := 7 + nbMetaNodes := 7 + nbShards := 1 + consensusGroupSize := 3 + + advertiser := integrationTests.CreateMessengerWithKadDht(context.Background(), "") + _ = advertiser.Bootstrap() + + seedAddress := integrationTests.GetConnectableAddress(advertiser) + + // create map of shard - testNodeProcessors for metachain and shard chain + nodesMap := integrationTests.CreateNodesWithNodesCoordinator( + nodesPerShard, + nbMetaNodes, + nbShards, + consensusGroupSize, + consensusGroupSize, + seedAddress, + ) + + for _, nodes := range nodesMap { + integrationTests.DisplayAndStartNodes(nodes) + } + + defer func() { + _ = advertiser.Close() + for _, nodes := range nodesMap { + for _, n := range nodes { + _ = n.Node.Stop() + } + } + }() + + fmt.Println("Shard node generating header and block body...") + + // one testNodeProcessor from shard proposes block signed by all other nodes in shard consensus + randomness := []byte("random seed") + round := uint64(1) + nonce := uint64(1) + + body, header, _ := integrationTests.ProposeBlockWithConsensusSignature(0, nodesMap, round, nonce, randomness) + + nodesMap[0][0].BroadcastBlock(body, header) + + time.Sleep(time.Second) + + headerBytes, _ := integrationTests.TestMarshalizer.Marshal(header) + headerHash := integrationTests.TestHasher.Compute(string(headerBytes)) + + // all nodes in metachain have the block header in pool as interceptor validates it + for _, metaNode := range nodesMap[sharding.MetachainShardId] { + v, ok := metaNode.MetaDataPool.ShardHeaders().Get(headerHash) + assert.True(t, ok) + assert.Equal(t, header, v) + } + + // all nodes in shard have the block in pool as interceptor validates it + for _, shardNode := range nodesMap[0] { + v, ok := shardNode.ShardDataPool.Headers().Get(headerHash) + assert.True(t, ok) + assert.Equal(t, header, v) + } +} + +func TestInterceptedMetaBlockAreVerifiedWithCorrectConsensusGroup(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + nodesPerShard := 7 + nbMetaNodes := 7 + nbShards := 1 + consensusGroupSize := 3 + + advertiser := integrationTests.CreateMessengerWithKadDht(context.Background(), "") + _ = advertiser.Bootstrap() + + seedAddress := integrationTests.GetConnectableAddress(advertiser) + + // create map of shard - testNodeProcessors for metachain and shard chain + nodesMap := integrationTests.CreateNodesWithNodesCoordinator( + nodesPerShard, + nbMetaNodes, + nbShards, + consensusGroupSize, + consensusGroupSize, + seedAddress, + ) + + for _, nodes := range nodesMap { + integrationTests.DisplayAndStartNodes(nodes) + } + + defer func() { + _ = advertiser.Close() + for _, nodes := range nodesMap { + for _, n := range nodes { + _ = n.Node.Stop() + } + } + }() + + fmt.Println("Metachain node Generating header and block body...") + + // one testNodeProcessor from shard proposes block signed by all other nodes in shard consensus + randomness := []byte("random seed") + round := uint64(1) + nonce := uint64(1) + + body, header, _ := integrationTests.ProposeBlockWithConsensusSignature( + sharding.MetachainShardId, + 
nodesMap, + round, + nonce, + randomness, + ) + + // change the round so that signature becomes invalid + header.SetRound(2) + nodesMap[sharding.MetachainShardId][0].BroadcastBlock(body, header) + + time.Sleep(time.Second) + + headerBytes, _ := integrationTests.TestMarshalizer.Marshal(header) + headerHash := integrationTests.TestHasher.Compute(string(headerBytes)) + + // all nodes in metachain do not have the block in pool as interceptor does not validate it with a wrong consensus + for _, metaNode := range nodesMap[sharding.MetachainShardId] { + v, ok := metaNode.MetaDataPool.MetaChainBlocks().Get(headerHash) + assert.True(t, ok) + assert.Equal(t, header, v) + } + + // all nodes in shard do not have the block in pool as interceptor does not validate it with a wrong consensus + for _, shardNode := range nodesMap[0] { + v, ok := shardNode.ShardDataPool.MetaBlocks().Get(headerHash) + assert.True(t, ok) + assert.Equal(t, header, v) + } +} diff --git a/integrationTests/testInitializer.go b/integrationTests/testInitializer.go index 24ffe895742..0e6d35bfa78 100644 --- a/integrationTests/testInitializer.go +++ b/integrationTests/testInitializer.go @@ -1107,40 +1107,14 @@ func ProposeAndSyncOneBlock( return round, nonce } -// CreateValidatorKeys -func CreateValidatorKeys(nodesPerShard int, nbMetaNodes int, nbShards int) map[uint32][]*TestKeyPair { - suite := kyber.NewBlakeSHA256Ed25519() - keyGen := signing.NewKeyGenerator(suite) - - keysMap := make(map[uint32][]*TestKeyPair) - keyPairs := make([]*TestKeyPair, nodesPerShard) - for shardId := 0; shardId < nbShards; shardId++ { - for n := 0; n < nodesPerShard; n++ { - kp := &TestKeyPair{} - kp.sk, kp.pk = keyGen.GeneratePair() - keyPairs[n] = kp - } - keysMap[uint32(shardId)] = keyPairs - } - - keyPairs = make([]*TestKeyPair, nbMetaNodes) - for n := 0; n < nbMetaNodes; n++ { - kp := &TestKeyPair{} - kp.sk, kp.pk = keyGen.GeneratePair() - keyPairs[n] = kp - } - keysMap[sharding.MetachainShardId] = keyPairs - - return keysMap -} - +// PubKeysMapFromKeysMap returns a map of public keys per shard from the key pairs per shard map. 
func PubKeysMapFromKeysMap(keyPairMap map[uint32][]*TestKeyPair) map[uint32][]string { keysMap := make(map[uint32][]string, 0) for shardId, pairList := range keyPairMap { shardKeys := make([]string, len(pairList)) for i, pair := range pairList { - b, _ := pair.pk.ToByteArray() + b, _ := pair.Pk.ToByteArray() shardKeys[i] = string(b) } keysMap[shardId] = shardKeys @@ -1149,6 +1123,7 @@ func PubKeysMapFromKeysMap(keyPairMap map[uint32][]*TestKeyPair) map[uint32][]st return keysMap } +// GenValidatorsFromPubKeys generates a map of validators per shard out of public keys map func GenValidatorsFromPubKeys(pubKeysMap map[uint32][]string) map[uint32][]sharding.Validator { validatorsMap := make(map[uint32][]sharding.Validator) @@ -1164,8 +1139,9 @@ func GenValidatorsFromPubKeys(pubKeysMap map[uint32][]string) map[uint32][]shard return validatorsMap } +// CreateCryptoParams generates the crypto parameters (key pairs, key generator and suite) for multiple nodes func CreateCryptoParams(nodesPerShard int, nbMetaNodes int, nbShards uint32) *CryptoParams { - suite := kyber.NewBlakeSHA256Ed25519() + suite := kyber.NewSuitePairingBn256() singleSigner := &singlesig.SchnorrSigner{} keyGen := signing.NewKeyGenerator(suite) @@ -1174,7 +1150,7 @@ func CreateCryptoParams(nodesPerShard int, nbMetaNodes int, nbShards uint32) *Cr for shardId := uint32(0); shardId < nbShards; shardId++ { for n := 0; n < nodesPerShard; n++ { kp := &TestKeyPair{} - kp.sk, kp.pk = keyGen.GeneratePair() + kp.Sk, kp.Pk = keyGen.GeneratePair() keyPairs[n] = kp } keysMap[uint32(shardId)] = keyPairs @@ -1183,7 +1159,7 @@ func CreateCryptoParams(nodesPerShard int, nbMetaNodes int, nbShards uint32) *Cr keyPairs = make([]*TestKeyPair, nbMetaNodes) for n := 0; n < nbMetaNodes; n++ { kp := &TestKeyPair{} - kp.sk, kp.pk = keyGen.GeneratePair() + kp.Sk, kp.Pk = keyGen.GeneratePair() keyPairs[n] = kp } keysMap[sharding.MetachainShardId] = keyPairs diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index 51620291b99..db695bfc9c4 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -60,8 +60,8 @@ var TestUint64Converter = uint64ByteSlice.NewBigEndianConverter() // TestKeyPair holds a pair of private/public Keys type TestKeyPair struct { - sk crypto.PrivateKey - pk crypto.PublicKey + Sk crypto.PrivateKey + Pk crypto.PublicKey } //CryptoParams holds crypto parametres @@ -79,6 +79,7 @@ type TestProcessorNode struct { Messenger p2p.Messenger OwnAccount *TestWalletAccount + NodeKeys *TestKeyPair ShardDataPool dataRetriever.PoolsHolder MetaDataPool dataRetriever.MetaPoolsHolder @@ -108,6 +109,8 @@ type TestProcessorNode struct { BlockProcessor process.BlockProcessor BroadcastMessenger consensus.BroadcastMessenger + MultiSigner crypto.MultiSigner + //Node is used to call the functionality already implemented in it Node *node.Node ScDataGetter external.ScDataGetter @@ -128,6 +131,8 @@ func NewTestProcessorNode( shardCoordinator, _ := sharding.NewMultiShardCoordinator(maxShards, nodeShardId) nodesCoordinator := &mock.NodesCoordinatorMock{} + kg := &mock.KeyGenMock{} + sk, pk := kg.GeneratePair() messenger := CreateMessengerWithKadDht(context.Background(), initialNodeAddr) tpn := &TestProcessorNode{ @@ -136,36 +141,12 @@ func NewTestProcessorNode( NodesCoordinator: nodesCoordinator, } - tpn.OwnAccount = CreateTestWalletAccount(shardCoordinator, txSignPrivKeyShardId) - tpn.initDataPools() - tpn.initTestNode() - - return tpn -} - -// 
NewTestProcessorNodeWithCustomNodesCoordinator returns a new TestProcessorNode instance with custom NodesCoordinator -func NewTestProcessorNodeWithCustomNodesCoordinator( - maxShards uint32, - nodeShardId uint32, - initialNodeAddr string, - nodesCoordinator sharding.NodesCoordinator, - cp *CryptoParams, - keyIndex int, -) *TestProcessorNode { - - shardCoordinator, _ := sharding.NewMultiShardCoordinator(maxShards, nodeShardId) - - messenger := CreateMessengerWithKadDht(context.Background(), initialNodeAddr) - tpn := &TestProcessorNode{ - ShardCoordinator: shardCoordinator, - Messenger: messenger, - NodesCoordinator: nodesCoordinator, + tpn.NodeKeys = &TestKeyPair{ + Sk: sk, + Pk: pk, } - - sk := cp.Keys[nodeShardId][keyIndex].sk - pk := cp.Keys[nodeShardId][keyIndex].pk - - tpn.OwnAccount = CreateTestWalletAccountWithSkPk(sk, pk, cp.KeyGen) + tpn.MultiSigner = TestMultiSig + tpn.OwnAccount = CreateTestWalletAccount(shardCoordinator, txSignPrivKeyShardId) tpn.initDataPools() tpn.initTestNode() @@ -178,6 +159,8 @@ func NewTestProcessorNodeWithCustomDataPool(maxShards uint32, nodeShardId uint32 messenger := CreateMessengerWithKadDht(context.Background(), initialNodeAddr) nodesCoordinator := &mock.NodesCoordinatorMock{} + kg := &mock.KeyGenMock{} + sk, pk := kg.GeneratePair() tpn := &TestProcessorNode{ ShardCoordinator: shardCoordinator, @@ -185,6 +168,11 @@ func NewTestProcessorNodeWithCustomDataPool(maxShards uint32, nodeShardId uint32 NodesCoordinator: nodesCoordinator, } + tpn.NodeKeys = &TestKeyPair{ + Sk: sk, + Pk: pk, + } + tpn.MultiSigner = TestMultiSig tpn.OwnAccount = CreateTestWalletAccount(shardCoordinator, txSignPrivKeyShardId) if tpn.ShardCoordinator.SelfId() != sharding.MetachainShardId { tpn.ShardDataPool = dPool @@ -483,12 +471,15 @@ func (tpn *TestProcessorNode) initNode() { node.WithAccountsAdapter(tpn.AccntState), node.WithKeyGen(tpn.OwnAccount.KeygenTxSign), node.WithShardCoordinator(tpn.ShardCoordinator), + node.WithNodesCoordinator(tpn.NodesCoordinator), node.WithBlockChain(tpn.BlockChain), node.WithUint64ByteSliceConverter(TestUint64Converter), - node.WithMultiSigner(TestMultiSig), + node.WithMultiSigner(tpn.MultiSigner), node.WithSingleSigner(tpn.OwnAccount.SingleSigner), node.WithTxSignPrivKey(tpn.OwnAccount.SkTxSign), node.WithTxSignPubKey(tpn.OwnAccount.PkTxSign), + node.WithPrivKey(tpn.NodeKeys.Sk), + node.WithPubKey(tpn.NodeKeys.Pk), node.WithInterceptorsContainer(tpn.InterceptorsContainer), node.WithResolversFinder(tpn.ResolverFinder), node.WithBlockProcessor(tpn.BlockProcessor), diff --git a/integrationTests/testProcessorNodeWithMultisigner.go b/integrationTests/testProcessorNodeWithMultisigner.go new file mode 100644 index 00000000000..743e966f52b --- /dev/null +++ b/integrationTests/testProcessorNodeWithMultisigner.go @@ -0,0 +1,189 @@ +package integrationTests + +import ( + "bytes" + "context" + "fmt" + + "github.com/ElrondNetwork/elrond-go/cmd/node/factory" + "github.com/ElrondNetwork/elrond-go/crypto" + kmultisig "github.com/ElrondNetwork/elrond-go/crypto/signing/kyber/multisig" + "github.com/ElrondNetwork/elrond-go/crypto/signing/multisig" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/hashing/blake2b" + "github.com/ElrondNetwork/elrond-go/sharding" +) + +// NewTestProcessorNodeWithCustomNodesCoordinator returns a new TestProcessorNode instance with custom NodesCoordinator +func NewTestProcessorNodeWithCustomNodesCoordinator( + maxShards uint32, + nodeShardId uint32, + initialNodeAddr string, + nodesCoordinator 
sharding.NodesCoordinator, + cp *CryptoParams, + keyIndex int, +) *TestProcessorNode { + + shardCoordinator, _ := sharding.NewMultiShardCoordinator(maxShards, nodeShardId) + + messenger := CreateMessengerWithKadDht(context.Background(), initialNodeAddr) + tpn := &TestProcessorNode{ + ShardCoordinator: shardCoordinator, + Messenger: messenger, + NodesCoordinator: nodesCoordinator, + } + tpn.NodeKeys = cp.Keys[nodeShardId][keyIndex] + + llsig := &kmultisig.KyberMultiSignerBLS{} + blsHasher := blake2b.Blake2b{HashSize: factory.BlsHashSize} + + pubKeysMap := PubKeysMapFromKeysMap(cp.Keys) + + tpn.MultiSigner, _ = multisig.NewBLSMultisig( + llsig, + blsHasher, + pubKeysMap[nodeShardId], + tpn.NodeKeys.Sk, + cp.KeyGen, + 0, + ) + if tpn.MultiSigner == nil { + fmt.Println("Error generating multisigner") + } + accountShardId := nodeShardId + if nodeShardId == sharding.MetachainShardId { + accountShardId = 0 + } + + tpn.OwnAccount = CreateTestWalletAccount(shardCoordinator, accountShardId) + tpn.initDataPools() + tpn.initTestNode() + + return tpn +} + +// CreateNodesWithNodesCoordinator returns a map with nodes per shard each using a real nodes coordinator +func CreateNodesWithNodesCoordinator( + nodesPerShard int, + nbMetaNodes int, + nbShards int, + shardConsensusGroupSize int, + metaConsensusGroupSize int, + seedAddress string, +) map[uint32][]*TestProcessorNode { + cp := CreateCryptoParams(nodesPerShard, nbMetaNodes, uint32(nbShards)) + pubKeys := PubKeysMapFromKeysMap(cp.Keys) + validatorsMap := GenValidatorsFromPubKeys(pubKeys) + nodesMap := make(map[uint32][]*TestProcessorNode) + for shardId, validatorList := range validatorsMap { + nodesCoordinator, err := sharding.NewIndexHashedNodesCoordinator( + shardConsensusGroupSize, + metaConsensusGroupSize, + TestHasher, + shardId, + uint32(nbShards), + validatorsMap, + ) + + if err != nil { + fmt.Println("Error creating node coordinator") + } + + nodesList := make([]*TestProcessorNode, len(validatorList)) + for i := range validatorList { + nodesList[i] = NewTestProcessorNodeWithCustomNodesCoordinator( + uint32(nbShards), + shardId, + seedAddress, + nodesCoordinator, + cp, + i, + ) + } + nodesMap[shardId] = nodesList + } + + return nodesMap +} + +// ProposeBlockWithConsensusSignature proposes +func ProposeBlockWithConsensusSignature( + shardId uint32, + nodesMap map[uint32][]*TestProcessorNode, + round uint64, + nonce uint64, + randomness []byte, +) (data.BodyHandler, data.HeaderHandler, [][]byte) { + + nodesCoordinator := nodesMap[shardId][0].NodesCoordinator + pubKeys, err := nodesCoordinator.GetValidatorsPublicKeys(randomness, round, shardId) + if err != nil { + fmt.Println("Error getting the validators public keys: ", err) + } + + consensusNodes := selectTestNodesForPubKeys(nodesMap[shardId], pubKeys) + // first node is block proposer + body, header, txHashes := consensusNodes[0].ProposeBlock(round, nonce) + header.SetPrevRandSeed(randomness) + header = DoConsensusSigningOnBlock(header, consensusNodes, pubKeys) + + return body, header, txHashes +} + +func selectTestNodesForPubKeys(nodes []*TestProcessorNode, pubKeys []string) []*TestProcessorNode { + selectedNodes := make([]*TestProcessorNode, len(pubKeys)) + cntNodes := 0 + + for i, pk := range pubKeys { + for _, node := range nodes { + pubKeyBytes, _ := node.NodeKeys.Pk.ToByteArray() + if bytes.Equal(pubKeyBytes, []byte(pk)) { + selectedNodes[i] = node + cntNodes++ + } + } + } + + if cntNodes != len(pubKeys) { + fmt.Println("Error selecting nodes from public keys") + } + + return selectedNodes 
+} + +// DoConsensusSigningOnBlock simulates a consensus aggregated signature on the provided block +func DoConsensusSigningOnBlock( + blockHeader data.HeaderHandler, + consensusNodes []*TestProcessorNode, + pubKeys []string, +) data.HeaderHandler { + // set bitmap for all consensus nodes signing + bitmap := make([]byte, len(consensusNodes)/8+1) + for i := range bitmap { + bitmap[i] = 0xFF + } + + bitmap[len(consensusNodes)/8] >>= uint8(8 - (len(consensusNodes) % 8)) + blockHeader.SetPubKeysBitmap(bitmap) + // clear signature, as we need to compute it below + blockHeader.SetSignature(nil) + blockHeader.SetPubKeysBitmap(nil) + blockHeaderBytes, _ := TestMarshalizer.Marshal(blockHeader) + blockHeaderHash := TestHasher.Compute(string(blockHeaderBytes)) + + var msig crypto.MultiSigner + msigProposer, _ := consensusNodes[0].MultiSigner.Create(pubKeys, 0) + _, _ = msigProposer.CreateSignatureShare(blockHeaderHash, bitmap) + + for i := 1; i < len(consensusNodes); i++ { + msig, _ = consensusNodes[i].MultiSigner.Create(pubKeys, uint16(i)) + sigShare, _ := msig.CreateSignatureShare(blockHeaderHash, bitmap) + _ = msigProposer.StoreSignatureShare(uint16(i), sigShare) + } + + sig, _ := msigProposer.AggregateSigs(bitmap) + blockHeader.SetSignature(sig) + blockHeader.SetPubKeysBitmap(bitmap) + + return blockHeader +} diff --git a/integrationTests/testWalletAccount.go b/integrationTests/testWalletAccount.go index 4acd810dc71..6984fa40248 100644 --- a/integrationTests/testWalletAccount.go +++ b/integrationTests/testWalletAccount.go @@ -31,21 +31,6 @@ func CreateTestWalletAccount(coordinator sharding.Coordinator, shardId uint32) * return testWalletAccount } -// CreateTestWalletAccount creates an wallett account in a selected shard -func CreateTestWalletAccountWithSkPk(sk crypto.PrivateKey, pk crypto.PublicKey, keyGen crypto.KeyGenerator) *TestWalletAccount { - testWalletAccount := &TestWalletAccount{} - - testWalletAccount.SingleSigner = &singlesig.SchnorrSigner{} - - testWalletAccount.SkTxSign = sk - testWalletAccount.PkTxSign = pk - testWalletAccount.PkTxSignBytes, _ = pk.ToByteArray() - testWalletAccount.KeygenTxSign = keyGen - testWalletAccount.Address, _ = TestAddressConverter.CreateAddressFromPublicKeyBytes(testWalletAccount.PkTxSignBytes) - - return testWalletAccount -} - // initCrypto initializes the crypto for the account func (twa *TestWalletAccount) initCrypto(coordinator sharding.Coordinator, shardId uint32) { twa.SingleSigner = &singlesig.SchnorrSigner{} From 67c0f31eb7933c9cf057285a188545f57770320b Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Thu, 22 Aug 2019 13:50:12 +0300 Subject: [PATCH 068/234] integrationTests: Fix integration test --- .../interceptedHeadersSigVerification_test.go | 18 +++++++++--------- integrationTests/testInitializer.go | 6 +++--- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/integrationTests/multiShard/block/interceptedHeadersSigVerification_test.go b/integrationTests/multiShard/block/interceptedHeadersSigVerification_test.go index 7b8f08ddf28..fb718f586cc 100644 --- a/integrationTests/multiShard/block/interceptedHeadersSigVerification_test.go +++ b/integrationTests/multiShard/block/interceptedHeadersSigVerification_test.go @@ -11,13 +11,15 @@ import ( "github.com/stretchr/testify/assert" ) +const broadcastDelay = 2* time.Second + func TestInterceptedShardBlockHeaderVerifiedWithCorrectConsensusGroup(t *testing.T) { if testing.Short() { t.Skip("this is not a short test") } - nodesPerShard := 7 - nbMetaNodes := 7 + nodesPerShard := 4 + nbMetaNodes := 
4 nbShards := 1 consensusGroupSize := 3 @@ -60,7 +62,7 @@ func TestInterceptedShardBlockHeaderVerifiedWithCorrectConsensusGroup(t *testing nodesMap[0][0].BroadcastBlock(body, header) - time.Sleep(time.Second) + time.Sleep(broadcastDelay) headerBytes, _ := integrationTests.TestMarshalizer.Marshal(header) headerHash := integrationTests.TestHasher.Compute(string(headerBytes)) @@ -80,13 +82,13 @@ func TestInterceptedShardBlockHeaderVerifiedWithCorrectConsensusGroup(t *testing } } -func TestInterceptedMetaBlockAreVerifiedWithCorrectConsensusGroup(t *testing.T) { +func TestInterceptedMetaBlockVerifiedWithCorrectConsensusGroup(t *testing.T) { if testing.Short() { t.Skip("this is not a short test") } - nodesPerShard := 7 - nbMetaNodes := 7 + nodesPerShard := 4 + nbMetaNodes := 4 nbShards := 1 consensusGroupSize := 3 @@ -133,11 +135,9 @@ func TestInterceptedMetaBlockAreVerifiedWithCorrectConsensusGroup(t *testing.T) randomness, ) - // change the round so that signature becomes invalid - header.SetRound(2) nodesMap[sharding.MetachainShardId][0].BroadcastBlock(body, header) - time.Sleep(time.Second) + time.Sleep(broadcastDelay) headerBytes, _ := integrationTests.TestMarshalizer.Marshal(header) headerHash := integrationTests.TestHasher.Compute(string(headerBytes)) diff --git a/integrationTests/testInitializer.go b/integrationTests/testInitializer.go index 0e6d35bfa78..e989da432fd 100644 --- a/integrationTests/testInitializer.go +++ b/integrationTests/testInitializer.go @@ -8,7 +8,6 @@ import ( "encoding/binary" "encoding/hex" "fmt" - "github.com/ElrondNetwork/elrond-go/hashing/sha256" "math/big" "strings" "sync" @@ -32,6 +31,7 @@ import ( "github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool" "github.com/ElrondNetwork/elrond-go/dataRetriever/shardedData" "github.com/ElrondNetwork/elrond-go/display" + "github.com/ElrondNetwork/elrond-go/hashing/sha256" "github.com/ElrondNetwork/elrond-go/integrationTests/mock" "github.com/ElrondNetwork/elrond-go/node" "github.com/ElrondNetwork/elrond-go/p2p" @@ -673,7 +673,7 @@ func CreateNodes( for i := 0; i < numMetaChainNodes; i++ { metaNode := NewTestProcessorNode(uint32(numOfShards), sharding.MetachainShardId, 0, serviceID) - idx := i + numOfShards*nodesPerShard + idx = i + numOfShards*nodesPerShard nodes[idx] = metaNode } @@ -1153,7 +1153,7 @@ func CreateCryptoParams(nodesPerShard int, nbMetaNodes int, nbShards uint32) *Cr kp.Sk, kp.Pk = keyGen.GeneratePair() keyPairs[n] = kp } - keysMap[uint32(shardId)] = keyPairs + keysMap[shardId] = keyPairs } keyPairs = make([]*TestKeyPair, nbMetaNodes) From 0dff1e307a2613e2dcdd7fa56434cbf69d65b6fa Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Sun, 25 Aug 2019 23:17:36 +0300 Subject: [PATCH 069/234] process, sharding, consensus, node, integrationTests, config, dataRetriever: fees for proposer and rewards for consensus group members Add rewardTransactions pool Adapt integration tests & config --- cmd/node/config/config.toml | 14 + cmd/node/factory/structs.go | 74 +++-- config/config.go | 2 + consensus/mock/blockProcessorMock.go | 5 +- consensus/mock/mockTestInitializer.go | 2 +- consensus/mock/nodesCoordinatorMock.go | 42 ++- .../spos/commonSubround/subroundStartRound.go | 4 +- .../commonSubround/subroundStartRound_test.go | 6 +- consensus/spos/consensusCoreValidator_test.go | 2 +- consensus/spos/consensusState.go | 17 +- consensus/spos/consensusState_test.go | 35 +-- data/address/specialAddresses.go | 27 +- data/block/block.go | 6 +- data/mock/unsignedTxHandlerMock.go | 19 +- dataRetriever/dataPool/shardDataPool.go | 11 
+ dataRetriever/dataPool/shardDataPool_test.go | 27 ++ dataRetriever/errors.go | 3 + dataRetriever/interface.go | 5 +- dataRetriever/mock/poolsHolderStub.go | 5 + integrationTests/consensus/testInitializer.go | 2 + integrationTests/mock/blockProcessorMock.go | 28 +- integrationTests/mock/nodesCoordinatorMock.go | 34 ++- .../mock/specialAddressHandlerMock.go | 9 +- .../mock/unsignedTxHandlerMock.go | 20 +- .../smartContract/testInitilalizer.go | 14 +- integrationTests/testInitializer.go | 2 + integrationTests/testProcessorNode.go | 11 +- node/mock/blockProcessorStub.go | 4 + node/mock/nodesCoordinatorMock.go | 42 ++- node/mock/poolsHolderStub.go | 5 + process/block/baseProcess.go | 26 +- process/block/baseProcess_test.go | 8 + process/block/export_test.go | 4 + process/block/metablock.go | 11 + process/block/metablock_test.go | 84 ++++++ process/block/shardblock.go | 30 +- process/block/shardblock_test.go | 162 ++++++++++ process/coordinator/process.go | 37 +-- process/errors.go | 4 +- process/factory/factory.go | 2 + .../intermediateProcessorsContainerFactory.go | 8 +- process/interface.go | 8 +- process/mock/blockProcessorMock.go | 4 + process/mock/nodesCoordinatorMock.go | 44 ++- process/mock/poolsHolderFake.go | 14 +- process/mock/poolsHolderStub.go | 5 + process/mock/specialAddressHandlerMock.go | 9 +- process/mock/unsignedTxHandlerMock.go | 20 +- process/smartContract/export_test.go | 3 +- process/smartContract/process.go | 18 +- process/transaction/process.go | 19 +- process/unsigned/feeTxHandler.go | 257 ---------------- process/unsigned/rewardsHandler.go | 283 ++++++++++++++++++ ...Handler_test.go => rewardsHandler_test.go} | 125 ++++---- sharding/indexHashedNodesCoordinator.go | 28 +- sharding/interface.go | 1 + 56 files changed, 1144 insertions(+), 547 deletions(-) delete mode 100644 process/unsigned/feeTxHandler.go create mode 100644 process/unsigned/rewardsHandler.go rename process/unsigned/{feeTxHandler_test.go => rewardsHandler_test.go} (55%) diff --git a/cmd/node/config/config.toml b/cmd/node/config/config.toml index 7df785026a0..e12547035e1 100644 --- a/cmd/node/config/config.toml +++ b/cmd/node/config/config.toml @@ -97,6 +97,16 @@ BatchDelaySeconds = 15 MaxBatchSize = 45000 +[RewardTxStorage] + [RewardTxStorage.Cache] + Size = 10000 + Type = "LRU" + [RewardTxStorage.DB] + FilePath = "RewardTransactions" + Type = "LvlDBSerial" + BatchDelaySeconds = 15 + MaxBatchSize = 500 + [ShardHdrNonceHashStorage] [ShardHdrNonceHashStorage.Cache] Size = 1000 @@ -164,6 +174,10 @@ Size = 100000 Type = "LRU" +[RewardTransactionDataPool] + Size = 5000 + Type = "LRU" + [ShardHeadersDataPool] Size = 1000 Type = "LRU" diff --git a/cmd/node/factory/structs.go b/cmd/node/factory/structs.go index 832e1aefa04..e11b18c6c38 100644 --- a/cmd/node/factory/structs.go +++ b/cmd/node/factory/structs.go @@ -496,6 +496,7 @@ func ProcessComponentsFactory(args *processComponentsFactoryArgs) (*Process, err blockProcessor, blockTracker, err := newBlockProcessorAndTracker( resolversFinder, args.shardCoordinator, + args.nodesCoordinator, args.data, args.core, args.state, @@ -660,6 +661,7 @@ func createShardDataStoreFromConfig( var txUnit *storageUnit.Unit var metachainHeaderUnit *storageUnit.Unit var unsignedTxUnit *storageUnit.Unit + var rewardTxUnit *storageUnit.Unit var metaHdrHashNonceUnit *storageUnit.Unit var shardHdrHashNonceUnit *storageUnit.Unit var err error @@ -682,6 +684,9 @@ func createShardDataStoreFromConfig( if unsignedTxUnit != nil { _ = unsignedTxUnit.DestroyUnit() } + if rewardTxUnit != nil { 
+ _ = rewardTxUnit.DestroyUnit() + } if metachainHeaderUnit != nil { _ = metachainHeaderUnit.DestroyUnit() } @@ -710,6 +715,14 @@ func createShardDataStoreFromConfig( return nil, err } + rewardTxUnit, err = storageUnit.NewStorageUnitFromConf( + getCacherFromConfig(config.RewardTxStorage.Cache), + getDBFromConfig(config.RewardTxStorage.DB, uniqueID), + getBloomFromConfig(config.RewardTxStorage.Bloom)) + if err != nil { + return nil, err + } + miniBlockUnit, err = storageUnit.NewStorageUnitFromConf( getCacherFromConfig(config.MiniBlocksStorage.Cache), getDBFromConfig(config.MiniBlocksStorage.DB, uniqueID), @@ -768,6 +781,7 @@ func createShardDataStoreFromConfig( store.AddStorer(dataRetriever.BlockHeaderUnit, headerUnit) store.AddStorer(dataRetriever.MetaBlockUnit, metachainHeaderUnit) store.AddStorer(dataRetriever.UnsignedTransactionUnit, unsignedTxUnit) + store.AddStorer(dataRetriever.RewardTransactionUnit, rewardTxUnit) store.AddStorer(dataRetriever.MetaHdrNonceHashDataUnit, metaHdrHashNonceUnit) hdrNonceHashDataUnit := dataRetriever.ShardHdrNonceHashDataUnit + dataRetriever.UnitType(shardCoordinator.SelfId()) store.AddStorer(hdrNonceHashDataUnit, shardHdrHashNonceUnit) @@ -893,7 +907,13 @@ func createShardDataPoolFromConfig( uTxPool, err := shardedData.NewShardedData(getCacherFromConfig(config.UnsignedTransactionDataPool)) if err != nil { - log.Info("error creating smart contract result") + log.Info("error creating smart contract result pool") + return nil, err + } + + rewardTxPool, err := shardedData.NewShardedData(getCacherFromConfig(config.RewardTxDataPool)) + if err != nil { + log.Info("error creating transaction fees pool") return nil, err } @@ -940,6 +960,7 @@ func createShardDataPoolFromConfig( return dataPool.NewShardedDataPool( txPool, uTxPool, + rewardTxPool, hdrPool, hdrNonces, txBlockBody, @@ -1263,6 +1284,7 @@ func generateGenesisHeadersForInit( func newBlockProcessorAndTracker( resolversFinder dataRetriever.ResolversFinder, shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, data *Data, core *Core, state *State, @@ -1272,10 +1294,26 @@ func newBlockProcessorAndTracker( coreServiceContainer serviceContainer.Core, ) (process.BlockProcessor, process.BlocksTracker, error) { + //TODO: replace with correct community address and invalid burnAddress + communityAddress, _ := hex.DecodeString("1bedf9f1db526aa98eb61f251e6eb29df64c0a4d96261b6fe9d4df1bc2cf5420") + burnAddress, _ := hex.DecodeString("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef") + + // TODO: construct this correctly on the PR + specialAddressHolder, err := address.NewSpecialAddressHolder( + communityAddress, + burnAddress, + state.AddressConverter, + shardCoordinator) + if err != nil { + return nil, nil, err + } + if shardCoordinator.SelfId() < shardCoordinator.NumberOfShards() { return newShardBlockProcessorAndTracker( resolversFinder, shardCoordinator, + nodesCoordinator, + specialAddressHolder, data, core, state, @@ -1289,6 +1327,8 @@ func newBlockProcessorAndTracker( return newMetaBlockProcessorAndTracker( resolversFinder, shardCoordinator, + nodesCoordinator, + specialAddressHolder, data, core, state, @@ -1304,6 +1344,8 @@ func newBlockProcessorAndTracker( func newShardBlockProcessorAndTracker( resolversFinder dataRetriever.ResolversFinder, shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, + specialAddressHandler process.SpecialAddressHandler, data *Data, core *Core, state *State, @@ -1327,30 +1369,12 @@ func 
newShardBlockProcessorAndTracker( return nil, nil, err } - //TODO: replace with valid address - communityAddress, _ := hex.DecodeString("1bedf9f1db526aa98eb61f251e6eb29df64c0a4d96261b6fe9d4df1bc2cf5420") - burnAddress := communityAddress - leaderAddress := communityAddress - - // TODO: construct this correctly on the PR - specialAddressHolder, err := address.NewSpecialAddressHolder( - communityAddress, - burnAddress, - state.AddressConverter, - shardCoordinator) - if err != nil { - return nil, nil, err - } - - // TODO: remove when valid leader address is set in each round - specialAddressHolder.SetLeaderAddress(leaderAddress) - interimProcFactory, err := shard.NewIntermediateProcessorsContainerFactory( shardCoordinator, core.Marshalizer, core.Hasher, state.AddressConverter, - specialAddressHolder, + specialAddressHandler, data.Store, ) if err != nil { @@ -1367,12 +1391,12 @@ func newShardBlockProcessorAndTracker( return nil, nil, err } - feeTxInterim, err := interimProcContainer.Get(dataBlock.TxFeeBlock) + rewardsTxInterim, err := interimProcContainer.Get(dataBlock.RewardsBlockType) if err != nil { return nil, nil, err } - feeTxHandler, ok := feeTxInterim.(process.UnsignedTxHandler) + feeTxHandler, ok := rewardsTxInterim.(process.UnsignedTxHandler) if !ok { return nil, nil, process.ErrWrongTypeAssertion } @@ -1476,6 +1500,8 @@ func newShardBlockProcessorAndTracker( core.Marshalizer, state.AccountsAdapter, shardCoordinator, + nodesCoordinator, + specialAddressHandler, forkDetector, blockTracker, shardsGenesisBlocks, @@ -1498,6 +1524,8 @@ func newShardBlockProcessorAndTracker( func newMetaBlockProcessorAndTracker( resolversFinder dataRetriever.ResolversFinder, shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, + specialAddressHandler process.SpecialAddressHandler, data *Data, core *Core, state *State, @@ -1526,6 +1554,8 @@ func newMetaBlockProcessorAndTracker( data.MetaDatapool, forkDetector, shardCoordinator, + nodesCoordinator, + specialAddressHandler, core.Hasher, core.Marshalizer, data.Store, diff --git a/config/config.go b/config/config.go index 77ba878d2d4..32fb434ff54 100644 --- a/config/config.go +++ b/config/config.go @@ -62,6 +62,7 @@ type Config struct { BlockHeaderStorage StorageConfig TxStorage StorageConfig UnsignedTransactionStorage StorageConfig + RewardTxStorage StorageConfig ShardHdrNonceHashStorage StorageConfig MetaHdrNonceHashStorage StorageConfig @@ -79,6 +80,7 @@ type Config struct { BlockHeaderNoncesDataPool CacheConfig TxDataPool CacheConfig UnsignedTransactionDataPool CacheConfig + RewardTxDataPool CacheConfig MetaBlockBodyDataPool CacheConfig MiniBlockHeaderHashesDataPool CacheConfig diff --git a/consensus/mock/blockProcessorMock.go b/consensus/mock/blockProcessorMock.go index 8e73c670be0..9455ecef1fb 100644 --- a/consensus/mock/blockProcessorMock.go +++ b/consensus/mock/blockProcessorMock.go @@ -38,7 +38,7 @@ func (blProcMock *BlockProcessorMock) RevertAccountState() { blProcMock.RevertAccountStateCalled() } -// CreateTxBlockBody mocks the creation of a transaction block body +// CreateBlockBody mocks the creation of a transaction block body func (blProcMock *BlockProcessorMock) CreateBlockBody(round uint64, haveTime func() bool) (data.BodyHandler, error) { return blProcMock.CreateBlockCalled(round, haveTime) } @@ -66,3 +66,6 @@ func (blProcMock BlockProcessorMock) DecodeBlockHeader(dta []byte) data.HeaderHa func (blProcMock BlockProcessorMock) AddLastNotarizedHdr(shardId uint32, processedHdr data.HeaderHandler) { 
blProcMock.AddLastNotarizedHdrCalled(shardId, processedHdr) } + +func (blProcMock BlockProcessorMock) SetConsensusRewardAddresses(addresses []string) { +} diff --git a/consensus/mock/mockTestInitializer.go b/consensus/mock/mockTestInitializer.go index f738bf6582c..e45adea2759 100644 --- a/consensus/mock/mockTestInitializer.go +++ b/consensus/mock/mockTestInitializer.go @@ -122,7 +122,7 @@ func InitConsensusCore() *ConsensusCoreMock { rounderMock := &RounderMock{} shardCoordinatorMock := ShardCoordinatorMock{} syncTimerMock := &SyncTimerMock{} - validatorGroupSelector := NodesCoordinatorMock{} + validatorGroupSelector := &NodesCoordinatorMock{} container := &ConsensusCoreMock{ blockChain, diff --git a/consensus/mock/nodesCoordinatorMock.go b/consensus/mock/nodesCoordinatorMock.go index 420b4bedab8..43f7f9c450f 100644 --- a/consensus/mock/nodesCoordinatorMock.go +++ b/consensus/mock/nodesCoordinatorMock.go @@ -7,11 +7,12 @@ import ( ) type NodesCoordinatorMock struct { - ComputeValidatorsGroupCalled func(randomness []byte, round uint64, shardId uint32) ([]sharding.Validator, error) - GetValidatorsPublicKeysCalled func(randomness []byte, round uint64, shardId uint32) ([]string, error) + ComputeValidatorsGroupCalled func(randomness []byte, round uint64, shardId uint32) ([]sharding.Validator, error) + GetValidatorsPublicKeysCalled func(randomness []byte, round uint64, shardId uint32) ([]string, error) + GetValidatorsRewardsAddressesCalled func(randomness []byte, round uint64, shardId uint32) ([]string, error) } -func (ncm NodesCoordinatorMock) ComputeValidatorsGroup( +func (ncm *NodesCoordinatorMock) ComputeValidatorsGroup( randomness []byte, round uint64, shardId uint32, @@ -36,7 +37,7 @@ func (ncm NodesCoordinatorMock) ComputeValidatorsGroup( return list, nil } -func (ncm NodesCoordinatorMock) GetValidatorsPublicKeys(randomness []byte, round uint64, shardId uint32) ([]string, error) { +func (ncm *NodesCoordinatorMock) GetValidatorsPublicKeys(randomness []byte, round uint64, shardId uint32) ([]string, error) { if ncm.GetValidatorsPublicKeysCalled != nil { return ncm.GetValidatorsPublicKeysCalled(randomness, round, shardId) } @@ -55,22 +56,45 @@ func (ncm NodesCoordinatorMock) GetValidatorsPublicKeys(randomness []byte, round return pubKeys, nil } -func (ncm NodesCoordinatorMock) ConsensusGroupSize(shardId uint32) int { +func (ncm *NodesCoordinatorMock) GetValidatorsRewardsAddresses( + randomness []byte, + round uint64, + shardId uint32, +) ([]string, error) { + if ncm.GetValidatorsPublicKeysCalled != nil { + return ncm.GetValidatorsRewardsAddressesCalled(randomness, round, shardId) + } + + validators, err := ncm.ComputeValidatorsGroup(randomness, round, shardId) + if err != nil { + return nil, err + } + + addresses := make([]string, 0) + + for _, v := range validators { + addresses = append(addresses, string(v.Address())) + } + + return addresses, nil +} + +func (ncm *NodesCoordinatorMock) ConsensusGroupSize(shardId uint32) int { panic("implement me") } -func (ncm NodesCoordinatorMock) SetNodesPerShards(map[uint32][]sharding.Validator) error { +func (ncm *NodesCoordinatorMock) SetNodesPerShards(map[uint32][]sharding.Validator) error { return nil } -func (ncm NodesCoordinatorMock) SetConsensusGroupSize(int) error { +func (ncm *NodesCoordinatorMock) SetConsensusGroupSize(int) error { panic("implement me") } -func (ncm NodesCoordinatorMock) GetSelectedPublicKeys(selection []byte, shardId uint32) (publicKeys []string, err error) { +func (ncm *NodesCoordinatorMock) GetSelectedPublicKeys(selection 
[]byte, shardId uint32) (publicKeys []string, err error) { panic("implement me") } -func (ncm NodesCoordinatorMock) GetValidatorWithPublicKey(publicKey []byte) (sharding.Validator, uint32, error) { +func (ncm *NodesCoordinatorMock) GetValidatorWithPublicKey(publicKey []byte) (sharding.Validator, uint32, error) { panic("implement me") } diff --git a/consensus/spos/commonSubround/subroundStartRound.go b/consensus/spos/commonSubround/subroundStartRound.go index b52367059af..12058c3dd06 100644 --- a/consensus/spos/commonSubround/subroundStartRound.go +++ b/consensus/spos/commonSubround/subroundStartRound.go @@ -206,7 +206,7 @@ func (sr *SubroundStartRound) generateNextConsensusGroup(roundIndex int64) error shardId := sr.ShardCoordinator().SelfId() - nextConsensusGroup, err := sr.GetNextConsensusGroup( + nextConsensusGroup, rewardsAddresses, err := sr.GetNextConsensusGroup( randomSeed, uint64(sr.RoundIndex), shardId, @@ -227,5 +227,7 @@ func (sr *SubroundStartRound) generateNextConsensusGroup(roundIndex int64) error sr.SetConsensusGroup(nextConsensusGroup) + sr.BlockProcessor().SetConsensusRewardAddresses(rewardsAddresses) + return nil } diff --git a/consensus/spos/commonSubround/subroundStartRound_test.go b/consensus/spos/commonSubround/subroundStartRound_test.go index 80c75c1645a..77ecb1d5729 100644 --- a/consensus/spos/commonSubround/subroundStartRound_test.go +++ b/consensus/spos/commonSubround/subroundStartRound_test.go @@ -323,7 +323,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldReturnFalseWhenShouldSyncRetur func TestSubroundStartRound_InitCurrentRoundShouldReturnFalseWhenGenerateNextConsensusGroupErr(t *testing.T) { t.Parallel() - validatorGroupSelector := mock.NodesCoordinatorMock{} + validatorGroupSelector := &mock.NodesCoordinatorMock{} err := errors.New("error") validatorGroupSelector.ComputeValidatorsGroupCalled = func(bytes []byte, round uint64, shardId uint32) ([]sharding.Validator, error) { return nil, err @@ -340,7 +340,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldReturnFalseWhenGenerateNextCon func TestSubroundStartRound_InitCurrentRoundShouldReturnFalseWhenGetLeaderErr(t *testing.T) { t.Parallel() - validatorGroupSelector := mock.NodesCoordinatorMock{} + validatorGroupSelector := &mock.NodesCoordinatorMock{} validatorGroupSelector.ComputeValidatorsGroupCalled = func( bytes []byte, round uint64, @@ -431,7 +431,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldReturnTrue(t *testing.T) { func TestSubroundStartRound_GenerateNextConsensusGroupShouldReturnErr(t *testing.T) { t.Parallel() - validatorGroupSelector := mock.NodesCoordinatorMock{} + validatorGroupSelector := &mock.NodesCoordinatorMock{} err := errors.New("error") validatorGroupSelector.ComputeValidatorsGroupCalled = func( diff --git a/consensus/spos/consensusCoreValidator_test.go b/consensus/spos/consensusCoreValidator_test.go index 8b908126912..bc13a911c76 100644 --- a/consensus/spos/consensusCoreValidator_test.go +++ b/consensus/spos/consensusCoreValidator_test.go @@ -22,7 +22,7 @@ func initConsensusDataContainer() *ConsensusCore { rounderMock := &mock.RounderMock{} shardCoordinatorMock := mock.ShardCoordinatorMock{} syncTimerMock := &mock.SyncTimerMock{} - validatorGroupSelector := mock.NodesCoordinatorMock{} + validatorGroupSelector := &mock.NodesCoordinatorMock{} return &ConsensusCore{ blockChain: blockChain, diff --git a/consensus/spos/consensusState.go b/consensus/spos/consensusState.go index c88db5e3dd9..cb34e1b472a 100644 --- a/consensus/spos/consensusState.go +++ 
b/consensus/spos/consensusState.go @@ -100,22 +100,23 @@ func (cns *ConsensusState) GetNextConsensusGroup( round uint64, shardId uint32, nodesCoordinator sharding.NodesCoordinator, -) ([]string, - error) { +) ([]string, []string, error) { validatorsGroup, err := nodesCoordinator.ComputeValidatorsGroup(randomSource, round, shardId) - if err != nil { - return nil, err + return nil, nil, err } - newConsensusGroup := make([]string, 0) + consensusSize := len(validatorsGroup) + newConsensusGroup := make([]string, consensusSize) + consensusRewardAddresses := make([]string, consensusSize) - for i := 0; i < len(validatorsGroup); i++ { - newConsensusGroup = append(newConsensusGroup, string(validatorsGroup[i].PubKey())) + for i := 0; i < consensusSize; i++ { + newConsensusGroup[i] = string(validatorsGroup[i].PubKey()) + consensusRewardAddresses[i] = string(validatorsGroup[i].Address()) } - return newConsensusGroup, nil + return newConsensusGroup, consensusRewardAddresses, nil } // IsConsensusDataSet method returns true if the consensus data for the current round is set and false otherwise diff --git a/consensus/spos/consensusState_test.go b/consensus/spos/consensusState_test.go index 04dc63cfc66..eb67fc567a2 100644 --- a/consensus/spos/consensusState_test.go +++ b/consensus/spos/consensusState_test.go @@ -138,7 +138,7 @@ func TestConsensusState_GetNextConsensusGroupShouldFailWhenComputeValidatorsGrou cns := internalInitConsensusState() - nodesCoordinator := mock.NodesCoordinatorMock{} + nodesCoordinator := &mock.NodesCoordinatorMock{} err := errors.New("error") nodesCoordinator.ComputeValidatorsGroupCalled = func( randomness []byte, @@ -148,7 +148,7 @@ func TestConsensusState_GetNextConsensusGroupShouldFailWhenComputeValidatorsGrou return nil, err } - _, err2 := cns.GetNextConsensusGroup([]byte(""), 0, 0, nodesCoordinator) + _, _, err2 := cns.GetNextConsensusGroup([]byte(""), 0, 0, nodesCoordinator) assert.Equal(t, err, err2) } @@ -157,11 +157,12 @@ func TestConsensusState_GetNextConsensusGroupShouldWork(t *testing.T) { cns := internalInitConsensusState() - nodesCoordinator := mock.NodesCoordinatorMock{} + nodesCoordinator := &mock.NodesCoordinatorMock{} - nextConsensusGroup, err := cns.GetNextConsensusGroup(nil, 0, 0, nodesCoordinator) + nextConsensusGroup, rewardAddresses, err := cns.GetNextConsensusGroup(nil, 0, 0, nodesCoordinator) assert.Nil(t, err) assert.NotNil(t, nextConsensusGroup) + assert.NotNil(t, rewardAddresses) } func TestConsensusState_IsConsensusDataSetShouldReturnTrue(t *testing.T) { @@ -213,13 +214,13 @@ func TestConsensusState_IsJobDoneShouldReturnFalse(t *testing.T) { cns := internalInitConsensusState() - cns.SetJobDone("1", bn.SrBlock, false) + _ = cns.SetJobDone("1", bn.SrBlock, false) assert.False(t, cns.IsJobDone("1", bn.SrBlock)) - cns.SetJobDone("1", bn.SrCommitment, true) + _ = cns.SetJobDone("1", bn.SrCommitment, true) assert.False(t, cns.IsJobDone("1", bn.SrBlock)) - cns.SetJobDone("2", bn.SrBlock, true) + _ = cns.SetJobDone("2", bn.SrBlock, true) assert.False(t, cns.IsJobDone("1", bn.SrBlock)) } @@ -228,7 +229,7 @@ func TestConsensusState_IsJobDoneShouldReturnTrue(t *testing.T) { cns := internalInitConsensusState() - cns.SetJobDone("1", bn.SrBlock, true) + _ = cns.SetJobDone("1", bn.SrBlock, true) assert.True(t, cns.IsJobDone("1", bn.SrBlock)) } @@ -238,13 +239,13 @@ func TestConsensusState_IsSelfJobDoneShouldReturnFalse(t *testing.T) { cns := internalInitConsensusState() - cns.SetJobDone(cns.SelfPubKey(), bn.SrBlock, false) + _ = cns.SetJobDone(cns.SelfPubKey(), 
bn.SrBlock, false) assert.False(t, cns.IsSelfJobDone(bn.SrBlock)) - cns.SetJobDone(cns.SelfPubKey(), bn.SrCommitment, true) + _ = cns.SetJobDone(cns.SelfPubKey(), bn.SrCommitment, true) assert.False(t, cns.IsSelfJobDone(bn.SrBlock)) - cns.SetJobDone(cns.SelfPubKey()+"X", bn.SrBlock, true) + _ = cns.SetJobDone(cns.SelfPubKey()+"X", bn.SrBlock, true) assert.False(t, cns.IsSelfJobDone(bn.SrBlock)) } @@ -253,7 +254,7 @@ func TestConsensusState_IsSelfJobDoneShouldReturnTrue(t *testing.T) { cns := internalInitConsensusState() - cns.SetJobDone(cns.SelfPubKey(), bn.SrBlock, true) + _ = cns.SetJobDone(cns.SelfPubKey(), bn.SrBlock, true) assert.True(t, cns.IsSelfJobDone(bn.SrBlock)) } @@ -352,7 +353,7 @@ func TestConsensusState_CanDoSubroundJobShouldReturnFalseWhenSelfJobIsDone(t *te cns := internalInitConsensusState() cns.Data = make([]byte, 0) - cns.SetJobDone(cns.SelfPubKey(), bn.SrBlock, true) + _ = cns.SetJobDone(cns.SelfPubKey(), bn.SrBlock, true) assert.False(t, cns.CanDoSubroundJob(bn.SrBlock)) } @@ -363,7 +364,7 @@ func TestConsensusState_CanDoSubroundJobShouldReturnFalseWhenCurrentRoundIsFinis cns := internalInitConsensusState() cns.Data = make([]byte, 0) - cns.SetJobDone(cns.SelfPubKey(), bn.SrBlock, false) + _ = cns.SetJobDone(cns.SelfPubKey(), bn.SrBlock, false) cns.SetStatus(bn.SrBlock, spos.SsFinished) assert.False(t, cns.CanDoSubroundJob(bn.SrBlock)) @@ -375,7 +376,7 @@ func TestConsensusState_CanDoSubroundJobShouldReturnTrue(t *testing.T) { cns := internalInitConsensusState() cns.Data = make([]byte, 0) - cns.SetJobDone(cns.SelfPubKey(), bn.SrBlock, false) + _ = cns.SetJobDone(cns.SelfPubKey(), bn.SrBlock, false) cns.SetStatus(bn.SrBlock, spos.SsNotFinished) assert.True(t, cns.CanDoSubroundJob(bn.SrBlock)) @@ -417,7 +418,7 @@ func TestConsensusState_CanProcessReceivedMessageShouldReturnFalseWhenJobIsDone( PubKey: []byte("1"), } - cns.SetJobDone("1", bn.SrBlock, true) + _ = cns.SetJobDone("1", bn.SrBlock, true) assert.False(t, cns.CanProcessReceivedMessage(cnsDta, 0, bn.SrBlock)) } @@ -459,7 +460,7 @@ func TestConsensusState_GenerateBitmapShouldWork(t *testing.T) { selfIndexInConsensusGroup, _ := cns.SelfConsensusGroupIndex() bitmapExpected[selfIndexInConsensusGroup/8] |= 1 << (uint16(selfIndexInConsensusGroup) % 8) - cns.SetJobDone(cns.SelfPubKey(), bn.SrBlock, true) + _ = cns.SetJobDone(cns.SelfPubKey(), bn.SrBlock, true) bitmap := cns.GenerateBitmap(bn.SrBlock) assert.Equal(t, bitmapExpected, bitmap) diff --git a/data/address/specialAddresses.go b/data/address/specialAddresses.go index 10d4aa6dee5..910ce82b412 100644 --- a/data/address/specialAddresses.go +++ b/data/address/specialAddresses.go @@ -7,11 +7,11 @@ import ( ) type specialAddresses struct { - elrond []byte - leaderAddress []byte - burnAddress []byte - adrConv state.AddressConverter - shardCoordinator sharding.Coordinator + elrond []byte + consensusRewardAddresses []string + burnAddress []byte + adrConv state.AddressConverter + shardCoordinator sharding.Coordinator } // NewSpecialAddressHolder creates a special address holder @@ -59,14 +59,23 @@ func (sp *specialAddresses) BurnAddress() []byte { return sp.burnAddress } -// SetLeaderAddress provides leaders address -func (sp *specialAddresses) SetLeaderAddress(leader []byte) { - sp.leaderAddress = leader +// SetConsensusRewardAddresses sets the consensus rewards addresses for the round +func (sp *specialAddresses) SetConsensusRewardAddresses(consensusRewardAddresses []string) { + sp.consensusRewardAddresses = consensusRewardAddresses } // LeaderAddress provides leader 
address func (sp *specialAddresses) LeaderAddress() []byte { - return sp.leaderAddress + if sp.consensusRewardAddresses == nil { + return nil + } + + return []byte(sp.consensusRewardAddresses[0]) +} + +// ConsensusRewardAddresses provides the consensus reward addresses +func (sp *specialAddresses) ConsensusRewardAddresses() []string { + return sp.consensusRewardAddresses } // ShardIdForAddress calculates shard id for address diff --git a/data/block/block.go b/data/block/block.go index 2e670df62c7..fc07c1f0098 100644 --- a/data/block/block.go +++ b/data/block/block.go @@ -34,8 +34,8 @@ const ( PeerBlock Type = 2 // SmartContractResultBlock identifies a miniblock holding smartcontractresults SmartContractResultBlock Type = 3 - // TxFeeBlock identifies a miniblock holding accumulated transaction fees - TxFeeBlock Type = 4 + // RewardsBlockType identifies a miniblock holding accumulated rewards, both system generated and from tx fees + RewardsBlockType Type = 4 // InvalidBlock identifies identifies an invalid miniblock InvalidBlock Type = 5 ) @@ -51,6 +51,8 @@ func (bType Type) String() string { return "PeerBody" case SmartContractResultBlock: return "SmartContractResultBody" + case RewardsBlockType: + return "RewardsBody" case InvalidBlock: return "InvalidBlock" default: diff --git a/data/mock/unsignedTxHandlerMock.go b/data/mock/unsignedTxHandlerMock.go index 9ac5fd55a10..128aad96e97 100644 --- a/data/mock/unsignedTxHandlerMock.go +++ b/data/mock/unsignedTxHandlerMock.go @@ -2,17 +2,18 @@ package mock import ( "github.com/ElrondNetwork/elrond-go/data" + "math/big" ) type UnsignedTxHandlerMock struct { - CleanProcessedUtxsCalled func() - AddProcessedUTxCalled func(tx data.TransactionHandler) - CreateAllUTxsCalled func() []data.TransactionHandler - VerifyCreatedUTxsCalled func() error - AddTxFeeFromBlockCalled func(tx data.TransactionHandler) + CleanProcessedUtxsCalled func() + ProcessTransactionFeeCalled func(cost *big.Int) + CreateAllUTxsCalled func() []data.TransactionHandler + VerifyCreatedUTxsCalled func() error + AddTxFeeFromBlockCalled func(tx data.TransactionHandler) } -func (ut *UnsignedTxHandlerMock) AddTxFeeFromBlock(tx data.TransactionHandler) { +func (ut *UnsignedTxHandlerMock) AddRewardTxFromBlock(tx data.TransactionHandler) { if ut.AddTxFeeFromBlockCalled == nil { return } @@ -28,12 +29,12 @@ func (ut *UnsignedTxHandlerMock) CleanProcessedUTxs() { ut.CleanProcessedUtxsCalled() } -func (ut *UnsignedTxHandlerMock) AddProcessedUTx(tx data.TransactionHandler) { - if ut.AddProcessedUTxCalled == nil { +func (ut *UnsignedTxHandlerMock) ProcessTransactionFee(cost *big.Int) { + if ut.ProcessTransactionFeeCalled == nil { return } - ut.AddProcessedUTxCalled(tx) + ut.ProcessTransactionFeeCalled(cost) } func (ut *UnsignedTxHandlerMock) CreateAllUTxs() []data.TransactionHandler { diff --git a/dataRetriever/dataPool/shardDataPool.go b/dataRetriever/dataPool/shardDataPool.go index 1625105a710..fdd2e012665 100644 --- a/dataRetriever/dataPool/shardDataPool.go +++ b/dataRetriever/dataPool/shardDataPool.go @@ -8,6 +8,7 @@ import ( type shardedDataPool struct { transactions dataRetriever.ShardedDataCacherNotifier unsignedTransactions dataRetriever.ShardedDataCacherNotifier + rewardTransactions dataRetriever.ShardedDataCacherNotifier headers storage.Cacher metaBlocks storage.Cacher headersNonces dataRetriever.Uint64SyncMapCacher @@ -19,6 +20,7 @@ type shardedDataPool struct { func NewShardedDataPool( transactions dataRetriever.ShardedDataCacherNotifier, unsignedTransactions 
dataRetriever.ShardedDataCacherNotifier, + rewardTransactions dataRetriever.ShardedDataCacherNotifier, headers storage.Cacher, headersNonces dataRetriever.Uint64SyncMapCacher, miniBlocks storage.Cacher, @@ -32,6 +34,9 @@ func NewShardedDataPool( if unsignedTransactions == nil { return nil, dataRetriever.ErrNilUnsignedTransactionPool } + if rewardTransactions == nil { + return nil, dataRetriever.ErrNilRewardTransactionPool + } if headers == nil { return nil, dataRetriever.ErrNilHeadersDataPool } @@ -51,6 +56,7 @@ func NewShardedDataPool( return &shardedDataPool{ transactions: transactions, unsignedTransactions: unsignedTransactions, + rewardTransactions: rewardTransactions, headers: headers, headersNonces: headersNonces, miniBlocks: miniBlocks, @@ -69,6 +75,11 @@ func (tdp *shardedDataPool) UnsignedTransactions() dataRetriever.ShardedDataCach return tdp.unsignedTransactions } +// RewardTransactions returns the holder for reward transactions (cross shard result entities) +func (tdp *shardedDataPool) RewardTransactions() dataRetriever.ShardedDataCacherNotifier { + return tdp.rewardTransactions +} + // Headers returns the holder for headers func (tdp *shardedDataPool) Headers() storage.Cacher { return tdp.headers diff --git a/dataRetriever/dataPool/shardDataPool_test.go b/dataRetriever/dataPool/shardDataPool_test.go index 4b47d68cfa1..14882486d01 100644 --- a/dataRetriever/dataPool/shardDataPool_test.go +++ b/dataRetriever/dataPool/shardDataPool_test.go @@ -15,6 +15,7 @@ func TestNewShardedDataPool_NilTransactionsShouldErr(t *testing.T) { tdp, err := dataPool.NewShardedDataPool( nil, &mock.ShardedDataStub{}, + &mock.ShardedDataStub{}, &mock.CacherStub{}, &mock.Uint64SyncMapCacherStub{}, &mock.CacherStub{}, @@ -30,6 +31,7 @@ func TestNewShardedDataPool_NilUnsignedTransactionsShouldErr(t *testing.T) { tdp, err := dataPool.NewShardedDataPool( &mock.ShardedDataStub{}, nil, + &mock.ShardedDataStub{}, &mock.CacherStub{}, &mock.Uint64SyncMapCacherStub{}, &mock.CacherStub{}, @@ -41,8 +43,25 @@ func TestNewShardedDataPool_NilUnsignedTransactionsShouldErr(t *testing.T) { assert.Nil(t, tdp) } +func TestNewShardedDataPool_NilRewardTransactionsShouldErr(t *testing.T) { + tdp, err := dataPool.NewShardedDataPool( + &mock.ShardedDataStub{}, + &mock.ShardedDataStub{}, + nil, + &mock.CacherStub{}, + &mock.Uint64SyncMapCacherStub{}, + &mock.CacherStub{}, + &mock.CacherStub{}, + &mock.CacherStub{}, + ) + + assert.Equal(t, dataRetriever.ErrNilRewardTransactionPool, err) + assert.Nil(t, tdp) +} + func TestNewShardedDataPool_NilHeadersShouldErr(t *testing.T) { tdp, err := dataPool.NewShardedDataPool( + &mock.ShardedDataStub{}, &mock.ShardedDataStub{}, &mock.ShardedDataStub{}, nil, @@ -58,6 +77,7 @@ func TestNewShardedDataPool_NilHeadersShouldErr(t *testing.T) { func TestNewShardedDataPool_NilHeaderNoncesShouldErr(t *testing.T) { tdp, err := dataPool.NewShardedDataPool( + &mock.ShardedDataStub{}, &mock.ShardedDataStub{}, &mock.ShardedDataStub{}, &mock.CacherStub{}, @@ -73,6 +93,7 @@ func TestNewShardedDataPool_NilHeaderNoncesShouldErr(t *testing.T) { func TestNewShardedDataPool_NilTxBlocksShouldErr(t *testing.T) { tdp, err := dataPool.NewShardedDataPool( + &mock.ShardedDataStub{}, &mock.ShardedDataStub{}, &mock.ShardedDataStub{}, &mock.CacherStub{}, @@ -88,6 +109,7 @@ func TestNewShardedDataPool_NilTxBlocksShouldErr(t *testing.T) { func TestNewShardedDataPool_NilPeerBlocksShouldErr(t *testing.T) { tdp, err := dataPool.NewShardedDataPool( + &mock.ShardedDataStub{}, &mock.ShardedDataStub{}, &mock.ShardedDataStub{}, 
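Stepping back to the specialAddresses rework shown earlier: the leader address is no longer injected directly, it is derived from the per-round consensus reward addresses, with the leader being the first entry (and nil while no addresses are set). A minimal sketch of that contract, assuming process.SpecialAddressHandler exposes the setter and getters mirrored by the mocks in this patch (roundLeaderAddress is a hypothetical helper used only for illustration):

    // roundLeaderAddress is an illustrative helper, not part of the patch:
    // reward addresses for the whole consensus group are set once per round,
    // after which the leader address is simply the first entry of the list.
    func roundLeaderAddress(handler process.SpecialAddressHandler, rewardAddresses []string) []byte {
        handler.SetConsensusRewardAddresses(rewardAddresses)

        // handler.ConsensusRewardAddresses() now returns the full slice;
        // LeaderAddress() returns []byte(rewardAddresses[0]), or nil when
        // no addresses have been set for the round yet.
        return handler.LeaderAddress()
    }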
&mock.CacherStub{}, @@ -103,6 +125,7 @@ func TestNewShardedDataPool_NilPeerBlocksShouldErr(t *testing.T) { func TestNewShardedDataPool_NilMetaBlocksShouldErr(t *testing.T) { tdp, err := dataPool.NewShardedDataPool( + &mock.ShardedDataStub{}, &mock.ShardedDataStub{}, &mock.ShardedDataStub{}, &mock.CacherStub{}, @@ -119,6 +142,7 @@ func TestNewShardedDataPool_NilMetaBlocksShouldErr(t *testing.T) { func TestNewShardedDataPool_OkValsShouldWork(t *testing.T) { transactions := &mock.ShardedDataStub{} scResults := &mock.ShardedDataStub{} + rewardTransactions := &mock.ShardedDataStub{} headers := &mock.CacherStub{} headerNonces := &mock.Uint64SyncMapCacherStub{} txBlocks := &mock.CacherStub{} @@ -127,6 +151,7 @@ func TestNewShardedDataPool_OkValsShouldWork(t *testing.T) { tdp, err := dataPool.NewShardedDataPool( transactions, scResults, + rewardTransactions, headers, headerNonces, txBlocks, @@ -137,6 +162,8 @@ func TestNewShardedDataPool_OkValsShouldWork(t *testing.T) { assert.Nil(t, err) //pointer checking assert.True(t, transactions == tdp.Transactions()) + assert.True(t, scResults == tdp.UnsignedTransactions()) + assert.True(t, rewardTransactions == tdp.RewardTransactions()) assert.True(t, headers == tdp.Headers()) assert.True(t, headerNonces == tdp.HeadersNonces()) assert.True(t, txBlocks == tdp.MiniBlocks()) diff --git a/dataRetriever/errors.go b/dataRetriever/errors.go index 5f550836f60..368483ae6b2 100644 --- a/dataRetriever/errors.go +++ b/dataRetriever/errors.go @@ -64,6 +64,9 @@ var ErrNilTxDataPool = errors.New("nil transaction data pool") // ErrNilUnsignedTransactionPool signals that a nil unsigned transactions pool has been provided var ErrNilUnsignedTransactionPool = errors.New("nil unsigned transactions data pool") +// ErrNilRewardTransactionPool signals that a nil reward transactions pool has been provided +var ErrNilRewardTransactionPool = errors.New("nil fee transaction data pool") + // ErrNilHeadersDataPool signals that a nil header pool has been provided var ErrNilHeadersDataPool = errors.New("nil headers data pool") diff --git a/dataRetriever/interface.go b/dataRetriever/interface.go index dd6284f9875..f6d8bac316d 100644 --- a/dataRetriever/interface.go +++ b/dataRetriever/interface.go @@ -28,8 +28,10 @@ const ( MetaPeerDataUnit UnitType = 6 // UnsignedTransactionUnit is the unsigned transaction unit identifier UnsignedTransactionUnit UnitType = 7 + // RewardTransactionUnit is the reward transaction unit identifier + RewardTransactionUnit UnitType = 8 // MetaHdrNonceHashDataUnit is the meta header nonce-hash pair data unit identifier - MetaHdrNonceHashDataUnit UnitType = 8 + MetaHdrNonceHashDataUnit UnitType = 9 // ShardHdrNonceHashDataUnit is the header nonce-hash pair data unit identifier //TODO: Add only unit types lower than 100 @@ -184,6 +186,7 @@ type Uint64SyncMapCacher interface { type PoolsHolder interface { Transactions() ShardedDataCacherNotifier UnsignedTransactions() ShardedDataCacherNotifier + RewardTransactions() ShardedDataCacherNotifier Headers() storage.Cacher HeadersNonces() Uint64SyncMapCacher MiniBlocks() storage.Cacher diff --git a/dataRetriever/mock/poolsHolderStub.go b/dataRetriever/mock/poolsHolderStub.go index 76a9302e710..44bc204aaa5 100644 --- a/dataRetriever/mock/poolsHolderStub.go +++ b/dataRetriever/mock/poolsHolderStub.go @@ -11,6 +11,7 @@ type PoolsHolderStub struct { PeerChangesBlocksCalled func() storage.Cacher TransactionsCalled func() dataRetriever.ShardedDataCacherNotifier UnsignedTransactionsCalled func() 
dataRetriever.ShardedDataCacherNotifier + RewardTransactionsCalled func() dataRetriever.ShardedDataCacherNotifier MiniBlocksCalled func() storage.Cacher MetaBlocksCalled func() storage.Cacher } @@ -42,3 +43,7 @@ func (phs *PoolsHolderStub) MetaBlocks() storage.Cacher { func (phs *PoolsHolderStub) UnsignedTransactions() dataRetriever.ShardedDataCacherNotifier { return phs.UnsignedTransactionsCalled() } + +func (phs *PoolsHolderStub) RewardTransactions() dataRetriever.ShardedDataCacherNotifier { + return phs.RewardTransactionsCalled() +} diff --git a/integrationTests/consensus/testInitializer.go b/integrationTests/consensus/testInitializer.go index a90ae8c5aca..2b96427034b 100644 --- a/integrationTests/consensus/testInitializer.go +++ b/integrationTests/consensus/testInitializer.go @@ -192,6 +192,7 @@ func createTestStore() dataRetriever.StorageService { func createTestShardDataPool() dataRetriever.PoolsHolder { txPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache}) uTxPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache}) + rewardsTxPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache}) cacherCfg := storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache} hdrPool, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) @@ -211,6 +212,7 @@ func createTestShardDataPool() dataRetriever.PoolsHolder { dPool, _ := dataPool.NewShardedDataPool( txPool, uTxPool, + rewardsTxPool, hdrPool, hdrNonces, txBlockBody, diff --git a/integrationTests/mock/blockProcessorMock.go b/integrationTests/mock/blockProcessorMock.go index a0a2e7b60a6..f95cdbf04af 100644 --- a/integrationTests/mock/blockProcessorMock.go +++ b/integrationTests/mock/blockProcessorMock.go @@ -10,18 +10,19 @@ import ( // BlockProcessorMock mocks the implementation for a blockProcessor type BlockProcessorMock struct { - NrCommitBlockCalled uint32 - Marshalizer marshal.Marshalizer - ProcessBlockCalled func(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler, haveTime func() time.Duration) error - CommitBlockCalled func(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler) error - RevertAccountStateCalled func() - CreateBlockCalled func(round uint64, haveTime func() bool) (data.BodyHandler, error) - RestoreBlockIntoPoolsCalled func(header data.HeaderHandler, body data.BodyHandler) error - CreateBlockHeaderCalled func(body data.BodyHandler, round uint64, haveTime func() bool) (data.HeaderHandler, error) - MarshalizedDataToBroadcastCalled func(header data.HeaderHandler, body data.BodyHandler) (map[uint32][]byte, map[string][][]byte, error) - DecodeBlockBodyCalled func(dta []byte) data.BodyHandler - DecodeBlockHeaderCalled func(dta []byte) data.HeaderHandler - AddLastNotarizedHdrCalled func(shardId uint32, processedHdr data.HeaderHandler) + NrCommitBlockCalled uint32 + Marshalizer marshal.Marshalizer + ProcessBlockCalled func(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler, haveTime func() time.Duration) error + CommitBlockCalled func(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler) error + RevertAccountStateCalled func() + CreateBlockCalled func(round uint64, haveTime func() bool) (data.BodyHandler, error) + RestoreBlockIntoPoolsCalled func(header data.HeaderHandler, body data.BodyHandler) error + CreateBlockHeaderCalled func(body data.BodyHandler, round uint64, 
haveTime func() bool) (data.HeaderHandler, error) + MarshalizedDataToBroadcastCalled func(header data.HeaderHandler, body data.BodyHandler) (map[uint32][]byte, map[string][][]byte, error) + DecodeBlockBodyCalled func(dta []byte) data.BodyHandler + DecodeBlockHeaderCalled func(dta []byte) data.HeaderHandler + AddLastNotarizedHdrCalled func(shardId uint32, processedHdr data.HeaderHandler) + SetConsensusRewardAddressesCalled func([]string) } // ProcessBlock mocks pocessing a block @@ -91,3 +92,6 @@ func (blProcMock BlockProcessorMock) DecodeBlockHeader(dta []byte) data.HeaderHa func (blProcMock BlockProcessorMock) AddLastNotarizedHdr(shardId uint32, processedHdr data.HeaderHandler) { blProcMock.AddLastNotarizedHdrCalled(shardId, processedHdr) } + +func(blProcMock BlockProcessorMock) SetConsensusRewardAddresses ([]string){ +} diff --git a/integrationTests/mock/nodesCoordinatorMock.go b/integrationTests/mock/nodesCoordinatorMock.go index bb3c681e0cc..38a046473e0 100644 --- a/integrationTests/mock/nodesCoordinatorMock.go +++ b/integrationTests/mock/nodesCoordinatorMock.go @@ -7,9 +7,10 @@ import ( type NodesCoordinatorMock struct { ComputeValidatorsGroupCalled func(randomness []byte, round uint64, shardId uint32) ([]sharding.Validator, error) GetValidatorsPublicKeysCalled func(randomness []byte, round uint64, shardId uint32) ([]string, error) + GetValidatorsRewardsAddressesCalled func(randomness []byte, round uint64, shardId uint32) ([]string, error) } -func (ncm NodesCoordinatorMock) ComputeValidatorsGroup( +func (ncm *NodesCoordinatorMock) ComputeValidatorsGroup( randomness []byte, round uint64, shardId uint32, @@ -24,7 +25,7 @@ func (ncm NodesCoordinatorMock) ComputeValidatorsGroup( return list, nil } -func (ncm NodesCoordinatorMock) GetValidatorsPublicKeys( +func (ncm *NodesCoordinatorMock) GetValidatorsPublicKeys( randomness []byte, round uint64, shardId uint32, @@ -47,14 +48,37 @@ func (ncm NodesCoordinatorMock) GetValidatorsPublicKeys( return pubKeys, nil } -func (ncm NodesCoordinatorMock) SetNodesPerShards(map[uint32][]sharding.Validator) error { +func (ncm *NodesCoordinatorMock) GetValidatorsRewardsAddresses( + randomness []byte, + round uint64, + shardId uint32, +) ([]string, error) { + if ncm.GetValidatorsPublicKeysCalled != nil { + return ncm.GetValidatorsRewardsAddressesCalled(randomness, round, shardId) + } + + validators, err := ncm.ComputeValidatorsGroup(randomness, round, shardId) + if err != nil { + return nil, err + } + + addresses := make([]string, 0) + + for _, v := range validators { + addresses = append(addresses, string(v.Address())) + } + + return addresses, nil +} + +func (ncm *NodesCoordinatorMock) SetNodesPerShards(map[uint32][]sharding.Validator) error { return nil } -func (ncm NodesCoordinatorMock) GetSelectedPublicKeys(selection []byte, shardId uint32) (publicKeys []string, err error) { +func (ncm *NodesCoordinatorMock) GetSelectedPublicKeys(selection []byte, shardId uint32) (publicKeys []string, err error) { panic("implement me") } -func (ncm NodesCoordinatorMock) GetValidatorWithPublicKey(publicKey []byte) (sharding.Validator, uint32, error) { +func (ncm *NodesCoordinatorMock) GetValidatorWithPublicKey(publicKey []byte) (sharding.Validator, uint32, error) { panic("implement me") } diff --git a/integrationTests/mock/specialAddressHandlerMock.go b/integrationTests/mock/specialAddressHandlerMock.go index 2c6f4207c50..407b3a6c7e0 100644 --- a/integrationTests/mock/specialAddressHandlerMock.go +++ b/integrationTests/mock/specialAddressHandlerMock.go @@ -5,12 +5,19 @@ 
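The nodes coordinator mocks above all follow the same fallback: honor an injected callback when one is provided, otherwise derive the reward addresses from the computed validator group. Condensed into a standalone helper (rewardsAddressesFromGroup is a hypothetical name used only for illustration), the derivation reads:

    // rewardsAddressesFromGroup computes the validator group for the given
    // randomness, round and shard, then collects each validator's reward
    // address in consensus order (index 0 corresponds to the leader).
    func rewardsAddressesFromGroup(
        coordinator sharding.NodesCoordinator,
        randomness []byte,
        round uint64,
        shardId uint32,
    ) ([]string, error) {
        validators, err := coordinator.ComputeValidatorsGroup(randomness, round, shardId)
        if err != nil {
            return nil, err
        }

        addresses := make([]string, 0, len(validators))
        for _, v := range validators {
            addresses = append(addresses, string(v.Address()))
        }

        return addresses, nil
    }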
type SpecialAddressHandlerMock struct { LeaderAddressCalled func() []byte BurnAddressCalled func() []byte ShardIdForAddressCalled func([]byte) (uint32, error) + + addresses []string } func (sh *SpecialAddressHandlerMock) SetElrondCommunityAddress(elrond []byte) { } -func (sh *SpecialAddressHandlerMock) SetLeaderAddress(leader []byte) { +func (sh *SpecialAddressHandlerMock) SetConsensusRewardAddresses(consensusRewardAddresses []string) { + sh.addresses = consensusRewardAddresses +} + +func (sh *SpecialAddressHandlerMock) ConsensusRewardAddresses() []string { + return sh.addresses } func (sh *SpecialAddressHandlerMock) BurnAddress() []byte { diff --git a/integrationTests/mock/unsignedTxHandlerMock.go b/integrationTests/mock/unsignedTxHandlerMock.go index 9ac5fd55a10..7097c4a31e8 100644 --- a/integrationTests/mock/unsignedTxHandlerMock.go +++ b/integrationTests/mock/unsignedTxHandlerMock.go @@ -1,18 +1,20 @@ package mock import ( + "math/big" + "github.com/ElrondNetwork/elrond-go/data" ) type UnsignedTxHandlerMock struct { - CleanProcessedUtxsCalled func() - AddProcessedUTxCalled func(tx data.TransactionHandler) - CreateAllUTxsCalled func() []data.TransactionHandler - VerifyCreatedUTxsCalled func() error - AddTxFeeFromBlockCalled func(tx data.TransactionHandler) + CleanProcessedUtxsCalled func() + ProcessTransactionFeeCalled func(cost *big.Int) + CreateAllUTxsCalled func() []data.TransactionHandler + VerifyCreatedUTxsCalled func() error + AddTxFeeFromBlockCalled func(tx data.TransactionHandler) } -func (ut *UnsignedTxHandlerMock) AddTxFeeFromBlock(tx data.TransactionHandler) { +func (ut *UnsignedTxHandlerMock) AddRewardTxFromBlock(tx data.TransactionHandler) { if ut.AddTxFeeFromBlockCalled == nil { return } @@ -28,12 +30,12 @@ func (ut *UnsignedTxHandlerMock) CleanProcessedUTxs() { ut.CleanProcessedUtxsCalled() } -func (ut *UnsignedTxHandlerMock) AddProcessedUTx(tx data.TransactionHandler) { - if ut.AddProcessedUTxCalled == nil { +func (ut *UnsignedTxHandlerMock) ProcessTransactionFee(cost *big.Int) { + if ut.ProcessTransactionFeeCalled == nil { return } - ut.AddProcessedUTxCalled(tx) + ut.ProcessTransactionFeeCalled(cost) } func (ut *UnsignedTxHandlerMock) CreateAllUTxs() []data.TransactionHandler { diff --git a/integrationTests/multiShard/smartContract/testInitilalizer.go b/integrationTests/multiShard/smartContract/testInitilalizer.go index b1bff13845d..c95f717ac10 100644 --- a/integrationTests/multiShard/smartContract/testInitilalizer.go +++ b/integrationTests/multiShard/smartContract/testInitilalizer.go @@ -219,6 +219,7 @@ func createTestShardStore(numOfShards uint32) dataRetriever.StorageService { func createTestShardDataPool() dataRetriever.PoolsHolder { txPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache}) uTxPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache}) + rewardsTxPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache}) cacherCfg := storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache} hdrPool, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) @@ -238,6 +239,7 @@ func createTestShardDataPool() dataRetriever.PoolsHolder { dPool, _ := dataPool.NewShardedDataPool( txPool, uTxPool, + rewardsTxPool, hdrPool, hdrNonces, txBlockBody, @@ -339,8 +341,8 @@ func createNetNode( ) interimProcContainer, _ := interimProcFactory.Create() scForwarder, _ := 
interimProcContainer.Get(dataBlock.SmartContractResultBlock) - txFeeInter, _ := interimProcContainer.Get(dataBlock.TxFeeBlock) - txFeeHandler, _ := txFeeInter.(process.UnsignedTxHandler) + rewardsInter, _ := interimProcContainer.Get(dataBlock.RewardsBlockType) + rewardsHandler, _ := rewardsInter.(process.UnsignedTxHandler) vm, blockChainHook := createVMAndBlockchainHook(accntAdapter) vmContainer := &mock.VMContainerMock{ @@ -358,7 +360,7 @@ func createNetNode( addrConv, shardCoordinator, scForwarder, - txFeeHandler, + rewardsHandler, ) txTypeHandler, _ := coordinator.NewTxTypeHandler(addrConv, shardCoordinator, accntAdapter) @@ -370,7 +372,7 @@ func createNetNode( testMarshalizer, shardCoordinator, scProcessor, - txFeeHandler, + rewardsHandler, txTypeHandler, ) @@ -407,6 +409,8 @@ func createNetNode( testMarshalizer, accntAdapter, shardCoordinator, + nodesCoordinator, + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{ AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeader data.HeaderHandler, finalHeaderHash []byte) error { return nil @@ -745,6 +749,8 @@ func createMetaNetNode( }, }, shardCoordinator, + nodesCoordinator, + &mock.SpecialAddressHandlerMock{}, testHasher, testMarshalizer, store, diff --git a/integrationTests/testInitializer.go b/integrationTests/testInitializer.go index 24ffe895742..ae435347aed 100644 --- a/integrationTests/testInitializer.go +++ b/integrationTests/testInitializer.go @@ -94,6 +94,7 @@ func CreateTestShardDataPool(txPool dataRetriever.ShardedDataCacherNotifier) dat } uTxPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache}) + rewardsTxPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 300, Type: storageUnit.LRUCache}) cacherCfg := storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache} hdrPool, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) @@ -113,6 +114,7 @@ func CreateTestShardDataPool(txPool dataRetriever.ShardedDataCacherNotifier) dat dPool, _ := dataPool.NewShardedDataPool( txPool, uTxPool, + rewardsTxPool, hdrPool, hdrNonces, txBlockBody, diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index 51620291b99..29ee1d23605 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -74,9 +74,10 @@ type CryptoParams struct { // TestProcessorNode represents a container type of class used in integration tests // with all its fields exported type TestProcessorNode struct { - ShardCoordinator sharding.Coordinator - NodesCoordinator sharding.NodesCoordinator - Messenger p2p.Messenger + ShardCoordinator sharding.Coordinator + NodesCoordinator sharding.NodesCoordinator + SpecialAddressHandler process.SpecialAddressHandler + Messenger p2p.Messenger OwnAccount *TestWalletAccount @@ -434,6 +435,8 @@ func (tpn *TestProcessorNode) initBlockProcessor() { tpn.MetaDataPool, tpn.ForkDetector, tpn.ShardCoordinator, + tpn.NodesCoordinator, + &mock.SpecialAddressHandlerMock{}, TestHasher, TestMarshalizer, tpn.Storage, @@ -450,6 +453,8 @@ func (tpn *TestProcessorNode) initBlockProcessor() { TestMarshalizer, tpn.AccntState, tpn.ShardCoordinator, + tpn.NodesCoordinator, + &mock.SpecialAddressHandlerMock{}, tpn.ForkDetector, tpn.BlockTracker, tpn.GenesisBlocks, diff --git a/node/mock/blockProcessorStub.go b/node/mock/blockProcessorStub.go index d2d2ad9a9fb..e6cf7959e81 100644 --- a/node/mock/blockProcessorStub.go +++ 
b/node/mock/blockProcessorStub.go @@ -71,3 +71,7 @@ func (blProcMock BlockProcessorStub) DecodeBlockHeader(dta []byte) data.HeaderHa func (blProcMock BlockProcessorStub) AddLastNotarizedHdr(shardId uint32, processedHdr data.HeaderHandler) { blProcMock.AddLastNotarizedHdrCalled(shardId, processedHdr) } + +func (blProcMock BlockProcessorStub) SetConsensusRewardAddresses([]string) { + panic("implement me") +} diff --git a/node/mock/nodesCoordinatorMock.go b/node/mock/nodesCoordinatorMock.go index b3d1f307dea..198b38f2c32 100644 --- a/node/mock/nodesCoordinatorMock.go +++ b/node/mock/nodesCoordinatorMock.go @@ -7,11 +7,12 @@ import ( ) type NodesCoordinatorMock struct { - ComputeValidatorsGroupCalled func(randomness []byte, round uint64, shardId uint32) ([]sharding.Validator, error) - GetValidatorsPublicKeysCalled func(randomness []byte, round uint64, shardId uint32) ([]string, error) + ComputeValidatorsGroupCalled func(randomness []byte, round uint64, shardId uint32) ([]sharding.Validator, error) + GetValidatorsPublicKeysCalled func(randomness []byte, round uint64, shardId uint32) ([]string, error) + GetValidatorsRewardsAddressesCalled func(randomness []byte, round uint64, shardId uint32) ([]string, error) } -func (ncm NodesCoordinatorMock) ComputeValidatorsGroup( +func (ncm *NodesCoordinatorMock) ComputeValidatorsGroup( randomness []byte, round uint64, shardId uint32, @@ -36,7 +37,7 @@ func (ncm NodesCoordinatorMock) ComputeValidatorsGroup( return list, nil } -func (ncm NodesCoordinatorMock) GetValidatorsPublicKeys( +func (ncm *NodesCoordinatorMock) GetValidatorsPublicKeys( randomness []byte, round uint64, shardId uint32, @@ -59,22 +60,45 @@ func (ncm NodesCoordinatorMock) GetValidatorsPublicKeys( return pubKeys, nil } -func (ncm NodesCoordinatorMock) ConsensusGroupSize(shardId uint32) int { +func (ncm *NodesCoordinatorMock) GetValidatorsRewardsAddresses( + randomness []byte, + round uint64, + shardId uint32, +) ([]string, error) { + if ncm.GetValidatorsPublicKeysCalled != nil { + return ncm.GetValidatorsRewardsAddressesCalled(randomness, round, shardId) + } + + validators, err := ncm.ComputeValidatorsGroup(randomness, round, shardId) + if err != nil { + return nil, err + } + + addresses := make([]string, 0) + + for _, v := range validators { + addresses = append(addresses, string(v.Address())) + } + + return addresses, nil +} + +func (ncm *NodesCoordinatorMock) ConsensusGroupSize(shardId uint32) int { panic("implement me") } -func (ncm NodesCoordinatorMock) SetNodesPerShards(map[uint32][]sharding.Validator) error { +func (ncm *NodesCoordinatorMock) SetNodesPerShards(map[uint32][]sharding.Validator) error { return nil } -func (ncm NodesCoordinatorMock) SetConsensusGroupSize(int) error { +func (ncm *NodesCoordinatorMock) SetConsensusGroupSize(int) error { panic("implement me") } -func (ncm NodesCoordinatorMock) GetSelectedPublicKeys(selection []byte, shardId uint32) (publicKeys []string, err error) { +func (ncm *NodesCoordinatorMock) GetSelectedPublicKeys(selection []byte, shardId uint32) (publicKeys []string, err error) { panic("implement me") } -func (ncm NodesCoordinatorMock) GetValidatorWithPublicKey(publicKey []byte) (sharding.Validator, uint32, error) { +func (ncm *NodesCoordinatorMock) GetValidatorWithPublicKey(publicKey []byte) (sharding.Validator, uint32, error) { panic("implement me") } diff --git a/node/mock/poolsHolderStub.go b/node/mock/poolsHolderStub.go index dc0f6247560..b6a03b08bb5 100644 --- a/node/mock/poolsHolderStub.go +++ b/node/mock/poolsHolderStub.go @@ -11,6 +11,7 @@ 
type PoolsHolderStub struct { PeerChangesBlocksCalled func() storage.Cacher TransactionsCalled func() dataRetriever.ShardedDataCacherNotifier UnsignedTransactionsCalled func() dataRetriever.ShardedDataCacherNotifier + RewardTransactionsCalled func() dataRetriever.ShardedDataCacherNotifier MiniBlocksCalled func() storage.Cacher MetaBlocksCalled func() storage.Cacher MetaHeadersNoncesCalled func() dataRetriever.Uint64SyncMapCacher @@ -47,3 +48,7 @@ func (phs *PoolsHolderStub) MetaHeadersNonces() dataRetriever.Uint64SyncMapCache func (phs *PoolsHolderStub) UnsignedTransactions() dataRetriever.ShardedDataCacherNotifier { return phs.UnsignedTransactionsCalled() } + +func (phs *PoolsHolderStub) RewardTransactions() dataRetriever.ShardedDataCacherNotifier { + return phs.RewardTransactionsCalled() +} diff --git a/process/block/baseProcess.go b/process/block/baseProcess.go index 9df2d5d9477..89e391337ec 100644 --- a/process/block/baseProcess.go +++ b/process/block/baseProcess.go @@ -30,14 +30,16 @@ type hashAndHdr struct { type mapShardHeaders map[uint32][]data.HeaderHandler type baseProcessor struct { - shardCoordinator sharding.Coordinator - accounts state.AccountsAdapter - forkDetector process.ForkDetector - hasher hashing.Hasher - marshalizer marshal.Marshalizer - store dataRetriever.StorageService - uint64Converter typeConverters.Uint64ByteSliceConverter - blockSizeThrottler process.BlockSizeThrottler + shardCoordinator sharding.Coordinator + nodesCoordinator sharding.NodesCoordinator + specialAddressHandler process.SpecialAddressHandler + accounts state.AccountsAdapter + forkDetector process.ForkDetector + hasher hashing.Hasher + marshalizer marshal.Marshalizer + store dataRetriever.StorageService + uint64Converter typeConverters.Uint64ByteSliceConverter + blockSizeThrottler process.BlockSizeThrottler mutNotarizedHdrs sync.RWMutex notarizedHdrs mapShardHeaders @@ -468,6 +470,8 @@ func checkProcessorNilParameters( marshalizer marshal.Marshalizer, store dataRetriever.StorageService, shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, + specialAddressHandler process.SpecialAddressHandler, uint64Converter typeConverters.Uint64ByteSliceConverter, ) error { @@ -489,6 +493,12 @@ func checkProcessorNilParameters( if shardCoordinator == nil { return process.ErrNilShardCoordinator } + if nodesCoordinator == nil { + return process.ErrNilNodesCoordinator + } + if specialAddressHandler == nil { + return process.ErrNilSpecialAddressHandler + } if uint64Converter == nil { return process.ErrNilUint64Converter } diff --git a/process/block/baseProcess_test.go b/process/block/baseProcess_test.go index f6b8a40e11f..95fd18b8ff4 100644 --- a/process/block/baseProcess_test.go +++ b/process/block/baseProcess_test.go @@ -317,6 +317,8 @@ func TestBlockProcessor_CheckBlockValidity(t *testing.T) { &mock.MarshalizerMock{}, &mock.AccountsStub{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, createGenesisBlocks(mock.NewOneShardCoordinatorMock()), @@ -399,6 +401,8 @@ func TestVerifyStateRoot_ShouldWork(t *testing.T) { &mock.MarshalizerMock{}, accounts, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, createGenesisBlocks(mock.NewOneShardCoordinatorMock()), @@ -423,6 +427,8 @@ func TestBlockProcessor_computeHeaderHashMarshalizerFail1ShouldErr(t *testing.T) marshalizer, 
&mock.AccountsStub{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, createGenesisBlocks(mock.NewOneShardCoordinatorMock()), @@ -459,6 +465,8 @@ func TestBlockPorcessor_ComputeNewNoncePrevHashShouldWork(t *testing.T) { marshalizer, &mock.AccountsStub{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, createGenesisBlocks(mock.NewOneShardCoordinatorMock()), diff --git a/process/block/export_test.go b/process/block/export_test.go index f64166196be..61aaed8c4c0 100644 --- a/process/block/export_test.go +++ b/process/block/export_test.go @@ -60,6 +60,8 @@ func NewShardProcessorEmptyWith3shards(tdp dataRetriever.PoolsHolder, genesisBlo &mock.MarshalizerMock{}, &mock.AccountsStub{}, mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, genesisBlocks, @@ -77,6 +79,8 @@ func NewMetaProcessorBasicSingleShard(mdp dataRetriever.MetaPoolsHolder, genesis mdp, &mock.ForkDetectorMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.HasherStub{}, &mock.MarshalizerMock{}, &mock.ChainStorerMock{}, diff --git a/process/block/metablock.go b/process/block/metablock.go index 72762068881..017fca171c6 100644 --- a/process/block/metablock.go +++ b/process/block/metablock.go @@ -51,6 +51,8 @@ func NewMetaProcessor( dataPool dataRetriever.MetaPoolsHolder, forkDetector process.ForkDetector, shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, + specialAddressHandler process.SpecialAddressHandler, hasher hashing.Hasher, marshalizer marshal.Marshalizer, store dataRetriever.StorageService, @@ -66,6 +68,8 @@ func NewMetaProcessor( marshalizer, store, shardCoordinator, + nodesCoordinator, + specialAddressHandler, uint64Converter) if err != nil { return nil, err @@ -94,6 +98,8 @@ func NewMetaProcessor( marshalizer: marshalizer, store: store, shardCoordinator: shardCoordinator, + nodesCoordinator: nodesCoordinator, + specialAddressHandler: specialAddressHandler, uint64Converter: uint64Converter, onRequestHeaderHandler: requestHandler.RequestHeader, onRequestHeaderHandlerByNonce: requestHandler.RequestHeaderByNonce, @@ -205,6 +211,11 @@ func (mp *metaProcessor) ProcessBlock( return nil } +// SetConsensusRewardAddresses - sets the reward addresses for the current consensus group +func (mp *metaProcessor) SetConsensusRewardAddresses(consensusRewardAddresses []string) { + // TODO set the reward addresses for metachain consensus nodes +} + func (mp *metaProcessor) checkAndRequestIfShardHeadersMissing(round uint64) { _, _, sortedHdrPerShard, err := mp.getOrderedHdrs(round) if err != nil { diff --git a/process/block/metablock_test.go b/process/block/metablock_test.go index e2934af7210..5fbfff182a1 100644 --- a/process/block/metablock_test.go +++ b/process/block/metablock_test.go @@ -131,6 +131,8 @@ func TestNewMetaProcessor_NilAccountsAdapterShouldErr(t *testing.T) { mdp, &mock.ForkDetectorMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.HasherStub{}, &mock.MarshalizerMock{}, &mock.ChainStorerMock{}, @@ -151,6 +153,8 @@ func TestNewMetaProcessor_NilDataPoolShouldErr(t *testing.T) { nil, &mock.ForkDetectorMock{}, 
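The SetConsensusRewardAddresses hook that the processors gain here is fed from the consensus start-of-round path shown earlier: GetNextConsensusGroup now yields the reward addresses alongside the public keys, and the subround pushes them into the block processor. Condensed (randomSeed, nodesCoordinator and blockProcessor are placeholder variables for this sketch, not the exact accessors used in the subround code):

    // One pass over the computed validator group produces both outputs:
    // the consensus public keys and the matching reward addresses.
    pubKeys, rewardAddresses, err := cns.GetNextConsensusGroup(randomSeed, round, shardId, nodesCoordinator)
    if err != nil {
        return err
    }

    cns.SetConsensusGroup(pubKeys)
    blockProcessor.SetConsensusRewardAddresses(rewardAddresses)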
mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.HasherStub{}, &mock.MarshalizerMock{}, &mock.ChainStorerMock{}, @@ -172,6 +176,8 @@ func TestNewMetaProcessor_NilForkDetectorShouldErr(t *testing.T) { mdp, nil, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.HasherStub{}, &mock.MarshalizerMock{}, &mock.ChainStorerMock{}, @@ -193,6 +199,8 @@ func TestNewMetaProcessor_NilShardCoordinatorShouldErr(t *testing.T) { mdp, &mock.ForkDetectorMock{}, nil, + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.HasherStub{}, &mock.MarshalizerMock{}, &mock.ChainStorerMock{}, @@ -214,6 +222,8 @@ func TestNewMetaProcessor_NilHasherShouldErr(t *testing.T) { mdp, &mock.ForkDetectorMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, nil, &mock.MarshalizerMock{}, &mock.ChainStorerMock{}, @@ -235,6 +245,8 @@ func TestNewMetaProcessor_NilMarshalizerShouldErr(t *testing.T) { mdp, &mock.ForkDetectorMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.HasherStub{}, nil, &mock.ChainStorerMock{}, @@ -256,6 +268,8 @@ func TestNewMetaProcessor_NilChainStorerShouldErr(t *testing.T) { mdp, &mock.ForkDetectorMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.HasherStub{}, &mock.MarshalizerMock{}, nil, @@ -277,6 +291,8 @@ func TestNewMetaProcessor_NilRequestHeaderHandlerShouldErr(t *testing.T) { mdp, &mock.ForkDetectorMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.HasherStub{}, &mock.MarshalizerMock{}, &mock.ChainStorerMock{}, @@ -372,6 +388,8 @@ func TestMetaProcessor_ProcessWithDirtyAccountShouldErr(t *testing.T) { mdp, &mock.ForkDetectorMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.HasherStub{}, &mock.MarshalizerMock{}, &mock.ChainStorerMock{}, @@ -476,6 +494,8 @@ func TestMetaProcessor_ProcessBlockWithErrOnVerifyStateRootCallShouldRevertState mdp, &mock.ForkDetectorMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.HasherStub{}, &mock.MarshalizerMock{}, &mock.ChainStorerMock{}, @@ -513,6 +533,8 @@ func TestMetaProcessor_ProcessBlockHeaderShouldPass(t *testing.T) { mdp, &mock.ForkDetectorMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.HasherStub{}, &mock.MarshalizerMock{}, &mock.ChainStorerMock{}, @@ -567,6 +589,8 @@ func TestMetaProcessor_RequestFinalMissingHeaderShouldPass(t *testing.T) { mdp, &mock.ForkDetectorMock{}, mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.HasherStub{}, &mock.MarshalizerMock{}, &mock.ChainStorerMock{}, @@ -608,6 +632,8 @@ func TestMetaProcessor_CommitBlockNilBlockchainShouldErr(t *testing.T) { mdp, &mock.ForkDetectorMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.HasherStub{}, &mock.MarshalizerMock{}, &mock.ChainStorerMock{}, @@ -647,6 +673,8 @@ func TestMetaProcessor_CommitBlockMarshalizerFailForHeaderShouldErr(t *testing.T mdp, &mock.ForkDetectorMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + 
&mock.SpecialAddressHandlerMock{}, &mock.HasherStub{}, marshalizer, &mock.ChainStorerMock{}, @@ -691,6 +719,8 @@ func TestMetaProcessor_CommitBlockStorageFailsForHeaderShouldErr(t *testing.T) { }, }, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.HasherStub{}, &mock.MarshalizerMock{}, store, @@ -726,6 +756,8 @@ func TestMetaProcessor_CommitBlockNilNoncesDataPoolShouldErr(t *testing.T) { mdp, &mock.ForkDetectorMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.HasherStub{}, &mock.MarshalizerMock{}, store, @@ -764,6 +796,8 @@ func TestMetaProcessor_CommitBlockNoTxInPoolShouldErr(t *testing.T) { mdp, fd, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, hasher, &mock.MarshalizerMock{}, store, @@ -826,6 +860,8 @@ func TestMetaProcessor_CommitBlockOkValsShouldWork(t *testing.T) { mdp, fd, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, hasher, &mock.MarshalizerMock{}, store, @@ -873,6 +909,8 @@ func TestBlockProc_RequestTransactionFromNetwork(t *testing.T) { mdp, &mock.ForkDetectorMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.HasherStub{}, &mock.MarshalizerMock{}, &mock.ChainStorerMock{}, @@ -910,6 +948,8 @@ func TestMetaProcessor_RemoveBlockInfoFromPoolShouldErrNilMetaBlockHeader(t *tes mdp, &mock.ForkDetectorMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.HasherStub{}, &mock.MarshalizerMock{}, initStore(), @@ -932,6 +972,8 @@ func TestMetaProcessor_RemoveBlockInfoFromPoolShouldWork(t *testing.T) { mdp, &mock.ForkDetectorMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.HasherStub{}, &mock.MarshalizerMock{}, initStore(), @@ -958,6 +1000,8 @@ func TestMetaProcessor_DisplayLogInfo(t *testing.T) { mdp, &mock.ForkDetectorMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.HasherStub{}, &mock.MarshalizerMock{}, initStore(), @@ -982,6 +1026,8 @@ func TestMetaProcessor_CreateBlockHeaderShouldNotReturnNilWhenCreateShardInfoFai initMetaDataPool(), &mock.ForkDetectorMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.HasherStub{}, &mock.MarshalizerMock{}, initStore(), @@ -1011,6 +1057,8 @@ func TestMetaProcessor_CreateBlockHeaderShouldWork(t *testing.T) { initMetaDataPool(), &mock.ForkDetectorMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.HasherStub{}, &mock.MarshalizerMock{}, initStore(), @@ -1042,6 +1090,8 @@ func TestMetaProcessor_CommitBlockShouldRevertAccountStateWhenErr(t *testing.T) initMetaDataPool(), &mock.ForkDetectorMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.HasherStub{}, &mock.MarshalizerMock{}, initStore(), @@ -1064,6 +1114,8 @@ func TestMetaProcessor_MarshalizedDataToBroadcastShouldWork(t *testing.T) { mdp, &mock.ForkDetectorMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.HasherStub{}, &mock.MarshalizerMock{}, initStore(), @@ -1093,6 +1145,8 @@ func 
TestMetaProcessor_ReceivedHeaderShouldEraseRequested(t *testing.T) { pool, &mock.ForkDetectorMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, hasher, marshalizer, initStore(), @@ -1186,6 +1240,8 @@ func TestMetaProcessor_CreateShardInfoShouldWorkNoHdrAddedNotValid(t *testing.T) pool, &mock.ForkDetectorMock{}, mock.NewMultiShardsCoordinatorMock(5), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, hasher, marshalizer, initStore(), @@ -1265,6 +1321,8 @@ func TestMetaProcessor_CreateShardInfoShouldWorkNoHdrAddedNotFinal(t *testing.T) pool, &mock.ForkDetectorMock{}, mock.NewMultiShardsCoordinatorMock(noOfShards), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, hasher, marshalizer, initStore(), @@ -1382,6 +1440,8 @@ func TestMetaProcessor_CreateShardInfoShouldWorkHdrsAdded(t *testing.T) { pool, &mock.ForkDetectorMock{}, mock.NewMultiShardsCoordinatorMock(noOfShards), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, hasher, marshalizer, initStore(), @@ -1548,6 +1608,8 @@ func TestMetaProcessor_CreateShardInfoEmptyBlockHDRRoundTooHigh(t *testing.T) { pool, &mock.ForkDetectorMock{}, mock.NewMultiShardsCoordinatorMock(noOfShards), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, hasher, marshalizer, initStore(), @@ -1672,6 +1734,8 @@ func TestMetaProcessor_RestoreBlockIntoPoolsShouldErrNilMetaBlockHeader(t *testi mdp, &mock.ForkDetectorMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.HasherStub{}, &mock.MarshalizerMock{}, initStore(), @@ -1715,6 +1779,8 @@ func TestMetaProcessor_RestoreBlockIntoPoolsShouldWork(t *testing.T) { pool, &mock.ForkDetectorMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, hasherMock, marshalizerMock, store, @@ -1754,6 +1820,8 @@ func TestMetaProcessor_CreateLastNotarizedHdrs(t *testing.T) { pool, &mock.ForkDetectorMock{}, mock.NewMultiShardsCoordinatorMock(noOfShards), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, hasher, marshalizer, initStore(), @@ -1847,6 +1915,8 @@ func TestMetaProcessor_CheckShardHeadersValidity(t *testing.T) { pool, &mock.ForkDetectorMock{}, mock.NewMultiShardsCoordinatorMock(noOfShards), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, hasher, marshalizer, initStore(), @@ -1941,6 +2011,8 @@ func TestMetaProcessor_CheckShardHeadersValidityWrongNonceFromLastNoted(t *testi pool, &mock.ForkDetectorMock{}, mock.NewMultiShardsCoordinatorMock(noOfShards), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, hasher, marshalizer, initStore(), @@ -1998,6 +2070,8 @@ func TestMetaProcessor_CheckShardHeadersValidityRoundZeroLastNoted(t *testing.T) pool, &mock.ForkDetectorMock{}, mock.NewMultiShardsCoordinatorMock(noOfShards), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, hasher, marshalizer, initStore(), @@ -2061,6 +2135,8 @@ func TestMetaProcessor_CheckShardHeadersFinality(t *testing.T) { pool, &mock.ForkDetectorMock{}, mock.NewMultiShardsCoordinatorMock(noOfShards), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, hasher, marshalizer, initStore(), @@ -2173,6 +2249,8 @@ func TestMetaProcessor_IsHdrConstructionValid(t *testing.T) { pool, &mock.ForkDetectorMock{}, mock.NewMultiShardsCoordinatorMock(noOfShards), + mock.NewNodesCoordinatorMock(), + 
&mock.SpecialAddressHandlerMock{}, hasher, marshalizer, initStore(), @@ -2279,6 +2357,8 @@ func TestMetaProcessor_IsShardHeaderValidFinal(t *testing.T) { pool, &mock.ForkDetectorMock{}, mock.NewMultiShardsCoordinatorMock(noOfShards), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, hasher, marshalizer, initStore(), @@ -2388,6 +2468,8 @@ func TestMetaProcessor_DecodeBlockBody(t *testing.T) { mdp, &mock.ForkDetectorMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.HasherStub{}, marshalizerMock, &mock.ChainStorerMock{}, @@ -2416,6 +2498,8 @@ func TestMetaProcessor_DecodeBlockHeader(t *testing.T) { mdp, &mock.ForkDetectorMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.HasherStub{}, marshalizerMock, &mock.ChainStorerMock{}, diff --git a/process/block/shardblock.go b/process/block/shardblock.go index a2e39461a0b..c48ed008874 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -64,6 +64,8 @@ func NewShardProcessor( marshalizer marshal.Marshalizer, accounts state.AccountsAdapter, shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, + specialAddressHandler process.SpecialAddressHandler, forkDetector process.ForkDetector, blocksTracker process.BlocksTracker, startHeaders map[uint32]data.HeaderHandler, @@ -79,6 +81,8 @@ func NewShardProcessor( marshalizer, store, shardCoordinator, + nodesCoordinator, + specialAddressHandler, uint64Converter) if err != nil { return nil, err @@ -110,6 +114,8 @@ func NewShardProcessor( marshalizer: marshalizer, store: store, shardCoordinator: shardCoordinator, + nodesCoordinator: nodesCoordinator, + specialAddressHandler: specialAddressHandler, uint64Converter: uint64Converter, onRequestHeaderHandlerByNonce: requestHandler.RequestHeaderByNonce, } @@ -193,6 +199,14 @@ func (sp *shardProcessor) ProcessBlock( log.Info(fmt.Sprintf("Total txs in pool: %d\n", numTxWithDst)) + // give transaction coordinator the consensus group validators addresses where to send the rewards. 
+ consensusAddresses, err := sp.nodesCoordinator.GetValidatorsRewardsAddresses( + headerHandler.GetPrevRandSeed(), + headerHandler.GetRound(), + sp.shardCoordinator.SelfId(), + ) + + sp.SetConsensusRewardAddresses(consensusAddresses) sp.txCoordinator.CreateBlockStarted() sp.txCoordinator.RequestBlockTransactions(body) requestedMetaHdrs, requestedFinalMetaHdrs := sp.requestMetaHeaders(header) @@ -261,6 +275,11 @@ func (sp *shardProcessor) ProcessBlock( return nil } +// SetConsensusRewardAddresses - sets the reward addresses for the current consensus group +func (sp *shardProcessor) SetConsensusRewardAddresses(consensusRewardAddresses []string) { + sp.specialAddressHandler.SetConsensusRewardAddresses(consensusRewardAddresses) +} + // checkMetaHeadersValidity - checks if listed metaheaders are valid as construction func (sp *shardProcessor) checkMetaHeadersValidityAndFinality(header *block.Header) error { metablockCache := sp.dataPool.MetaBlocks() @@ -297,7 +316,7 @@ func (sp *shardProcessor) checkMetaHeadersValidityAndFinality(header *block.Head }) for _, metaHdr := range currAddedMetaHdrs { - err := sp.isHdrConstructionValid(metaHdr, tmpNotedHdr) + err = sp.isHdrConstructionValid(metaHdr, tmpNotedHdr) if err != nil { return err } @@ -333,7 +352,7 @@ func (sp *shardProcessor) checkMetaHdrFinality(header data.HeaderHandler, round // found a header with the next nonce if tmpHdr.hdr.GetNonce() == lastVerifiedHdr.GetNonce()+1 { - err := sp.isHdrConstructionValid(tmpHdr.hdr, lastVerifiedHdr) + err = sp.isHdrConstructionValid(tmpHdr.hdr, lastVerifiedHdr) if err != nil { continue } @@ -551,6 +570,7 @@ func (sp *shardProcessor) restoreMetaBlockIntoPool(miniBlockHashes map[int][][]b // as long as the transactions limit for the block has not been reached and there is still time to add transactions func (sp *shardProcessor) CreateBlockBody(round uint64, haveTime func() bool) (data.BodyHandler, error) { log.Debug(fmt.Sprintf("started creating block body in round %d\n", round)) + sp.txCoordinator.CreateBlockStarted() sp.blockSizeThrottler.ComputeMaxItems() @@ -745,7 +765,7 @@ func (sp *shardProcessor) getHighestHdrForOwnShardFromMetachain(round uint64) (* continue } - err := sp.isHdrConstructionValid(hdr, lastNotarizedMetaHdr) + err = sp.isHdrConstructionValid(hdr, lastNotarizedMetaHdr) if err != nil { continue } @@ -835,7 +855,7 @@ func (sp *shardProcessor) getProcessedMetaBlocksFromPool(body block.Body, header crossMiniBlockHashes := hdr.GetMiniBlockHeadersWithDst(sp.shardCoordinator.SelfId()) for key := range miniBlockHashes { - _, ok := crossMiniBlockHashes[string(miniBlockHashes[key])] + _, ok = crossMiniBlockHashes[string(miniBlockHashes[key])] if !ok { continue } @@ -1285,7 +1305,7 @@ func (sp *shardProcessor) createAndProcessCrossMiniBlocksDstMe( continue } - err := sp.isHdrConstructionValid(hdr, lastMetaHdr) + err = sp.isHdrConstructionValid(hdr, lastMetaHdr) if err != nil { continue } diff --git a/process/block/shardblock_test.go b/process/block/shardblock_test.go index bf48a6e9f27..ff82748ff7b 100644 --- a/process/block/shardblock_test.go +++ b/process/block/shardblock_test.go @@ -95,6 +95,8 @@ func TestNewBlockProcessor_NilDataPoolShouldErr(t *testing.T) { &mock.MarshalizerMock{}, initAccountsMock(), mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), @@ -117,6 +119,8 @@ func TestNewShardProcessor_NilStoreShouldErr(t 
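On the shard side, ProcessBlock now resolves the reward addresses itself, using the header's previous random seed, its round and the node's own shard id, and forwards them through SetConsensusRewardAddresses to the special address handler. The same step, sketched with an explicit error check added (the check is an addition for illustration, not part of the hunk above):

    // Resolve the reward addresses of the consensus group that produced this
    // header and hand them to the special address handler before the
    // transaction coordinator starts work on the block body.
    consensusAddresses, err := sp.nodesCoordinator.GetValidatorsRewardsAddresses(
        headerHandler.GetPrevRandSeed(),
        headerHandler.GetRound(),
        sp.shardCoordinator.SelfId(),
    )
    if err != nil {
        return err
    }

    sp.SetConsensusRewardAddresses(consensusAddresses)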
*testing.T) { &mock.MarshalizerMock{}, initAccountsMock(), mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), @@ -139,6 +143,8 @@ func TestNewShardProcessor_NilHasherShouldErr(t *testing.T) { &mock.MarshalizerMock{}, initAccountsMock(), mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), @@ -161,6 +167,8 @@ func TestNewShardProcessor_NilMarshalizerShouldWork(t *testing.T) { nil, initAccountsMock(), mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), @@ -183,6 +191,8 @@ func TestNewShardProcessor_NilAccountsAdapterShouldErr(t *testing.T) { &mock.MarshalizerMock{}, nil, mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), @@ -205,6 +215,8 @@ func TestNewShardProcessor_NilShardCoordinatorShouldErr(t *testing.T) { &mock.MarshalizerMock{}, initAccountsMock(), nil, + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), @@ -227,6 +239,8 @@ func TestNewShardProcessor_NilForkDetectorShouldErr(t *testing.T) { &mock.MarshalizerMock{}, initAccountsMock(), mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, nil, &mock.BlocksTrackerMock{}, createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), @@ -249,6 +263,8 @@ func TestNewShardProcessor_NilBlocksTrackerShouldErr(t *testing.T) { &mock.MarshalizerMock{}, initAccountsMock(), mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, nil, createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), @@ -271,6 +287,8 @@ func TestNewShardProcessor_NilRequestTransactionHandlerShouldErr(t *testing.T) { &mock.MarshalizerMock{}, initAccountsMock(), mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), @@ -296,6 +314,8 @@ func TestNewShardProcessor_NilTransactionPoolShouldErr(t *testing.T) { &mock.MarshalizerMock{}, initAccountsMock(), mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), @@ -318,6 +338,8 @@ func TestNewShardProcessor_NilTxCoordinator(t *testing.T) { &mock.MarshalizerMock{}, initAccountsMock(), mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), @@ -340,6 +362,8 @@ func TestNewShardProcessor_NilUint64Converter(t *testing.T) { &mock.MarshalizerMock{}, initAccountsMock(), mock.NewMultiShardsCoordinatorMock(3), + 
mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), @@ -362,6 +386,8 @@ func TestNewShardProcessor_OkValsShouldWork(t *testing.T) { &mock.MarshalizerMock{}, initAccountsMock(), mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), @@ -386,6 +412,8 @@ func TestShardProcessor_ProcessBlockWithNilBlockchainShouldErr(t *testing.T) { &mock.MarshalizerMock{}, initAccountsMock(), mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), @@ -409,6 +437,8 @@ func TestShardProcessor_ProcessBlockWithNilHeaderShouldErr(t *testing.T) { &mock.MarshalizerMock{}, initAccountsMock(), mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), @@ -432,6 +462,8 @@ func TestShardProcessor_ProcessBlockWithNilBlockBodyShouldErr(t *testing.T) { &mock.MarshalizerMock{}, initAccountsMock(), mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), @@ -454,6 +486,8 @@ func TestShardProcessor_ProcessBlockWithNilHaveTimeFuncShouldErr(t *testing.T) { &mock.MarshalizerMock{}, initAccountsMock(), mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), @@ -492,6 +526,8 @@ func TestShardProcessor_ProcessWithDirtyAccountShouldErr(t *testing.T) { RevertToSnapshotCalled: revToSnapshot, }, mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), @@ -545,6 +581,8 @@ func TestShardProcessor_ProcessBlockHeaderBodyMismatchShouldErr(t *testing.T) { RootHashCalled: rootHashCalled, }, mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{ ProbableHighestNonceCalled: func() uint64 { return 0 @@ -655,6 +693,8 @@ func TestShardProcessor_ProcessBlockWithInvalidTransactionShouldErr(t *testing.T RootHashCalled: rootHashCalled, }, mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), @@ -679,6 +719,8 @@ func TestShardProcessor_ProcessWithHeaderNotFirstShouldErr(t *testing.T) { &mock.MarshalizerMock{}, initAccountsMock(), mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), @@ -711,6 +753,8 @@ func TestShardProcessor_ProcessWithHeaderNotCorrectNonceShouldErr(t *testing.T) &mock.MarshalizerMock{}, initAccountsMock(), 
mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), @@ -743,6 +787,8 @@ func TestShardProcessor_ProcessWithHeaderNotCorrectPrevHashShouldErr(t *testing. &mock.MarshalizerMock{}, initAccountsMock(), mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{ ProbableHighestNonceCalled: func() uint64 { return 0 @@ -874,6 +920,8 @@ func TestShardProcessor_ProcessBlockWithErrOnProcessBlockTransactionsCallShouldR &mock.MarshalizerMock{}, accounts, shardCoordinator, + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{ ProbableHighestNonceCalled: func() uint64 { return 0 @@ -960,6 +1008,8 @@ func TestShardProcessor_ProcessBlockWithErrOnVerifyStateRootCallShouldRevertStat RootHashCalled: rootHashCalled, }, mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{ ProbableHighestNonceCalled: func() uint64 { return 0 @@ -1046,6 +1096,8 @@ func TestShardProcessor_ProcessBlockOnlyIntraShardShouldPass(t *testing.T) { RootHashCalled: rootHashCalled, }, mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), @@ -1129,6 +1181,8 @@ func TestShardProcessor_ProcessBlockCrossShardWithoutMetaShouldFail(t *testing.T RootHashCalled: rootHashCalled, }, mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), @@ -1213,6 +1267,8 @@ func TestShardProcessor_ProcessBlockCrossShardWithMetaShouldPass(t *testing.T) { RootHashCalled: rootHashCalled, }, mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), @@ -1354,6 +1410,8 @@ func TestShardProcessor_ProcessBlockWithMissingMetaHdrShouldErr(t *testing.T) { RootHashCalled: rootHashCalled, }, mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), @@ -1419,6 +1477,8 @@ func TestShardProcessor_ProcessBlockWithWrongMiniBlockHeaderShouldErr(t *testing RootHashCalled: rootHashCalled, }, mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), @@ -1505,6 +1565,8 @@ func TestShardProcessor_CheckAndRequestIfMetaHeadersMissingShouldErr(t *testing. 
RootHashCalled: rootHashCalled, }, mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), @@ -1634,6 +1696,8 @@ func TestShardProcessor_RequestFinalMissingHeaders(t *testing.T) { &mock.MarshalizerMock{}, &mock.AccountsStub{}, mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), @@ -1731,6 +1795,8 @@ func TestShardProcessor_CheckMetaHeadersValidityAndFinalityShouldPass(t *testing marshalizer, &mock.AccountsStub{}, mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, genesisBlocks, @@ -1784,6 +1850,8 @@ func TestShardProcessor_CommitBlockNilBlockchainShouldErr(t *testing.T) { &mock.MarshalizerMock{}, accounts, mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), @@ -1835,6 +1903,8 @@ func TestShardProcessor_CommitBlockMarshalizerFailForHeaderShouldErr(t *testing. marshalizer, accounts, mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), @@ -1893,6 +1963,8 @@ func TestShardProcessor_CommitBlockStorageFailsForHeaderShouldErr(t *testing.T) &mock.MarshalizerMock{}, accounts, mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{ AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeader data.HeaderHandler, finalHeaderHash []byte) error { return nil @@ -1969,6 +2041,8 @@ func TestShardProcessor_CommitBlockStorageFailsForBodyShouldWork(t *testing.T) { &mock.MarshalizerMock{}, accounts, mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{ AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeader data.HeaderHandler, finalHeaderHash []byte) error { return nil @@ -2033,6 +2107,8 @@ func TestShardProcessor_CommitBlockNilNoncesDataPoolShouldErr(t *testing.T) { &mock.MarshalizerMock{}, accounts, mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), @@ -2148,6 +2224,8 @@ func TestShardProcessor_CommitBlockNoTxInPoolShouldErr(t *testing.T) { &mock.MarshalizerMock{}, accounts, mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, fd, &mock.BlocksTrackerMock{}, createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), @@ -2233,6 +2311,8 @@ func TestShardProcessor_CommitBlockOkValsShouldWork(t *testing.T) { &mock.MarshalizerMock{}, accounts, mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, fd, &mock.BlocksTrackerMock{ AddBlockCalled: func(headerHandler data.HeaderHandler) { @@ -2341,6 +2421,8 @@ func 
TestShardProcessor_CommitBlockCallsIndexerMethods(t *testing.T) { &mock.MarshalizerMock{}, accounts, mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, fd, &mock.BlocksTrackerMock{ AddBlockCalled: func(headerHandler data.HeaderHandler) { @@ -2410,6 +2492,8 @@ func TestShardProcessor_CreateTxBlockBodyWithDirtyAccStateShouldErr(t *testing.T RevertToSnapshotCalled: revToSnapshot, }, mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), @@ -2444,6 +2528,8 @@ func TestShardProcessor_CreateTxBlockBodyWithNoTimeShouldEmptyBlock(t *testing.T RevertToSnapshotCalled: revToSnapshot, }, mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), @@ -2482,6 +2568,8 @@ func TestShardProcessor_CreateTxBlockBodyOK(t *testing.T) { RootHashCalled: rootHashfunc, }, mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), @@ -2509,6 +2597,8 @@ func TestNode_ComputeNewNoncePrevHashShouldWork(t *testing.T) { marshalizer, initAccountsMock(), mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), @@ -2601,6 +2691,8 @@ func TestShardProcessor_DisplayLogInfo(t *testing.T) { &mock.MarshalizerMock{}, initAccountsMock(), shardCoordinator, + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, createGenesisBlocks(shardCoordinator), @@ -2623,6 +2715,8 @@ func TestBlockProcessor_CreateBlockHeaderShouldNotReturnNil(t *testing.T) { &mock.MarshalizerMock{}, initAccountsMock(), mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), @@ -2648,6 +2742,8 @@ func TestShardProcessor_CreateBlockHeaderShouldErrWhenMarshalizerErrors(t *testi &mock.MarshalizerMock{Fail: true}, initAccountsMock(), mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), @@ -2689,6 +2785,8 @@ func TestShardProcessor_CreateBlockHeaderReturnsOK(t *testing.T) { &mock.MarshalizerMock{}, initAccountsMock(), mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), @@ -2738,6 +2836,8 @@ func TestShardProcessor_CommitBlockShouldRevertAccountStateWhenErr(t *testing.T) RevertToSnapshotCalled: revToSnapshot, }, mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), @@ -2806,6 +2906,8 @@ func 
TestShardProcessor_MarshalizedDataToBroadcastShouldWork(t *testing.T) { marshalizer, initAccountsMock(), mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), @@ -2842,6 +2944,8 @@ func TestShardProcessor_MarshalizedDataWrongType(t *testing.T) { marshalizer, initAccountsMock(), mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), @@ -2870,6 +2974,8 @@ func TestShardProcessor_MarshalizedDataNilInput(t *testing.T) { marshalizer, initAccountsMock(), mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), @@ -2934,6 +3040,8 @@ func TestShardProcessor_MarshalizedDataMarshalWithoutSuccess(t *testing.T) { marshalizer, initAccountsMock(), mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), @@ -3015,6 +3123,8 @@ func TestShardProcessor_ReceivedMetaBlockShouldRequestMissingMiniBlocks(t *testi marshalizer, initAccountsMock(), mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), @@ -3083,6 +3193,8 @@ func TestShardProcessor_ReceivedMetaBlockNoMissingMiniBlocksShouldPass(t *testin marshalizer, initAccountsMock(), mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), @@ -3160,6 +3272,8 @@ func TestShardProcessor_CreateAndProcessCrossMiniBlocksDstMe(t *testing.T) { &mock.MarshalizerMock{}, &mock.AccountsStub{}, mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), @@ -3193,6 +3307,8 @@ func TestShardProcessor_NewShardProcessorWrongTypeOfStartHeaderShouldErrWrongTyp &mock.MarshalizerMock{}, &mock.AccountsStub{}, mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, startHeaders, @@ -3288,6 +3404,8 @@ func TestShardProcessor_CreateAndProcessCrossMiniBlocksDstMeProcessPartOfMiniBlo &mock.MarshalizerMock{}, &mock.AccountsStub{}, mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), @@ -3404,6 +3522,8 @@ func TestShardProcessor_CreateMiniBlocksShouldWorkWithIntraShardTxs(t *testing.T marshalizer, accntAdapter, mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), @@ -3481,6 +3601,8 @@ func 
TestShardProcessor_GetProcessedMetaBlockFromPoolShouldWork(t *testing.T) { marshalizer, initAccountsMock(), shardCoordinator, + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{ GetHighestFinalBlockNonceCalled: func() uint64 { return 0 @@ -3531,6 +3653,8 @@ func TestBlockProcessor_RestoreBlockIntoPoolsShouldErrNilBlockHeader(t *testing. &mock.MarshalizerMock{}, initAccountsMock(), mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), @@ -3554,6 +3678,8 @@ func TestBlockProcessor_RestoreBlockIntoPoolsShouldErrNilTxBlockBody(t *testing. &mock.MarshalizerMock{}, initAccountsMock(), mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), @@ -3627,6 +3753,8 @@ func TestShardProcessor_RestoreBlockIntoPoolsShouldWork(t *testing.T) { marshalizerMock, initAccountsMock(), mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), @@ -3681,6 +3809,8 @@ func TestShardProcessor_DecodeBlockBody(t *testing.T) { marshalizerMock, initAccountsMock(), mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), @@ -3713,6 +3843,8 @@ func TestShardProcessor_DecodeBlockHeader(t *testing.T) { marshalizerMock, initAccountsMock(), mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), @@ -3754,6 +3886,8 @@ func TestShardProcessor_IsHdrConstructionValid(t *testing.T) { marshalizer, initAccountsMock(), mock.NewMultiShardsCoordinatorMock(shardNr), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(shardNr)), @@ -3868,6 +4002,8 @@ func TestShardProcessor_RemoveAndSaveLastNotarizedMetaHdrNoDstMB(t *testing.T) { marshalizer, initAccountsMock(), mock.NewMultiShardsCoordinatorMock(shardNr), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, forkDetector, &mock.BlocksTrackerMock{ RemoveNotarisedBlocksCalled: func(headerHandler data.HeaderHandler) error { @@ -4031,6 +4167,8 @@ func TestShardProcessor_RemoveAndSaveLastNotarizedMetaHdrNotAllMBFinished(t *tes marshalizer, initAccountsMock(), mock.NewMultiShardsCoordinatorMock(shardNr), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, forkDetector, &mock.BlocksTrackerMock{ RemoveNotarisedBlocksCalled: func(headerHandler data.HeaderHandler) error { @@ -4159,6 +4297,8 @@ func TestShardProcessor_RemoveAndSaveLastNotarizedMetaHdrAllMBFinished(t *testin marshalizer, initAccountsMock(), mock.NewMultiShardsCoordinatorMock(shardNr), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, forkDetector, &mock.BlocksTrackerMock{ RemoveNotarisedBlocksCalled: func(headerHandler data.HeaderHandler) error { @@ -4316,6 +4456,8 @@ func 
TestShardProcessor_CheckHeaderBodyCorrelationReceiverMissmatch(t *testing.T &mock.MarshalizerMock{}, &mock.AccountsStub{}, mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), @@ -4341,6 +4483,8 @@ func TestShardProcessor_CheckHeaderBodyCorrelationSenderMissmatch(t *testing.T) &mock.MarshalizerMock{}, &mock.AccountsStub{}, mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), @@ -4366,6 +4510,8 @@ func TestShardProcessor_CheckHeaderBodyCorrelationTxCountMissmatch(t *testing.T) &mock.MarshalizerMock{}, &mock.AccountsStub{}, mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), @@ -4391,6 +4537,8 @@ func TestShardProcessor_CheckHeaderBodyCorrelationHashMissmatch(t *testing.T) { &mock.MarshalizerMock{}, &mock.AccountsStub{}, mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), @@ -4416,6 +4564,8 @@ func TestShardProcessor_CheckHeaderBodyCorrelationShouldPass(t *testing.T) { &mock.MarshalizerMock{}, &mock.AccountsStub{}, mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), @@ -4459,6 +4609,8 @@ func TestShardProcessor_restoreMetaBlockIntoPoolShouldPass(t *testing.T) { &mock.MarshalizerMock{}, &mock.AccountsStub{}, mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), @@ -4566,6 +4718,8 @@ func TestShardPreprocessor_getAllMiniBlockDstMeFromMetaShouldPass(t *testing.T) &mock.MarshalizerMock{}, &mock.AccountsStub{}, mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), @@ -4607,6 +4761,8 @@ func TestShardProcessor_GetHighestHdrForOwnShardFromMetachainNothingToProcess(t &mock.MarshalizerMock{}, &mock.AccountsStub{}, mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), @@ -4639,6 +4795,8 @@ func TestShardProcessor_GetHighestHdrForOwnShardFromMetachaiMetaHdrsWithoutOwnHd marshalizer, &mock.AccountsStub{}, mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, genesisBlocks, @@ -4703,6 +4861,8 @@ func TestShardProcessor_GetHighestHdrForOwnShardFromMetachaiMetaHdrsWithOwnHdrBu marshalizer, &mock.AccountsStub{}, mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, 
&mock.BlocksTrackerMock{}, genesisBlocks, @@ -4766,6 +4926,8 @@ func TestShardProcessor_GetHighestHdrForOwnShardFromMetachaiMetaHdrsWithOwnHdrSt marshalizer, &mock.AccountsStub{}, mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, genesisBlocks, diff --git a/process/coordinator/process.go b/process/coordinator/process.go index 846a347c976..9c1f82a9944 100644 --- a/process/coordinator/process.go +++ b/process/coordinator/process.go @@ -489,49 +489,34 @@ func (tc *transactionCoordinator) CreateMbsAndProcessTransactionsFromMe( miniBlocks = append(miniBlocks, interMBs...) } - tc.addTxFeeToMatchingMiniBlocks(&miniBlocks) + tc.addRewardsMiniBlocks(&miniBlocks) return miniBlocks } -func (tc *transactionCoordinator) addTxFeeToMatchingMiniBlocks(miniBlocks *block.MiniBlockSlice) { - // add txfee transactions to matching blocks - interimProc := tc.getInterimProcessor(block.TxFeeBlock) +func (tc *transactionCoordinator) addRewardsMiniBlocks(miniBlocks *block.MiniBlockSlice) { + // add rewards transactions to separate miniBlocks + interimProc := tc.getInterimProcessor(block.RewardsBlockType) if interimProc == nil { return } - txFeeMbs := interimProc.CreateAllInterMiniBlocks() - for key, mb := range txFeeMbs { - var matchingMBFound bool - for i := 0; i < len(*miniBlocks); i++ { - currMb := (*miniBlocks)[i] - if currMb.ReceiverShardID == key && - currMb.SenderShardID == tc.shardCoordinator.SelfId() && - currMb.Type == block.TxBlock { - currMb.TxHashes = append(currMb.TxHashes, mb.TxHashes...) - matchingMBFound = true - break - } - } - - if !matchingMBFound { + rewardsMbs := interimProc.CreateAllInterMiniBlocks() + for key, mb := range rewardsMbs { mb.ReceiverShardID = key mb.SenderShardID = tc.shardCoordinator.SelfId() - mb.Type = block.TxBlock + mb.Type = block.RewardsBlockType *miniBlocks = append(*miniBlocks, mb) - } } } func (tc *transactionCoordinator) processAddedInterimTransactions() block.MiniBlockSlice { miniBlocks := make(block.MiniBlockSlice, 0) - // processing has to be done in order, as the order of different type of transactions over the same account is strict // processing has to be done in order, as the order of different type of transactions over the same account is strict for _, blockType := range tc.keysInterimProcs { - if blockType == block.TxFeeBlock { + if blockType == block.RewardsBlockType { // this has to be processed last continue } @@ -600,6 +585,8 @@ func createBroadcastTopic(shardC sharding.Coordinator, destShId uint32, mbType b baseTopic = factory.PeerChBodyTopic case block.SmartContractResultBlock: baseTopic = factory.UnsignedTransactionTopic + case block.RewardsBlockType: + baseTopic = factory.RewardsTransactionTopic default: return "", process.ErrUnknownBlockType } @@ -747,7 +734,7 @@ func (tc *transactionCoordinator) VerifyCreatedBlockTransactions(body block.Body wg.Add(len(tc.interimProcessors)) for key, interimProc := range tc.interimProcessors { - if key == block.TxFeeBlock { + if key == block.RewardsBlockType { // this has to be processed last wg.Done() continue @@ -770,7 +757,7 @@ func (tc *transactionCoordinator) VerifyCreatedBlockTransactions(body block.Body return errFound } - interimProc := tc.getInterimProcessor(block.TxFeeBlock) + interimProc := tc.getInterimProcessor(block.RewardsBlockType) if interimProc == nil { return nil } diff --git a/process/errors.go b/process/errors.go index 9712db7dbba..07f897314f3 100644 --- a/process/errors.go +++ 
b/process/errors.go @@ -430,8 +430,8 @@ var ErrNotEnoughFeeInTransactions = errors.New("transaction fee is not enough") // ErrNilUnsignedTxHandler signals that the unsigned tx handler is nil var ErrNilUnsignedTxHandler = errors.New("nil unsigned tx handler") -// ErrTxsFeesDoesNotMatch signals that txs fees do not match -var ErrTxsFeesDoesNotMatch = errors.New("calculated tx fees with block tx fee does not match") +// ErrTxsFeesDoNotMatch signals that txs fees do not match +var ErrTxsFeesDoNotMatch = errors.New("calculated tx fees with block tx fee does not match") // ErrTxsFeesNotFound signals that the tx fee not found var ErrTxsFeesNotFound = errors.New("tx fees not found") diff --git a/process/factory/factory.go b/process/factory/factory.go index 7b28e970d00..519de841acb 100644 --- a/process/factory/factory.go +++ b/process/factory/factory.go @@ -5,6 +5,8 @@ const ( TransactionTopic = "transactions" // UnsignedTransactionTopic is the topic used for sharing unsigned transactions UnsignedTransactionTopic = "unsignedTransactions" + // RewardsTransactionTopic is the topic used for sharing fee transactions + RewardsTransactionTopic = "rewardsTransactions" // HeadersTopic is the topic used for sharing block headers HeadersTopic = "headers" // MiniBlocksTopic is the topic used for sharing mini blocks diff --git a/process/factory/shard/intermediateProcessorsContainerFactory.go b/process/factory/shard/intermediateProcessorsContainerFactory.go index 84c89da5c60..d0c4cd669d6 100644 --- a/process/factory/shard/intermediateProcessorsContainerFactory.go +++ b/process/factory/shard/intermediateProcessorsContainerFactory.go @@ -75,12 +75,12 @@ func (ppcm *intermediateProcessorsContainerFactory) Create() (process.Intermedia return nil, err } - interproc, err = ppcm.createTxFeeIntermediateProcessor() + interproc, err = ppcm.createRewardsTxIntermediateProcessor() if err != nil { return nil, err } - err = container.Add(block.TxFeeBlock, interproc) + err = container.Add(block.RewardsBlockType, interproc) if err != nil { return nil, err } @@ -101,8 +101,8 @@ func (ppcm *intermediateProcessorsContainerFactory) createSmartContractResultsIn return irp, err } -func (ppcm *intermediateProcessorsContainerFactory) createTxFeeIntermediateProcessor() (process.IntermediateTransactionHandler, error) { - irp, err := unsigned.NewFeeTxHandler( +func (ppcm *intermediateProcessorsContainerFactory) createRewardsTxIntermediateProcessor() (process.IntermediateTransactionHandler, error) { + irp, err := unsigned.NewRewardTxHandler( ppcm.specialAddressHandler, ppcm.hasher, ppcm.marshalizer, diff --git a/process/interface.go b/process/interface.go index ae4e1d0b9eb..8b68426eac6 100644 --- a/process/interface.go +++ b/process/interface.go @@ -88,17 +88,18 @@ type TransactionVerifier interface { // UnsignedTxHandler creates and verifies unsigned transactions for current round type UnsignedTxHandler interface { CleanProcessedUTxs() - AddProcessedUTx(tx data.TransactionHandler) + ProcessTransactionFee(cost *big.Int) CreateAllUTxs() []data.TransactionHandler VerifyCreatedUTxs() error - AddTxFeeFromBlock(tx data.TransactionHandler) + AddRewardTxFromBlock(tx data.TransactionHandler) } // SpecialAddressHandler responds with needed special addresses type SpecialAddressHandler interface { SetElrondCommunityAddress(elrond []byte) ElrondCommunityAddress() []byte - SetLeaderAddress(leader []byte) + SetConsensusRewardAddresses(consensusRewardAddresses []string) + ConsensusRewardAddresses() []string LeaderAddress() []byte BurnAddress() []byte 
ShardIdForAddress([]byte) (uint32, error) @@ -137,6 +138,7 @@ type BlockProcessor interface { DecodeBlockBody(dta []byte) data.BodyHandler DecodeBlockHeader(dta []byte) data.HeaderHandler AddLastNotarizedHdr(shardId uint32, processedHdr data.HeaderHandler) + SetConsensusRewardAddresses(consensusRewardAddresses []string) } // Checker provides functionality to checks the integrity and validity of a data structure diff --git a/process/mock/blockProcessorMock.go b/process/mock/blockProcessorMock.go index 5a15a428f83..0156833536b 100644 --- a/process/mock/blockProcessorMock.go +++ b/process/mock/blockProcessorMock.go @@ -66,3 +66,7 @@ func (blProcMock BlockProcessorMock) DecodeBlockHeader(dta []byte) data.HeaderHa func (blProcMock BlockProcessorMock) AddLastNotarizedHdr(shardId uint32, processedHdr data.HeaderHandler) { blProcMock.AddLastNotarizedHdrCalled(shardId, processedHdr) } + +func (blProcMock BlockProcessorMock) SetConsensusRewardAddresses([]string){ + panic("implement me") +} \ No newline at end of file diff --git a/process/mock/nodesCoordinatorMock.go b/process/mock/nodesCoordinatorMock.go index 324cd59e858..3ce79896273 100644 --- a/process/mock/nodesCoordinatorMock.go +++ b/process/mock/nodesCoordinatorMock.go @@ -8,16 +8,17 @@ import ( // NodesCoordinator defines the behaviour of a struct able to do validator group selection type NodesCoordinatorMock struct { - Validators map[uint32][]sharding.Validator - ShardConsensusSize uint32 - MetaConsensusSize uint32 - ShardId uint32 - NbShards uint32 - GetSelectedPublicKeysCalled func(selection []byte, shardId uint32) (publicKeys []string, err error) - GetValidatorsPublicKeysCalled func(randomness []byte, round uint64, shardId uint32) ([]string, error) - LoadNodesPerShardsCalled func(nodes map[uint32][]sharding.Validator) error - ComputeValidatorsGroupCalled func(randomness []byte, round uint64, shardId uint32) (validatorsGroup []sharding.Validator, err error) - GetValidatorWithPublicKeyCalled func(publicKey []byte) (validator sharding.Validator, shardId uint32, err error) + Validators map[uint32][]sharding.Validator + ShardConsensusSize uint32 + MetaConsensusSize uint32 + ShardId uint32 + NbShards uint32 + GetSelectedPublicKeysCalled func(selection []byte, shardId uint32) (publicKeys []string, err error) + GetValidatorsPublicKeysCalled func(randomness []byte, round uint64, shardId uint32) ([]string, error) + GetValidatorsRewardsAddressesCalled func(randomness []byte, round uint64, shardId uint32) ([]string, error) + LoadNodesPerShardsCalled func(nodes map[uint32][]sharding.Validator) error + ComputeValidatorsGroupCalled func(randomness []byte, round uint64, shardId uint32) (validatorsGroup []sharding.Validator, err error) + GetValidatorWithPublicKeyCalled func(publicKey []byte) (validator sharding.Validator, shardId uint32, err error) } func NewNodesCoordinatorMock() *NodesCoordinatorMock { @@ -71,6 +72,29 @@ func (ncm *NodesCoordinatorMock) GetValidatorsPublicKeys( return valGrStr, nil } +func (ncm *NodesCoordinatorMock) GetValidatorsRewardsAddresses( + randomness []byte, + round uint64, + shardId uint32, +) ([]string, error) { + if ncm.GetValidatorsPublicKeysCalled != nil { + return ncm.GetValidatorsRewardsAddressesCalled(randomness, round, shardId) + } + + validators, err := ncm.ComputeValidatorsGroup(randomness, round, shardId) + if err != nil { + return nil, err + } + + addresses := make([]string, 0) + + for _, v := range validators { + addresses = append(addresses, string(v.Address())) + } + + return addresses, nil +} + func (ncm 
*NodesCoordinatorMock) SetNodesPerShards(nodes map[uint32][]sharding.Validator) error { if ncm.LoadNodesPerShardsCalled != nil { return ncm.LoadNodesPerShardsCalled(nodes) diff --git a/process/mock/poolsHolderFake.go b/process/mock/poolsHolderFake.go index 0db88242433..d94491dbf7d 100644 --- a/process/mock/poolsHolderFake.go +++ b/process/mock/poolsHolderFake.go @@ -11,7 +11,8 @@ import ( type PoolsHolderFake struct { transactions dataRetriever.ShardedDataCacherNotifier - unsignedtransactions dataRetriever.ShardedDataCacherNotifier + unsignedTransactions dataRetriever.ShardedDataCacherNotifier + rewardTransactions dataRetriever.ShardedDataCacherNotifier headers storage.Cacher metaBlocks storage.Cacher hdrNonces dataRetriever.Uint64SyncMapCacher @@ -23,7 +24,8 @@ type PoolsHolderFake struct { func NewPoolsHolderFake() *PoolsHolderFake { phf := &PoolsHolderFake{} phf.transactions, _ = shardedData.NewShardedData(storageUnit.CacheConfig{Size: 10000, Type: storageUnit.LRUCache}) - phf.unsignedtransactions, _ = shardedData.NewShardedData(storageUnit.CacheConfig{Size: 10000, Type: storageUnit.LRUCache}) + phf.unsignedTransactions, _ = shardedData.NewShardedData(storageUnit.CacheConfig{Size: 10000, Type: storageUnit.LRUCache}) + phf.rewardTransactions, _ = shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache}) phf.headers, _ = storageUnit.NewCache(storageUnit.LRUCache, 10000, 1) phf.metaBlocks, _ = storageUnit.NewCache(storageUnit.LRUCache, 10000, 1) cacheHdrNonces, _ := storageUnit.NewCache(storageUnit.LRUCache, 10000, 1) @@ -46,7 +48,11 @@ func (phf *PoolsHolderFake) Transactions() dataRetriever.ShardedDataCacherNotifi } func (phf *PoolsHolderFake) UnsignedTransactions() dataRetriever.ShardedDataCacherNotifier { - return phf.unsignedtransactions + return phf.unsignedTransactions +} + +func (phf *PoolsHolderFake) RewardTransactions() dataRetriever.ShardedDataCacherNotifier { + return phf.rewardTransactions } func (phf *PoolsHolderFake) Headers() storage.Cacher { @@ -78,5 +84,5 @@ func (phf *PoolsHolderFake) SetTransactions(transactions dataRetriever.ShardedDa } func (phf *PoolsHolderFake) SetUnsignedTransactions(scrs dataRetriever.ShardedDataCacherNotifier) { - phf.unsignedtransactions = scrs + phf.unsignedTransactions = scrs } diff --git a/process/mock/poolsHolderStub.go b/process/mock/poolsHolderStub.go index 76a9302e710..44bc204aaa5 100644 --- a/process/mock/poolsHolderStub.go +++ b/process/mock/poolsHolderStub.go @@ -11,6 +11,7 @@ type PoolsHolderStub struct { PeerChangesBlocksCalled func() storage.Cacher TransactionsCalled func() dataRetriever.ShardedDataCacherNotifier UnsignedTransactionsCalled func() dataRetriever.ShardedDataCacherNotifier + RewardTransactionsCalled func() dataRetriever.ShardedDataCacherNotifier MiniBlocksCalled func() storage.Cacher MetaBlocksCalled func() storage.Cacher } @@ -42,3 +43,7 @@ func (phs *PoolsHolderStub) MetaBlocks() storage.Cacher { func (phs *PoolsHolderStub) UnsignedTransactions() dataRetriever.ShardedDataCacherNotifier { return phs.UnsignedTransactionsCalled() } + +func (phs *PoolsHolderStub) RewardTransactions() dataRetriever.ShardedDataCacherNotifier { + return phs.RewardTransactionsCalled() +} diff --git a/process/mock/specialAddressHandlerMock.go b/process/mock/specialAddressHandlerMock.go index 2c6f4207c50..407b3a6c7e0 100644 --- a/process/mock/specialAddressHandlerMock.go +++ b/process/mock/specialAddressHandlerMock.go @@ -5,12 +5,19 @@ type SpecialAddressHandlerMock struct { LeaderAddressCalled func() []byte 
BurnAddressCalled func() []byte ShardIdForAddressCalled func([]byte) (uint32, error) + + addresses []string } func (sh *SpecialAddressHandlerMock) SetElrondCommunityAddress(elrond []byte) { } -func (sh *SpecialAddressHandlerMock) SetLeaderAddress(leader []byte) { +func (sh *SpecialAddressHandlerMock) SetConsensusRewardAddresses(consensusRewardAddresses []string) { + sh.addresses = consensusRewardAddresses +} + +func (sh *SpecialAddressHandlerMock) ConsensusRewardAddresses() []string { + return sh.addresses } func (sh *SpecialAddressHandlerMock) BurnAddress() []byte { diff --git a/process/mock/unsignedTxHandlerMock.go b/process/mock/unsignedTxHandlerMock.go index 9ac5fd55a10..7097c4a31e8 100644 --- a/process/mock/unsignedTxHandlerMock.go +++ b/process/mock/unsignedTxHandlerMock.go @@ -1,18 +1,20 @@ package mock import ( + "math/big" + "github.com/ElrondNetwork/elrond-go/data" ) type UnsignedTxHandlerMock struct { - CleanProcessedUtxsCalled func() - AddProcessedUTxCalled func(tx data.TransactionHandler) - CreateAllUTxsCalled func() []data.TransactionHandler - VerifyCreatedUTxsCalled func() error - AddTxFeeFromBlockCalled func(tx data.TransactionHandler) + CleanProcessedUtxsCalled func() + ProcessTransactionFeeCalled func(cost *big.Int) + CreateAllUTxsCalled func() []data.TransactionHandler + VerifyCreatedUTxsCalled func() error + AddTxFeeFromBlockCalled func(tx data.TransactionHandler) } -func (ut *UnsignedTxHandlerMock) AddTxFeeFromBlock(tx data.TransactionHandler) { +func (ut *UnsignedTxHandlerMock) AddRewardTxFromBlock(tx data.TransactionHandler) { if ut.AddTxFeeFromBlockCalled == nil { return } @@ -28,12 +30,12 @@ func (ut *UnsignedTxHandlerMock) CleanProcessedUTxs() { ut.CleanProcessedUtxsCalled() } -func (ut *UnsignedTxHandlerMock) AddProcessedUTx(tx data.TransactionHandler) { - if ut.AddProcessedUTxCalled == nil { +func (ut *UnsignedTxHandlerMock) ProcessTransactionFee(cost *big.Int) { + if ut.ProcessTransactionFeeCalled == nil { return } - ut.AddProcessedUTxCalled(tx) + ut.ProcessTransactionFeeCalled(cost) } func (ut *UnsignedTxHandlerMock) CreateAllUTxs() []data.TransactionHandler { diff --git a/process/smartContract/export_test.go b/process/smartContract/export_test.go index e229ad25eb0..5bd408c43ee 100644 --- a/process/smartContract/export_test.go +++ b/process/smartContract/export_test.go @@ -4,7 +4,6 @@ import ( "math/big" "github.com/ElrondNetwork/elrond-go/data" - "github.com/ElrondNetwork/elrond-go/data/feeTx" "github.com/ElrondNetwork/elrond-go/data/smartContractResult" "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/data/transaction" @@ -28,7 +27,7 @@ func (sc *scProcessor) ProcessVMOutput( tx *transaction.Transaction, acntSnd state.AccountHandler, round uint64, -) ([]data.TransactionHandler, *feeTx.FeeTx, error) { +) ([]data.TransactionHandler, *big.Int, error) { return sc.processVMOutput(vmOutput, tx, acntSnd, round) } diff --git a/process/smartContract/process.go b/process/smartContract/process.go index 2f62c259686..922fe367c21 100644 --- a/process/smartContract/process.go +++ b/process/smartContract/process.go @@ -4,21 +4,20 @@ import ( "bytes" "encoding/hex" "fmt" - "github.com/ElrondNetwork/elrond-go/process/factory" "math/big" "sync" "github.com/ElrondNetwork/elrond-go/core/logger" "github.com/ElrondNetwork/elrond-go/data" - "github.com/ElrondNetwork/elrond-go/data/feeTx" "github.com/ElrondNetwork/elrond-go/data/smartContractResult" "github.com/ElrondNetwork/elrond-go/data/state" 
"github.com/ElrondNetwork/elrond-go/data/transaction" "github.com/ElrondNetwork/elrond-go/hashing" "github.com/ElrondNetwork/elrond-go/marshal" "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/factory" "github.com/ElrondNetwork/elrond-go/sharding" - "github.com/ElrondNetwork/elrond-vm-common" + vmcommon "github.com/ElrondNetwork/elrond-vm-common" ) type scExecutionState struct { @@ -172,7 +171,7 @@ func (sc *scProcessor) ExecuteSmartContractTransaction( return err } - sc.txFeeHandler.AddProcessedUTx(consumedFee) + sc.txFeeHandler.ProcessTransactionFee(consumedFee) return nil } @@ -257,7 +256,7 @@ func (sc *scProcessor) DeploySmartContract( return err } - sc.txFeeHandler.AddProcessedUTx(consumedFee) + sc.txFeeHandler.ProcessTransactionFee(consumedFee) return nil } @@ -370,7 +369,7 @@ func (sc *scProcessor) processVMOutput( tx *transaction.Transaction, acntSnd state.AccountHandler, round uint64, -) ([]data.TransactionHandler, *feeTx.FeeTx, error) { +) ([]data.TransactionHandler, *big.Int, error) { if vmOutput == nil { return nil, nil, process.ErrNilVMOutput } @@ -433,12 +432,7 @@ func (sc *scProcessor) processVMOutput( return nil, nil, err } - currFeeTx := &feeTx.FeeTx{ - Nonce: tx.Nonce, - Value: consumedFee, - } - - return crossTxs, currFeeTx, nil + return crossTxs, consumedFee, nil } // reloadLocalSndAccount will reload from current account state the sender account diff --git a/process/transaction/process.go b/process/transaction/process.go index 574975eb50e..9d69671e3f7 100644 --- a/process/transaction/process.go +++ b/process/transaction/process.go @@ -119,7 +119,7 @@ func (txProc *txProcessor) ProcessTransaction(tx *transaction.Transaction, round return process.ErrWrongTransaction } -func (txProc *txProcessor) processTxFee(tx *transaction.Transaction, acntSnd *state.Account) (*feeTx.FeeTx, error) { +func (txProc *txProcessor) processTxFee(tx *transaction.Transaction, acntSnd *state.Account) (*big.Int, error) { if acntSnd == nil { return nil, nil } @@ -146,12 +146,7 @@ func (txProc *txProcessor) processTxFee(tx *transaction.Transaction, acntSnd *st return nil, err } - currFeeTx := &feeTx.FeeTx{ - Nonce: tx.Nonce, - Value: cost, - } - - return currFeeTx, nil + return cost, nil } func (txProc *txProcessor) processAccumulatedTxFees( @@ -178,7 +173,7 @@ func (txProc *txProcessor) processAccumulatedTxFees( } if currTxFee.ShardId == txProc.shardCoordinator.SelfId() { - txProc.txFeeHandler.AddTxFeeFromBlock(currTxFee) + txProc.txFeeHandler.AddRewardTxFromBlock(currTxFee) } return nil @@ -196,7 +191,7 @@ func (txProc *txProcessor) processMoveBalance( return err } - currFeeTx, err := txProc.processTxFee(tx, acntSrc) + txFee, err := txProc.processTxFee(tx, acntSrc) if err != nil { return err } @@ -216,11 +211,7 @@ func (txProc *txProcessor) processMoveBalance( } } - if currFeeTx == nil || currFeeTx.IsInterfaceNil() { - return nil - } - - txProc.txFeeHandler.AddProcessedUTx(currFeeTx) + txProc.txFeeHandler.ProcessTransactionFee(txFee) return nil } diff --git a/process/unsigned/feeTxHandler.go b/process/unsigned/feeTxHandler.go deleted file mode 100644 index 5dd89db7b15..00000000000 --- a/process/unsigned/feeTxHandler.go +++ /dev/null @@ -1,257 +0,0 @@ -package unsigned - -import ( - "github.com/ElrondNetwork/elrond-go/core" - "github.com/ElrondNetwork/elrond-go/data" - "github.com/ElrondNetwork/elrond-go/data/block" - "github.com/ElrondNetwork/elrond-go/data/feeTx" - "github.com/ElrondNetwork/elrond-go/hashing" - 
"github.com/ElrondNetwork/elrond-go/marshal" - "github.com/ElrondNetwork/elrond-go/process" - "math/big" - "sync" -) - -// TODO: Set MinGasPrice and MinTxFee to some positive value (TBD) -// MinGasPrice is the minimal gas price to be paid for any transaction -var MinGasPrice = uint64(0) - -// MinTxFee is the minimal fee to be paid for any transaction -var MinTxFee = uint64(0) - -const communityPercentage = 0.1 // 1 = 100%, 0 = 0% -const leaderPercentage = 0.4 // 1 = 100%, 0 = 0% -const burnPercentage = 0.5 // 1 = 100%, 0 = 0% - -type feeTxHandler struct { - address process.SpecialAddressHandler - hasher hashing.Hasher - marshalizer marshal.Marshalizer - mutTxs sync.Mutex - feeTxs []*feeTx.FeeTx - - feeTxsFromBlock map[string]*feeTx.FeeTx -} - -// NewFeeTxHandler constructor for the fx tee handler -func NewFeeTxHandler( - address process.SpecialAddressHandler, - hasher hashing.Hasher, - marshalizer marshal.Marshalizer, -) (*feeTxHandler, error) { - if address == nil { - return nil, process.ErrNilSpecialAddressHandler - } - if hasher == nil { - return nil, process.ErrNilHasher - } - if marshalizer == nil { - return nil, process.ErrNilMarshalizer - } - - ftxh := &feeTxHandler{ - address: address, - hasher: hasher, - marshalizer: marshalizer, - } - ftxh.feeTxs = make([]*feeTx.FeeTx, 0) - ftxh.feeTxsFromBlock = make(map[string]*feeTx.FeeTx) - - return ftxh, nil -} - -// SaveCurrentIntermediateTxToStorage saves current cached data into storage - already saaved for txs -func (ftxh *feeTxHandler) SaveCurrentIntermediateTxToStorage() error { - //TODO implement me - save only created feeTxs - return nil -} - -// AddIntermediateTransactions adds intermediate transactions to local cache -func (ftxh *feeTxHandler) AddIntermediateTransactions(txs []data.TransactionHandler) error { - return nil -} - -// CreateAllInterMiniBlocks creates miniblocks from process transactions -func (ftxh *feeTxHandler) CreateAllInterMiniBlocks() map[uint32]*block.MiniBlock { - calculatedFeeTxs := ftxh.CreateAllUTxs() - - miniBlocks := make(map[uint32]*block.MiniBlock) - for _, value := range calculatedFeeTxs { - dstShId, err := ftxh.address.ShardIdForAddress(value.GetRecvAddress()) - if err != nil { - log.Debug(err.Error()) - continue - } - - txHash, err := core.CalculateHash(ftxh.marshalizer, ftxh.hasher, value) - if err != nil { - log.Debug(err.Error()) - continue - } - - var ok bool - var mb *block.MiniBlock - if mb, ok = miniBlocks[dstShId]; !ok { - mb = &block.MiniBlock{ - ReceiverShardID: dstShId, - } - } - - mb.TxHashes = append(mb.TxHashes, txHash) - miniBlocks[dstShId] = mb - } - - return miniBlocks -} - -// VerifyInterMiniBlocks verifies if transaction fees were correctly handled for the block -func (ftxh *feeTxHandler) VerifyInterMiniBlocks(body block.Body) error { - err := ftxh.VerifyCreatedUTxs() - ftxh.CleanProcessedUTxs() - - return err -} - -func (ftxh *feeTxHandler) CreateBlockStarted() { - ftxh.CleanProcessedUTxs() -} - -// CleanProcessedUTxs deletes the cached data -func (ftxh *feeTxHandler) CleanProcessedUTxs() { - ftxh.mutTxs.Lock() - ftxh.feeTxs = make([]*feeTx.FeeTx, 0) - ftxh.feeTxsFromBlock = make(map[string]*feeTx.FeeTx) - ftxh.mutTxs.Unlock() -} - -// AddTxFeeFromBlock adds an existing txfee from block into local cache -func (ftxh *feeTxHandler) AddTxFeeFromBlock(tx data.TransactionHandler) { - currFeeTx, ok := tx.(*feeTx.FeeTx) - if !ok { - log.Error(process.ErrWrongTypeAssertion.Error()) - return - } - - ftxh.mutTxs.Lock() - ftxh.feeTxsFromBlock[string(tx.GetRecvAddress())] = currFeeTx - 
ftxh.mutTxs.Unlock() -} - -// AddProcessedUTx adds a new feeTx to the cache -func (ftxh *feeTxHandler) AddProcessedUTx(tx data.TransactionHandler) { - currFeeTx, ok := tx.(*feeTx.FeeTx) - if !ok { - log.Debug(process.ErrWrongTypeAssertion.Error()) - return - } - - ftxh.mutTxs.Lock() - ftxh.feeTxs = append(ftxh.feeTxs, currFeeTx) - ftxh.mutTxs.Unlock() -} - -func getPercentageOfValue(value *big.Int, percentage float64) *big.Int { - x := new(big.Float).SetInt(value) - y := big.NewFloat(percentage) - - z := new(big.Float).Mul(x, y) - - op := big.NewInt(0) - result, _ := z.Int(op) - - return result -} - -func (ftxh *feeTxHandler) createLeaderTx(totalGathered *big.Int) *feeTx.FeeTx { - currTx := &feeTx.FeeTx{} - - currTx.Value = getPercentageOfValue(totalGathered, leaderPercentage) - currTx.RcvAddr = ftxh.address.LeaderAddress() - - return currTx -} - -func (ftxh *feeTxHandler) createBurnTx(totalGathered *big.Int) *feeTx.FeeTx { - currTx := &feeTx.FeeTx{} - - currTx.Value = getPercentageOfValue(totalGathered, burnPercentage) - currTx.RcvAddr = ftxh.address.BurnAddress() - - return currTx -} - -func (ftxh *feeTxHandler) createCommunityTx(totalGathered *big.Int) *feeTx.FeeTx { - currTx := &feeTx.FeeTx{} - - currTx.Value = getPercentageOfValue(totalGathered, communityPercentage) - currTx.RcvAddr = ftxh.address.ElrondCommunityAddress() - - return currTx -} - -// CreateAllUTxs creates all the needed fee transactions -// According to economic paper 50% burn, 40% to the leader, 10% to Elrond community fund -func (ftxh *feeTxHandler) CreateAllUTxs() []data.TransactionHandler { - ftxh.mutTxs.Lock() - defer ftxh.mutTxs.Unlock() - - totalFee := big.NewInt(0) - for _, val := range ftxh.feeTxs { - totalFee = totalFee.Add(totalFee, val.Value) - } - - if totalFee.Cmp(big.NewInt(1)) < 0 { - ftxh.feeTxs = make([]*feeTx.FeeTx, 0) - return nil - } - - leaderTx := ftxh.createLeaderTx(totalFee) - communityTx := ftxh.createCommunityTx(totalFee) - burnTx := ftxh.createBurnTx(totalFee) - - currFeeTxs := make([]data.TransactionHandler, 0) - currFeeTxs = append(currFeeTxs, leaderTx, communityTx, burnTx) - - ftxh.feeTxs = make([]*feeTx.FeeTx, 0) - - return currFeeTxs -} - -// VerifyCreatedUTxs creates all fee txs from added values, than verifies if in block the values are the same -func (ftxh *feeTxHandler) VerifyCreatedUTxs() error { - calculatedFeeTxs := ftxh.CreateAllUTxs() - - ftxh.mutTxs.Lock() - defer ftxh.mutTxs.Unlock() - - totalFeesFromBlock := big.NewInt(0) - for _, value := range ftxh.feeTxsFromBlock { - totalFeesFromBlock = totalFeesFromBlock.Add(totalFeesFromBlock, value.Value) - } - - totalCalculatedFees := big.NewInt(0) - for _, value := range calculatedFeeTxs { - totalCalculatedFees = totalCalculatedFees.Add(totalCalculatedFees, value.GetValue()) - - txFromBlock, ok := ftxh.feeTxsFromBlock[string(value.GetRecvAddress())] - if !ok { - return process.ErrTxsFeesNotFound - } - if txFromBlock.Value.Cmp(value.GetValue()) != 0 { - return process.ErrTxsFeesDoesNotMatch - } - } - - if totalCalculatedFees.Cmp(totalFeesFromBlock) != 0 { - return process.ErrTotalTxsFeesDoNotMatch - } - - return nil -} - -// CreateMarshalizedData creates the marshalized data for broadcasting purposes -func (ftxh *feeTxHandler) CreateMarshalizedData(txHashes [][]byte) ([][]byte, error) { - // TODO: implement me - - return make([][]byte, 0), nil -} diff --git a/process/unsigned/rewardsHandler.go b/process/unsigned/rewardsHandler.go new file mode 100644 index 00000000000..87ff7310c47 --- /dev/null +++ b/process/unsigned/rewardsHandler.go 
@@ -0,0 +1,283 @@
+package unsigned
+
+import (
+	"math/big"
+	"sync"
+
+	"github.com/ElrondNetwork/elrond-go/core"
+	"github.com/ElrondNetwork/elrond-go/data"
+	"github.com/ElrondNetwork/elrond-go/data/block"
+	"github.com/ElrondNetwork/elrond-go/data/rewardTx"
+	"github.com/ElrondNetwork/elrond-go/hashing"
+	"github.com/ElrondNetwork/elrond-go/marshal"
+	"github.com/ElrondNetwork/elrond-go/process"
+)
+
+// MinGasPrice is the minimal gas price to be paid for any transaction
+// TODO: Set MinGasPrice and MinTxFee to some positive value (TBD)
+var MinGasPrice = uint64(0)
+
+// MinTxFee is the minimal fee to be paid for any transaction
+var MinTxFee = uint64(0)
+
+const communityPercentage = 0.1 // 1 = 100%, 0 = 0%
+const leaderPercentage = 0.4    // 1 = 100%, 0 = 0%
+const burnPercentage = 0.5      // 1 = 100%, 0 = 0%
+
+// TODO: Replace with valid reward value
+var rewardValue = big.NewInt(1000)
+
+type rewardsHandler struct {
+	address         process.SpecialAddressHandler
+	hasher          hashing.Hasher
+	marshalizer     marshal.Marshalizer
+	mut             sync.Mutex
+	accumulatedFees *big.Int
+
+	rewardTxsFromBlock map[string]*rewardTx.RewardTx
+}
+
+// NewRewardTxHandler constructor for the reward transaction handler
+func NewRewardTxHandler(
+	address process.SpecialAddressHandler,
+	hasher hashing.Hasher,
+	marshalizer marshal.Marshalizer,
+) (*rewardsHandler, error) {
+	if address == nil {
+		return nil, process.ErrNilSpecialAddressHandler
+	}
+	if hasher == nil {
+		return nil, process.ErrNilHasher
+	}
+	if marshalizer == nil {
+		return nil, process.ErrNilMarshalizer
+	}
+
+	rtxh := &rewardsHandler{
+		address:     address,
+		hasher:      hasher,
+		marshalizer: marshalizer,
+	}
+	rtxh.accumulatedFees = big.NewInt(0)
+	rtxh.rewardTxsFromBlock = make(map[string]*rewardTx.RewardTx)
+
+	return rtxh, nil
+}
+
+// SaveCurrentIntermediateTxToStorage saves current cached data into storage - already saved for txs
+func (rtxh *rewardsHandler) SaveCurrentIntermediateTxToStorage() error {
+	//TODO implement me - save only created accumulatedFees
+	return nil
+}
+
+// AddIntermediateTransactions adds intermediate transactions to local cache
+func (rtxh *rewardsHandler) AddIntermediateTransactions(txs []data.TransactionHandler) error {
+	return nil
+}
+
+// CreateAllInterMiniBlocks creates miniblocks from process transactions
+func (rtxh *rewardsHandler) CreateAllInterMiniBlocks() map[uint32]*block.MiniBlock {
+	calculatedRewardTxs := rtxh.CreateAllUTxs()
+
+	miniBlocks := make(map[uint32]*block.MiniBlock)
+	for _, rTx := range calculatedRewardTxs {
+		dstShId, err := rtxh.address.ShardIdForAddress(rTx.GetRecvAddress())
+		if err != nil {
+			log.Debug(err.Error())
+			continue
+		}
+
+		txHash, err := core.CalculateHash(rtxh.marshalizer, rtxh.hasher, rTx)
+		if err != nil {
+			log.Debug(err.Error())
+			continue
+		}
+
+		var ok bool
+		var mb *block.MiniBlock
+		if mb, ok = miniBlocks[dstShId]; !ok {
+			mb = &block.MiniBlock{
+				ReceiverShardID: dstShId,
+			}
+		}
+
+		mb.TxHashes = append(mb.TxHashes, txHash)
+		miniBlocks[dstShId] = mb
+	}
+
+	return miniBlocks
+}
+
+// VerifyInterMiniBlocks verifies if transaction fees were correctly handled for the block
+func (rtxh *rewardsHandler) VerifyInterMiniBlocks(body block.Body) error {
+	err := rtxh.VerifyCreatedUTxs()
+	rtxh.CleanProcessedUTxs()
+
+	return err
+}
+
+// CreateBlockStarted does the cleanup before creating a new block
+func (rtxh *rewardsHandler) CreateBlockStarted() {
+	rtxh.CleanProcessedUTxs()
+}
+
+// CleanProcessedUTxs deletes the cached data
+func (rtxh *rewardsHandler) CleanProcessedUTxs() {
rtxh.mut.Lock() + rtxh.accumulatedFees = big.NewInt(0) + rtxh.rewardTxsFromBlock = make(map[string]*rewardTx.RewardTx) + rtxh.mut.Unlock() +} + +// AddRewardTxFromBlock adds an existing reward transaction from block into local cache +func (rtxh *rewardsHandler) AddRewardTxFromBlock(tx data.TransactionHandler) { + currRewardTx, ok := tx.(*rewardTx.RewardTx) + if !ok { + log.Error(process.ErrWrongTypeAssertion.Error()) + return + } + + rtxh.mut.Lock() + rtxh.rewardTxsFromBlock[string(tx.GetRecvAddress())] = currRewardTx + rtxh.mut.Unlock() +} + +// ProcessTransactionFee adds the tx cost to the accumulated amount +func (rtxh *rewardsHandler) ProcessTransactionFee(cost *big.Int) { + if cost == nil { + log.Debug(process.ErrNilValue.Error()) + return + } + + rtxh.mut.Lock() + rtxh.accumulatedFees = rtxh.accumulatedFees.Add(rtxh.accumulatedFees, cost) + rtxh.mut.Unlock() +} + +func getPercentageOfValue(value *big.Int, percentage float64) *big.Int { + x := new(big.Float).SetInt(value) + y := big.NewFloat(percentage) + + z := new(big.Float).Mul(x, y) + + op := big.NewInt(0) + result, _ := z.Int(op) + + return result +} + +func (rtxh *rewardsHandler) createLeaderTx() *rewardTx.RewardTx { + currTx := &rewardTx.RewardTx{} + + currTx.Value = getPercentageOfValue(rtxh.accumulatedFees, leaderPercentage) + currTx.RcvAddr = rtxh.address.LeaderAddress() + + return currTx +} + +func (rtxh *rewardsHandler) createBurnTx() *rewardTx.RewardTx { + currTx := &rewardTx.RewardTx{} + + currTx.Value = getPercentageOfValue(rtxh.accumulatedFees, burnPercentage) + currTx.RcvAddr = rtxh.address.BurnAddress() + + return currTx +} + +func (rtxh *rewardsHandler) createCommunityTx() *rewardTx.RewardTx { + currTx := &rewardTx.RewardTx{} + + currTx.Value = getPercentageOfValue(rtxh.accumulatedFees, communityPercentage) + currTx.RcvAddr = rtxh.address.ElrondCommunityAddress() + + return currTx +} + +// CreateAllUTxs creates all the needed reward transactions +// According to economic paper, out of the block fees 50% are burned, 40% go to the leader and 10% go +// to Elrond community fund. Fixed rewards for every validator are +func (rtxh *rewardsHandler) CreateAllUTxs() []data.TransactionHandler { + + rewardTxs := make([]data.TransactionHandler, 0) + rewardsFromFees := rtxh.createRewardTxsFromFee() + rewardsForConsensus := rtxh.createRewardTxsForConsensusGroup() + + rewardTxs = append(rewardTxs, rewardsFromFees...) + rewardTxs = append(rewardTxs, rewardsForConsensus...) 
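The split applied here follows the constants declared at the top of the file: 50% of the accumulated fees are burned, 40% go to the leader and 10% to the community fund, with getPercentageOfValue doing the big.Float multiplication and truncating back to an integer. A standalone sketch of that arithmetic, separate from the patch (the helper name and the 50-unit fee are illustrative):

package main

import (
	"fmt"
	"math/big"
)

// percentageOf mirrors getPercentageOfValue: value * percentage computed via
// big.Float, truncated back to a big.Int result.
func percentageOf(value *big.Int, percentage float64) *big.Int {
	x := new(big.Float).SetInt(value)
	y := big.NewFloat(percentage)
	z := new(big.Float).Mul(x, y)

	result := big.NewInt(0)
	z.Int(result) // truncates toward zero, dropping any fractional part
	return result
}

func main() {
	accumulatedFees := big.NewInt(50)

	burn := percentageOf(accumulatedFees, 0.5)      // 25
	leader := percentageOf(accumulatedFees, 0.4)    // 20
	community := percentageOf(accumulatedFees, 0.1) // 5

	fmt.Println(burn, leader, community) // 25 20 5
}

With whole-unit fees the three parts add back up to the original amount, which is what the adapted CreateAllUTxs test asserts for a 50-unit fee.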
+ + return rewardTxs +} + +func (rtxh *rewardsHandler) createRewardTxsFromFee() []data.TransactionHandler { + rtxh.mut.Lock() + defer rtxh.mut.Unlock() + + if rtxh.accumulatedFees.Cmp(big.NewInt(1)) < 0 { + rtxh.accumulatedFees = big.NewInt(0) + return nil + } + + leaderTx := rtxh.createLeaderTx() + communityTx := rtxh.createCommunityTx() + burnTx := rtxh.createBurnTx() + + currFeeTxs := make([]data.TransactionHandler, 0) + currFeeTxs = append(currFeeTxs, leaderTx, communityTx, burnTx) + + rtxh.accumulatedFees = big.NewInt(0) + + return currFeeTxs +} + +func (rtxh *rewardsHandler) createRewardTxsForConsensusGroup() []data.TransactionHandler { + consensusRewardAddresses := rtxh.address.ConsensusRewardAddresses() + + consensusRewardTxs := make([]data.TransactionHandler, 0) + for _, address := range consensusRewardAddresses { + rTx := &rewardTx.RewardTx{} + rTx.Value = rewardValue + rTx.RcvAddr = []byte(address) + + consensusRewardTxs = append(consensusRewardTxs, rTx) + } + return consensusRewardTxs +} + +// VerifyCreatedUTxs creates all fee txs from added values, than verifies if in block the values are the same +func (rtxh *rewardsHandler) VerifyCreatedUTxs() error { + calculatedFeeTxs := rtxh.CreateAllUTxs() + + rtxh.mut.Lock() + defer rtxh.mut.Unlock() + + totalFeesFromBlock := big.NewInt(0) + for _, value := range rtxh.rewardTxsFromBlock { + totalFeesFromBlock = totalFeesFromBlock.Add(totalFeesFromBlock, value.Value) + } + + totalCalculatedFees := big.NewInt(0) + for _, value := range calculatedFeeTxs { + totalCalculatedFees = totalCalculatedFees.Add(totalCalculatedFees, value.GetValue()) + + txFromBlock, ok := rtxh.rewardTxsFromBlock[string(value.GetRecvAddress())] + if !ok { + return process.ErrTxsFeesNotFound + } + if txFromBlock.Value.Cmp(value.GetValue()) != 0 { + return process.ErrTxsFeesDoNotMatch + } + } + + if totalCalculatedFees.Cmp(totalFeesFromBlock) != 0 { + return process.ErrTotalTxsFeesDoNotMatch + } + + return nil +} + +// CreateMarshalizedData creates the marshalized data for broadcasting purposes +func (rtxh *rewardsHandler) CreateMarshalizedData(txHashes [][]byte) ([][]byte, error) { + // TODO: implement me + + return make([][]byte, 0), nil +} diff --git a/process/unsigned/feeTxHandler_test.go b/process/unsigned/rewardsHandler_test.go similarity index 55% rename from process/unsigned/feeTxHandler_test.go rename to process/unsigned/rewardsHandler_test.go index dc918403543..41416ed8133 100644 --- a/process/unsigned/feeTxHandler_test.go +++ b/process/unsigned/rewardsHandler_test.go @@ -1,19 +1,20 @@ package unsigned import ( - "github.com/ElrondNetwork/elrond-go/data/feeTx" + "math/big" + "testing" + + "github.com/ElrondNetwork/elrond-go/data/rewardTx" "github.com/ElrondNetwork/elrond-go/data/transaction" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/mock" "github.com/stretchr/testify/assert" - "math/big" - "testing" ) -func TestNewFeeTxHandler_NilSpecialAddress(t *testing.T) { +func TestNewRewardTxHandler_NilSpecialAddress(t *testing.T) { t.Parallel() - th, err := NewFeeTxHandler( + th, err := NewRewardTxHandler( nil, &mock.HasherMock{}, &mock.MarshalizerMock{}, @@ -23,10 +24,10 @@ func TestNewFeeTxHandler_NilSpecialAddress(t *testing.T) { assert.Equal(t, process.ErrNilSpecialAddressHandler, err) } -func TestNewFeeTxHandler_NilHasher(t *testing.T) { +func TestNewRewardTxHandler_NilHasher(t *testing.T) { t.Parallel() - th, err := NewFeeTxHandler( + th, err := NewRewardTxHandler( &mock.SpecialAddressHandlerMock{}, nil, 
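Unlike the fee-derived transactions, the consensus rewards do not depend on the accumulated amount: createRewardTxsForConsensusGroup pays the fixed placeholder rewardValue (1000, still marked TODO) to every address returned by the special address handler. A self-contained sketch of that fan-out, with a plain struct standing in for rewardTx.RewardTx:

package main

import (
	"fmt"
	"math/big"
)

// rewardTransaction is a stand-in for the patch's rewardTx.RewardTx type.
type rewardTransaction struct {
	Value   *big.Int
	RcvAddr []byte
}

// fixedConsensusRewards creates one reward transaction per consensus address,
// each carrying the same fixed value, mirroring createRewardTxsForConsensusGroup.
func fixedConsensusRewards(addresses []string, value *big.Int) []*rewardTransaction {
	txs := make([]*rewardTransaction, 0, len(addresses))
	for _, addr := range addresses {
		txs = append(txs, &rewardTransaction{Value: value, RcvAddr: []byte(addr)})
	}
	return txs
}

func main() {
	rewardValue := big.NewInt(1000) // placeholder value, still marked TODO in the patch
	for _, tx := range fixedConsensusRewards([]string{"validator-0", "validator-1"}, rewardValue) {
		fmt.Printf("%s receives %s\n", tx.RcvAddr, tx.Value)
	}
}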
&mock.MarshalizerMock{}, @@ -36,10 +37,10 @@ func TestNewFeeTxHandler_NilHasher(t *testing.T) { assert.Equal(t, process.ErrNilHasher, err) } -func TestNewFeeTxHandler_NilMarshalizer(t *testing.T) { +func TestNewRewardTxHandler_NilMarshalizer(t *testing.T) { t.Parallel() - th, err := NewFeeTxHandler( + th, err := NewRewardTxHandler( &mock.SpecialAddressHandlerMock{}, &mock.HasherMock{}, nil, @@ -49,10 +50,10 @@ func TestNewFeeTxHandler_NilMarshalizer(t *testing.T) { assert.Equal(t, process.ErrNilMarshalizer, err) } -func TestNewFeeTxHandler_ValsOk(t *testing.T) { +func TestNewRewardTxHandler_ValsOk(t *testing.T) { t.Parallel() - th, err := NewFeeTxHandler( + th, err := NewRewardTxHandler( &mock.SpecialAddressHandlerMock{}, &mock.HasherMock{}, &mock.MarshalizerMock{}, @@ -62,10 +63,10 @@ func TestNewFeeTxHandler_ValsOk(t *testing.T) { assert.NotNil(t, th) } -func TestFeeTxHandler_AddIntermediateTransactions(t *testing.T) { +func TestRewardTxHandlerAddIntermediateTransactions(t *testing.T) { t.Parallel() - th, err := NewFeeTxHandler( + th, err := NewRewardTxHandler( &mock.SpecialAddressHandlerMock{}, &mock.HasherMock{}, &mock.MarshalizerMock{}, @@ -78,10 +79,10 @@ func TestFeeTxHandler_AddIntermediateTransactions(t *testing.T) { assert.Nil(t, err) } -func TestFeeTxHandler_AddProcessedUTx(t *testing.T) { +func TestRewardTxHandlerProcessTransactionFee(t *testing.T) { t.Parallel() - th, err := NewFeeTxHandler( + th, err := NewRewardTxHandler( &mock.SpecialAddressHandlerMock{}, &mock.HasherMock{}, &mock.MarshalizerMock{}, @@ -90,20 +91,20 @@ func TestFeeTxHandler_AddProcessedUTx(t *testing.T) { assert.Nil(t, err) assert.NotNil(t, th) - th.AddProcessedUTx(nil) - assert.Equal(t, 0, len(th.feeTxs)) + th.ProcessTransactionFee(nil) + assert.Equal(t, big.NewInt(0), th.accumulatedFees) - th.AddProcessedUTx(&transaction.Transaction{}) - assert.Equal(t, 0, len(th.feeTxs)) + th.ProcessTransactionFee(big.NewInt(10)) + assert.Equal(t, big.NewInt(10), th.accumulatedFees) - th.AddProcessedUTx(&feeTx.FeeTx{}) - assert.Equal(t, 1, len(th.feeTxs)) + th.ProcessTransactionFee(big.NewInt(100)) + assert.Equal(t, big.NewInt(110), th.accumulatedFees) } -func TestFeeTxHandler_AddTxFeeFromBlock(t *testing.T) { +func TestRewardTxHandlerAddTxFeeFromBlock(t *testing.T) { t.Parallel() - th, err := NewFeeTxHandler( + th, err := NewRewardTxHandler( &mock.SpecialAddressHandlerMock{}, &mock.HasherMock{}, &mock.MarshalizerMock{}, @@ -112,20 +113,20 @@ func TestFeeTxHandler_AddTxFeeFromBlock(t *testing.T) { assert.Nil(t, err) assert.NotNil(t, th) - th.AddTxFeeFromBlock(nil) - assert.Equal(t, 0, len(th.feeTxsFromBlock)) + th.AddRewardTxFromBlock(nil) + assert.Equal(t, 0, len(th.rewardTxsFromBlock)) - th.AddTxFeeFromBlock(&transaction.Transaction{}) - assert.Equal(t, 0, len(th.feeTxsFromBlock)) + th.AddRewardTxFromBlock(&transaction.Transaction{}) + assert.Equal(t, 0, len(th.rewardTxsFromBlock)) - th.AddTxFeeFromBlock(&feeTx.FeeTx{}) - assert.Equal(t, 1, len(th.feeTxsFromBlock)) + th.AddRewardTxFromBlock(&rewardTx.RewardTx{}) + assert.Equal(t, 1, len(th.rewardTxsFromBlock)) } -func TestFeeTxHandler_CleanProcessedUTxs(t *testing.T) { +func TestRewardTxHandlerCleanProcessedUTxs(t *testing.T) { t.Parallel() - th, err := NewFeeTxHandler( + th, err := NewRewardTxHandler( &mock.SpecialAddressHandlerMock{}, &mock.HasherMock{}, &mock.MarshalizerMock{}, @@ -134,20 +135,20 @@ func TestFeeTxHandler_CleanProcessedUTxs(t *testing.T) { assert.Nil(t, err) assert.NotNil(t, th) - th.AddProcessedUTx(&feeTx.FeeTx{}) - 
th.AddTxFeeFromBlock(&feeTx.FeeTx{}) - assert.Equal(t, 1, len(th.feeTxs)) - assert.Equal(t, 1, len(th.feeTxsFromBlock)) + th.ProcessTransactionFee(big.NewInt(10)) + th.AddRewardTxFromBlock(&rewardTx.RewardTx{}) + assert.Equal(t, big.NewInt(10), th.accumulatedFees) + assert.Equal(t, 1, len(th.rewardTxsFromBlock)) th.CleanProcessedUTxs() - assert.Equal(t, 0, len(th.feeTxs)) - assert.Equal(t, 0, len(th.feeTxsFromBlock)) + assert.Equal(t, big.NewInt(0), th.accumulatedFees) + assert.Equal(t, 0, len(th.rewardTxsFromBlock)) } -func TestFeeTxHandler_CreateAllUTxs(t *testing.T) { +func TestRewardTxHandlerCreateAllUTxs(t *testing.T) { t.Parallel() - th, err := NewFeeTxHandler( + th, err := NewRewardTxHandler( &mock.SpecialAddressHandlerMock{}, &mock.HasherMock{}, &mock.MarshalizerMock{}, @@ -160,7 +161,7 @@ func TestFeeTxHandler_CreateAllUTxs(t *testing.T) { assert.Equal(t, 0, len(txs)) currTxFee := big.NewInt(50) - th.AddProcessedUTx(&feeTx.FeeTx{Value: currTxFee}) + th.ProcessTransactionFee(currTxFee) txs = th.CreateAllUTxs() assert.Equal(t, 3, len(txs)) @@ -172,11 +173,11 @@ func TestFeeTxHandler_CreateAllUTxs(t *testing.T) { assert.Equal(t, currTxFee.Uint64(), totalSum) } -func TestFeeTxHandler_VerifyCreatedUTxs(t *testing.T) { +func TestRewardTxHandlerVerifyCreatedUTxs(t *testing.T) { t.Parallel() addr := &mock.SpecialAddressHandlerMock{} - th, err := NewFeeTxHandler( + th, err := NewRewardTxHandler( addr, &mock.HasherMock{}, &mock.MarshalizerMock{}, @@ -189,13 +190,13 @@ func TestFeeTxHandler_VerifyCreatedUTxs(t *testing.T) { assert.Nil(t, err) currTxFee := big.NewInt(50) - th.AddProcessedUTx(&feeTx.FeeTx{Value: currTxFee}) + th.ProcessTransactionFee(currTxFee) err = th.VerifyCreatedUTxs() assert.Equal(t, process.ErrTxsFeesNotFound, err) badValue := big.NewInt(100) - th.AddTxFeeFromBlock(&feeTx.FeeTx{Value: badValue}) + th.AddRewardTxFromBlock(&rewardTx.RewardTx{Value: badValue}) err = th.VerifyCreatedUTxs() assert.Equal(t, process.ErrTotalTxsFeesDoNotMatch, err) @@ -204,8 +205,8 @@ func TestFeeTxHandler_VerifyCreatedUTxs(t *testing.T) { currTxFee = big.NewInt(50) halfCurrTxFee := big.NewInt(25) - th.AddProcessedUTx(&feeTx.FeeTx{Value: currTxFee}) - th.AddTxFeeFromBlock(&feeTx.FeeTx{Value: halfCurrTxFee}) + th.ProcessTransactionFee(currTxFee) + th.AddRewardTxFromBlock(&rewardTx.RewardTx{Value: halfCurrTxFee}) err = th.VerifyCreatedUTxs() assert.Equal(t, process.ErrTxsFeesNotFound, err) @@ -213,19 +214,19 @@ func TestFeeTxHandler_VerifyCreatedUTxs(t *testing.T) { th.CleanProcessedUTxs() currTxFee = big.NewInt(50) - th.AddProcessedUTx(&feeTx.FeeTx{Value: currTxFee}) - th.AddTxFeeFromBlock(&feeTx.FeeTx{Value: big.NewInt(5), RcvAddr: addr.ElrondCommunityAddress()}) - th.AddTxFeeFromBlock(&feeTx.FeeTx{Value: big.NewInt(20), RcvAddr: addr.LeaderAddress()}) - th.AddTxFeeFromBlock(&feeTx.FeeTx{Value: big.NewInt(25), RcvAddr: addr.BurnAddress()}) + th.ProcessTransactionFee(currTxFee) + th.AddRewardTxFromBlock(&rewardTx.RewardTx{Value: big.NewInt(5), RcvAddr: addr.ElrondCommunityAddress()}) + th.AddRewardTxFromBlock(&rewardTx.RewardTx{Value: big.NewInt(20), RcvAddr: addr.LeaderAddress()}) + th.AddRewardTxFromBlock(&rewardTx.RewardTx{Value: big.NewInt(25), RcvAddr: addr.BurnAddress()}) err = th.VerifyCreatedUTxs() assert.Nil(t, err) } -func TestFeeTxHandler_CreateAllInterMiniBlocks(t *testing.T) { +func TestRewardTxHandlerCreateAllInterMiniBlocks(t *testing.T) { t.Parallel() - th, err := NewFeeTxHandler( + th, err := NewRewardTxHandler( &mock.SpecialAddressHandlerMock{}, &mock.HasherMock{}, 
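The happy-path expectations exercised by the adapted VerifyCreatedUTxs test follow directly from the 50/40/10 split of the 50-unit fee: the block has to carry reward transactions of 25, 20 and 5 for the burn, leader and community addresses. A compact sketch of the matching rule the handler applies, with illustrative error names and a plain map in place of the cached transactions:

package main

import (
	"errors"
	"fmt"
	"math/big"
)

var (
	errRewardTxNotFound   = errors.New("expected reward tx missing from block")
	errRewardTxMismatch   = errors.New("reward tx value differs from the expected one")
	errRewardTotalsDiffer = errors.New("total rewards in block differ from the expected total")
)

// verifyRewards mirrors the shape of VerifyCreatedUTxs: every locally
// recalculated reward must appear in the block with the same value for the
// same receiver, and the totals must match as well.
func verifyRewards(expected, fromBlock map[string]*big.Int) error {
	totalExpected := big.NewInt(0)
	totalBlock := big.NewInt(0)

	for _, v := range fromBlock {
		totalBlock.Add(totalBlock, v)
	}
	for receiver, v := range expected {
		totalExpected.Add(totalExpected, v)

		blockValue, ok := fromBlock[receiver]
		if !ok {
			return errRewardTxNotFound
		}
		if blockValue.Cmp(v) != 0 {
			return errRewardTxMismatch
		}
	}
	if totalExpected.Cmp(totalBlock) != 0 {
		return errRewardTotalsDiffer
	}

	return nil
}

func main() {
	// The happy-path scenario from the test: a 50-unit fee split into
	// 25 (burn), 20 (leader) and 5 (community).
	expected := map[string]*big.Int{
		"burn":      big.NewInt(25),
		"leader":    big.NewInt(20),
		"community": big.NewInt(5),
	}
	fmt.Println(verifyRewards(expected, expected)) // <nil>
}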
&mock.MarshalizerMock{}, @@ -238,17 +239,17 @@ func TestFeeTxHandler_CreateAllInterMiniBlocks(t *testing.T) { assert.Equal(t, 0, len(mbs)) currTxFee := big.NewInt(50) - th.AddProcessedUTx(&feeTx.FeeTx{Value: currTxFee}) + th.ProcessTransactionFee(currTxFee) mbs = th.CreateAllInterMiniBlocks() assert.Equal(t, 1, len(mbs)) } -func TestFeeTxHandler_VerifyInterMiniBlocks(t *testing.T) { +func TestRewardTxHandlerVerifyInterMiniBlocks(t *testing.T) { t.Parallel() addr := &mock.SpecialAddressHandlerMock{} - th, err := NewFeeTxHandler( + th, err := NewRewardTxHandler( addr, &mock.HasherMock{}, &mock.MarshalizerMock{}, @@ -261,13 +262,13 @@ func TestFeeTxHandler_VerifyInterMiniBlocks(t *testing.T) { assert.Nil(t, err) currTxFee := big.NewInt(50) - th.AddProcessedUTx(&feeTx.FeeTx{Value: currTxFee}) + th.ProcessTransactionFee(currTxFee) err = th.VerifyInterMiniBlocks(nil) assert.Equal(t, process.ErrTxsFeesNotFound, err) badValue := big.NewInt(100) - th.AddTxFeeFromBlock(&feeTx.FeeTx{Value: badValue}) + th.AddRewardTxFromBlock(&rewardTx.RewardTx{Value: badValue}) err = th.VerifyInterMiniBlocks(nil) assert.Equal(t, process.ErrTotalTxsFeesDoNotMatch, err) @@ -276,8 +277,8 @@ func TestFeeTxHandler_VerifyInterMiniBlocks(t *testing.T) { currTxFee = big.NewInt(50) halfCurrTxFee := big.NewInt(25) - th.AddProcessedUTx(&feeTx.FeeTx{Value: currTxFee}) - th.AddTxFeeFromBlock(&feeTx.FeeTx{Value: halfCurrTxFee}) + th.ProcessTransactionFee(currTxFee) + th.AddRewardTxFromBlock(&rewardTx.RewardTx{Value: halfCurrTxFee}) err = th.VerifyInterMiniBlocks(nil) assert.Equal(t, process.ErrTxsFeesNotFound, err) @@ -285,7 +286,7 @@ func TestFeeTxHandler_VerifyInterMiniBlocks(t *testing.T) { th.CleanProcessedUTxs() currTxFee = big.NewInt(50) - th.AddProcessedUTx(&feeTx.FeeTx{Value: currTxFee}) - th.AddTxFeeFromBlock(&feeTx.FeeTx{Value: big.NewInt(5), RcvAddr: addr.ElrondCommunityAddress()}) - th.AddTxFeeFromBlock(&feeTx.FeeTx{Value: big.NewInt(20), RcvAddr: addr.LeaderAddress()}) + th.ProcessTransactionFee(currTxFee) + th.AddRewardTxFromBlock(&rewardTx.RewardTx{Value: big.NewInt(5), RcvAddr: addr.ElrondCommunityAddress()}) + th.AddRewardTxFromBlock(&rewardTx.RewardTx{Value: big.NewInt(20), RcvAddr: addr.LeaderAddress()}) } diff --git a/sharding/indexHashedNodesCoordinator.go b/sharding/indexHashedNodesCoordinator.go index 58fc4dded67..5d890b4e7f5 100644 --- a/sharding/indexHashedNodesCoordinator.go +++ b/sharding/indexHashedNodesCoordinator.go @@ -144,9 +144,13 @@ func (ihgs *indexHashedNodesCoordinator) GetValidatorWithPublicKey(publicKey []b return nil, 0, ErrValidatorNotFound } -// GetValidatorsPublicKeys calculates the validators group for a specific randomness, +// GetValidatorsPublicKeys calculates the validators consensus group for a specific shard, randomness and round number, // returning their public keys -func (ihgs *indexHashedNodesCoordinator) GetValidatorsPublicKeys(randomness []byte, round uint64, shardId uint32) ([]string, error) { +func (ihgs *indexHashedNodesCoordinator) GetValidatorsPublicKeys( + randomness []byte, + round uint64, + shardId uint32, +) ([]string, error) { consensusNodes, err := ihgs.ComputeValidatorsGroup(randomness, round, shardId) if err != nil { return nil, err @@ -161,6 +165,26 @@ func (ihgs *indexHashedNodesCoordinator) GetValidatorsPublicKeys(randomness []by return pubKeys, nil } +// GetValidatorsRewardsAddresses calculates the validator consensus group for a specific shard, randomness and round +// number, returning their staking/rewards addresses +func (ihgs 
*indexHashedNodesCoordinator) GetValidatorsRewardsAddresses( + randomness []byte, + round uint64, + shardId uint32, +) ([]string, error) { + consensusNodes, err := ihgs.ComputeValidatorsGroup(randomness, round, shardId) + if err != nil { + return nil, err + } + + addresses := make([]string, len(consensusNodes)) + for i, v := range consensusNodes { + addresses[i] = string(v.Address()) + } + + return addresses, nil +} + // GetSelectedPublicKeys returns the stringified public keys of the marked validators in the selection bitmap // TODO: This function needs to be revised when the requirements are clarified func (ihgs *indexHashedNodesCoordinator) GetSelectedPublicKeys(selection []byte, shardId uint32) (publicKeys []string, err error) { diff --git a/sharding/interface.go b/sharding/interface.go index 41ca3e34442..c499b44099b 100644 --- a/sharding/interface.go +++ b/sharding/interface.go @@ -39,4 +39,5 @@ type NodesCoordinator interface { type PublicKeysSelector interface { GetSelectedPublicKeys(selection []byte, shardId uint32) (publicKeys []string, err error) GetValidatorsPublicKeys(randomness []byte, round uint64, shardId uint32) ([]string, error) + GetValidatorsRewardsAddresses(randomness []byte, round uint64, shardId uint32) ([]string, error) } From c03856369c5419b5c4f8b9d6f7eba74ea85f5f8b Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Mon, 26 Aug 2019 10:20:08 +0300 Subject: [PATCH 070/234] cmd, process, dataRetriever, integrationTests: reward transaction preprocessor, processor, requester & resolver Adapted unit tests and integration tests --- cmd/node/factory/structs.go | 18 +- dataRetriever/errors.go | 3 + .../requestHandlers/requestHandler.go | 39 +- .../requestHandlers/requestHandler_test.go | 86 +++- integrationTests/mock/blockProcessorMock.go | 5 +- .../smartContract/testInitilalizer.go | 9 + integrationTests/testProcessorNode.go | 9 + process/block/baseProcess_test.go | 44 ++ .../block/preprocess/rewardTxPreProcessor.go | 455 ++++++++++++++++++ .../preprocess}/rewardsHandler.go | 2 +- .../preprocess}/rewardsHandler_test.go | 2 +- process/block/shardblock_test.go | 7 + process/constants.go | 4 +- process/coordinator/process_test.go | 43 ++ process/coordinator/transactionType.go | 8 +- process/coordinator/transactionType_test.go | 10 +- process/errors.go | 15 +- .../intermediateProcessorsContainerFactory.go | 3 +- .../shard/preProcessorsContainerFactory.go | 75 ++- .../preProcessorsContainerFactory_test.go | 45 +- process/interface.go | 7 + process/mock/requestHandlerMock.go | 8 + process/mock/rewardTxProcessorMock.go | 17 + process/rewardTransaction/process.go | 91 ++++ process/transaction/export_test.go | 6 +- process/transaction/process.go | 32 +- 26 files changed, 958 insertions(+), 85 deletions(-) create mode 100644 process/block/preprocess/rewardTxPreProcessor.go rename process/{unsigned => block/preprocess}/rewardsHandler.go (99%) rename process/{unsigned => block/preprocess}/rewardsHandler_test.go (99%) create mode 100644 process/mock/rewardTxProcessorMock.go create mode 100644 process/rewardTransaction/process.go diff --git a/cmd/node/factory/structs.go b/cmd/node/factory/structs.go index e11b18c6c38..8b8c46d82d3 100644 --- a/cmd/node/factory/structs.go +++ b/cmd/node/factory/structs.go @@ -7,6 +7,7 @@ import ( "encoding/hex" "errors" "fmt" + "github.com/ElrondNetwork/elrond-go/process/rewardTransaction" "io" "math/big" "path/filepath" @@ -1396,7 +1397,7 @@ func newShardBlockProcessorAndTracker( return nil, nil, err } - feeTxHandler, ok := 
rewardsTxInterim.(process.UnsignedTxHandler) + rewardsTxHandler, ok := rewardsTxInterim.(process.UnsignedTxHandler) if !ok { return nil, nil, process.ErrWrongTypeAssertion } @@ -1411,7 +1412,16 @@ func newShardBlockProcessorAndTracker( state.AddressConverter, shardCoordinator, scResults, - feeTxHandler, + rewardsTxHandler, + ) + if err != nil { + return nil, nil, err + } + + rewardsTxProcessor, err := rewardTransaction.NewRewardTxProcessor( + state.AccountsAdapter, + state.AddressConverter, + shardCoordinator, ) if err != nil { return nil, nil, err @@ -1421,6 +1431,7 @@ func newShardBlockProcessorAndTracker( resolversFinder, factory.TransactionTopic, factory.UnsignedTransactionTopic, + factory.RewardsTransactionTopic, factory.MiniBlocksTopic, factory.MetachainBlocksTopic, MaxTxsToRequest, @@ -1441,7 +1452,7 @@ func newShardBlockProcessorAndTracker( core.Marshalizer, shardCoordinator, scProcessor, - feeTxHandler, + rewardsTxHandler, txTypeHandler, ) if err != nil { @@ -1470,6 +1481,7 @@ func newShardBlockProcessorAndTracker( transactionProcessor, scProcessor, scProcessor, + rewardsTxProcessor, ) if err != nil { return nil, nil, err diff --git a/dataRetriever/errors.go b/dataRetriever/errors.go index 368483ae6b2..e8f681521d9 100644 --- a/dataRetriever/errors.go +++ b/dataRetriever/errors.go @@ -179,6 +179,9 @@ var ErrEmptyTxRequestTopic = errors.New("empty transaction request topic") // ErrEmptyScrRequestTopic signals that an empty smart contract result topic has been provided var ErrEmptyScrRequestTopic = errors.New("empty smart contract result request topic") +// ErrEmptyRewardTxRequestTopic signals that an empty reward transaction topic has been provided +var ErrEmptyRewardTxRequestTopic = errors.New("empty rewards transactions request topic") + // ErrEmptyMiniBlockRequestTopic signals that an empty miniblock topic has been provided var ErrEmptyMiniBlockRequestTopic = errors.New("empty miniblock request topic") diff --git a/dataRetriever/requestHandlers/requestHandler.go b/dataRetriever/requestHandlers/requestHandler.go index f61697ab968..b212e9ace89 100644 --- a/dataRetriever/requestHandlers/requestHandler.go +++ b/dataRetriever/requestHandlers/requestHandler.go @@ -11,13 +11,14 @@ import ( ) type ResolverRequestHandler struct { - resolversFinder dataRetriever.ResolversFinder - txRequestTopic string - scrRequestTopic string - mbRequestTopic string - hdrRequestTopic string - isMetaChain bool - maxTxsToRequest int + resolversFinder dataRetriever.ResolversFinder + txRequestTopic string + scrRequestTopic string + rewardTxRequestTopic string + mbRequestTopic string + hdrRequestTopic string + isMetaChain bool + maxTxsToRequest int } var log = logger.DefaultLogger() @@ -27,6 +28,7 @@ func NewShardResolverRequestHandler( finder dataRetriever.ResolversFinder, txRequestTopic string, scrRequestTopic string, + rewardTxRequestTopic string, mbRequestTopic string, hdrRequestTopic string, maxTxsToRequest int, @@ -40,6 +42,9 @@ func NewShardResolverRequestHandler( if len(scrRequestTopic) == 0 { return nil, dataRetriever.ErrEmptyScrRequestTopic } + if len(rewardTxRequestTopic) == 0 { + return nil, dataRetriever.ErrEmptyRewardTxRequestTopic + } if len(mbRequestTopic) == 0 { return nil, dataRetriever.ErrEmptyMiniBlockRequestTopic } @@ -51,13 +56,14 @@ func NewShardResolverRequestHandler( } rrh := &ResolverRequestHandler{ - resolversFinder: finder, - txRequestTopic: txRequestTopic, - mbRequestTopic: mbRequestTopic, - hdrRequestTopic: hdrRequestTopic, - scrRequestTopic: scrRequestTopic, - isMetaChain: false, 
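The reward-transaction topic is wired in exactly like the existing transaction and SCR topics: the handler keeps one topic string per data type, the constructor rejects an empty topic with ErrEmptyRewardTxRequestTopic, and the RequestRewardTransactions method introduced in this hunk is simply the shared hash-based request bound to that topic. A stripped-down stand-in for that routing (topic strings and the log output are illustrative; the real handler resolves a per-shard topic resolver instead of printing):

package main

import "fmt"

// requestHandler is a stand-in for the patch's ResolverRequestHandler: each
// data type keeps its own request topic and every Request* method funnels
// into one generic hash-based request.
type requestHandler struct {
	txTopic       string
	scrTopic      string
	rewardTxTopic string
}

// requestByHashes stands in for the shared routine; the real one looks up the
// topic resolver for the destination shard and forwards the hashes.
func (rh *requestHandler) requestByHashes(destShardID uint32, hashes [][]byte, topic string) {
	fmt.Printf("requesting %d hashes on %q for shard %d\n", len(hashes), topic, destShardID)
}

// RequestRewardTransactions mirrors the newly added method: it is just the
// generic request bound to the rewards topic.
func (rh *requestHandler) RequestRewardTransactions(destShardID uint32, hashes [][]byte) {
	rh.requestByHashes(destShardID, hashes, rh.rewardTxTopic)
}

func main() {
	rh := &requestHandler{
		txTopic:       "transactions",
		scrTopic:      "unsignedTransactions",
		rewardTxTopic: "rewardTransactions",
	}
	rh.RequestRewardTransactions(0, [][]byte{[]byte("hash1"), []byte("hash2")})
}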
- maxTxsToRequest: maxTxsToRequest, + resolversFinder: finder, + txRequestTopic: txRequestTopic, + mbRequestTopic: mbRequestTopic, + hdrRequestTopic: hdrRequestTopic, + scrRequestTopic: scrRequestTopic, + rewardTxRequestTopic: rewardTxRequestTopic, + isMetaChain: false, + maxTxsToRequest: maxTxsToRequest, } return rrh, nil @@ -125,6 +131,11 @@ func (rrh *ResolverRequestHandler) RequestUnsignedTransactions(destShardID uint3 rrh.requestByHashes(destShardID, scrHashes, rrh.scrRequestTopic) } +// RequestRewardTransactions requests for reward transactions from the connected peers +func (rrh *ResolverRequestHandler) RequestRewardTransactions(destShardId uint32, rewardTxHashes [][]byte){ + rrh.requestByHashes(destShardId, rewardTxHashes, rrh.rewardTxRequestTopic) +} + // RequestMiniBlock method asks for miniblocks from the connected peers func (rrh *ResolverRequestHandler) RequestMiniBlock(shardId uint32, miniblockHash []byte) { rrh.requestByHash(shardId, miniblockHash, rrh.mbRequestTopic) diff --git a/dataRetriever/requestHandlers/requestHandler_test.go b/dataRetriever/requestHandlers/requestHandler_test.go index e7f4f4e4da2..f6f5417f61a 100644 --- a/dataRetriever/requestHandlers/requestHandler_test.go +++ b/dataRetriever/requestHandlers/requestHandler_test.go @@ -42,7 +42,15 @@ func TestNewMetaResolverRequestHandler(t *testing.T) { func TestNewShardResolverRequestHandlerNilFinder(t *testing.T) { t.Parallel() - rrh, err := NewShardResolverRequestHandler(nil, "topic", "topic", "topic", "topic", 1) + rrh, err := NewShardResolverRequestHandler( + nil, + "topic", + "topic", + "topic", + "topic", + "topic", + 1, + ) assert.Nil(t, rrh) assert.Equal(t, dataRetriever.ErrNilResolverFinder, err) @@ -51,7 +59,15 @@ func TestNewShardResolverRequestHandlerNilFinder(t *testing.T) { func TestNewShardResolverRequestHandlerTxTopicEmpty(t *testing.T) { t.Parallel() - rrh, err := NewShardResolverRequestHandler(&mock.ResolversFinderStub{}, "", "topic", "topic", "topic", 1) + rrh, err := NewShardResolverRequestHandler( + &mock.ResolversFinderStub{}, + "", + "topic", + "topic", + "topic", + "topic", + 1, + ) assert.Nil(t, rrh) assert.Equal(t, dataRetriever.ErrEmptyTxRequestTopic, err) @@ -60,7 +76,15 @@ func TestNewShardResolverRequestHandlerTxTopicEmpty(t *testing.T) { func TestNewShardResolverRequestHandlerScrTopicEmpty(t *testing.T) { t.Parallel() - rrh, err := NewShardResolverRequestHandler(&mock.ResolversFinderStub{}, "topic", "", "topic", "topic", 1) + rrh, err := NewShardResolverRequestHandler( + &mock.ResolversFinderStub{}, + "topic", + "", + "topic", + "topic", + "topic", + 1, + ) assert.Nil(t, rrh) assert.Equal(t, dataRetriever.ErrEmptyScrRequestTopic, err) @@ -69,7 +93,15 @@ func TestNewShardResolverRequestHandlerScrTopicEmpty(t *testing.T) { func TestNewShardResolverRequestHandlerMBTopicEmpty(t *testing.T) { t.Parallel() - rrh, err := NewShardResolverRequestHandler(&mock.ResolversFinderStub{}, "topic", "topic", "", "topic", 1) + rrh, err := NewShardResolverRequestHandler( + &mock.ResolversFinderStub{}, + "topic", + "topic", + "topic", + "", + "topic", + 1, + ) assert.Nil(t, rrh) assert.Equal(t, dataRetriever.ErrEmptyMiniBlockRequestTopic, err) @@ -78,7 +110,15 @@ func TestNewShardResolverRequestHandlerMBTopicEmpty(t *testing.T) { func TestNewShardResolverRequestHandlerHdrTopicEmpty(t *testing.T) { t.Parallel() - rrh, err := NewShardResolverRequestHandler(&mock.ResolversFinderStub{}, "topic", "topic", "topic", "", 1) + rrh, err := NewShardResolverRequestHandler( + &mock.ResolversFinderStub{}, + "topic", + 
"topic", + "topic", + "topic", + "", + 1, + ) assert.Nil(t, rrh) assert.Equal(t, dataRetriever.ErrEmptyHeaderRequestTopic, err) @@ -87,7 +127,15 @@ func TestNewShardResolverRequestHandlerHdrTopicEmpty(t *testing.T) { func TestNewShardResolverRequestHandlerMaxTxRequestTooSmall(t *testing.T) { t.Parallel() - rrh, err := NewShardResolverRequestHandler(&mock.ResolversFinderStub{}, "topic", "topic", "topic", "topic", 0) + rrh, err := NewShardResolverRequestHandler( + &mock.ResolversFinderStub{}, + "topic", + "topic", + "topic", + "topic", + "topic", + 0, + ) assert.Nil(t, rrh) assert.Equal(t, dataRetriever.ErrInvalidMaxTxRequest, err) @@ -96,7 +144,15 @@ func TestNewShardResolverRequestHandlerMaxTxRequestTooSmall(t *testing.T) { func TestNewShardResolverRequestHandler(t *testing.T) { t.Parallel() - rrh, err := NewShardResolverRequestHandler(&mock.ResolversFinderStub{}, "topic", "topic", "topic", "topic", 1) + rrh, err := NewShardResolverRequestHandler( + &mock.ResolversFinderStub{}, + "topic", + "topic", + "topic", + "topic", + "topic", + 1, + ) assert.Nil(t, err) assert.NotNil(t, rrh) @@ -125,6 +181,7 @@ func TestResolverRequestHandler_RequestTransactionErrorWhenGettingCrossShardReso "topic", "topic", "topic", + "topic", 1, ) @@ -153,6 +210,7 @@ func TestResolverRequestHandler_RequestTransactionWrongResolverShouldNotPanic(t "topic", "topic", "topic", + "topic", 1, ) @@ -180,6 +238,7 @@ func TestResolverRequestHandler_RequestTransactionShouldRequestTransactions(t *t "topic", "topic", "topic", + "topic", 1, ) @@ -223,6 +282,7 @@ func TestResolverRequestHandler_RequestTransactionErrorsOnRequestShouldNotPanic( "topic", "topic", "topic", + "topic", 1, ) @@ -260,6 +320,7 @@ func TestResolverRequestHandler_RequestMiniBlockErrorWhenGettingCrossShardResolv "topic", "topic", "topic", + "topic", 1, ) @@ -293,6 +354,7 @@ func TestResolverRequestHandler_RequestMiniBlockErrorsOnRequestShouldNotPanic(t "topic", "topic", "topic", + "topic", 1, ) @@ -320,6 +382,7 @@ func TestResolverRequestHandler_RequestMiniBlockShouldCallRequestOnResolver(t *t "topic", "topic", "topic", + "topic", 1, ) @@ -351,6 +414,7 @@ func TestResolverRequestHandler_RequestHeaderShouldCallRequestOnResolver(t *test "topic", "topic", "topic", + "topic", 1, ) @@ -383,6 +447,7 @@ func TestResolverRequestHandler_RequestHeaderByNonceShardFinderReturnsErrorShoul "topic", "topic", "topic", + "topic", 1, ) @@ -416,6 +481,7 @@ func TestResolverRequestHandler_RequestHeaderByNonceShardFinderReturnsAWrongReso "topic", "topic", "topic", + "topic", 1, ) @@ -449,6 +515,7 @@ func TestResolverRequestHandler_RequestHeaderByNonceShardResolverFailsShouldNotP "topic", "topic", "topic", + "topic", 1, ) @@ -476,6 +543,7 @@ func TestResolverRequestHandler_RequestHeaderByNonceShardShouldRequest(t *testin "topic", "topic", "topic", + "topic", 1, ) @@ -532,6 +600,7 @@ func TestResolverRequestHandler_RequestScrErrorWhenGettingCrossShardResolverShou "scrtopic", "topic", "topic", + "topic", 1, ) @@ -560,6 +629,7 @@ func TestResolverRequestHandler_RequestScrWrongResolverShouldNotPanic(t *testing "scrtopic", "topic", "topic", + "topic", 1, ) @@ -587,6 +657,7 @@ func TestResolverRequestHandler_RequestScrShouldRequestScr(t *testing.T) { "scrtopic", "topic", "topic", + "topic", 1, ) @@ -630,6 +701,7 @@ func TestResolverRequestHandler_RequestScrErrorsOnRequestShouldNotPanic(t *testi "scrtopic", "topic", "topic", + "topic", 1, ) diff --git a/integrationTests/mock/blockProcessorMock.go b/integrationTests/mock/blockProcessorMock.go index f95cdbf04af..c7751bc3c93 100644 --- 
a/integrationTests/mock/blockProcessorMock.go +++ b/integrationTests/mock/blockProcessorMock.go @@ -93,5 +93,8 @@ func (blProcMock BlockProcessorMock) AddLastNotarizedHdr(shardId uint32, process blProcMock.AddLastNotarizedHdrCalled(shardId, processedHdr) } -func(blProcMock BlockProcessorMock) SetConsensusRewardAddresses ([]string){ +func (blProcMock BlockProcessorMock) SetConsensusRewardAddresses(address []string) { + if blProcMock.SetConsensusRewardAddressesCalled != nil { + blProcMock.SetConsensusRewardAddressesCalled(address) + } } diff --git a/integrationTests/multiShard/smartContract/testInitilalizer.go b/integrationTests/multiShard/smartContract/testInitilalizer.go index c95f717ac10..86cb1e284ec 100644 --- a/integrationTests/multiShard/smartContract/testInitilalizer.go +++ b/integrationTests/multiShard/smartContract/testInitilalizer.go @@ -6,6 +6,7 @@ import ( "encoding/base64" "encoding/hex" "fmt" + "github.com/ElrondNetwork/elrond-go/process/rewardTransaction" "math/big" "math/rand" "strings" @@ -326,6 +327,7 @@ func createNetNode( resolversFinder, factory.TransactionTopic, factory.UnsignedTransactionTopic, + factory.RewardsTransactionTopic, factory.MiniBlocksTopic, factory.MetachainBlocksTopic, 100, @@ -363,6 +365,12 @@ func createNetNode( rewardsHandler, ) + rewardProcessor, _ := rewardTransaction.NewRewardTxProcessor( + accntAdapter, + addrConv, + shardCoordinator, + ) + txTypeHandler, _ := coordinator.NewTxTypeHandler(addrConv, shardCoordinator, accntAdapter) txProcessor, _ := transaction.NewTxProcessor( @@ -388,6 +396,7 @@ func createNetNode( txProcessor, scProcessor, scProcessor, + rewardProcessor, ) container, _ := fact.Create() diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index b9fa90ac39b..51064493e8b 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -4,6 +4,7 @@ import ( "context" "encoding/hex" "fmt" + "github.com/ElrondNetwork/elrond-go/process/rewardTransaction" "sync/atomic" "time" @@ -103,6 +104,7 @@ type TestProcessorNode struct { BlockchainHook vmcommon.BlockchainHook ArgsParser process.ArgumentsParser ScProcessor process.SmartContractProcessor + RewardsProcessor process.RewardTransactionProcessor PreProcessorsContainer process.PreProcessorsContainer ForkDetector process.ForkDetector @@ -307,6 +309,7 @@ func (tpn *TestProcessorNode) initResolvers() { tpn.ResolverFinder, factory.TransactionTopic, factory.UnsignedTransactionTopic, + factory.RewardsTransactionTopic, factory.MiniBlocksTopic, factory.MetachainBlocksTopic, 100, @@ -351,6 +354,11 @@ func (tpn *TestProcessorNode) initInnerProcessors() { tpn.ScrForwarder, &mock.UnsignedTxHandlerMock{}, ) + tpn.RewardsProcessor, _=rewardTransaction.NewRewardTxProcessor( + tpn.AccntState, + TestAddressConverter, + tpn.ShardCoordinator, + ) txTypeHandler, _ := coordinator.NewTxTypeHandler(TestAddressConverter, tpn.ShardCoordinator, tpn.AccntState) @@ -377,6 +385,7 @@ func (tpn *TestProcessorNode) initInnerProcessors() { tpn.TxProcessor, tpn.ScProcessor, tpn.ScProcessor.(process.SmartContractResultProcessor), + tpn.RewardsProcessor, ) tpn.PreProcessorsContainer, _ = fact.Create() diff --git a/process/block/baseProcess_test.go b/process/block/baseProcess_test.go index 95fd18b8ff4..17e8c1e157b 100644 --- a/process/block/baseProcess_test.go +++ b/process/block/baseProcess_test.go @@ -3,6 +3,8 @@ package block_test import ( "bytes" "errors" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" + "math/big" "reflect" "testing" "time" @@ 
-109,6 +111,48 @@ func initDataPool(testHash []byte) *mock.PoolsHolderStub { }, } }, + RewardTransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{ + RegisterHandlerCalled: func(i func(key []byte)) {}, + ShardDataStoreCalled: func(id string) (c storage.Cacher) { + return &mock.CacherStub{ + PeekCalled: func(key []byte) (value interface{}, ok bool) { + if reflect.DeepEqual(key, testHash) { + return &rewardTx.RewardTx{ + Round: 1, + Epoch: 0, + Value: big.NewInt(10), + RcvAddr: []byte("receiver"), + ShardId: 0, + }, true + } + return nil, false + }, + KeysCalled: func() [][]byte { + return [][]byte{[]byte("key1"), []byte("key2")} + }, + LenCalled: func() int { + return 0 + }, + } + }, + RemoveSetOfDataFromPoolCalled: func(keys [][]byte, id string) {}, + SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { + if reflect.DeepEqual(key, []byte("tx1_hash")) { + return &rewardTx.RewardTx{ + Round: 1, + Epoch: 0, + Value: big.NewInt(10), + RcvAddr: []byte("receiver"), + ShardId: 0, + }, true + } + return nil, false + }, + AddDataCalled: func(key []byte, data interface{}, cacheId string) { + }, + } + }, HeadersNoncesCalled: func() dataRetriever.Uint64SyncMapCacher { return &mock.Uint64SyncMapCacherStub{ MergeCalled: func(u uint64, syncMap dataRetriever.ShardIdHashMap) {}, diff --git a/process/block/preprocess/rewardTxPreProcessor.go b/process/block/preprocess/rewardTxPreProcessor.go new file mode 100644 index 00000000000..2aaae5b01ac --- /dev/null +++ b/process/block/preprocess/rewardTxPreProcessor.go @@ -0,0 +1,455 @@ +package preprocess + +import ( + "fmt" + "time" + + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/storage" +) + +type rewardTxPreprocessor struct { + *basePreProcess + chReceivedAllRewardTxs chan bool + onRequestRewardTx func(shardID uint32, txHashes [][]byte) + rewardTxsForBlock txsForBlock + rewardTxPool dataRetriever.ShardedDataCacherNotifier + storage dataRetriever.StorageService + rewardsProcessor process.RewardTransactionProcessor + accounts state.AccountsAdapter +} + +// NewRewardTxPreprocessor creates a new reward transaction preprocessor object +func NewRewardTxPreprocessor( + rewardTxDataPool dataRetriever.ShardedDataCacherNotifier, + store dataRetriever.StorageService, + hasher hashing.Hasher, + marshalizer marshal.Marshalizer, + rewardProcessor process.RewardTransactionProcessor, + shardCoordinator sharding.Coordinator, + accounts state.AccountsAdapter, + onRequestRewardTransaction func(shardID uint32, txHashes [][]byte), +) (*rewardTxPreprocessor, error) { + + if hasher == nil { + return nil, process.ErrNilHasher + } + if marshalizer == nil { + return nil, process.ErrNilMarshalizer + } + if rewardTxDataPool == nil { + return nil, process.ErrNilRewardTxDataPool + } + if store == nil { + return nil, process.ErrNilStorage + } + if rewardProcessor == nil { + return nil, process.ErrNilTxProcessor + } + if shardCoordinator == nil { + return nil, process.ErrNilShardCoordinator + } + if accounts == nil { + return nil, process.ErrNilAccountsAdapter + } + if onRequestRewardTransaction == nil { + 
return nil, process.ErrNilRequestHandler + } + + bpp := &basePreProcess{ + hasher: hasher, + marshalizer: marshalizer, + shardCoordinator: shardCoordinator, + } + + rtp := &rewardTxPreprocessor{ + basePreProcess: bpp, + storage: store, + rewardTxPool: rewardTxDataPool, + onRequestRewardTx: onRequestRewardTransaction, + rewardsProcessor: rewardProcessor, + accounts: accounts, + } + + rtp.chReceivedAllRewardTxs = make(chan bool) + rtp.rewardTxPool.RegisterHandler(rtp.receivedRewardTransaction) + rtp.rewardTxsForBlock.txHashAndInfo = make(map[string]*txInfo) + + return rtp, nil +} + +// waitForRewardTxHashes waits for a call whether all the requested smartContractResults appeared +func (rtp *rewardTxPreprocessor) waitForRewardTxHashes(waitTime time.Duration) error { + select { + case <-rtp.chReceivedAllRewardTxs: + return nil + case <-time.After(waitTime): + return process.ErrTimeIsOut + } +} + +// IsDataPrepared returns non error if all the requested reward transactions arrived and were saved into the pool +func (rtp *rewardTxPreprocessor) IsDataPrepared(requestedRewardTxs int, haveTime func() time.Duration) error { + if requestedRewardTxs > 0 { + log.Info(fmt.Sprintf("requested %d missing reward Txs\n", requestedRewardTxs)) + err := rtp.waitForRewardTxHashes(haveTime()) + rtp.rewardTxsForBlock.mutTxsForBlock.RLock() + missingRewardTxs := rtp.rewardTxsForBlock.missingTxs + rtp.rewardTxsForBlock.mutTxsForBlock.RUnlock() + log.Info(fmt.Sprintf("received %d missing reward Txs\n", requestedRewardTxs-missingRewardTxs)) + if err != nil { + return err + } + } + return nil +} + +// RemoveTxBlockFromPools removes reward transactions and miniblocks from associated pools +func (rtp *rewardTxPreprocessor) RemoveTxBlockFromPools(body block.Body, miniBlockPool storage.Cacher) error { + if body == nil { + return process.ErrNilTxBlockBody + } + + err := rtp.removeDataFromPools(body, miniBlockPool, rtp.rewardTxPool, block.RewardsBlockType) + + return err +} + +// RestoreTxBlockIntoPools restores the reward transactions and miniblocks to associated pools +func (rtp *rewardTxPreprocessor) RestoreTxBlockIntoPools( + body block.Body, + miniBlockPool storage.Cacher, +) (int, map[int][]byte, error) { + if miniBlockPool == nil { + return 0, nil, process.ErrNilMiniBlockPool + } + + miniBlockHashes := make(map[int][]byte) + + rewardTxsRestored := 0 + for i := 0; i < len(body); i++ { + miniBlock := body[i] + if miniBlock.Type != block.RewardsBlockType { + continue + } + + strCache := process.ShardCacherIdentifier(miniBlock.SenderShardID, miniBlock.ReceiverShardID) + rewardTxBuff, err := rtp.storage.GetAll(dataRetriever.RewardTransactionUnit, miniBlock.TxHashes) + if err != nil { + return rewardTxsRestored, miniBlockHashes, err + } + + for txHash, txBuff := range rewardTxBuff { + tx := rewardTx.RewardTx{} + err = rtp.marshalizer.Unmarshal(&tx, txBuff) + if err != nil { + return rewardTxsRestored, miniBlockHashes, err + } + + rtp.rewardTxPool.AddData([]byte(txHash), &tx, strCache) + } + + restoredHash, err := rtp.restoreMiniBlock(miniBlock, miniBlockPool) + if err != nil { + return rewardTxsRestored, miniBlockHashes, err + } + + miniBlockHashes[i] = restoredHash + rewardTxsRestored += len(miniBlock.TxHashes) + } + + return rewardTxsRestored, miniBlockHashes, nil +} + +// ProcessBlockTransactions processes all the reward transactions from the block.Body, updates the state +func (rtp *rewardTxPreprocessor) ProcessBlockTransactions(body block.Body, round uint64, haveTime func() time.Duration) error { + // basic validation 
already done in interceptors + for i := 0; i < len(body); i++ { + miniBlock := body[i] + if miniBlock.Type != block.RewardsBlockType { + continue + } + if miniBlock.ReceiverShardID != rtp.shardCoordinator.SelfId() { + continue + } + + for j := 0; j < len(miniBlock.TxHashes); j++ { + if haveTime() < 0 { + return process.ErrTimeIsOut + } + + txHash := miniBlock.TxHashes[j] + rtp.rewardTxsForBlock.mutTxsForBlock.RLock() + txInfo := rtp.rewardTxsForBlock.txHashAndInfo[string(txHash)] + rtp.rewardTxsForBlock.mutTxsForBlock.RUnlock() + if txInfo == nil || txInfo.tx == nil { + return process.ErrMissingTransaction + } + + rTx, ok := txInfo.tx.(*rewardTx.RewardTx) + if !ok { + return process.ErrWrongTypeAssertion + } + + err := rtp.processRewardTransaction( + txHash, + rTx, + round, + miniBlock.SenderShardID, + miniBlock.ReceiverShardID, + ) + if err != nil { + return err + } + } + } + return nil +} + +// SaveTxBlockToStorage saves the reward transactions from body into storage +func (rtp *rewardTxPreprocessor) SaveTxBlockToStorage(body block.Body) error { + for i := 0; i < len(body); i++ { + miniBlock := (body)[i] + if miniBlock.Type != block.RewardsBlockType || miniBlock.ReceiverShardID != rtp.shardCoordinator.SelfId() { + continue + } + + err := rtp.saveTxsToStorage( + miniBlock.TxHashes, + &rtp.rewardTxsForBlock, + rtp.storage, + dataRetriever.RewardTransactionUnit, + ) + if err != nil { + return err + } + } + + return nil +} + +// receivedRewardTransaction is a callback function called when a new reward transaction +// is added in the reward transactions pool +func (rtp *rewardTxPreprocessor) receivedRewardTransaction(txHash []byte) { + receivedAllMissing := rtp.baseReceivedTransaction(txHash, &rtp.rewardTxsForBlock, rtp.rewardTxPool) + + if receivedAllMissing { + rtp.chReceivedAllRewardTxs <- true + } +} + +// CreateBlockStarted cleans the local cache map for processed/created reward transactions at this round +func (rtp *rewardTxPreprocessor) CreateBlockStarted() { + rtp.rewardTxsForBlock.mutTxsForBlock.Lock() + rtp.rewardTxsForBlock.txHashAndInfo = make(map[string]*txInfo) + rtp.rewardTxsForBlock.mutTxsForBlock.Unlock() +} + +// RequestBlockTransactions request for reward transactions if missing from a block.Body +func (rtp *rewardTxPreprocessor) RequestBlockTransactions(body block.Body) int { + requestedRewardTxs := 0 + missingRewardTxsForShards := rtp.computeMissingAndExistingRewardTxsForShards(body) + + rtp.rewardTxsForBlock.mutTxsForBlock.Lock() + for senderShardID, rewardTxHashesInfo := range missingRewardTxsForShards { + txShardInfo := &txShardInfo{senderShardID: senderShardID, receiverShardID: rewardTxHashesInfo.receiverShardID} + for _, txHash := range rewardTxHashesInfo.txHashes { + rtp.rewardTxsForBlock.txHashAndInfo[string(txHash)] = &txInfo{tx: nil, txShardInfo: txShardInfo} + } + } + rtp.rewardTxsForBlock.mutTxsForBlock.Unlock() + + for senderShardID, scrHashesInfo := range missingRewardTxsForShards { + requestedRewardTxs += len(scrHashesInfo.txHashes) + rtp.onRequestRewardTx(senderShardID, scrHashesInfo.txHashes) + } + + return requestedRewardTxs +} + +// computeMissingAndExistingRewardTxsForShards calculates what reward transactions are available and what are missing +// from block.Body +func (rtp *rewardTxPreprocessor) computeMissingAndExistingRewardTxsForShards(body block.Body) map[uint32]*txsHashesInfo { + onlyRewardTxsFromOthersBody := block.Body{} + for _, mb := range body { + if mb.Type != block.RewardsBlockType { + continue + } + if mb.SenderShardID == 
rtp.shardCoordinator.SelfId() { + continue + } + + onlyRewardTxsFromOthersBody = append(onlyRewardTxsFromOthersBody, mb) + } + + missingTxsForShard := rtp.computeExistingAndMissing( + onlyRewardTxsFromOthersBody, + &rtp.rewardTxsForBlock, + rtp.chReceivedAllRewardTxs, + block.RewardsBlockType, + rtp.rewardTxPool, + ) + + return missingTxsForShard +} + +// processRewardTransaction processes a reward transaction, if the transactions has an error it removes it from pool +func (rtp *rewardTxPreprocessor) processRewardTransaction( + rewardTxHash []byte, + rewardTx *rewardTx.RewardTx, + round uint64, + sndShardId uint32, + dstShardId uint32, +) error { + + err := rtp.rewardsProcessor.ProcessRewardTransaction(rewardTx) + if err != nil { + return err + } + + txShardInfo := &txShardInfo{senderShardID: sndShardId, receiverShardID: dstShardId} + rtp.rewardTxsForBlock.mutTxsForBlock.Lock() + rtp.rewardTxsForBlock.txHashAndInfo[string(rewardTxHash)] = &txInfo{tx: rewardTx, txShardInfo: txShardInfo} + rtp.rewardTxsForBlock.mutTxsForBlock.Unlock() + + return nil +} + +// RequestTransactionsForMiniBlock requests missing reward transactions for a certain miniblock +func (rtp *rewardTxPreprocessor) RequestTransactionsForMiniBlock(mb block.MiniBlock) int { + missingRewardTxsForMiniBlock := rtp.computeMissingRewardTxsForMiniBlock(mb) + rtp.onRequestRewardTx(mb.SenderShardID, missingRewardTxsForMiniBlock) + + return len(missingRewardTxsForMiniBlock) +} + +// computeMissingRewardTxsForMiniBlock computes missing reward transactions for a certain miniblock +func (rtp *rewardTxPreprocessor) computeMissingRewardTxsForMiniBlock(mb block.MiniBlock) [][]byte { + missingRewardTxs := make([][]byte, 0) + if mb.Type != block.RewardsBlockType { + return missingRewardTxs + } + + for _, txHash := range mb.TxHashes { + tx, _ := process.GetTransactionHandlerFromPool( + mb.SenderShardID, + mb.ReceiverShardID, + txHash, + rtp.rewardTxPool, + ) + + if tx == nil { + missingRewardTxs = append(missingRewardTxs, txHash) + } + } + + return missingRewardTxs +} + +// getAllRewardTxsFromMiniBlock gets all the reward transactions from a miniblock into a new structure +func (rtp *rewardTxPreprocessor) getAllRewardTxsFromMiniBlock( + mb *block.MiniBlock, + haveTime func() bool, +) ([]*rewardTx.RewardTx, [][]byte, error) { + + strCache := process.ShardCacherIdentifier(mb.SenderShardID, mb.ReceiverShardID) + txCache := rtp.rewardTxPool.ShardDataStore(strCache) + if txCache == nil { + return nil, nil, process.ErrNilRewardTxDataPool + } + + // verify if all reward transactions exists + rewardTxs := make([]*rewardTx.RewardTx, 0) + txHashes := make([][]byte, 0) + for _, txHash := range mb.TxHashes { + if !haveTime() { + return nil, nil, process.ErrTimeIsOut + } + + tmp, _ := txCache.Peek(txHash) + if tmp == nil { + return nil, nil, process.ErrNilRewardTransaction + } + + tx, ok := tmp.(*rewardTx.RewardTx) + if !ok { + return nil, nil, process.ErrWrongTypeAssertion + } + + txHashes = append(txHashes, txHash) + rewardTxs = append(rewardTxs, tx) + } + + return rewardTxs, txHashes, nil +} + +// CreateAndProcessMiniBlock creates the miniblock from storage and processes the smartContractResults added into the miniblock +func (rtp *rewardTxPreprocessor) CreateAndProcessMiniBlock(sndShardId, dstShardId uint32, spaceRemained int, haveTime func() bool, round uint64) (*block.MiniBlock, error) { + return nil, nil +} + +// ProcessMiniBlock processes all the reward transactions from a miniblock and saves the processed reward transactions +// in local cache 
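Arrival of the requested reward transactions is coordinated the same way as in the other preprocessors: the pool callback registered in the constructor counts down the missing hashes and signals a channel when the last one shows up, while IsDataPrepared blocks on that channel or on the remaining round time. A self-contained sketch of that request/receive/wait flow (the tracker type is illustrative; the real code keeps its counters inside the shared txsForBlock structure):

package main

import (
	"errors"
	"fmt"
	"sync"
	"time"
)

var errTimeIsOut = errors.New("time is out")

// missingTracker sketches the request/receive/wait flow used by the
// preprocessor: requested hashes are tracked, the pool callback marks them as
// received, and the last arrival signals the waiting goroutine.
type missingTracker struct {
	mut        sync.Mutex
	missing    map[string]struct{}
	chAllRecvd chan bool
}

func newMissingTracker(hashes [][]byte) *missingTracker {
	m := &missingTracker{missing: map[string]struct{}{}, chAllRecvd: make(chan bool)}
	for _, h := range hashes {
		m.missing[string(h)] = struct{}{}
	}
	return m
}

// received plays the role of receivedRewardTransaction: it is called for every
// hash added to the pool and fires the channel once nothing is missing anymore.
func (m *missingTracker) received(hash []byte) {
	m.mut.Lock()
	delete(m.missing, string(hash))
	done := len(m.missing) == 0
	m.mut.Unlock()

	if done {
		m.chAllRecvd <- true
	}
}

// wait mirrors waitForRewardTxHashes: block until everything arrived or the
// allotted time ran out.
func (m *missingTracker) wait(d time.Duration) error {
	select {
	case <-m.chAllRecvd:
		return nil
	case <-time.After(d):
		return errTimeIsOut
	}
}

func main() {
	tracker := newMissingTracker([][]byte{[]byte("rewardTxHash")})
	go tracker.received([]byte("rewardTxHash")) // simulate the pool callback

	fmt.Println(tracker.wait(100 * time.Millisecond)) // <nil> once the callback fires
}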
+func (rtp *rewardTxPreprocessor) ProcessMiniBlock(miniBlock *block.MiniBlock, haveTime func() bool, round uint64) error { + if miniBlock.Type != block.RewardsBlockType { + return process.ErrWrongTypeInMiniBlock + } + + miniBlockRewardTxs, miniBlockTxHashes, err := rtp.getAllRewardTxsFromMiniBlock(miniBlock, haveTime) + if err != nil { + return err + } + + for index := range miniBlockRewardTxs { + if !haveTime() { + err = process.ErrTimeIsOut + return err + } + + err = rtp.rewardsProcessor.ProcessRewardTransaction(miniBlockRewardTxs[index]) + if err != nil { + return err + } + } + + txShardInfo := &txShardInfo{senderShardID: miniBlock.SenderShardID, receiverShardID: miniBlock.ReceiverShardID} + + rtp.rewardTxsForBlock.mutTxsForBlock.Lock() + for index, txHash := range miniBlockTxHashes { + rtp.rewardTxsForBlock.txHashAndInfo[string(txHash)] = &txInfo{tx: miniBlockRewardTxs[index], txShardInfo: txShardInfo} + } + rtp.rewardTxsForBlock.mutTxsForBlock.Unlock() + + return nil +} + +// CreateMarshalizedData marshalizes reward transaction hashes and and saves them into a new structure +func (rtp *rewardTxPreprocessor) CreateMarshalizedData(txHashes [][]byte) ([][]byte, error) { + marshaledRewardTxs, err := rtp.createMarshalizedData(txHashes, &rtp.rewardTxsForBlock) + if err != nil { + return nil, err + } + + return marshaledRewardTxs, nil +} + +// GetAllCurrentUsedTxs returns all the reward transactions used at current creation / processing +func (rtp *rewardTxPreprocessor) GetAllCurrentUsedTxs() map[string]data.TransactionHandler { + rewardTxPool := make(map[string]data.TransactionHandler) + + rtp.rewardTxsForBlock.mutTxsForBlock.RLock() + for txHash, txInfo := range rtp.rewardTxsForBlock.txHashAndInfo { + rewardTxPool[txHash] = txInfo.tx + } + rtp.rewardTxsForBlock.mutTxsForBlock.RUnlock() + + return rewardTxPool +} diff --git a/process/unsigned/rewardsHandler.go b/process/block/preprocess/rewardsHandler.go similarity index 99% rename from process/unsigned/rewardsHandler.go rename to process/block/preprocess/rewardsHandler.go index 87ff7310c47..5172f7cf3fc 100644 --- a/process/unsigned/rewardsHandler.go +++ b/process/block/preprocess/rewardsHandler.go @@ -1,4 +1,4 @@ -package unsigned +package preprocess import ( "math/big" diff --git a/process/unsigned/rewardsHandler_test.go b/process/block/preprocess/rewardsHandler_test.go similarity index 99% rename from process/unsigned/rewardsHandler_test.go rename to process/block/preprocess/rewardsHandler_test.go index 41416ed8133..15457ba5a10 100644 --- a/process/unsigned/rewardsHandler_test.go +++ b/process/block/preprocess/rewardsHandler_test.go @@ -1,4 +1,4 @@ -package unsigned +package preprocess import ( "math/big" diff --git a/process/block/shardblock_test.go b/process/block/shardblock_test.go index ff82748ff7b..8bd4b431a91 100644 --- a/process/block/shardblock_test.go +++ b/process/block/shardblock_test.go @@ -669,6 +669,7 @@ func TestShardProcessor_ProcessBlockWithInvalidTransactionShouldErr(t *testing.T }, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, ) container, _ := factory.Create() @@ -900,6 +901,7 @@ func TestShardProcessor_ProcessBlockWithErrOnProcessBlockTransactionsCallShouldR tpm, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, ) container, _ := factory.Create() @@ -2204,6 +2206,7 @@ func TestShardProcessor_CommitBlockNoTxInPoolShouldErr(t *testing.T) { &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, 
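Both ProcessBlockTransactions and ProcessMiniBlock stop early with ErrTimeIsOut as soon as the caller-supplied haveTime closure reports that the round budget is spent, instead of tracking deadlines themselves. A minimal sketch of that cooperative time-budget pattern (the item type and the 50 ms budget are illustrative):

package main

import (
	"errors"
	"fmt"
	"time"
)

var errTimeIsOut = errors.New("time is out")

// processWithBudget sketches the cooperative time-budget style used by
// ProcessMiniBlock: the caller supplies a haveTime closure and processing
// stops with ErrTimeIsOut as soon as the budget is exhausted.
func processWithBudget(items []string, haveTime func() bool, process func(string) error) error {
	for _, it := range items {
		if !haveTime() {
			return errTimeIsOut
		}
		if err := process(it); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	deadline := time.Now().Add(50 * time.Millisecond)
	haveTime := func() bool { return time.Now().Before(deadline) }

	err := processWithBudget(
		[]string{"rewardTx1", "rewardTx2"},
		haveTime,
		func(tx string) error { fmt.Println("processed", tx); return nil },
	)
	fmt.Println(err) // <nil> unless the 50ms budget is exceeded
}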
&mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, ) container, _ := factory.Create() @@ -2886,6 +2889,7 @@ func TestShardProcessor_MarshalizedDataToBroadcastShouldWork(t *testing.T) { &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, ) container, _ := factory.Create() @@ -3020,6 +3024,7 @@ func TestShardProcessor_MarshalizedDataMarshalWithoutSuccess(t *testing.T) { &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, ) container, _ := factory.Create() @@ -3502,6 +3507,7 @@ func TestShardProcessor_CreateMiniBlocksShouldWorkWithIntraShardTxs(t *testing.T txProcessorMock, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, ) container, _ := factory.Create() @@ -3733,6 +3739,7 @@ func TestShardProcessor_RestoreBlockIntoPoolsShouldWork(t *testing.T) { &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, ) container, _ := factory.Create() diff --git a/process/constants.go b/process/constants.go index 3b8677ea584..ffc987a0889 100644 --- a/process/constants.go +++ b/process/constants.go @@ -24,8 +24,8 @@ const ( SCDeployment // SCInvoking defines ID of a transaction of type smart contract call SCInvoking - // TxFee defines ID of a transaction of type tx fee - TxFee + // RewardTx defines ID of a reward transaction + RewardTx // InvalidTransaction defines unknown transaction type InvalidTransaction ) diff --git a/process/coordinator/process_test.go b/process/coordinator/process_test.go index 364c0b3b9e2..03c18fecf82 100644 --- a/process/coordinator/process_test.go +++ b/process/coordinator/process_test.go @@ -4,6 +4,7 @@ import ( "bytes" "encoding/hex" "errors" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" "math/big" "reflect" "sync" @@ -91,6 +92,36 @@ func initDataPool(testHash []byte) *mock.PoolsHolderStub { }, } }, + RewardTransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{ + RegisterHandlerCalled: func(i func(key []byte)) {}, + ShardDataStoreCalled: func(id string) (c storage.Cacher) { + return &mock.CacherStub{ + PeekCalled: func(key []byte) (value interface{}, ok bool) { + if reflect.DeepEqual(key, testHash) { + return &rewardTx.RewardTx{Epoch: 0, Round: 1, RcvAddr: []byte("1")}, true + } + return nil, false + }, + KeysCalled: func() [][]byte { + return [][]byte{[]byte("key1"), []byte("key2")} + }, + LenCalled: func() int { + return 0 + }, + } + }, + RemoveSetOfDataFromPoolCalled: func(keys [][]byte, id string) {}, + SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { + if reflect.DeepEqual(key, testHash) { + return &rewardTx.RewardTx{Epoch: 0, Round: 1, RcvAddr: []byte("1")}, true + } + return nil, false + }, + AddDataCalled: func(key []byte, data interface{}, cacheId string) { + }, + } + }, HeadersNoncesCalled: func() dataRetriever.Uint64SyncMapCacher { return &mock.Uint64SyncMapCacherStub{ MergeCalled: func(u uint64, hashMap dataRetriever.ShardIdHashMap) {}, @@ -378,6 +409,7 @@ func createPreProcessorContainer() process.PreProcessorsContainer { }, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, ) container, _ := preFactory.Create() @@ -415,6 +447,7 @@ func createPreProcessorContainerWithDataPool(dataPool dataRetriever.PoolsHolder) }, &mock.SCProcessorMock{}, 
&mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, ) container, _ := preFactory.Create() @@ -684,6 +717,7 @@ func TestTransactionCoordinator_CreateMbsAndProcessCrossShardTransactions(t *tes }, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, ) container, _ := preFactory.Create() @@ -758,6 +792,9 @@ func TestTransactionCoordinator_CreateMbsAndProcessTransactionsFromMeNothingToPr UnsignedTransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { return shardedCacheMock }, + RewardTransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { + return shardedCacheMock + }, }, &mock.AddressConverterMock{}, &mock.AccountsStub{}, @@ -769,6 +806,7 @@ func TestTransactionCoordinator_CreateMbsAndProcessTransactionsFromMeNothingToPr }, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, ) container, _ := preFactory.Create() @@ -1131,6 +1169,7 @@ func TestTransactionCoordinator_receivedMiniBlockRequestTxs(t *testing.T) { &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, ) container, _ := preFactory.Create() @@ -1286,6 +1325,7 @@ func TestTransactionCoordinator_ProcessBlockTransactionProcessTxError(t *testing }, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, ) container, _ := preFactory.Create() @@ -1404,6 +1444,7 @@ func TestTransactionCoordinator_RequestMiniblocks(t *testing.T) { }, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, ) container, _ := preFactory.Create() @@ -1517,6 +1558,7 @@ func TestShardProcessor_ProcessMiniBlockCompleteWithOkTxsShouldExecuteThemAndNot }, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, ) container, _ := preFactory.Create() @@ -1621,6 +1663,7 @@ func TestShardProcessor_ProcessMiniBlockCompleteWithErrorWhileProcessShouldCallR }, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, ) container, _ := preFactory.Create() diff --git a/process/coordinator/transactionType.go b/process/coordinator/transactionType.go index c163ce862cd..edfdf8cc0e5 100644 --- a/process/coordinator/transactionType.go +++ b/process/coordinator/transactionType.go @@ -3,7 +3,7 @@ package coordinator import ( "bytes" "github.com/ElrondNetwork/elrond-go/data" - "github.com/ElrondNetwork/elrond-go/data/feeTx" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/sharding" @@ -47,9 +47,9 @@ func (tc *txTypeHandler) ComputeTransactionType(tx data.TransactionHandler) (pro return process.InvalidTransaction, err } - _, isTxfee := tx.(*feeTx.FeeTx) - if isTxfee { - return process.TxFee, nil + _, isRewardTx := tx.(*rewardTx.RewardTx) + if isRewardTx { + return process.RewardTx, nil } isEmptyAddress := tc.isDestAddressEmpty(tx) diff --git a/process/coordinator/transactionType_test.go b/process/coordinator/transactionType_test.go index 9a8feced409..6ed137b8452 100644 --- a/process/coordinator/transactionType_test.go +++ b/process/coordinator/transactionType_test.go @@ -2,7 +2,7 @@ package coordinator import ( "crypto/rand" - "github.com/ElrondNetwork/elrond-go/data/feeTx" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" 
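With the constant renamed from TxFee to RewardTx, ComputeTransactionType now recognises a reward transaction by its concrete *rewardTx.RewardTx type before any destination-address or smart-contract checks run. A toy illustration of that type-assertion routing, using stand-in types rather than the patch's actual API:

package main

import "fmt"

// transactionHandler stands in for data.TransactionHandler.
type transactionHandler interface {
	GetRecvAddress() []byte
}

type normalTransaction struct{ rcv []byte }
type rewardTransaction struct{ rcv []byte }

func (t *normalTransaction) GetRecvAddress() []byte { return t.rcv }
func (t *rewardTransaction) GetRecvAddress() []byte { return t.rcv }

// computeType mirrors the routing added to ComputeTransactionType: a reward
// transaction is recognised by its concrete type before any destination
// address inspection happens.
func computeType(tx transactionHandler) string {
	if _, isReward := tx.(*rewardTransaction); isReward {
		return "RewardTx"
	}
	// The real handler goes on to check for empty destination addresses and
	// smart-contract calls before falling back to MoveBalance.
	return "MoveBalance"
}

func main() {
	fmt.Println(computeType(&rewardTransaction{rcv: []byte("leader")}))   // RewardTx
	fmt.Println(computeType(&normalTransaction{rcv: []byte("receiver")})) // MoveBalance
}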
"github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/data/transaction" "github.com/ElrondNetwork/elrond-go/process" @@ -242,7 +242,7 @@ func TestTxTypeHandler_ComputeTransactionTypeMoveBalance(t *testing.T) { assert.Equal(t, process.MoveBalance, txType) } -func TestTxTypeHandler_ComputeTransactionTypeTxFee(t *testing.T) { +func TestTxTypeHandler_ComputeTransactionTypeRewardTx(t *testing.T) { t.Parallel() addrConv := &mock.AddressConverterMock{} @@ -255,13 +255,13 @@ func TestTxTypeHandler_ComputeTransactionTypeTxFee(t *testing.T) { assert.NotNil(t, tth) assert.Nil(t, err) - tx := &feeTx.FeeTx{RcvAddr: []byte("leader")} + tx := &rewardTx.RewardTx{RcvAddr: []byte("leader")} txType, err := tth.ComputeTransactionType(tx) assert.Equal(t, process.ErrWrongTransaction, err) assert.Equal(t, process.InvalidTransaction, txType) - tx = &feeTx.FeeTx{RcvAddr: generateRandomByteSlice(addrConv.AddressLen())} + tx = &rewardTx.RewardTx{RcvAddr: generateRandomByteSlice(addrConv.AddressLen())} txType, err = tth.ComputeTransactionType(tx) assert.Nil(t, err) - assert.Equal(t, process.TxFee, txType) + assert.Equal(t, process.RewardTx, txType) } diff --git a/process/errors.go b/process/errors.go index 07f897314f3..6f60fb306bb 100644 --- a/process/errors.go +++ b/process/errors.go @@ -286,9 +286,6 @@ var ErrNoTransactionInMessage = errors.New("no transaction found in received mes // ErrNilBuffer signals that a provided byte buffer is nil var ErrNilBuffer = errors.New("provided byte buffer is nil") -// ErrNilChronologyValidator signals that a nil chronology validator has been provided -var ErrNilChronologyValidator = errors.New("provided chronology validator object is nil") - // ErrNilRandSeed signals that a nil rand seed has been provided var ErrNilRandSeed = errors.New("provided rand seed is nil") @@ -358,6 +355,9 @@ var ErrNilVMOutput = errors.New("nil vm output") // ErrNilBalanceFromSC signals that balance is nil var ErrNilBalanceFromSC = errors.New("output balance from VM is nil") +// ErrNilValueFromRewardTransaction signals that the transfered value is nil +var ErrNilValueFromRewardTransaction = errors.New("transferred value is nil in reward transaction") + // ErrNilTemporaryAccountsHandler signals that temporary accounts handler is nil var ErrNilTemporaryAccountsHandler = errors.New("temporary accounts handler is nil") @@ -367,6 +367,9 @@ var ErrNotEnoughValidBlocksInStorage = errors.New("not enough valid blocks in st // ErrNilSmartContractResult signals that the smart contract result is nil var ErrNilSmartContractResult = errors.New("smart contract result is nil") +// ErrNilRewardTransaction signals that the reward transaction is nil +var ErrNilRewardTransaction = errors.New("reward transaction is nil") + // ErrInvalidDataInput signals that the data input is invalid for parsing var ErrInvalidDataInput = errors.New("data input is invalid to create key, value storage output") @@ -376,6 +379,9 @@ var ErrNoUnsignedTransactionInMessage = errors.New("no unsigned transactions in // ErrNilUTxDataPool signals that unsigned transaction pool is nil var ErrNilUTxDataPool = errors.New("unsigned transactions pool is nil") +// ErrNilRewardTxDataPool signals that the reward transactions pool is nil +var ErrNilRewardTxDataPool = errors.New("reward transactions pool is nil") + // ErrNilUTxStorage signals that unsigned transaction storage is nil var ErrNilUTxStorage = errors.New("unsigned transactions storage is nil") @@ -403,6 +409,9 @@ var ErrNilUint64Converter = errors.New("unit64converter is 
nil") // ErrNilSmartContractResultProcessor signals that smart contract result processor is nil var ErrNilSmartContractResultProcessor = errors.New("nil smart contract result processor") +// ErrNilRewardsTxProcessor signals that the rewards transaction processor is nil +var ErrNilRewardsTxProcessor = errors.New("nil rewards transaction processor") + // ErrNilIntermediateProcessorContainer signals that intermediate processors container is nil var ErrNilIntermediateProcessorContainer = errors.New("intermediate processor container is nil") diff --git a/process/factory/shard/intermediateProcessorsContainerFactory.go b/process/factory/shard/intermediateProcessorsContainerFactory.go index d0c4cd669d6..8a47d0d3a51 100644 --- a/process/factory/shard/intermediateProcessorsContainerFactory.go +++ b/process/factory/shard/intermediateProcessorsContainerFactory.go @@ -9,7 +9,6 @@ import ( "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/block/preprocess" "github.com/ElrondNetwork/elrond-go/process/factory/containers" - "github.com/ElrondNetwork/elrond-go/process/unsigned" "github.com/ElrondNetwork/elrond-go/sharding" ) @@ -102,7 +101,7 @@ func (ppcm *intermediateProcessorsContainerFactory) createSmartContractResultsIn } func (ppcm *intermediateProcessorsContainerFactory) createRewardsTxIntermediateProcessor() (process.IntermediateTransactionHandler, error) { - irp, err := unsigned.NewRewardTxHandler( + irp, err := preprocess.NewRewardTxHandler( ppcm.specialAddressHandler, ppcm.hasher, ppcm.marshalizer, diff --git a/process/factory/shard/preProcessorsContainerFactory.go b/process/factory/shard/preProcessorsContainerFactory.go index 3fdae46a588..30de7b88534 100644 --- a/process/factory/shard/preProcessorsContainerFactory.go +++ b/process/factory/shard/preProcessorsContainerFactory.go @@ -13,17 +13,18 @@ import ( ) type preProcessorsContainerFactory struct { - shardCoordinator sharding.Coordinator - store dataRetriever.StorageService - marshalizer marshal.Marshalizer - hasher hashing.Hasher - dataPool dataRetriever.PoolsHolder - addrConverter state.AddressConverter - txProcessor process.TransactionProcessor - scProcessor process.SmartContractProcessor - scResultProcessor process.SmartContractResultProcessor - accounts state.AccountsAdapter - requestHandler process.RequestHandler + shardCoordinator sharding.Coordinator + store dataRetriever.StorageService + marshalizer marshal.Marshalizer + hasher hashing.Hasher + dataPool dataRetriever.PoolsHolder + addrConverter state.AddressConverter + txProcessor process.TransactionProcessor + scProcessor process.SmartContractProcessor + scResultProcessor process.SmartContractResultProcessor + rewardsTxProcessor process.RewardTransactionProcessor + accounts state.AccountsAdapter + requestHandler process.RequestHandler } // NewPreProcessorsContainerFactory is responsible for creating a new preProcessors factory object @@ -39,6 +40,7 @@ func NewPreProcessorsContainerFactory( txProcessor process.TransactionProcessor, scProcessor process.SmartContractProcessor, scResultProcessor process.SmartContractResultProcessor, + rewardsTxProcessor process.RewardTransactionProcessor, ) (*preProcessorsContainerFactory, error) { if shardCoordinator == nil { @@ -71,22 +73,26 @@ func NewPreProcessorsContainerFactory( if scResultProcessor == nil { return nil, process.ErrNilSmartContractResultProcessor } + if rewardsTxProcessor == nil { + return nil, process.ErrNilRewardsTxProcessor + } if requestHandler == nil { return nil, 
process.ErrNilRequestHandler } return &preProcessorsContainerFactory{ - shardCoordinator: shardCoordinator, - store: store, - marshalizer: marshalizer, - hasher: hasher, - dataPool: dataPool, - addrConverter: addrConverter, - txProcessor: txProcessor, - accounts: accounts, - scProcessor: scProcessor, - scResultProcessor: scResultProcessor, - requestHandler: requestHandler, + shardCoordinator: shardCoordinator, + store: store, + marshalizer: marshalizer, + hasher: hasher, + dataPool: dataPool, + addrConverter: addrConverter, + txProcessor: txProcessor, + accounts: accounts, + scProcessor: scProcessor, + scResultProcessor: scResultProcessor, + rewardsTxProcessor: rewardsTxProcessor, + requestHandler: requestHandler, }, nil } @@ -114,6 +120,16 @@ func (ppcm *preProcessorsContainerFactory) Create() (process.PreProcessorsContai return nil, err } + preproc, err = ppcm.createRewardsTransactionPreProcessor() + if err != nil { + return nil, err + } + + err = container.Add(block.RewardsBlockType, preproc) + if err != nil { + return nil, err + } + return container, nil } @@ -146,3 +162,18 @@ func (ppcm *preProcessorsContainerFactory) createSmartContractResultPreProcessor return scrPreprocessor, err } + +func (ppcm *preProcessorsContainerFactory) createRewardsTransactionPreProcessor() (process.PreProcessor, error) { + rewardTxPreprocessor, err := preprocess.NewRewardTxPreprocessor( + ppcm.dataPool.RewardTransactions(), + ppcm.store, + ppcm.hasher, + ppcm.marshalizer, + ppcm.rewardsTxProcessor, + ppcm.shardCoordinator, + ppcm.accounts, + ppcm.requestHandler.RequestRewardTransactions, + ) + + return rewardTxPreprocessor, err +} diff --git a/process/factory/shard/preProcessorsContainerFactory_test.go b/process/factory/shard/preProcessorsContainerFactory_test.go index 9a21fb18740..e6936f0d4d8 100644 --- a/process/factory/shard/preProcessorsContainerFactory_test.go +++ b/process/factory/shard/preProcessorsContainerFactory_test.go @@ -23,6 +23,7 @@ func TestNewPreProcessorsContainerFactory_NilShardCoordinator(t *testing.T) { &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, ) assert.Equal(t, process.ErrNilShardCoordinator, err) @@ -44,6 +45,7 @@ func TestNewPreProcessorsContainerFactory_NilStore(t *testing.T) { &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, ) assert.Equal(t, process.ErrNilStore, err) @@ -65,6 +67,7 @@ func TestNewPreProcessorsContainerFactory_NilMarshalizer(t *testing.T) { &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, ) assert.Equal(t, process.ErrNilMarshalizer, err) @@ -86,6 +89,7 @@ func TestNewPreProcessorsContainerFactory_NilHasher(t *testing.T) { &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, ) assert.Equal(t, process.ErrNilHasher, err) @@ -107,6 +111,7 @@ func TestNewPreProcessorsContainerFactory_NilDataPool(t *testing.T) { &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, ) assert.Equal(t, process.ErrNilDataPoolHolder, err) @@ -128,6 +133,7 @@ func TestNewPreProcessorsContainerFactory_NilAddrConv(t *testing.T) { &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, ) assert.Equal(t, process.ErrNilAddressConverter, err) @@ -149,6 
+155,7 @@ func TestNewPreProcessorsContainerFactory_NilAccounts(t *testing.T) { &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, ) assert.Equal(t, process.ErrNilAccountsAdapter, err) @@ -170,6 +177,7 @@ func TestNewPreProcessorsContainerFactory_NilTxProcessor(t *testing.T) { nil, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, ) assert.Equal(t, process.ErrNilTxProcessor, err) @@ -191,6 +199,7 @@ func TestNewPreProcessorsContainerFactory_NilSCProcessor(t *testing.T) { &mock.TxProcessorMock{}, nil, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, ) assert.Equal(t, process.ErrNilSmartContractProcessor, err) @@ -212,12 +221,35 @@ func TestNewPreProcessorsContainerFactory_NilSCR(t *testing.T) { &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, nil, + &mock.RewardTxProcessorMock{}, ) assert.Equal(t, process.ErrNilSmartContractResultProcessor, err) assert.Nil(t, ppcm) } +func TestNewPreProcessorsContainerFactory_NilRewardTxProcessor(t *testing.T) { + t.Parallel() + + ppcm, err := NewPreProcessorsContainerFactory( + mock.NewMultiShardsCoordinatorMock(3), + &mock.ChainStorerMock{}, + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + mock.NewPoolsHolderFake(), + &mock.AddressConverterMock{}, + &mock.AccountsStub{}, + &mock.RequestHandlerMock{}, + &mock.TxProcessorMock{}, + &mock.SCProcessorMock{}, + &mock.SmartContractResultsProcessorMock{}, + nil, + ) + + assert.Equal(t, process.ErrNilRewardsTxProcessor, err) + assert.Nil(t, ppcm) +} + func TestNewPreProcessorsContainerFactory_NilRequestHandler(t *testing.T) { t.Parallel() @@ -233,6 +265,7 @@ func TestNewPreProcessorsContainerFactory_NilRequestHandler(t *testing.T) { &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, ) assert.Equal(t, process.ErrNilRequestHandler, err) @@ -254,6 +287,7 @@ func TestNewPreProcessorsContainerFactory(t *testing.T) { &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, ) assert.Nil(t, err) @@ -279,6 +313,7 @@ func TestPreProcessorsContainerFactory_CreateErrTxPreproc(t *testing.T) { &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, ) assert.Nil(t, err) @@ -314,6 +349,7 @@ func TestPreProcessorsContainerFactory_CreateErrScrPreproc(t *testing.T) { &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, ) assert.Nil(t, err) @@ -339,6 +375,12 @@ func TestPreProcessorsContainerFactory_Create(t *testing.T) { }, } } + dataPool.RewardTransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{ + RegisterHandlerCalled: func(i func(key []byte)) { + }, + } + } ppcm, err := NewPreProcessorsContainerFactory( mock.NewMultiShardsCoordinatorMock(3), @@ -352,12 +394,13 @@ func TestPreProcessorsContainerFactory_Create(t *testing.T) { &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, ) assert.Nil(t, err) assert.NotNil(t, ppcm) container, err := ppcm.Create() - assert.Equal(t, 2, container.Len()) + assert.Equal(t, 3, container.Len()) assert.Nil(t, err) } diff --git a/process/interface.go b/process/interface.go index 8b68426eac6..a8a67829cd1 100644 --- a/process/interface.go +++ 
b/process/interface.go @@ -1,6 +1,7 @@ package process import ( + "github.com/ElrondNetwork/elrond-go/data/rewardTx" "math/big" "time" @@ -21,6 +22,11 @@ type TransactionProcessor interface { ProcessTransaction(transaction *transaction.Transaction, round uint64) error } +// RewardTransactionProcessor is the interface for reward transaction execution engine +type RewardTransactionProcessor interface { + ProcessRewardTransaction(rewardTx *rewardTx.RewardTx) error +} + // SmartContractResultProcessor is the main interface for smart contract result execution engine type SmartContractResultProcessor interface { ProcessSmartContractResult(scr *smartContractResult.SmartContractResult) error @@ -302,6 +308,7 @@ type RequestHandler interface { RequestHeaderByNonce(shardId uint32, nonce uint64) RequestTransaction(shardId uint32, txHashes [][]byte) RequestUnsignedTransactions(destShardID uint32, scrHashes [][]byte) + RequestRewardTransactions(destShardID uint32, txHashes [][]byte) RequestMiniBlock(shardId uint32, miniblockHash []byte) RequestHeader(shardId uint32, hash []byte) } diff --git a/process/mock/requestHandlerMock.go b/process/mock/requestHandlerMock.go index 865f2db1558..6a46ab52626 100644 --- a/process/mock/requestHandlerMock.go +++ b/process/mock/requestHandlerMock.go @@ -3,6 +3,7 @@ package mock type RequestHandlerMock struct { RequestTransactionHandlerCalled func(destShardID uint32, txHashes [][]byte) RequestScrHandlerCalled func(destShardID uint32, txHashes [][]byte) + RequestRewardTxHandlerCalled func(destShardID uint32, txHashes [][]byte) RequestMiniBlockHandlerCalled func(destShardID uint32, miniblockHash []byte) RequestHeaderHandlerCalled func(destShardID uint32, hash []byte) RequestHeaderHandlerByNonceCalled func(destShardID uint32, nonce uint64) @@ -22,6 +23,13 @@ func (rrh *RequestHandlerMock) RequestUnsignedTransactions(destShardID uint32, t rrh.RequestScrHandlerCalled(destShardID, txHashes) } +func (rrh *RequestHandlerMock) RequestRewardTransactions(destShardID uint32, txHashes [][]byte) { + if rrh.RequestRewardTxHandlerCalled == nil { + return + } + rrh.RequestRewardTxHandlerCalled(destShardID, txHashes) +} + func (rrh *RequestHandlerMock) RequestMiniBlock(shardId uint32, miniblockHash []byte) { if rrh.RequestMiniBlockHandlerCalled == nil { return diff --git a/process/mock/rewardTxProcessorMock.go b/process/mock/rewardTxProcessorMock.go new file mode 100644 index 00000000000..737fa2f7b38 --- /dev/null +++ b/process/mock/rewardTxProcessorMock.go @@ -0,0 +1,17 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go/data/rewardTx" +) + +type RewardTxProcessorMock struct { + ProcessRewardTransactionCalled func(rTx *rewardTx.RewardTx) error +} + +func (scrp *RewardTxProcessorMock) ProcessRewardTransaction(rTx *rewardTx.RewardTx) error { + if scrp.ProcessRewardTransactionCalled == nil { + return nil + } + + return scrp.ProcessRewardTransactionCalled(rTx) +} diff --git a/process/rewardTransaction/process.go b/process/rewardTransaction/process.go new file mode 100644 index 00000000000..a8773f3aa89 --- /dev/null +++ b/process/rewardTransaction/process.go @@ -0,0 +1,91 @@ +package rewardTransaction + +import ( + "math/big" + + "github.com/ElrondNetwork/elrond-go/data/rewardTx" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/sharding" +) + +type rewardTxProcessor struct { + accounts state.AccountsAdapter + adrConv state.AddressConverter + shardCoordinator sharding.Coordinator +} + +func 
NewRewardTxProcessor( + accountsDB state.AccountsAdapter, + adrConv state.AddressConverter, + coordinator sharding.Coordinator, +) (*rewardTxProcessor, error) { + if accountsDB == nil { + return nil, process.ErrNilAccountsAdapter + } + if adrConv == nil { + return nil, process.ErrNilAddressConverter + } + if coordinator == nil { + return nil, process.ErrNilShardCoordinator + } + + return &rewardTxProcessor{ + accounts: accountsDB, + adrConv: adrConv, + shardCoordinator: coordinator, + }, nil +} + +func (rtp *rewardTxProcessor) getAccountFromAddress(address []byte) (state.AccountHandler, error) { + adrSrc, err := rtp.adrConv.CreateAddressFromPublicKeyBytes(address) + if err != nil { + return nil, err + } + + shardForCurrentNode := rtp.shardCoordinator.SelfId() + shardForSrc := rtp.shardCoordinator.ComputeId(adrSrc) + if shardForCurrentNode != shardForSrc { + return nil, nil + } + + acnt, err := rtp.accounts.GetAccountWithJournal(adrSrc) + if err != nil { + return nil, err + } + + return acnt, nil +} + +// ProcessRewardTransaction updates the account state from the reward transaction +func (rtp *rewardTxProcessor) ProcessRewardTransaction(rTx *rewardTx.RewardTx) error { + if rTx == nil { + return process.ErrNilRewardTransaction + } + + accHandler, err := rtp.getAccountFromAddress(rTx.RcvAddr) + if err != nil { + return err + } + if accHandler == nil || accHandler.IsInterfaceNil() { + return process.ErrNilSCDestAccount + } + + rewardAcc, ok := accHandler.(*state.Account) + if !ok { + return process.ErrWrongTypeAssertion + } + + if rTx.Value == nil { + return process.ErrNilValueFromRewardTransaction + } + + operation := big.NewInt(0) + operation = operation.Add(rTx.Value, rewardAcc.Balance) + err = rewardAcc.SetBalanceWithJournal(operation) + if err != nil { + return err + } + + return nil +} diff --git a/process/transaction/export_test.go b/process/transaction/export_test.go index e80cddf7ad8..78028ecaa98 100644 --- a/process/transaction/export_test.go +++ b/process/transaction/export_test.go @@ -1,12 +1,12 @@ package transaction import ( + "github.com/ElrondNetwork/elrond-go/process/block/preprocess" "math/big" "sync" "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/data/transaction" - "github.com/ElrondNetwork/elrond-go/process/unsigned" ) type TxProcessor *txProcessor @@ -36,12 +36,12 @@ func (txProc *txProcessor) IncreaseNonce(acntSrc *state.Account) error { func (txProc *txProcessor) SetMinTxFee(minTxFee uint64) { mutex.Lock() - unsigned.MinTxFee = minTxFee + preprocess.MinTxFee = minTxFee mutex.Unlock() } func (txProc *txProcessor) SetMinGasPrice(minGasPrice uint64) { mutex.Lock() - unsigned.MinGasPrice = minGasPrice + preprocess.MinGasPrice = minGasPrice mutex.Unlock() } diff --git a/process/transaction/process.go b/process/transaction/process.go index 9d69671e3f7..0a1d6abaae3 100644 --- a/process/transaction/process.go +++ b/process/transaction/process.go @@ -2,17 +2,17 @@ package transaction import ( "bytes" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" + "github.com/ElrondNetwork/elrond-go/process/block/preprocess" "math/big" "github.com/ElrondNetwork/elrond-go/core/logger" "github.com/ElrondNetwork/elrond-go/data" - "github.com/ElrondNetwork/elrond-go/data/feeTx" "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/data/transaction" "github.com/ElrondNetwork/elrond-go/hashing" "github.com/ElrondNetwork/elrond-go/marshal" "github.com/ElrondNetwork/elrond-go/process" - 
"github.com/ElrondNetwork/elrond-go/process/unsigned" "github.com/ElrondNetwork/elrond-go/sharding" ) @@ -25,7 +25,7 @@ type txProcessor struct { hasher hashing.Hasher scProcessor process.SmartContractProcessor marshalizer marshal.Marshalizer - txFeeHandler process.UnsignedTxHandler + rewardTxHandler process.UnsignedTxHandler shardCoordinator sharding.Coordinator txTypeHandler process.TxTypeHandler } @@ -38,7 +38,7 @@ func NewTxProcessor( marshalizer marshal.Marshalizer, shardCoordinator sharding.Coordinator, scProcessor process.SmartContractProcessor, - txFeeHandler process.UnsignedTxHandler, + rewardTxHandler process.UnsignedTxHandler, txTypeHandler process.TxTypeHandler, ) (*txProcessor, error) { @@ -60,7 +60,7 @@ func NewTxProcessor( if scProcessor == nil { return nil, process.ErrNilSmartContractProcessor } - if txFeeHandler == nil { + if rewardTxHandler == nil { return nil, process.ErrNilUnsignedTxHandler } if txTypeHandler == nil { @@ -74,7 +74,7 @@ func NewTxProcessor( marshalizer: marshalizer, shardCoordinator: shardCoordinator, scProcessor: scProcessor, - txFeeHandler: txFeeHandler, + rewardTxHandler: rewardTxHandler, txTypeHandler: txTypeHandler, }, nil } @@ -112,8 +112,8 @@ func (txProc *txProcessor) ProcessTransaction(tx *transaction.Transaction, round return txProc.processSCDeployment(tx, adrSrc, roundIndex) case process.SCInvoking: return txProc.processSCInvoking(tx, adrSrc, adrDst, roundIndex) - case process.TxFee: - return txProc.processAccumulatedTxFees(tx, adrSrc) + case process.RewardTx: + return txProc.processRewardTx(tx, adrSrc) } return process.ErrWrongTransaction @@ -129,8 +129,8 @@ func (txProc *txProcessor) processTxFee(tx *transaction.Transaction, acntSnd *st txDataLen := int64(len(tx.Data)) minFee := big.NewInt(0) - minFee = minFee.Mul(big.NewInt(txDataLen), big.NewInt(0).SetUint64(unsigned.MinGasPrice)) - minFee = minFee.Add(minFee, big.NewInt(0).SetUint64(unsigned.MinTxFee)) + minFee = minFee.Mul(big.NewInt(txDataLen), big.NewInt(0).SetUint64(preprocess.MinGasPrice)) + minFee = minFee.Add(minFee, big.NewInt(0).SetUint64(preprocess.MinTxFee)) if minFee.Cmp(cost) > 0 { return nil, process.ErrNotEnoughFeeInTransactions @@ -149,11 +149,11 @@ func (txProc *txProcessor) processTxFee(tx *transaction.Transaction, acntSnd *st return cost, nil } -func (txProc *txProcessor) processAccumulatedTxFees( +func (txProc *txProcessor) processRewardTx( tx data.TransactionHandler, adrSrc state.AddressContainer, ) error { - currTxFee, ok := tx.(*feeTx.FeeTx) + rTx, ok := tx.(*rewardTx.RewardTx) if !ok { return process.ErrWrongTypeAssertion } @@ -166,14 +166,14 @@ func (txProc *txProcessor) processAccumulatedTxFees( // is sender address in node shard if acntSrc != nil { op := big.NewInt(0) - err := acntSrc.SetBalanceWithJournal(op.Add(acntSrc.Balance, currTxFee.Value)) + err := acntSrc.SetBalanceWithJournal(op.Add(acntSrc.Balance, rTx.Value)) if err != nil { return err } } - if currTxFee.ShardId == txProc.shardCoordinator.SelfId() { - txProc.txFeeHandler.AddRewardTxFromBlock(currTxFee) + if rTx.ShardId == txProc.shardCoordinator.SelfId() { + txProc.rewardTxHandler.AddRewardTxFromBlock(rTx) } return nil @@ -211,7 +211,7 @@ func (txProc *txProcessor) processMoveBalance( } } - txProc.txFeeHandler.ProcessTransactionFee(txFee) + txProc.rewardTxHandler.ProcessTransactionFee(txFee) return nil } From 9db3d5e2fc43f5cabe194584d9e973367fc2a52f Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Mon, 26 Aug 2019 15:56:24 +0300 Subject: [PATCH 071/234] cmd, process, config: reward community and burn 
addresses from config --- cmd/node/config/config.toml | 4 + cmd/node/factory/structs.go | 23 +- cmd/node/main.go | 3 + config/config.go | 9 +- config/tomlConfig_test.go | 11 + process/block/baseProcess_test.go | 152 +++------ process/block/shardblock_test.go | 340 +++++--------------- process/coordinator/process_test.go | 140 +++----- process/coordinator/transactionType_test.go | 5 +- process/mock/blockProcessorMock.go | 4 +- 10 files changed, 227 insertions(+), 464 deletions(-) diff --git a/cmd/node/config/config.toml b/cmd/node/config/config.toml index e12547035e1..b1970cd0f07 100644 --- a/cmd/node/config/config.toml +++ b/cmd/node/config/config.toml @@ -232,3 +232,7 @@ Port = 123 Timeout = 0 # Setting 0 means 'use default value' Version = 0 # Setting 0 means 'use default value' + +[RewardConfig] + CommunityAddress = "1bedf9f1db526aa98eb61f251e6eb29df64c0a4d96261b6fe9d4df1bc2cf5420" + BurnAddress = "deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef" diff --git a/cmd/node/factory/structs.go b/cmd/node/factory/structs.go index 8b8c46d82d3..7b6d7770718 100644 --- a/cmd/node/factory/structs.go +++ b/cmd/node/factory/structs.go @@ -7,7 +7,6 @@ import ( "encoding/hex" "errors" "fmt" - "github.com/ElrondNetwork/elrond-go/process/rewardTransaction" "io" "math/big" "path/filepath" @@ -59,6 +58,7 @@ import ( "github.com/ElrondNetwork/elrond-go/process/factory" "github.com/ElrondNetwork/elrond-go/process/factory/metachain" "github.com/ElrondNetwork/elrond-go/process/factory/shard" + "github.com/ElrondNetwork/elrond-go/process/rewardTransaction" "github.com/ElrondNetwork/elrond-go/process/smartContract" processSync "github.com/ElrondNetwork/elrond-go/process/sync" "github.com/ElrondNetwork/elrond-go/process/track" @@ -403,6 +403,7 @@ func NetworkComponentsFactory(p2pConfig *config.P2PConfig, log *logger.Logger, c type processComponentsFactoryArgs struct { genesisConfig *sharding.Genesis + rewardsConfig *config.RewardConfig nodesConfig *sharding.NodesSetup syncer ntp.SyncTimer shardCoordinator sharding.Coordinator @@ -418,6 +419,7 @@ type processComponentsFactoryArgs struct { // NewProcessComponentsFactoryArgs initializes the arguments necessary for creating the process components func NewProcessComponentsFactoryArgs( genesisConfig *sharding.Genesis, + rewardsConfig *config.RewardConfig, nodesConfig *sharding.NodesSetup, syncer ntp.SyncTimer, shardCoordinator sharding.Coordinator, @@ -431,6 +433,7 @@ func NewProcessComponentsFactoryArgs( ) *processComponentsFactoryArgs { return &processComponentsFactoryArgs{ genesisConfig: genesisConfig, + rewardsConfig: rewardsConfig, nodesConfig: nodesConfig, syncer: syncer, shardCoordinator: shardCoordinator, @@ -498,6 +501,7 @@ func ProcessComponentsFactory(args *processComponentsFactoryArgs) (*Process, err resolversFinder, args.shardCoordinator, args.nodesCoordinator, + args.rewardsConfig, args.data, args.core, args.state, @@ -914,7 +918,7 @@ func createShardDataPoolFromConfig( rewardTxPool, err := shardedData.NewShardedData(getCacherFromConfig(config.RewardTxDataPool)) if err != nil { - log.Info("error creating transaction fees pool") + log.Info("error creating reward transaction pool") return nil, err } @@ -1094,6 +1098,10 @@ func createNetMessenger( return nm, nil } +func createRewardParametersFromConfig() { + +} + func newInterceptorAndResolverContainerFactory( shardCoordinator sharding.Coordinator, nodesCoordinator sharding.NodesCoordinator, @@ -1286,6 +1294,7 @@ func newBlockProcessorAndTracker( resolversFinder dataRetriever.ResolversFinder, 
shardCoordinator sharding.Coordinator, nodesCoordinator sharding.NodesCoordinator, + rewardsConfig *config.RewardConfig, data *Data, core *Core, state *State, @@ -1295,9 +1304,12 @@ func newBlockProcessorAndTracker( coreServiceContainer serviceContainer.Core, ) (process.BlockProcessor, process.BlocksTracker, error) { - //TODO: replace with correct community address and invalid burnAddress - communityAddress, _ := hex.DecodeString("1bedf9f1db526aa98eb61f251e6eb29df64c0a4d96261b6fe9d4df1bc2cf5420") - burnAddress, _ := hex.DecodeString("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef") + if rewardsConfig.CommunityAddress == "" || rewardsConfig.BurnAddress == ""{ + return nil, nil, errors.New("rewards configuration missing") + } + + communityAddress, _ := hex.DecodeString(rewardsConfig.CommunityAddress) + burnAddress, _ := hex.DecodeString(rewardsConfig.BurnAddress) // TODO: construct this correctly on the PR specialAddressHolder, err := address.NewSpecialAddressHolder( @@ -1309,6 +1321,7 @@ func newBlockProcessorAndTracker( return nil, nil, err } + // TODO: remove nodesConfig as no longer needed with nodes coordinator available if shardCoordinator.SelfId() < shardCoordinator.NumberOfShards() { return newShardBlockProcessorAndTracker( resolversFinder, diff --git a/cmd/node/main.go b/cmd/node/main.go index c539979cbd7..62a9ab9f5ef 100644 --- a/cmd/node/main.go +++ b/cmd/node/main.go @@ -612,8 +612,11 @@ func startNode(ctx *cli.Context, log *logger.Logger, version string) error { } } + rewardConfig := &generalConfig.RewardConfig + processArgs := factory.NewProcessComponentsFactoryArgs( genesisConfig, + rewardConfig, nodesConfig, syncer, shardCoordinator, diff --git a/config/config.go b/config/config.go index 32fb434ff54..639d17eeed0 100644 --- a/config/config.go +++ b/config/config.go @@ -55,6 +55,12 @@ type NTPConfig struct { Version int } +// RewardConfig will hold the reward configuration +type RewardConfig struct { + CommunityAddress string + BurnAddress string +} + // Config will hold the entire application configuration parameters type Config struct { MiniBlocksStorage StorageConfig @@ -99,7 +105,8 @@ type Config struct { Consensus TypeConfig Explorer ExplorerConfig - NTPConfig NTPConfig + NTPConfig NTPConfig + RewardConfig RewardConfig } // NodeConfig will hold basic p2p settings diff --git a/config/tomlConfig_test.go b/config/tomlConfig_test.go index 460a36f97e2..11c1f9cd383 100644 --- a/config/tomlConfig_test.go +++ b/config/tomlConfig_test.go @@ -32,6 +32,9 @@ func TestTomlParser(t *testing.T) { consensusType := "bn" + communityAddress := "community" + burnAddress := "burnAddress" + cfgExpected := Config{ MiniBlocksStorage: StorageConfig{ Cache: CacheConfig{ @@ -71,6 +74,10 @@ func TestTomlParser(t *testing.T) { Consensus: TypeConfig{ Type: consensusType, }, + RewardConfig: RewardConfig{ + CommunityAddress: communityAddress, + BurnAddress: burnAddress, + }, } testString := ` @@ -107,6 +114,10 @@ func TestTomlParser(t *testing.T) { [Consensus] Type = "` + consensusType + `" + +[RewardConfig] + CommunityAddress = "` + communityAddress + `" + BurnAddress = "` + burnAddress + `" ` cfg := Config{} diff --git a/process/block/baseProcess_test.go b/process/block/baseProcess_test.go index 17e8c1e157b..eae98a048ac 100644 --- a/process/block/baseProcess_test.go +++ b/process/block/baseProcess_test.go @@ -49,110 +49,58 @@ func generateTestUnit() storage.Storer { return storer } +func createShardedDataChacherNotifier( + handler data.TransactionHandler, + testHash []byte, +) (func() 
dataRetriever.ShardedDataCacherNotifier ) { + return func() dataRetriever.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{ + RegisterHandlerCalled: func(i func(key []byte)) {}, + ShardDataStoreCalled: func(id string) (c storage.Cacher) { + return &mock.CacherStub{ + PeekCalled: func(key []byte) (value interface{}, ok bool) { + if reflect.DeepEqual(key, testHash) { + return handler, true + } + return nil, false + }, + KeysCalled: func() [][]byte { + return [][]byte{[]byte("key1"), []byte("key2")} + }, + LenCalled: func() int { + return 0 + }, + } + }, + RemoveSetOfDataFromPoolCalled: func(keys [][]byte, id string) {}, + SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { + if reflect.DeepEqual(key, []byte("tx1_hash")) { + return handler, true + } + return nil, false + }, + AddDataCalled: func(key []byte, data interface{}, cacheId string) { + }, + } + } +} + func initDataPool(testHash []byte) *mock.PoolsHolderStub { + rewardTx := &rewardTx.RewardTx{ + Round: 1, + Epoch: 0, + Value: big.NewInt(10), + RcvAddr: []byte("receiver"), + ShardId: 0, + } + txCalled := createShardedDataChacherNotifier(&transaction.Transaction{Nonce: 10}, testHash) + unsignedTxCalled := createShardedDataChacherNotifier(&transaction.Transaction{Nonce: 10}, testHash) + rewardTransactionsCalled := createShardedDataChacherNotifier(rewardTx, testHash) + sdp := &mock.PoolsHolderStub{ - TransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{ - RegisterHandlerCalled: func(i func(key []byte)) {}, - ShardDataStoreCalled: func(id string) (c storage.Cacher) { - return &mock.CacherStub{ - PeekCalled: func(key []byte) (value interface{}, ok bool) { - if reflect.DeepEqual(key, testHash) { - return &transaction.Transaction{Nonce: 10}, true - } - return nil, false - }, - KeysCalled: func() [][]byte { - return [][]byte{[]byte("key1"), []byte("key2")} - }, - LenCalled: func() int { - return 0 - }, - } - }, - RemoveSetOfDataFromPoolCalled: func(keys [][]byte, id string) {}, - SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { - if reflect.DeepEqual(key, []byte("tx1_hash")) { - return &transaction.Transaction{Nonce: 10}, true - } - return nil, false - }, - AddDataCalled: func(key []byte, data interface{}, cacheId string) { - }, - } - }, - UnsignedTransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{ - RegisterHandlerCalled: func(i func(key []byte)) {}, - ShardDataStoreCalled: func(id string) (c storage.Cacher) { - return &mock.CacherStub{ - PeekCalled: func(key []byte) (value interface{}, ok bool) { - if reflect.DeepEqual(key, testHash) { - return &transaction.Transaction{Nonce: 10}, true - } - return nil, false - }, - KeysCalled: func() [][]byte { - return [][]byte{[]byte("key1"), []byte("key2")} - }, - LenCalled: func() int { - return 0 - }, - } - }, - RemoveSetOfDataFromPoolCalled: func(keys [][]byte, id string) {}, - SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { - if reflect.DeepEqual(key, []byte("tx1_hash")) { - return &transaction.Transaction{Nonce: 10}, true - } - return nil, false - }, - AddDataCalled: func(key []byte, data interface{}, cacheId string) { - }, - } - }, - RewardTransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{ - RegisterHandlerCalled: func(i func(key []byte)) {}, - ShardDataStoreCalled: func(id string) (c storage.Cacher) { - return &mock.CacherStub{ - PeekCalled: func(key []byte) (value interface{}, ok 
bool) { - if reflect.DeepEqual(key, testHash) { - return &rewardTx.RewardTx{ - Round: 1, - Epoch: 0, - Value: big.NewInt(10), - RcvAddr: []byte("receiver"), - ShardId: 0, - }, true - } - return nil, false - }, - KeysCalled: func() [][]byte { - return [][]byte{[]byte("key1"), []byte("key2")} - }, - LenCalled: func() int { - return 0 - }, - } - }, - RemoveSetOfDataFromPoolCalled: func(keys [][]byte, id string) {}, - SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { - if reflect.DeepEqual(key, []byte("tx1_hash")) { - return &rewardTx.RewardTx{ - Round: 1, - Epoch: 0, - Value: big.NewInt(10), - RcvAddr: []byte("receiver"), - ShardId: 0, - }, true - } - return nil, false - }, - AddDataCalled: func(key []byte, data interface{}, cacheId string) { - }, - } - }, + TransactionsCalled: txCalled, + UnsignedTransactionsCalled: unsignedTxCalled, + RewardTransactionsCalled: rewardTransactionsCalled, HeadersNoncesCalled: func() dataRetriever.Uint64SyncMapCacher { return &mock.Uint64SyncMapCacherStub{ MergeCalled: func(u uint64, syncMap dataRetriever.ShardIdHashMap) {}, diff --git a/process/block/shardblock_test.go b/process/block/shardblock_test.go index 8bd4b431a91..611287cf46d 100644 --- a/process/block/shardblock_test.go +++ b/process/block/shardblock_test.go @@ -83,6 +83,58 @@ func initBlockHeader(prevHash []byte, rootHash []byte, mbHdrs []block.MiniBlockH return hdr } +type methodFlags struct { + revertToSnapshotCalled bool + rootHashCalled bool +} + +func defaultShardProcessor() (process.BlockProcessor, *methodFlags, error) { + // set accounts not dirty + flags := &methodFlags{} + journalLen := func() int { return 0 } + revertToSnapshot := func(snapshot int) error { + flags.revertToSnapshotCalled = true + return nil + } + rootHashCalled := func() ([]byte, error) { + flags.rootHashCalled = true + return []byte("rootHash"), nil + } + + accStub := initAccountsMock() + accStub.JournalLenCalled = journalLen + accStub.RevertToSnapshotCalled = revertToSnapshot + accStub.RootHashCalled = rootHashCalled + + tdp := initDataPool([]byte("tx_hash1")) + sp, err := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + accStub, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{ + ProbableHighestNonceCalled: func() uint64 { + return 0 + }, + GetHighestFinalBlockNonceCalled: func() uint64 { + return 0 + }, + }, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + return sp, flags, err +} + //------- NewBlockProcessor func TestNewBlockProcessor_NilDataPoolShouldErr(t *testing.T) { @@ -377,24 +429,8 @@ func TestNewShardProcessor_NilUint64Converter(t *testing.T) { func TestNewShardProcessor_OkValsShouldWork(t *testing.T) { t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - sp, err := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - 
&mock.Uint64ByteSliceConverterMock{}, - ) + + sp, _, err := defaultShardProcessor() assert.Nil(t, err) assert.NotNil(t, sp) } @@ -403,24 +439,8 @@ func TestNewShardProcessor_OkValsShouldWork(t *testing.T) { func TestShardProcessor_ProcessBlockWithNilBlockchainShouldErr(t *testing.T) { t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + + sp, _, _ := defaultShardProcessor() blk := make(block.Body, 0) err := sp.ProcessBlock(nil, &block.Header{}, blk, haveTime) assert.Equal(t, process.ErrNilBlockChain, err) @@ -428,24 +448,8 @@ func TestShardProcessor_ProcessBlockWithNilBlockchainShouldErr(t *testing.T) { func TestShardProcessor_ProcessBlockWithNilHeaderShouldErr(t *testing.T) { t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + + sp, _, _ := defaultShardProcessor() body := make(block.Body, 0) err := sp.ProcessBlock(&blockchain.BlockChain{}, nil, body, haveTime) assert.Equal(t, process.ErrNilBlockHeader, err) @@ -453,48 +457,16 @@ func TestShardProcessor_ProcessBlockWithNilHeaderShouldErr(t *testing.T) { func TestShardProcessor_ProcessBlockWithNilBlockBodyShouldErr(t *testing.T) { t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + + sp, _, _ := defaultShardProcessor() err := sp.ProcessBlock(&blockchain.BlockChain{}, &block.Header{}, nil, haveTime) assert.Equal(t, process.ErrNilBlockBody, err) } func TestShardProcessor_ProcessBlockWithNilHaveTimeFuncShouldErr(t *testing.T) { t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + + sp, _, _ := 
defaultShardProcessor() blk := make(block.Body, 0) err := sp.ProcessBlock(&blockchain.BlockChain{}, &block.Header{}, blk, nil) assert.Equal(t, process.ErrNilHaveTimeHandler, err) @@ -543,7 +515,7 @@ func TestShardProcessor_ProcessWithDirtyAccountShouldErr(t *testing.T) { func TestShardProcessor_ProcessBlockHeaderBodyMismatchShouldErr(t *testing.T) { t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) + txHash := []byte("tx_hash1") blkc := &blockchain.BlockChain{} hdr := block.Header{ @@ -563,40 +535,8 @@ func TestShardProcessor_ProcessBlockHeaderBodyMismatchShouldErr(t *testing.T) { TxHashes: txHashes, } body = append(body, &miniblock) - // set accounts not dirty - journalLen := func() int { return 0 } - revertToSnapshot := func(snapshot int) error { return nil } - rootHashCalled := func() ([]byte, error) { - return []byte("rootHash"), nil - } - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{ - JournalLenCalled: journalLen, - RevertToSnapshotCalled: revertToSnapshot, - RootHashCalled: rootHashCalled, - }, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{ - ProbableHighestNonceCalled: func() uint64 { - return 0 - }, - GetHighestFinalBlockNonceCalled: func() uint64 { - return 0 - }, - }, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + + sp, _, _ := defaultShardProcessor() // should return err err := sp.ProcessBlock(blkc, &hdr, body, haveTime) @@ -711,24 +651,8 @@ func TestShardProcessor_ProcessBlockWithInvalidTransactionShouldErr(t *testing.T func TestShardProcessor_ProcessWithHeaderNotFirstShouldErr(t *testing.T) { t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + + sp, _, _ := defaultShardProcessor() hdr := &block.Header{ Nonce: 0, Round: 1, @@ -745,24 +669,8 @@ func TestShardProcessor_ProcessWithHeaderNotFirstShouldErr(t *testing.T) { func TestShardProcessor_ProcessWithHeaderNotCorrectNonceShouldErr(t *testing.T) { t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + + sp, _, _ := defaultShardProcessor() hdr := &block.Header{ Nonce: 0, Round: 1, @@ -779,31 +687,8 @@ func TestShardProcessor_ProcessWithHeaderNotCorrectNonceShouldErr(t *testing.T) func 
TestShardProcessor_ProcessWithHeaderNotCorrectPrevHashShouldErr(t *testing.T) { t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{ - ProbableHighestNonceCalled: func() uint64 { - return 0 - }, - GetHighestFinalBlockNonceCalled: func() uint64 { - return 0 - }, - }, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + + sp, _, _ := defaultShardProcessor() hdr := &block.Header{ Nonce: 1, Round: 1, @@ -947,7 +832,7 @@ func TestShardProcessor_ProcessBlockWithErrOnProcessBlockTransactionsCallShouldR func TestShardProcessor_ProcessBlockWithErrOnVerifyStateRootCallShouldRevertState(t *testing.T) { t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) + txHash := []byte("tx_hash1") blkc := &blockchain.BlockChain{ CurrentBlockHeader: &block.Header{ @@ -984,58 +869,20 @@ func TestShardProcessor_ProcessBlockWithErrOnVerifyStateRootCallShouldRevertStat Signature: []byte("signature"), PubKeysBitmap: []byte("00110"), ShardId: 0, - RootHash: []byte("rootHash"), + RootHash: []byte("rootHashX"), MiniBlockHeaders: mbHdrs, } - // set accounts not dirty - journalLen := func() int { return 0 } - wasCalled := false - revertToSnapshot := func(snapshot int) error { - wasCalled = true - return nil - } - rootHashCalled := func() ([]byte, error) { - return []byte("rootHashX"), nil - } - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{ - JournalLenCalled: journalLen, - RevertToSnapshotCalled: revertToSnapshot, - RootHashCalled: rootHashCalled, - }, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{ - ProbableHighestNonceCalled: func() uint64 { - return 0 - }, - GetHighestFinalBlockNonceCalled: func() uint64 { - return 0 - }, - }, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - + sp, flags, _ := defaultShardProcessor() // should return err err := sp.ProcessBlock(blkc, &hdr, body, haveTime) assert.Equal(t, process.ErrRootStateMissmatch, err) - assert.True(t, wasCalled) + assert.True(t, flags.revertToSnapshotCalled) } func TestShardProcessor_ProcessBlockOnlyIntraShardShouldPass(t *testing.T) { t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) + txHash := []byte("tx_hash1") blkc := &blockchain.BlockChain{ CurrentBlockHeader: &block.Header{ @@ -1076,42 +923,13 @@ func TestShardProcessor_ProcessBlockOnlyIntraShardShouldPass(t *testing.T) { RootHash: rootHash, MiniBlockHeaders: mbHdrs, } - // set accounts not dirty - journalLen := func() int { return 0 } - wasCalled := false - revertToSnapshot := func(snapshot int) error { - wasCalled = true - return nil - } - rootHashCalled := func() ([]byte, error) { - return rootHash, nil - } - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{ 
- JournalLenCalled: journalLen, - RevertToSnapshotCalled: revertToSnapshot, - RootHashCalled: rootHashCalled, - }, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + + sp, flags, _ := defaultShardProcessor() // should return err err := sp.ProcessBlock(blkc, &hdr, body, haveTime) assert.Nil(t, err) - assert.False(t, wasCalled) + assert.False(t, flags.revertToSnapshotCalled) } func TestShardProcessor_ProcessBlockCrossShardWithoutMetaShouldFail(t *testing.T) { diff --git a/process/coordinator/process_test.go b/process/coordinator/process_test.go index 03c18fecf82..0d5dd2160da 100644 --- a/process/coordinator/process_test.go +++ b/process/coordinator/process_test.go @@ -4,7 +4,6 @@ import ( "bytes" "encoding/hex" "errors" - "github.com/ElrondNetwork/elrond-go/data/rewardTx" "math/big" "reflect" "sync" @@ -15,6 +14,7 @@ import ( "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" "github.com/ElrondNetwork/elrond-go/data/smartContractResult" "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/data/transaction" @@ -30,98 +30,55 @@ import ( "github.com/stretchr/testify/assert" ) +func createShardedDataChacherNotifier( + handler data.TransactionHandler, + testHash []byte, +) (func() dataRetriever.ShardedDataCacherNotifier ) { + return func() dataRetriever.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{ + RegisterHandlerCalled: func(i func(key []byte)) {}, + ShardDataStoreCalled: func(id string) (c storage.Cacher) { + return &mock.CacherStub{ + PeekCalled: func(key []byte) (value interface{}, ok bool) { + if reflect.DeepEqual(key, testHash) { + return handler, true + } + return nil, false + }, + KeysCalled: func() [][]byte { + return [][]byte{[]byte("key1"), []byte("key2")} + }, + LenCalled: func() int { + return 0 + }, + } + }, + RemoveSetOfDataFromPoolCalled: func(keys [][]byte, id string) {}, + SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { + if reflect.DeepEqual(key, []byte("tx1_hash")) { + return handler, true + } + return nil, false + }, + AddDataCalled: func(key []byte, data interface{}, cacheId string) { + }, + } + } +} + func initDataPool(testHash []byte) *mock.PoolsHolderStub { + tx := &transaction.Transaction{Nonce: 10} + sc := &smartContractResult.SmartContractResult{Nonce: 10, SndAddr: []byte("0"), RcvAddr: []byte("1")} + rTx := &rewardTx.RewardTx{Epoch: 0, Round: 1, RcvAddr: []byte("1")} + + txCalled := createShardedDataChacherNotifier(tx, testHash) + unsignedTxHandler := createShardedDataChacherNotifier(sc, testHash) + rewardTxCalled := createShardedDataChacherNotifier(rTx, testHash) + sdp := &mock.PoolsHolderStub{ - TransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{ - RegisterHandlerCalled: func(i func(key []byte)) {}, - ShardDataStoreCalled: func(id string) (c storage.Cacher) { - return &mock.CacherStub{ - PeekCalled: func(key []byte) (value interface{}, ok bool) { - if reflect.DeepEqual(key, testHash) { - return &transaction.Transaction{Nonce: 10, Data: id}, true - } - return nil, false - }, - KeysCalled: func() [][]byte { - return 
[][]byte{[]byte("key1"), []byte("key2"), testHash} - }, - LenCalled: func() int { - return 0 - }, - } - }, - RemoveSetOfDataFromPoolCalled: func(keys [][]byte, id string) {}, - SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { - if reflect.DeepEqual(key, testHash) { - return &transaction.Transaction{Nonce: 10}, true - } - return nil, false - }, - AddDataCalled: func(key []byte, data interface{}, cacheId string) { - }, - } - }, - UnsignedTransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{ - RegisterHandlerCalled: func(i func(key []byte)) {}, - ShardDataStoreCalled: func(id string) (c storage.Cacher) { - return &mock.CacherStub{ - PeekCalled: func(key []byte) (value interface{}, ok bool) { - if reflect.DeepEqual(key, testHash) { - return &smartContractResult.SmartContractResult{Nonce: 10, SndAddr: []byte("0"), RcvAddr: []byte("1")}, true - } - return nil, false - }, - KeysCalled: func() [][]byte { - return [][]byte{[]byte("key1"), []byte("key2")} - }, - LenCalled: func() int { - return 0 - }, - } - }, - RemoveSetOfDataFromPoolCalled: func(keys [][]byte, id string) {}, - SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { - if reflect.DeepEqual(key, testHash) { - return &smartContractResult.SmartContractResult{Nonce: 10, SndAddr: []byte("0"), RcvAddr: []byte("1")}, true - } - return nil, false - }, - AddDataCalled: func(key []byte, data interface{}, cacheId string) { - }, - } - }, - RewardTransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{ - RegisterHandlerCalled: func(i func(key []byte)) {}, - ShardDataStoreCalled: func(id string) (c storage.Cacher) { - return &mock.CacherStub{ - PeekCalled: func(key []byte) (value interface{}, ok bool) { - if reflect.DeepEqual(key, testHash) { - return &rewardTx.RewardTx{Epoch: 0, Round: 1, RcvAddr: []byte("1")}, true - } - return nil, false - }, - KeysCalled: func() [][]byte { - return [][]byte{[]byte("key1"), []byte("key2")} - }, - LenCalled: func() int { - return 0 - }, - } - }, - RemoveSetOfDataFromPoolCalled: func(keys [][]byte, id string) {}, - SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { - if reflect.DeepEqual(key, testHash) { - return &rewardTx.RewardTx{Epoch: 0, Round: 1, RcvAddr: []byte("1")}, true - } - return nil, false - }, - AddDataCalled: func(key []byte, data interface{}, cacheId string) { - }, - } - }, + TransactionsCalled: txCalled, + UnsignedTransactionsCalled: unsignedTxHandler, + RewardTransactionsCalled: rewardTxCalled, HeadersNoncesCalled: func() dataRetriever.Uint64SyncMapCacher { return &mock.Uint64SyncMapCacherStub{ MergeCalled: func(u uint64, hashMap dataRetriever.ShardIdHashMap) {}, @@ -187,6 +144,7 @@ func initDataPool(testHash []byte) *mock.PoolsHolderStub { } return sdp } + func containsHash(txHashes [][]byte, hash []byte) bool { for _, txHash := range txHashes { if bytes.Equal(hash, txHash) { diff --git a/process/coordinator/transactionType_test.go b/process/coordinator/transactionType_test.go index 6ed137b8452..af177bf6cfb 100644 --- a/process/coordinator/transactionType_test.go +++ b/process/coordinator/transactionType_test.go @@ -2,14 +2,15 @@ package coordinator import ( "crypto/rand" + "math/big" + "testing" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/data/transaction" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/mock" 
"github.com/stretchr/testify/assert" - "math/big" - "testing" ) func TestNewTxTypeHandler_NilAddrConv(t *testing.T) { diff --git a/process/mock/blockProcessorMock.go b/process/mock/blockProcessorMock.go index 0156833536b..db35011cfcc 100644 --- a/process/mock/blockProcessorMock.go +++ b/process/mock/blockProcessorMock.go @@ -67,6 +67,6 @@ func (blProcMock BlockProcessorMock) AddLastNotarizedHdr(shardId uint32, process blProcMock.AddLastNotarizedHdrCalled(shardId, processedHdr) } -func (blProcMock BlockProcessorMock) SetConsensusRewardAddresses([]string){ +func (blProcMock BlockProcessorMock) SetConsensusRewardAddresses([]string) { panic("implement me") -} \ No newline at end of file +} From 7729481923286075354563152769a9d9990437f2 Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Tue, 27 Aug 2019 09:22:22 +0300 Subject: [PATCH 072/234] dataRetriever, process, integrationTests: add rewards transaction interceptors, fixes on resolvers --- .../shard/resolversContainerFactory.go | 19 ++- .../shard/resolversContainerFactory_test.go | 6 +- integrationTests/testInitializer.go | 1 + process/errors.go | 6 + .../shard/interceptorsContainerFactory.go | 55 +++++++ .../intercepteRewardTransaction.go | 149 +++++++++++++++++ process/rewardTransaction/interceptor.go | 151 ++++++++++++++++++ 7 files changed, 385 insertions(+), 2 deletions(-) create mode 100644 process/rewardTransaction/intercepteRewardTransaction.go create mode 100644 process/rewardTransaction/interceptor.go diff --git a/dataRetriever/factory/shard/resolversContainerFactory.go b/dataRetriever/factory/shard/resolversContainerFactory.go index 3b025c1b21f..fc5fc678022 100644 --- a/dataRetriever/factory/shard/resolversContainerFactory.go +++ b/dataRetriever/factory/shard/resolversContainerFactory.go @@ -74,7 +74,11 @@ func NewResolversContainerFactory( func (rcf *resolversContainerFactory) Create() (dataRetriever.ResolversContainer, error) { container := containers.NewResolversContainer() - keys, resolverSlice, err := rcf.generateTxResolvers(factory.TransactionTopic, dataRetriever.TransactionUnit, rcf.dataPools.Transactions()) + keys, resolverSlice, err := rcf.generateTxResolvers( + factory.TransactionTopic, + dataRetriever.TransactionUnit, + rcf.dataPools.Transactions(), + ) if err != nil { return nil, err } @@ -96,6 +100,19 @@ func (rcf *resolversContainerFactory) Create() (dataRetriever.ResolversContainer return nil, err } + keys, resolverSlice, err = rcf.generateTxResolvers( + factory.RewardsTransactionTopic, + dataRetriever.RewardTransactionUnit, + rcf.dataPools.RewardTransactions(), + ) + if err != nil { + return nil, err + } + err = container.AddMultiple(keys, resolverSlice) + if err != nil { + return nil, err + } + keys, resolverSlice, err = rcf.generateHdrResolver() if err != nil { return nil, err diff --git a/dataRetriever/factory/shard/resolversContainerFactory_test.go b/dataRetriever/factory/shard/resolversContainerFactory_test.go index 272c76093fa..aee73e043d8 100644 --- a/dataRetriever/factory/shard/resolversContainerFactory_test.go +++ b/dataRetriever/factory/shard/resolversContainerFactory_test.go @@ -69,6 +69,9 @@ func createDataPools() dataRetriever.PoolsHolder { pools.UnsignedTransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { return &mock.ShardedDataStub{} } + pools.RewardTransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{} + } return pools } @@ -415,13 +418,14 @@ func TestResolversContainerFactory_With4ShardsShouldWork(t *testing.T) { numResolverSCRs := 
noOfShards numResolverTxs := noOfShards + numResolverRewardTxs := noOfShards numResolverHeaders := 1 numResolverMiniBlocks := noOfShards numResolverPeerChanges := 1 numResolverMetachainShardHeaders := 1 numResolverMetaBlockHeaders := 1 totalResolvers := numResolverTxs + numResolverHeaders + numResolverMiniBlocks + numResolverPeerChanges + - numResolverMetachainShardHeaders + numResolverMetaBlockHeaders + numResolverSCRs + numResolverMetachainShardHeaders + numResolverMetaBlockHeaders + numResolverSCRs + numResolverRewardTxs assert.Equal(t, totalResolvers, container.Len()) } diff --git a/integrationTests/testInitializer.go b/integrationTests/testInitializer.go index 096ec0901e8..d66d44a08a3 100644 --- a/integrationTests/testInitializer.go +++ b/integrationTests/testInitializer.go @@ -168,6 +168,7 @@ func CreateShardStore(numOfShards uint32) dataRetriever.StorageService { store.AddStorer(dataRetriever.PeerChangesUnit, CreateMemUnit()) store.AddStorer(dataRetriever.BlockHeaderUnit, CreateMemUnit()) store.AddStorer(dataRetriever.UnsignedTransactionUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.RewardTransactionUnit, CreateMemUnit()) store.AddStorer(dataRetriever.MetaHdrNonceHashDataUnit, CreateMemUnit()) for i := uint32(0); i < numOfShards; i++ { diff --git a/process/errors.go b/process/errors.go index 6f60fb306bb..130568c1c96 100644 --- a/process/errors.go +++ b/process/errors.go @@ -376,6 +376,9 @@ var ErrInvalidDataInput = errors.New("data input is invalid to create key, value // ErrNoUnsignedTransactionInMessage signals that message does not contain required data var ErrNoUnsignedTransactionInMessage = errors.New("no unsigned transactions in message") +// ErrNoRewardTransactionInMessage signals that message does not contain required data +var ErrNoRewardTransactionInMessage = errors.New("no reward transactions in message") + // ErrNilUTxDataPool signals that unsigned transaction pool is nil var ErrNilUTxDataPool = errors.New("unsigned transactions pool is nil") @@ -385,6 +388,9 @@ var ErrNilRewardTxDataPool = errors.New("reward transactions pool is nil") // ErrNilUTxStorage signals that unsigned transaction storage is nil var ErrNilUTxStorage = errors.New("unsigned transactions storage is nil") +// ErrNilRewardsTxStorage signals that rewards transaction storage is nil +var ErrNilRewardsTxStorage = errors.New("reward transactions storage is nil") + // ErrNilScAddress signals that a nil smart contract address has been provided var ErrNilScAddress = errors.New("nil SC address") diff --git a/process/factory/shard/interceptorsContainerFactory.go b/process/factory/shard/interceptorsContainerFactory.go index 0ff8a69a93f..a1e5a2b1dee 100644 --- a/process/factory/shard/interceptorsContainerFactory.go +++ b/process/factory/shard/interceptorsContainerFactory.go @@ -11,6 +11,7 @@ import ( "github.com/ElrondNetwork/elrond-go/process/dataValidators" "github.com/ElrondNetwork/elrond-go/process/factory" "github.com/ElrondNetwork/elrond-go/process/factory/containers" + "github.com/ElrondNetwork/elrond-go/process/rewardTransaction" "github.com/ElrondNetwork/elrond-go/process/transaction" "github.com/ElrondNetwork/elrond-go/process/unsigned" "github.com/ElrondNetwork/elrond-go/sharding" @@ -234,6 +235,60 @@ func (icf *interceptorsContainerFactory) createOneTxInterceptor(identifier strin return icf.createTopicAndAssignHandler(identifier, interceptor, true) } +//------- Reward transactions interceptors + +func (icf *interceptorsContainerFactory) generateRewardTxInterceptors() ([]string, 
[]process.Interceptor, error) { + shardC := icf.shardCoordinator + + noOfShards := shardC.NumberOfShards() + + keys := make([]string, noOfShards) + interceptorSlice := make([]process.Interceptor, noOfShards) + + for idx := uint32(0); idx < noOfShards; idx++ { + identifierScr := factory.RewardsTransactionTopic + shardC.CommunicationIdentifier(idx) + + interceptor, err := icf.createOneRewardTxInterceptor(identifierScr) + if err != nil { + return nil, nil, err + } + + keys[int(idx)] = identifierScr + interceptorSlice[int(idx)] = interceptor + } + + identifierTx := factory.RewardsTransactionTopic + shardC.CommunicationIdentifier(sharding.MetachainShardId) + + interceptor, err := icf.createOneRewardTxInterceptor(identifierTx) + if err != nil { + return nil, nil, err + } + + keys = append(keys, identifierTx) + interceptorSlice = append(interceptorSlice, interceptor) + + return keys, interceptorSlice, nil +} + +func (icf *interceptorsContainerFactory) createOneRewardTxInterceptor(identifier string) (process.Interceptor, error) { + rewardTxStorer := icf.store.GetStorer(dataRetriever.RewardTransactionUnit) + + interceptor, err := rewardTransaction.NewRewardTxInterceptor( + icf.marshalizer, + icf.dataPool.RewardTransactions(), + rewardTxStorer, + icf.addrConverter, + icf.hasher, + icf.shardCoordinator, + ) + + if err != nil { + return nil, err + } + + return icf.createTopicAndAssignHandler(identifier, interceptor, true) +} + //------- Unsigned transactions interceptors func (icf *interceptorsContainerFactory) generateUnsignedTxsInterceptors() ([]string, []process.Interceptor, error) { diff --git a/process/rewardTransaction/intercepteRewardTransaction.go b/process/rewardTransaction/intercepteRewardTransaction.go new file mode 100644 index 00000000000..8a97f316786 --- /dev/null +++ b/process/rewardTransaction/intercepteRewardTransaction.go @@ -0,0 +1,149 @@ +package rewardTransaction + +import ( + "math/big" + + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/sharding" +) + +// InterceptedRewardTransaction holds and manages a transaction based struct with extended functionality +type InterceptedRewardTransaction struct { + rTx *rewardTx.RewardTx + marshalizer marshal.Marshalizer + hasher hashing.Hasher + addrConv state.AddressConverter + coordinator sharding.Coordinator + hash []byte + rcvShard uint32 + sndShard uint32 + isAddressedToOtherShards bool +} + +// NewInterceptedRewardTransaction returns a new instance of InterceptedRewardTransaction +func NewInterceptedRewardTransaction( + rewardTxBuff []byte, + marshalizer marshal.Marshalizer, + hasher hashing.Hasher, + addrConv state.AddressConverter, + coordinator sharding.Coordinator, +) (*InterceptedRewardTransaction, error) { + + if rewardTxBuff == nil { + return nil, process.ErrNilBuffer + } + if marshalizer == nil { + return nil, process.ErrNilMarshalizer + } + if hasher == nil { + return nil, process.ErrNilHasher + } + if addrConv == nil { + return nil, process.ErrNilAddressConverter + } + if coordinator == nil { + return nil, process.ErrNilShardCoordinator + } + + rTx := &rewardTx.RewardTx{} + err := marshalizer.Unmarshal(rTx, rewardTxBuff) + if err != nil { + return nil, err + } + + inRewardTx := &InterceptedRewardTransaction{ + rTx: rTx, + marshalizer: marshalizer, + 
hasher: hasher, + addrConv: addrConv, + coordinator: coordinator, + } + + err = inRewardTx.processFields(rewardTxBuff) + if err != nil { + return nil, err + } + + err = inRewardTx.integrity() + if err != nil { + return nil, err + } + + err = inRewardTx.verifyIfNotarized(inRewardTx.hash) + if err != nil { + return nil, err + } + + return inRewardTx, nil +} + +func (inRTx *InterceptedRewardTransaction) processFields(rewardTxBuff []byte) error { + inRTx.hash = inRTx.hasher.Compute(string(rewardTxBuff)) + + rcvAddr, err := inRTx.addrConv.CreateAddressFromPublicKeyBytes(inRTx.rTx.RcvAddr) + if err != nil { + return process.ErrInvalidRcvAddr + } + + inRTx.rcvShard = inRTx.coordinator.ComputeId(rcvAddr) + inRTx.sndShard = inRTx.rTx.ShardId + + inRTx.isAddressedToOtherShards = inRTx.rcvShard != inRTx.coordinator.SelfId() && + inRTx.sndShard != inRTx.coordinator.SelfId() + + return nil +} + +// integrity checks for not nil fields and negative value +func (inRTx *InterceptedRewardTransaction) integrity() error { + if len(inRTx.rTx.RcvAddr) == 0 { + return process.ErrNilRcvAddr + } + + if inRTx.rTx.Value == nil { + return process.ErrNilValue + } + + if inRTx.rTx.Value.Cmp(big.NewInt(0)) < 0 { + return process.ErrNegativeValue + } + + return nil +} + +// verifyIfNotarized checks if the rewardTx was already notarized +func (inRTx *InterceptedRewardTransaction) verifyIfNotarized(rTxBuff []byte) error { + // TODO: implement this for flood protection purposes + // could verify if the epoch/round is behind last committed metachain block + return nil +} + +// RcvShard returns the receiver shard +func (inRTx *InterceptedRewardTransaction) RcvShard() uint32 { + return inRTx.rcvShard +} + +// SndShard returns the sender shard +func (inRTx *InterceptedRewardTransaction) SndShard() uint32 { + return inRTx.sndShard +} + +// IsAddressedToOtherShards returns true if this transaction is not meant to be processed by the node from this shard +func (inRTx *InterceptedRewardTransaction) IsAddressedToOtherShards() bool { + return inRTx.isAddressedToOtherShards +} + +// RewardTransaction returns the reward transaction pointer that actually holds the data +func (inRTx *InterceptedRewardTransaction) RewardTransaction() data.TransactionHandler { + return inRTx.rTx +} + +// Hash gets the hash of this transaction +func (inRTx *InterceptedRewardTransaction) Hash() []byte { + return inRTx.hash +} diff --git a/process/rewardTransaction/interceptor.go b/process/rewardTransaction/interceptor.go new file mode 100644 index 00000000000..a93ab345953 --- /dev/null +++ b/process/rewardTransaction/interceptor.go @@ -0,0 +1,151 @@ +package rewardTransaction + +import ( + "github.com/ElrondNetwork/elrond-go/core/logger" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/p2p" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/storage" +) + +var log = logger.DefaultLogger() + +// RewardTxInterceptor is used for intercepting reward transactions and storing them into a datapool +type RewardTxInterceptor struct { + marshalizer marshal.Marshalizer + rewardTxPool dataRetriever.ShardedDataCacherNotifier + rewardTxStorer storage.Storer + addrConverter state.AddressConverter + hasher hashing.Hasher + shardCoordinator sharding.Coordinator + broadcastCallbackHandler func(buffToSend []byte) 
+} + +// NewRewardTxInterceptor hooks a new interceptor for reward transactions +func NewRewardTxInterceptor( + marshalizer marshal.Marshalizer, + rewardTxPool dataRetriever.ShardedDataCacherNotifier, + rewardTxStorer storage.Storer, + addrConverter state.AddressConverter, + hasher hashing.Hasher, + shardCoordinator sharding.Coordinator, +) (*RewardTxInterceptor, error) { + + if marshalizer == nil { + return nil, process.ErrNilMarshalizer + } + if rewardTxPool == nil { + return nil, process.ErrNilRewardTxDataPool + } + if rewardTxStorer == nil { + return nil, process.ErrNilRewardsTxStorage + } + if addrConverter == nil { + return nil, process.ErrNilAddressConverter + } + if hasher == nil { + return nil, process.ErrNilHasher + } + if shardCoordinator == nil { + return nil, process.ErrNilShardCoordinator + } + + rewardTxIntercept := &RewardTxInterceptor{ + marshalizer: marshalizer, + rewardTxPool: rewardTxPool, + rewardTxStorer: rewardTxStorer, + hasher: hasher, + addrConverter: addrConverter, + shardCoordinator: shardCoordinator, + } + + return rewardTxIntercept, nil +} + +// ProcessReceivedMessage will be the callback func from the p2p.Messenger and will be called each time a new message was received +// (for the topic this validator was registered to) +func (rti *RewardTxInterceptor) ProcessReceivedMessage(message p2p.MessageP2P) error { + if message == nil { + return process.ErrNilMessage + } + + if message.Data() == nil { + return process.ErrNilDataToProcess + } + + rewardTxsBuff := make([][]byte, 0) + err := rti.marshalizer.Unmarshal(&rewardTxsBuff, message.Data()) + if err != nil { + return err + } + if len(rewardTxsBuff) == 0 { + return process.ErrNoRewardTransactionInMessage + } + + filteredRTxBuffs := make([][]byte, 0) + lastErrEncountered := error(nil) + for _, rewardTxBuff := range rewardTxsBuff { + rewardTxIntercepted, err := NewInterceptedRewardTransaction( + rewardTxBuff, + rti.marshalizer, + rti.hasher, + rti.addrConverter, + rti.shardCoordinator) + + if err != nil { + lastErrEncountered = err + continue + } + + //reward tx is validated, add it to filtered out reward txs + filteredRTxBuffs = append(filteredRTxBuffs, rewardTxBuff) + if rewardTxIntercepted.IsAddressedToOtherShards() { + log.Debug("intercepted reward transaction is for other shards") + + continue + } + + go rti.processRewardTransaction(rewardTxIntercepted) + } + + var buffToSend []byte + filteredOutRTxsNeedToBeSend := len(filteredRTxBuffs) > 0 && lastErrEncountered != nil + if filteredOutRTxsNeedToBeSend { + buffToSend, err = rti.marshalizer.Marshal(filteredRTxBuffs) + if err != nil { + return err + } + } + + if rti.broadcastCallbackHandler != nil { + rti.broadcastCallbackHandler(buffToSend) + } + + return lastErrEncountered +} + +// SetBroadcastCallback sets the callback method to send filtered out message +func (rti *RewardTxInterceptor) SetBroadcastCallback(callback func(buffToSend []byte)) { + rti.broadcastCallbackHandler = callback +} + +func (rti *RewardTxInterceptor) processRewardTransaction(rTx *InterceptedRewardTransaction) { + //TODO should remove this as it is expensive + err := rti.rewardTxStorer.Has(rTx.Hash()) + isRTxInStorage := err == nil + if isRTxInStorage { + log.Debug("intercepted reward tx already processed") + return + } + + cacherIdentifier := process.ShardCacherIdentifier(rTx.SndShard(), rTx.RcvShard()) + rti.rewardTxPool.AddData( + rTx.Hash(), + rTx.RewardTransaction(), + cacherIdentifier, + ) +} From afad17f8871b35c9e10319b1b9b1072b1ef661ef Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: 
Wed, 28 Aug 2019 13:23:49 +0300 Subject: [PATCH 073/234] process: interceptors for rewards and some fixes --- cmd/node/factory/structs.go | 2 +- config/config.go | 2 +- integrationTests/testInitializer.go | 2 +- integrationTests/testProcessorNode.go | 13 +++++----- .../testProcessorNodeWithMultisigner.go | 7 +++++ .../block/preprocess/rewardTxPreProcessor.go | 12 ++++----- process/block/preprocess/rewardsHandler.go | 24 +++++++++++------ .../block/preprocess/rewardsHandler_test.go | 26 +++++++++++++++++++ process/block/shardblock.go | 5 ++++ process/coordinator/process.go | 18 +++++++++++-- .../intermediateProcessorsContainerFactory.go | 5 ++-- 11 files changed, 89 insertions(+), 27 deletions(-) diff --git a/cmd/node/factory/structs.go b/cmd/node/factory/structs.go index 7b6d7770718..88b353942db 100644 --- a/cmd/node/factory/structs.go +++ b/cmd/node/factory/structs.go @@ -916,7 +916,7 @@ func createShardDataPoolFromConfig( return nil, err } - rewardTxPool, err := shardedData.NewShardedData(getCacherFromConfig(config.RewardTxDataPool)) + rewardTxPool, err := shardedData.NewShardedData(getCacherFromConfig(config.RewardTransactionDataPool)) if err != nil { log.Info("error creating reward transaction pool") return nil, err diff --git a/config/config.go b/config/config.go index 639d17eeed0..df205515ff2 100644 --- a/config/config.go +++ b/config/config.go @@ -86,7 +86,7 @@ type Config struct { BlockHeaderNoncesDataPool CacheConfig TxDataPool CacheConfig UnsignedTransactionDataPool CacheConfig - RewardTxDataPool CacheConfig + RewardTransactionDataPool CacheConfig MetaBlockBodyDataPool CacheConfig MiniBlockHeaderHashesDataPool CacheConfig diff --git a/integrationTests/testInitializer.go b/integrationTests/testInitializer.go index d66d44a08a3..659d142b7bc 100644 --- a/integrationTests/testInitializer.go +++ b/integrationTests/testInitializer.go @@ -1133,7 +1133,7 @@ func GenValidatorsFromPubKeys(pubKeysMap map[uint32][]string) map[uint32][]shard for shardId, shardNodesPks := range pubKeysMap { shardValidators := make([]sharding.Validator, 0) for i := 0; i < len(shardNodesPks); i++ { - v, _ := sharding.NewValidator(big.NewInt(0), 1, []byte(shardNodesPks[i]), []byte(shardNodesPks[i])) + v, _ := sharding.NewValidator(big.NewInt(0), 1, []byte(shardNodesPks[i]), []byte(shardNodesPks[i][:31])) shardValidators = append(shardValidators, v) } validatorsMap[shardId] = shardValidators diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index 51064493e8b..44f3ab4f927 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -81,7 +81,7 @@ type TestProcessorNode struct { Messenger p2p.Messenger OwnAccount *TestWalletAccount - NodeKeys *TestKeyPair + NodeKeys *TestKeyPair ShardDataPool dataRetriever.PoolsHolder MetaDataPool dataRetriever.MetaPoolsHolder @@ -188,6 +188,7 @@ func NewTestProcessorNodeWithCustomDataPool(maxShards uint32, nodeShardId uint32 } func (tpn *TestProcessorNode) initTestNode() { + tpn.SpecialAddressHandler = &mock.SpecialAddressHandlerMock{} tpn.initStorage() tpn.AccntState, _, _ = CreateAccountsDB(0) tpn.initChainHandler() @@ -327,7 +328,7 @@ func (tpn *TestProcessorNode) initInnerProcessors() { TestMarshalizer, TestHasher, TestAddressConverter, - &mock.SpecialAddressHandlerMock{}, + tpn.SpecialAddressHandler, tpn.Storage, ) tpn.InterimProcContainer, _ = interimProcFactory.Create() @@ -354,11 +355,11 @@ func (tpn *TestProcessorNode) initInnerProcessors() { tpn.ScrForwarder, &mock.UnsignedTxHandlerMock{}, ) - 
tpn.RewardsProcessor, _=rewardTransaction.NewRewardTxProcessor( + tpn.RewardsProcessor, _ = rewardTransaction.NewRewardTxProcessor( tpn.AccntState, TestAddressConverter, tpn.ShardCoordinator, - ) + ) txTypeHandler, _ := coordinator.NewTxTypeHandler(TestAddressConverter, tpn.ShardCoordinator, tpn.AccntState) @@ -433,7 +434,7 @@ func (tpn *TestProcessorNode) initBlockProcessor() { tpn.ForkDetector, tpn.ShardCoordinator, tpn.NodesCoordinator, - &mock.SpecialAddressHandlerMock{}, + tpn.SpecialAddressHandler, TestHasher, TestMarshalizer, tpn.Storage, @@ -451,7 +452,7 @@ func (tpn *TestProcessorNode) initBlockProcessor() { tpn.AccntState, tpn.ShardCoordinator, tpn.NodesCoordinator, - &mock.SpecialAddressHandlerMock{}, + tpn.SpecialAddressHandler, tpn.ForkDetector, tpn.BlockTracker, tpn.GenesisBlocks, diff --git a/integrationTests/testProcessorNodeWithMultisigner.go b/integrationTests/testProcessorNodeWithMultisigner.go index 743e966f52b..566f5b58204 100644 --- a/integrationTests/testProcessorNodeWithMultisigner.go +++ b/integrationTests/testProcessorNodeWithMultisigner.go @@ -121,6 +121,13 @@ func ProposeBlockWithConsensusSignature( fmt.Println("Error getting the validators public keys: ", err) } + adddresses, err := nodesCoordinator.GetValidatorsRewardsAddresses(randomness, round, shardId) + + // set the consensus reward addresses + for _, node := range nodesMap[shardId]{ + node.BlockProcessor.SetConsensusRewardAddresses(adddresses) + } + consensusNodes := selectTestNodesForPubKeys(nodesMap[shardId], pubKeys) // first node is block proposer body, header, txHashes := consensusNodes[0].ProposeBlock(round, nonce) diff --git a/process/block/preprocess/rewardTxPreProcessor.go b/process/block/preprocess/rewardTxPreProcessor.go index 2aaae5b01ac..0f13f7a3d1a 100644 --- a/process/block/preprocess/rewardTxPreProcessor.go +++ b/process/block/preprocess/rewardTxPreProcessor.go @@ -278,20 +278,20 @@ func (rtp *rewardTxPreprocessor) RequestBlockTransactions(body block.Body) int { // computeMissingAndExistingRewardTxsForShards calculates what reward transactions are available and what are missing // from block.Body func (rtp *rewardTxPreprocessor) computeMissingAndExistingRewardTxsForShards(body block.Body) map[uint32]*txsHashesInfo { - onlyRewardTxsFromOthersBody := block.Body{} + rewardTxs := block.Body{} for _, mb := range body { if mb.Type != block.RewardsBlockType { continue } - if mb.SenderShardID == rtp.shardCoordinator.SelfId() { - continue - } + //if mb.SenderShardID == rtp.shardCoordinator.SelfId() { + // continue + //} - onlyRewardTxsFromOthersBody = append(onlyRewardTxsFromOthersBody, mb) + rewardTxs = append(rewardTxs, mb) } missingTxsForShard := rtp.computeExistingAndMissing( - onlyRewardTxsFromOthersBody, + rewardTxs, &rtp.rewardTxsForBlock, rtp.chReceivedAllRewardTxs, block.RewardsBlockType, diff --git a/process/block/preprocess/rewardsHandler.go b/process/block/preprocess/rewardsHandler.go index 5172f7cf3fc..a104b4559b5 100644 --- a/process/block/preprocess/rewardsHandler.go +++ b/process/block/preprocess/rewardsHandler.go @@ -1,6 +1,7 @@ package preprocess import ( + "github.com/ElrondNetwork/elrond-go/sharding" "math/big" "sync" @@ -28,11 +29,12 @@ const burnPercentage = 0.5 // 1 = 100%, 0 = 0% var rewardValue = big.NewInt(1000) type rewardsHandler struct { - address process.SpecialAddressHandler - hasher hashing.Hasher - marshalizer marshal.Marshalizer - mut sync.Mutex - accumulatedFees *big.Int + address process.SpecialAddressHandler + shardCoordinator sharding.Coordinator + hasher 
hashing.Hasher + marshalizer marshal.Marshalizer + mut sync.Mutex + accumulatedFees *big.Int rewardTxsFromBlock map[string]*rewardTx.RewardTx } @@ -40,12 +42,16 @@ type rewardsHandler struct { // NewRewardTxHandler constructor for the reward transaction handler func NewRewardTxHandler( address process.SpecialAddressHandler, + shardCoordinator sharding.Coordinator, hasher hashing.Hasher, marshalizer marshal.Marshalizer, ) (*rewardsHandler, error) { if address == nil { return nil, process.ErrNilSpecialAddressHandler } + if shardCoordinator == nil { + return nil, process.ErrNilShardCoordinator + } if hasher == nil { return nil, process.ErrNilHasher } @@ -54,9 +60,10 @@ func NewRewardTxHandler( } rtxh := &rewardsHandler{ - address: address, - hasher: hasher, - marshalizer: marshalizer, + address: address, + shardCoordinator: shardCoordinator, + hasher: hasher, + marshalizer: marshalizer, } rtxh.accumulatedFees = big.NewInt(0) rtxh.rewardTxsFromBlock = make(map[string]*rewardTx.RewardTx) @@ -237,6 +244,7 @@ func (rtxh *rewardsHandler) createRewardTxsForConsensusGroup() []data.Transactio rTx := &rewardTx.RewardTx{} rTx.Value = rewardValue rTx.RcvAddr = []byte(address) + rTx.ShardId = rtxh.shardCoordinator.SelfId() consensusRewardTxs = append(consensusRewardTxs, rTx) } diff --git a/process/block/preprocess/rewardsHandler_test.go b/process/block/preprocess/rewardsHandler_test.go index 15457ba5a10..b16f5272c44 100644 --- a/process/block/preprocess/rewardsHandler_test.go +++ b/process/block/preprocess/rewardsHandler_test.go @@ -16,6 +16,7 @@ func TestNewRewardTxHandler_NilSpecialAddress(t *testing.T) { th, err := NewRewardTxHandler( nil, + mock.NewMultiShardsCoordinatorMock(3), &mock.HasherMock{}, &mock.MarshalizerMock{}, ) @@ -24,11 +25,26 @@ func TestNewRewardTxHandler_NilSpecialAddress(t *testing.T) { assert.Equal(t, process.ErrNilSpecialAddressHandler, err) } +func TestNewRewardTxHandler_NilShardCoordinator(t *testing.T) { + t.Parallel() + + th, err := NewRewardTxHandler( + &mock.SpecialAddressHandlerMock{}, + nil, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + ) + + assert.Nil(t, th) + assert.Equal(t, process.ErrNilShardCoordinator, err) +} + func TestNewRewardTxHandler_NilHasher(t *testing.T) { t.Parallel() th, err := NewRewardTxHandler( &mock.SpecialAddressHandlerMock{}, + mock.NewMultiShardsCoordinatorMock(3), nil, &mock.MarshalizerMock{}, ) @@ -42,6 +58,7 @@ func TestNewRewardTxHandler_NilMarshalizer(t *testing.T) { th, err := NewRewardTxHandler( &mock.SpecialAddressHandlerMock{}, + mock.NewMultiShardsCoordinatorMock(3), &mock.HasherMock{}, nil, ) @@ -55,6 +72,7 @@ func TestNewRewardTxHandler_ValsOk(t *testing.T) { th, err := NewRewardTxHandler( &mock.SpecialAddressHandlerMock{}, + mock.NewMultiShardsCoordinatorMock(3), &mock.HasherMock{}, &mock.MarshalizerMock{}, ) @@ -68,6 +86,7 @@ func TestRewardTxHandlerAddIntermediateTransactions(t *testing.T) { th, err := NewRewardTxHandler( &mock.SpecialAddressHandlerMock{}, + mock.NewMultiShardsCoordinatorMock(3), &mock.HasherMock{}, &mock.MarshalizerMock{}, ) @@ -84,6 +103,7 @@ func TestRewardTxHandlerProcessTransactionFee(t *testing.T) { th, err := NewRewardTxHandler( &mock.SpecialAddressHandlerMock{}, + mock.NewMultiShardsCoordinatorMock(3), &mock.HasherMock{}, &mock.MarshalizerMock{}, ) @@ -106,6 +126,7 @@ func TestRewardTxHandlerAddTxFeeFromBlock(t *testing.T) { th, err := NewRewardTxHandler( &mock.SpecialAddressHandlerMock{}, + mock.NewMultiShardsCoordinatorMock(3), &mock.HasherMock{}, &mock.MarshalizerMock{}, ) @@ -128,6 +149,7 @@ func 
TestRewardTxHandlerCleanProcessedUTxs(t *testing.T) { th, err := NewRewardTxHandler( &mock.SpecialAddressHandlerMock{}, + mock.NewMultiShardsCoordinatorMock(3), &mock.HasherMock{}, &mock.MarshalizerMock{}, ) @@ -150,6 +172,7 @@ func TestRewardTxHandlerCreateAllUTxs(t *testing.T) { th, err := NewRewardTxHandler( &mock.SpecialAddressHandlerMock{}, + mock.NewMultiShardsCoordinatorMock(3), &mock.HasherMock{}, &mock.MarshalizerMock{}, ) @@ -179,6 +202,7 @@ func TestRewardTxHandlerVerifyCreatedUTxs(t *testing.T) { addr := &mock.SpecialAddressHandlerMock{} th, err := NewRewardTxHandler( addr, + mock.NewMultiShardsCoordinatorMock(3), &mock.HasherMock{}, &mock.MarshalizerMock{}, ) @@ -228,6 +252,7 @@ func TestRewardTxHandlerCreateAllInterMiniBlocks(t *testing.T) { th, err := NewRewardTxHandler( &mock.SpecialAddressHandlerMock{}, + mock.NewMultiShardsCoordinatorMock(3), &mock.HasherMock{}, &mock.MarshalizerMock{}, ) @@ -251,6 +276,7 @@ func TestRewardTxHandlerVerifyInterMiniBlocks(t *testing.T) { addr := &mock.SpecialAddressHandlerMock{} th, err := NewRewardTxHandler( addr, + mock.NewMultiShardsCoordinatorMock(3), &mock.HasherMock{}, &mock.MarshalizerMock{}, ) diff --git a/process/block/shardblock.go b/process/block/shardblock.go index c48ed008874..6e21e0ba668 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -444,10 +444,15 @@ func (sp *shardProcessor) indexBlockIfNeeded( txPool := sp.txCoordinator.GetAllCurrentUsedTxs(block.TxBlock) scPool := sp.txCoordinator.GetAllCurrentUsedTxs(block.SmartContractResultBlock) + rewardPool := sp.txCoordinator.GetAllCurrentUsedTxs(block.RewardsBlockType) + for hash, tx := range scPool { txPool[hash] = tx } + for hash, tx := range rewardPool { + txPool[hash] = tx + } go sp.core.Indexer().SaveBlock(body, header, txPool) } diff --git a/process/coordinator/process.go b/process/coordinator/process.go index 9c1f82a9944..d1005c39d85 100644 --- a/process/coordinator/process.go +++ b/process/coordinator/process.go @@ -232,12 +232,26 @@ func (tc *transactionCoordinator) SaveBlockDataToStorage(body block.Body) error wg.Wait() - intermediatePreproc := tc.getInterimProcessor(block.SmartContractResultBlock) + intermediatePreprocSC := tc.getInterimProcessor(block.SmartContractResultBlock) + if intermediatePreprocSC == nil { + return errFound + } + + err := intermediatePreprocSC.SaveCurrentIntermediateTxToStorage() + if err != nil { + log.Debug(err.Error()) + + errMutex.Lock() + errFound = err + errMutex.Unlock() + } + + intermediatePreproc := tc.getInterimProcessor(block.RewardsBlockType) if intermediatePreproc == nil { return errFound } - err := intermediatePreproc.SaveCurrentIntermediateTxToStorage() + err = intermediatePreproc.SaveCurrentIntermediateTxToStorage() if err != nil { log.Debug(err.Error()) diff --git a/process/factory/shard/intermediateProcessorsContainerFactory.go b/process/factory/shard/intermediateProcessorsContainerFactory.go index 8a47d0d3a51..8666b0f66fb 100644 --- a/process/factory/shard/intermediateProcessorsContainerFactory.go +++ b/process/factory/shard/intermediateProcessorsContainerFactory.go @@ -18,7 +18,7 @@ type intermediateProcessorsContainerFactory struct { hasher hashing.Hasher addrConverter state.AddressConverter specialAddressHandler process.SpecialAddressHandler - store dataRetriever.StorageService + store dataRetriever.StorageService } // NewIntermediateProcessorsContainerFactory is responsible for creating a new intermediate processors factory object @@ -56,7 +56,7 @@ func 
NewIntermediateProcessorsContainerFactory( hasher: hasher, addrConverter: addrConverter, specialAddressHandler: specialAddressHandler, - store: store, + store: store, }, nil } @@ -103,6 +103,7 @@ func (ppcm *intermediateProcessorsContainerFactory) createSmartContractResultsIn func (ppcm *intermediateProcessorsContainerFactory) createRewardsTxIntermediateProcessor() (process.IntermediateTransactionHandler, error) { irp, err := preprocess.NewRewardTxHandler( ppcm.specialAddressHandler, + ppcm.shardCoordinator, ppcm.hasher, ppcm.marshalizer, ) From 8be70de951fc21ed052c10229011829f062cc469 Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Mon, 2 Sep 2019 17:39:19 +0300 Subject: [PATCH 074/234] process, integrationTests: fixes rewards preprocessor/processor/intermediate handler Integration tests and unit tests adaptations --- cmd/node/factory/structs.go | 31 ++- .../mock/specialAddressHandlerMock.go | 6 +- .../interceptedHeadersSigVerification_test.go | 6 +- .../smartContract/testInitilalizer.go | 17 +- .../interceptedResolvedBulkTx_test.go | 2 +- .../transaction/interceptedResolvedTx_test.go | 81 +++++++ integrationTests/testInitializer.go | 30 +-- integrationTests/testProcessorNode.go | 28 ++- .../testProcessorNodeWithMultisigner.go | 4 +- .../block/preprocess/rewardTxPreProcessor.go | 15 +- process/block/preprocess/rewardsHandler.go | 224 +++++++++++++----- .../block/preprocess/rewardsHandler_test.go | 155 +++++++----- process/block/preprocess/transactions_test.go | 33 +++ process/block/shardblock_test.go | 6 +- process/coordinator/process.go | 32 ++- process/coordinator/process_test.go | 7 +- process/errors.go | 8 +- .../shard/interceptorsContainerFactory.go | 10 + .../interceptorsContainerFactory_test.go | 8 +- .../intermediateProcessorsContainerFactory.go | 11 +- ...rmediateProcessorsContainerFactory_test.go | 31 ++- process/interface.go | 7 +- process/mock/rewardTxProcessorMock.go | 9 + ...ion.go => interceptedRewardTransaction.go} | 0 process/rewardTransaction/process.go | 46 +++- process/smartContract/process.go | 4 +- process/transaction/process.go | 44 +--- 27 files changed, 584 insertions(+), 271 deletions(-) rename process/rewardTransaction/{intercepteRewardTransaction.go => interceptedRewardTransaction.go} (100%) diff --git a/cmd/node/factory/structs.go b/cmd/node/factory/structs.go index 88b353942db..da59259d854 100644 --- a/cmd/node/factory/structs.go +++ b/cmd/node/factory/structs.go @@ -7,6 +7,7 @@ import ( "encoding/hex" "errors" "fmt" + "github.com/ElrondNetwork/elrond-go/process/rewardTransaction" "io" "math/big" "path/filepath" @@ -58,7 +59,6 @@ import ( "github.com/ElrondNetwork/elrond-go/process/factory" "github.com/ElrondNetwork/elrond-go/process/factory/metachain" "github.com/ElrondNetwork/elrond-go/process/factory/shard" - "github.com/ElrondNetwork/elrond-go/process/rewardTransaction" "github.com/ElrondNetwork/elrond-go/process/smartContract" processSync "github.com/ElrondNetwork/elrond-go/process/sync" "github.com/ElrondNetwork/elrond-go/process/track" @@ -1304,7 +1304,7 @@ func newBlockProcessorAndTracker( coreServiceContainer serviceContainer.Core, ) (process.BlockProcessor, process.BlocksTracker, error) { - if rewardsConfig.CommunityAddress == "" || rewardsConfig.BurnAddress == ""{ + if rewardsConfig.CommunityAddress == "" || rewardsConfig.BurnAddress == "" { return nil, nil, errors.New("rewards configuration missing") } @@ -1383,6 +1383,10 @@ func newShardBlockProcessorAndTracker( return nil, nil, err } + if err != nil { + return nil, nil, err + } + 
interimProcFactory, err := shard.NewIntermediateProcessorsContainerFactory( shardCoordinator, core.Marshalizer, @@ -1390,6 +1394,7 @@ func newShardBlockProcessorAndTracker( state.AddressConverter, specialAddressHandler, data.Store, + data.Datapool, ) if err != nil { return nil, nil, err @@ -1410,7 +1415,7 @@ func newShardBlockProcessorAndTracker( return nil, nil, err } - rewardsTxHandler, ok := rewardsTxInterim.(process.UnsignedTxHandler) + rewardsTxHandler, ok := rewardsTxInterim.(process.TransactionFeeHandler) if !ok { return nil, nil, process.ErrWrongTypeAssertion } @@ -1431,15 +1436,6 @@ func newShardBlockProcessorAndTracker( return nil, nil, err } - rewardsTxProcessor, err := rewardTransaction.NewRewardTxProcessor( - state.AccountsAdapter, - state.AddressConverter, - shardCoordinator, - ) - if err != nil { - return nil, nil, err - } - requestHandler, err := requestHandlers.NewShardResolverRequestHandler( resolversFinder, factory.TransactionTopic, @@ -1453,6 +1449,16 @@ func newShardBlockProcessorAndTracker( return nil, nil, err } + rewardsTxProcessor, err := rewardTransaction.NewRewardTxProcessor( + state.AccountsAdapter, + state.AddressConverter, + shardCoordinator, + rewardsTxInterim, + ) + if err != nil { + return nil, nil, err + } + txTypeHandler, err := coordinator.NewTxTypeHandler(state.AddressConverter, shardCoordinator, state.AccountsAdapter) if err != nil { return nil, nil, err @@ -1594,6 +1600,7 @@ func newMetaBlockProcessorAndTracker( return metaProcessor, blockTracker, nil } + func getCacherFromConfig(cfg config.CacheConfig) storageUnit.CacheConfig { return storageUnit.CacheConfig{ Size: cfg.Size, diff --git a/integrationTests/mock/specialAddressHandlerMock.go b/integrationTests/mock/specialAddressHandlerMock.go index 407b3a6c7e0..9f3d28f9e40 100644 --- a/integrationTests/mock/specialAddressHandlerMock.go +++ b/integrationTests/mock/specialAddressHandlerMock.go @@ -22,7 +22,7 @@ func (sh *SpecialAddressHandlerMock) ConsensusRewardAddresses() []string { func (sh *SpecialAddressHandlerMock) BurnAddress() []byte { if sh.BurnAddressCalled == nil { - return []byte("burn") + return []byte("burn0000000000000000000000000000") } return sh.BurnAddressCalled() @@ -30,7 +30,7 @@ func (sh *SpecialAddressHandlerMock) BurnAddress() []byte { func (sh *SpecialAddressHandlerMock) ElrondCommunityAddress() []byte { if sh.ElrondCommunityAddressCalled == nil { - return []byte("elrond") + return []byte("elrond00000000000000000000000000") } return sh.ElrondCommunityAddressCalled() @@ -38,7 +38,7 @@ func (sh *SpecialAddressHandlerMock) ElrondCommunityAddress() []byte { func (sh *SpecialAddressHandlerMock) LeaderAddress() []byte { if sh.LeaderAddressCalled == nil { - return []byte("leader") + return []byte("leader0000000000000000000000000000") } return sh.LeaderAddressCalled() diff --git a/integrationTests/multiShard/block/interceptedHeadersSigVerification_test.go b/integrationTests/multiShard/block/interceptedHeadersSigVerification_test.go index fb718f586cc..651638218fe 100644 --- a/integrationTests/multiShard/block/interceptedHeadersSigVerification_test.go +++ b/integrationTests/multiShard/block/interceptedHeadersSigVerification_test.go @@ -11,7 +11,7 @@ import ( "github.com/stretchr/testify/assert" ) -const broadcastDelay = 2* time.Second +const broadcastDelay = 2 * time.Second func TestInterceptedShardBlockHeaderVerifiedWithCorrectConsensusGroup(t *testing.T) { if testing.Short() { @@ -58,7 +58,7 @@ func TestInterceptedShardBlockHeaderVerifiedWithCorrectConsensusGroup(t *testing round := 
uint64(1) nonce := uint64(1) - body, header, _ := integrationTests.ProposeBlockWithConsensusSignature(0, nodesMap, round, nonce, randomness) + body, header, _, _ := integrationTests.ProposeBlockWithConsensusSignature(0, nodesMap, round, nonce, randomness) nodesMap[0][0].BroadcastBlock(body, header) @@ -127,7 +127,7 @@ func TestInterceptedMetaBlockVerifiedWithCorrectConsensusGroup(t *testing.T) { round := uint64(1) nonce := uint64(1) - body, header, _ := integrationTests.ProposeBlockWithConsensusSignature( + body, header, _, _ := integrationTests.ProposeBlockWithConsensusSignature( sharding.MetachainShardId, nodesMap, round, diff --git a/integrationTests/multiShard/smartContract/testInitilalizer.go b/integrationTests/multiShard/smartContract/testInitilalizer.go index 86cb1e284ec..bc86249d949 100644 --- a/integrationTests/multiShard/smartContract/testInitilalizer.go +++ b/integrationTests/multiShard/smartContract/testInitilalizer.go @@ -207,6 +207,7 @@ func createTestShardStore(numOfShards uint32) dataRetriever.StorageService { store.AddStorer(dataRetriever.PeerChangesUnit, createMemUnit()) store.AddStorer(dataRetriever.BlockHeaderUnit, createMemUnit()) store.AddStorer(dataRetriever.UnsignedTransactionUnit, createMemUnit()) + store.AddStorer(dataRetriever.RewardTransactionUnit, createMemUnit()) store.AddStorer(dataRetriever.MetaHdrNonceHashDataUnit, createMemUnit()) for i := uint32(0); i < numOfShards; i++ { @@ -340,12 +341,18 @@ func createNetNode( testAddressConverter, &mock.SpecialAddressHandlerMock{}, store, + dPool, ) interimProcContainer, _ := interimProcFactory.Create() scForwarder, _ := interimProcContainer.Get(dataBlock.SmartContractResultBlock) rewardsInter, _ := interimProcContainer.Get(dataBlock.RewardsBlockType) - rewardsHandler, _ := rewardsInter.(process.UnsignedTxHandler) - + rewardsHandler, _ := rewardsInter.(process.TransactionFeeHandler) + rewardProcessor, _ := rewardTransaction.NewRewardTxProcessor( + accntAdapter, + addrConv, + shardCoordinator, + rewardsInter, + ) vm, blockChainHook := createVMAndBlockchainHook(accntAdapter) vmContainer := &mock.VMContainerMock{ GetCalled: func(key []byte) (handler vmcommon.VMExecutionHandler, e error) { @@ -365,12 +372,6 @@ func createNetNode( rewardsHandler, ) - rewardProcessor, _ := rewardTransaction.NewRewardTxProcessor( - accntAdapter, - addrConv, - shardCoordinator, - ) - txTypeHandler, _ := coordinator.NewTxTypeHandler(addrConv, shardCoordinator, accntAdapter) txProcessor, _ := transaction.NewTxProcessor( diff --git a/integrationTests/multiShard/transaction/interceptedResolvedBulkTx_test.go b/integrationTests/multiShard/transaction/interceptedResolvedBulkTx_test.go index 50584434125..7e2b1cbacf2 100644 --- a/integrationTests/multiShard/transaction/interceptedResolvedBulkTx_test.go +++ b/integrationTests/multiShard/transaction/interceptedResolvedBulkTx_test.go @@ -286,7 +286,7 @@ func TestNode_InMultiShardEnvRequestTxsShouldRequireOnlyFromTheOtherShard(t *tes recvTxs := make(map[int]map[string]struct{}) mutRecvTxs := sync.Mutex{} for i := 0; i < nodesPerShard; i++ { - dPool := integrationTests.CreateRequesterDataPool(t, recvTxs, &mutRecvTxs, i) + dPool := integrationTests.CreateRequesterDataPool(t, recvTxs, &mutRecvTxs, i, uint32(maxShards)) tn := integrationTests.NewTestProcessorNodeWithCustomDataPool( uint32(maxShards), diff --git a/integrationTests/singleShard/transaction/interceptedResolvedTx_test.go b/integrationTests/singleShard/transaction/interceptedResolvedTx_test.go index 31c68ad6c33..3e527b4351d 100644 --- 
a/integrationTests/singleShard/transaction/interceptedResolvedTx_test.go +++ b/integrationTests/singleShard/transaction/interceptedResolvedTx_test.go @@ -2,6 +2,7 @@ package transaction import ( "fmt" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" "math/big" "reflect" "testing" @@ -99,3 +100,83 @@ func TestNode_RequestInterceptTransactionWithMessenger(t *testing.T) { assert.Fail(t, "timeout") } } + +func TestNode_RequestInterceptRewardTransactionWithMessenger(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + var nrOfShards uint32 = 1 + var shardID uint32 = 0 + var txSignPrivKeyShardId uint32 = 0 + requesterNodeAddr := "0" + resolverNodeAddr := "1" + + fmt.Println("Requester: ") + nRequester := integrationTests.NewTestProcessorNode(nrOfShards, shardID, txSignPrivKeyShardId, requesterNodeAddr) + + fmt.Println("Resolver:") + nResolver := integrationTests.NewTestProcessorNode(nrOfShards, shardID, txSignPrivKeyShardId, resolverNodeAddr) + _ = nRequester.Node.Start() + _ = nResolver.Node.Start() + defer func() { + _ = nRequester.Node.Stop() + _ = nResolver.Node.Stop() + }() + + //connect messengers together + time.Sleep(time.Second) + err := nRequester.Messenger.ConnectToPeer(integrationTests.GetConnectableAddress(nResolver.Messenger)) + assert.Nil(t, err) + + time.Sleep(time.Second) + + //Step 1. Generate a signed transaction + tx := rewardTx.RewardTx{ + Value: big.NewInt(0), + RcvAddr: integrationTests.TestHasher.Compute("receiver"), + Round: 0, + Epoch: 0, + ShardId: 0, + } + + marshaledTxBuff, _ := integrationTests.TestMarshalizer.Marshal(&tx) + + fmt.Printf("Transaction: %v\n%v\n", tx, string(marshaledTxBuff)) + + chanDone := make(chan bool) + + txHash := integrationTests.TestHasher.Compute(string(marshaledTxBuff)) + + //step 2. wire up a received handler for requester + nRequester.ShardDataPool.RewardTransactions().RegisterHandler(func(key []byte) { + rewardTxStored, _ := nRequester.ShardDataPool.RewardTransactions().ShardDataStore( + process.ShardCacherIdentifier(nRequester.ShardCoordinator.SelfId(), nRequester.ShardCoordinator.SelfId()), + ).Get(key) + + if reflect.DeepEqual(rewardTxStored, &tx){ + chanDone <- true + } + + assert.Equal(t, rewardTxStored, &tx) + assert.Equal(t, txHash, key) + }) + + //Step 3. add the transaction in resolver pool + nResolver.ShardDataPool.RewardTransactions().AddData( + txHash, + &tx, + process.ShardCacherIdentifier(nRequester.ShardCoordinator.SelfId(), nRequester.ShardCoordinator.SelfId()), + ) + + //Step 4. 
request tx + rewardTxResolver, _ := nRequester.ResolverFinder.IntraShardResolver(factory.RewardsTransactionTopic) + err = rewardTxResolver.RequestDataFromHash(txHash) + assert.Nil(t, err) + + select { + case <-chanDone: + case <-time.After(time.Second * 3): + assert.Fail(t, "timeout") + } +} diff --git a/integrationTests/testInitializer.go b/integrationTests/testInitializer.go index 659d142b7bc..e80970c06fa 100644 --- a/integrationTests/testInitializer.go +++ b/integrationTests/testInitializer.go @@ -88,27 +88,27 @@ func CreateMessengerWithKadDht(ctx context.Context, initialAddr string) p2p.Mess } // CreateTestShardDataPool creates a test data pool for shard nodes -func CreateTestShardDataPool(txPool dataRetriever.ShardedDataCacherNotifier) dataRetriever.PoolsHolder { +func CreateTestShardDataPool(txPool dataRetriever.ShardedDataCacherNotifier, nbShards uint32) dataRetriever.PoolsHolder { if txPool == nil { - txPool, _ = shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache}) + txPool, _ = shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: nbShards}) } - uTxPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache}) - rewardsTxPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 300, Type: storageUnit.LRUCache}) - cacherCfg := storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache} + uTxPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: nbShards}) + rewardsTxPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 300, Type: storageUnit.LRUCache, Shards: nbShards}) + cacherCfg := storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache, Shards: nbShards} hdrPool, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache} + cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: nbShards} hdrNoncesCacher, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) hdrNonces, _ := dataPool.NewNonceSyncMapCacher(hdrNoncesCacher, uint64ByteSlice.NewBigEndianConverter()) - cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache} + cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: nbShards} txBlockBody, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache} + cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: nbShards} peerChangeBlockBody, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache} + cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: nbShards} metaBlocks, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) dPool, _ := dataPool.NewShardedDataPool( @@ -714,12 +714,12 @@ func GenerateAndDisseminateTxs( for i := 0; i < len(senders); i++ { senderKey := senders[i] - incrementalNonce := uint64(0) + incrementalNonce := make([]uint64, len(senders)) for _, recvPrivateKeys := range receiversPrivateKeys { receiverKey := recvPrivateKeys[i] - tx := generateTransferTx(incrementalNonce, senderKey, receiverKey, valToTransfer, gasPrice, gasLimit) + tx := 
generateTransferTx(incrementalNonce[i], senderKey, receiverKey, valToTransfer, gasPrice, gasLimit) _, _ = n.SendTransaction(tx) - incrementalNonce++ + incrementalNonce[i]++ } } } @@ -933,6 +933,7 @@ func CreateRequesterDataPool( recvTxs map[int]map[string]struct{}, mutRecvTxs *sync.Mutex, nodeIndex int, + nbShards uint32, ) dataRetriever.PoolsHolder { //not allowed to request data from the same shard @@ -961,6 +962,7 @@ func CreateRequesterDataPool( RegisterHandlerCalled: func(i func(key []byte)) { }, }, + nbShards, ) } @@ -984,7 +986,7 @@ func CreateResolversDataPool( txHashes[i] = txHash } - return CreateTestShardDataPool(txPool), txHashes + return CreateTestShardDataPool(txPool, shardCoordinator.NumberOfShards()), txHashes } func generateValidTx( @@ -1133,7 +1135,7 @@ func GenValidatorsFromPubKeys(pubKeysMap map[uint32][]string) map[uint32][]shard for shardId, shardNodesPks := range pubKeysMap { shardValidators := make([]sharding.Validator, 0) for i := 0; i < len(shardNodesPks); i++ { - v, _ := sharding.NewValidator(big.NewInt(0), 1, []byte(shardNodesPks[i]), []byte(shardNodesPks[i][:31])) + v, _ := sharding.NewValidator(big.NewInt(0), 1, []byte(shardNodesPks[i]), []byte(shardNodesPks[i][:32])) shardValidators = append(shardValidators, v) } validatorsMap[shardId] = shardValidators diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index 44f3ab4f927..3cbb47e55a4 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -214,7 +214,7 @@ func (tpn *TestProcessorNode) initDataPools() { if tpn.ShardCoordinator.SelfId() == sharding.MetachainShardId { tpn.MetaDataPool = CreateTestMetaDataPool() } else { - tpn.ShardDataPool = CreateTestShardDataPool(nil) + tpn.ShardDataPool = CreateTestShardDataPool(nil, tpn.ShardCoordinator.NumberOfShards()) } } @@ -330,9 +330,20 @@ func (tpn *TestProcessorNode) initInnerProcessors() { TestAddressConverter, tpn.SpecialAddressHandler, tpn.Storage, + tpn.ShardDataPool, ) + tpn.InterimProcContainer, _ = interimProcFactory.Create() tpn.ScrForwarder, _ = tpn.InterimProcContainer.Get(dataBlock.SmartContractResultBlock) + rewardsInter, _ := tpn.InterimProcContainer.Get(dataBlock.RewardsBlockType) + rewardsHandler, _ := rewardsInter.(process.TransactionFeeHandler) + + tpn.RewardsProcessor, _ = rewardTransaction.NewRewardTxProcessor( + tpn.AccntState, + TestAddressConverter, + tpn.ShardCoordinator, + rewardsInter, + ) tpn.VmProcessor, tpn.BlockchainHook = CreateIeleVMAndBlockchainHook(tpn.AccntState) tpn.VmDataGetter, _ = CreateIeleVMAndBlockchainHook(tpn.AccntState) @@ -353,12 +364,7 @@ func (tpn *TestProcessorNode) initInnerProcessors() { TestAddressConverter, tpn.ShardCoordinator, tpn.ScrForwarder, - &mock.UnsignedTxHandlerMock{}, - ) - tpn.RewardsProcessor, _ = rewardTransaction.NewRewardTxProcessor( - tpn.AccntState, - TestAddressConverter, - tpn.ShardCoordinator, + rewardsHandler, ) txTypeHandler, _ := coordinator.NewTxTypeHandler(TestAddressConverter, tpn.ShardCoordinator, tpn.AccntState) @@ -370,7 +376,7 @@ func (tpn *TestProcessorNode) initInnerProcessors() { TestMarshalizer, tpn.ShardCoordinator, tpn.ScProcessor, - &mock.UnsignedTxHandlerMock{}, + rewardsHandler, txTypeHandler, ) @@ -573,6 +579,12 @@ func (tpn *TestProcessorNode) LoadTxSignSkBytes(skBytes []byte) { func (tpn *TestProcessorNode) ProposeBlock(round uint64, nonce uint64) (data.BodyHandler, data.HeaderHandler, [][]byte) { haveTime := func() bool { return true } + addresses := []string{ + 
"rewardAddr0000000000000000000000", + "rewardAddr0000000000000000000001", + } + tpn.BlockProcessor.SetConsensusRewardAddresses(addresses) + blockBody, err := tpn.BlockProcessor.CreateBlockBody(round, haveTime) if err != nil { fmt.Println(err.Error()) diff --git a/integrationTests/testProcessorNodeWithMultisigner.go b/integrationTests/testProcessorNodeWithMultisigner.go index 566f5b58204..0ba2a13e912 100644 --- a/integrationTests/testProcessorNodeWithMultisigner.go +++ b/integrationTests/testProcessorNodeWithMultisigner.go @@ -113,7 +113,7 @@ func ProposeBlockWithConsensusSignature( round uint64, nonce uint64, randomness []byte, -) (data.BodyHandler, data.HeaderHandler, [][]byte) { +) (data.BodyHandler, data.HeaderHandler, [][]byte, []*TestProcessorNode) { nodesCoordinator := nodesMap[shardId][0].NodesCoordinator pubKeys, err := nodesCoordinator.GetValidatorsPublicKeys(randomness, round, shardId) @@ -134,7 +134,7 @@ func ProposeBlockWithConsensusSignature( header.SetPrevRandSeed(randomness) header = DoConsensusSigningOnBlock(header, consensusNodes, pubKeys) - return body, header, txHashes + return body, header, txHashes, consensusNodes } func selectTestNodesForPubKeys(nodes []*TestProcessorNode, pubKeys []string) []*TestProcessorNode { diff --git a/process/block/preprocess/rewardTxPreProcessor.go b/process/block/preprocess/rewardTxPreProcessor.go index 0f13f7a3d1a..b794a8357d6 100644 --- a/process/block/preprocess/rewardTxPreProcessor.go +++ b/process/block/preprocess/rewardTxPreProcessor.go @@ -177,7 +177,8 @@ func (rtp *rewardTxPreprocessor) ProcessBlockTransactions(body block.Body, round if miniBlock.Type != block.RewardsBlockType { continue } - if miniBlock.ReceiverShardID != rtp.shardCoordinator.SelfId() { + if miniBlock.SenderShardID == rtp.shardCoordinator.SelfId() { + // if sender is the shard, then do this later when reward txs from fee are generated inside continue } @@ -283,9 +284,9 @@ func (rtp *rewardTxPreprocessor) computeMissingAndExistingRewardTxsForShards(bod if mb.Type != block.RewardsBlockType { continue } - //if mb.SenderShardID == rtp.shardCoordinator.SelfId() { - // continue - //} + if mb.SenderShardID == rtp.shardCoordinator.SelfId() { + continue + } rewardTxs = append(rewardTxs, mb) } @@ -338,6 +339,10 @@ func (rtp *rewardTxPreprocessor) computeMissingRewardTxsForMiniBlock(mb block.Mi return missingRewardTxs } + if mb.SenderShardID == rtp.shardCoordinator.SelfId() { + return missingRewardTxs + } + for _, txHash := range mb.TxHashes { tx, _ := process.GetTransactionHandlerFromPool( mb.SenderShardID, @@ -391,7 +396,7 @@ func (rtp *rewardTxPreprocessor) getAllRewardTxsFromMiniBlock( return rewardTxs, txHashes, nil } -// CreateAndProcessMiniBlock creates the miniblock from storage and processes the smartContractResults added into the miniblock +// CreateAndProcessMiniBlock creates the miniblock from storage and processes the reward transactions added into the miniblock func (rtp *rewardTxPreprocessor) CreateAndProcessMiniBlock(sndShardId, dstShardId uint32, spaceRemained int, haveTime func() bool, round uint64) (*block.MiniBlock, error) { return nil, nil } diff --git a/process/block/preprocess/rewardsHandler.go b/process/block/preprocess/rewardsHandler.go index a104b4559b5..3f7a49c1ad0 100644 --- a/process/block/preprocess/rewardsHandler.go +++ b/process/block/preprocess/rewardsHandler.go @@ -1,7 +1,6 @@ package preprocess import ( - "github.com/ElrondNetwork/elrond-go/sharding" "math/big" "sync" @@ -9,9 +8,12 @@ import ( "github.com/ElrondNetwork/elrond-go/data" 
"github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/data/rewardTx" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/hashing" "github.com/ElrondNetwork/elrond-go/marshal" "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/sharding" ) // MinGasPrice is the minimal gas price to be paid for any transaction @@ -30,21 +32,28 @@ var rewardValue = big.NewInt(1000) type rewardsHandler struct { address process.SpecialAddressHandler - shardCoordinator sharding.Coordinator hasher hashing.Hasher marshalizer marshal.Marshalizer - mut sync.Mutex - accumulatedFees *big.Int - - rewardTxsFromBlock map[string]*rewardTx.RewardTx + shardCoordinator sharding.Coordinator + adrConv state.AddressConverter + store dataRetriever.StorageService + rewardTxPool dataRetriever.ShardedDataCacherNotifier + protocolRewards []data.TransactionHandler + + mut sync.Mutex + accumulatedFees *big.Int + rewardTxsForBlock map[string]*rewardTx.RewardTx } // NewRewardTxHandler constructor for the reward transaction handler func NewRewardTxHandler( address process.SpecialAddressHandler, - shardCoordinator sharding.Coordinator, hasher hashing.Hasher, marshalizer marshal.Marshalizer, + shardCoordinator sharding.Coordinator, + adrConv state.AddressConverter, + store dataRetriever.StorageService, + rewardTxPool dataRetriever.ShardedDataCacherNotifier, ) (*rewardsHandler, error) { if address == nil { return nil, process.ErrNilSpecialAddressHandler @@ -58,36 +67,119 @@ func NewRewardTxHandler( if marshalizer == nil { return nil, process.ErrNilMarshalizer } + if store == nil { + return nil, process.ErrNilStorage + } rtxh := &rewardsHandler{ address: address, shardCoordinator: shardCoordinator, + adrConv: adrConv, hasher: hasher, marshalizer: marshalizer, + store: store, + rewardTxPool: rewardTxPool, } + rtxh.accumulatedFees = big.NewInt(0) - rtxh.rewardTxsFromBlock = make(map[string]*rewardTx.RewardTx) + rtxh.rewardTxsForBlock = make(map[string]*rewardTx.RewardTx) return rtxh, nil } // SaveCurrentIntermediateTxToStorage saves current cached data into storage - already saved for txs func (rtxh *rewardsHandler) SaveCurrentIntermediateTxToStorage() error { - //TODO implement me - save only created accumulatedFees + rtxh.mut.Lock() + defer rtxh.mut.Unlock() + + for _, rTx := range rtxh.rewardTxsForBlock { + buff, err := rtxh.marshalizer.Marshal(rTx) + if err != nil { + return err + } + + errNotCritical := rtxh.store.Put(dataRetriever.RewardTransactionUnit, rtxh.hasher.Compute(string(buff)), buff) + if errNotCritical != nil { + log.Error(errNotCritical.Error()) + } + } + return nil } // AddIntermediateTransactions adds intermediate transactions to local cache func (rtxh *rewardsHandler) AddIntermediateTransactions(txs []data.TransactionHandler) error { + rtxh.mut.Lock() + defer rtxh.mut.Unlock() + + for i := 0; i < len(txs); i++ { + addedRewardTx, ok := txs[i].(*rewardTx.RewardTx) + if !ok { + return process.ErrWrongTypeAssertion + } + + if addedRewardTx.ShardId != rtxh.shardCoordinator.SelfId() { + continue + } + + rewardTxHash, err := core.CalculateHash(rtxh.marshalizer, rtxh.hasher, txs[i]) + if err != nil { + return err + } + + rtxh.rewardTxsForBlock[string(rewardTxHash)] = addedRewardTx + } + return nil } +func (rtxh *rewardsHandler) getShardIdsFromAddress(addr []byte) (uint32, error) { + address, err := rtxh.adrConv.CreateAddressFromPublicKeyBytes(addr) + if err != nil { + return 
rtxh.shardCoordinator.NumberOfShards(), err + } + shardId := rtxh.shardCoordinator.ComputeId(address) + + return shardId, nil +} + // CreateAllInterMiniBlocks creates miniblocks from process transactions func (rtxh *rewardsHandler) CreateAllInterMiniBlocks() map[uint32]*block.MiniBlock { - calculatedRewardTxs := rtxh.CreateAllUTxs() + calculatedRewardTxs := make([]data.TransactionHandler, 0) + rewardsFromFees := rtxh.createRewardFromFees() + rtxh.addTransactionsToPool(rewardsFromFees) + calculatedRewardTxs = append(calculatedRewardTxs, rtxh.protocolRewards...) + calculatedRewardTxs = append(calculatedRewardTxs, rewardsFromFees...) + + miniBlocks := rtxh.miniblocksFromRewardTxs(calculatedRewardTxs) + + return miniBlocks +} + +func (rtxh *rewardsHandler) addTransactionsToPool(rewardTxs []data.TransactionHandler) { + for _, rTx := range rewardTxs { + dstShId, err := rtxh.address.ShardIdForAddress(rTx.GetRecvAddress()) + if err != nil { + log.Debug(err.Error()) + } + + txHash, err := core.CalculateHash(rtxh.marshalizer, rtxh.hasher, rTx) + if err != nil { + log.Debug(err.Error()) + } - miniBlocks := make(map[uint32]*block.MiniBlock) - for _, rTx := range calculatedRewardTxs { + // add the reward transaction to the the pool so that the processor can find it + cacheId := process.ShardCacherIdentifier(rtxh.shardCoordinator.SelfId(), dstShId) + rtxh.rewardTxPool.AddData(txHash, rTx, cacheId) + } +} + +func (rtxh *rewardsHandler) miniblocksFromRewardTxs( + rewardTxs []data.TransactionHandler, +) map[uint32]*block.MiniBlock { + miniBlocks := make(map[uint32]*block.MiniBlock, 0) + + for _, rTx := range rewardTxs { dstShId, err := rtxh.address.ShardIdForAddress(rTx.GetRecvAddress()) if err != nil { log.Debug(err.Error()) @@ -117,36 +209,39 @@ func (rtxh *rewardsHandler) CreateAllInterMiniBlocks() map[uint32]*block.MiniBlo // VerifyInterMiniBlocks verifies if transaction fees were correctly handled for the block func (rtxh *rewardsHandler) VerifyInterMiniBlocks(body block.Body) error { - err := rtxh.VerifyCreatedUTxs() - rtxh.CleanProcessedUTxs() + err := rtxh.verifyCreatedRewardsTxs() + rtxh.cleanCachedData() return err } // CreateBlockStarted does the cleanup before creating a new block func (rtxh *rewardsHandler) CreateBlockStarted() { - rtxh.CleanProcessedUTxs() + rtxh.cleanCachedData() + rewardTxs := rtxh.createProtocolRewards() + rtxh.addTransactionsToPool(rewardTxs) } -// CleanProcessedUTxs deletes the cached data -func (rtxh *rewardsHandler) CleanProcessedUTxs() { +// CreateMarshalizedData creates the marshalized data for broadcasting purposes +func (rtxh *rewardsHandler) CreateMarshalizedData(txHashes [][]byte) ([][]byte, error) { rtxh.mut.Lock() - rtxh.accumulatedFees = big.NewInt(0) - rtxh.rewardTxsFromBlock = make(map[string]*rewardTx.RewardTx) - rtxh.mut.Unlock() -} + defer rtxh.mut.Unlock() -// AddRewardTxFromBlock adds an existing reward transaction from block into local cache -func (rtxh *rewardsHandler) AddRewardTxFromBlock(tx data.TransactionHandler) { - currRewardTx, ok := tx.(*rewardTx.RewardTx) - if !ok { - log.Error(process.ErrWrongTypeAssertion.Error()) - return + marshaledTxs := make([][]byte, 0) + for _, txHash := range txHashes { + rTx, ok := rtxh.rewardTxsForBlock[string(txHash)] + if !ok { + return nil, process.ErrRewardTxNotFound + } + + marshaledTx, err := rtxh.marshalizer.Marshal(rTx) + if err != nil { + return nil, process.ErrMarshalWithoutSuccess + } + marshaledTxs = append(marshaledTxs, marshaledTx) } - rtxh.mut.Lock() - 
rtxh.rewardTxsFromBlock[string(tx.GetRecvAddress())] = currRewardTx - rtxh.mut.Unlock() + return marshaledTxs, nil } // ProcessTransactionFee adds the tx cost to the accumulated amount @@ -161,6 +256,14 @@ func (rtxh *rewardsHandler) ProcessTransactionFee(cost *big.Int) { rtxh.mut.Unlock() } +// cleanCachedData deletes the cached data +func (rtxh *rewardsHandler) cleanCachedData() { + rtxh.mut.Lock() + rtxh.accumulatedFees = big.NewInt(0) + rtxh.rewardTxsForBlock = make(map[string]*rewardTx.RewardTx) + rtxh.mut.Unlock() +} + func getPercentageOfValue(value *big.Int, percentage float64) *big.Int { x := new(big.Float).SetInt(value) y := big.NewFloat(percentage) @@ -200,22 +303,10 @@ func (rtxh *rewardsHandler) createCommunityTx() *rewardTx.RewardTx { return currTx } -// CreateAllUTxs creates all the needed reward transactions -// According to economic paper, out of the block fees 50% are burned, 40% go to the leader and 10% go -// to Elrond community fund. Fixed rewards for every validator are -func (rtxh *rewardsHandler) CreateAllUTxs() []data.TransactionHandler { - - rewardTxs := make([]data.TransactionHandler, 0) - rewardsFromFees := rtxh.createRewardTxsFromFee() - rewardsForConsensus := rtxh.createRewardTxsForConsensusGroup() - - rewardTxs = append(rewardTxs, rewardsFromFees...) - rewardTxs = append(rewardTxs, rewardsForConsensus...) - - return rewardTxs -} - -func (rtxh *rewardsHandler) createRewardTxsFromFee() []data.TransactionHandler { +// createRewardFromFees creates the reward transactions from accumulated fees +// According to economic paper, out of the block fees 50% are burned, 40% go to the +// leader and 10% go to Elrond community fund. +func (rtxh *rewardsHandler) createRewardFromFees() []data.TransactionHandler { rtxh.mut.Lock() defer rtxh.mut.Unlock() @@ -236,7 +327,8 @@ func (rtxh *rewardsHandler) createRewardTxsFromFee() []data.TransactionHandler { return currFeeTxs } -func (rtxh *rewardsHandler) createRewardTxsForConsensusGroup() []data.TransactionHandler { +// createProtocolRewards creates the protocol reward transactions +func (rtxh *rewardsHandler) createProtocolRewards() []data.TransactionHandler { consensusRewardAddresses := rtxh.address.ConsensusRewardAddresses() consensusRewardTxs := make([]data.TransactionHandler, 0) @@ -248,31 +340,42 @@ func (rtxh *rewardsHandler) createRewardTxsForConsensusGroup() []data.Transactio consensusRewardTxs = append(consensusRewardTxs, rTx) } + + rtxh.protocolRewards = consensusRewardTxs + return consensusRewardTxs } -// VerifyCreatedUTxs creates all fee txs from added values, than verifies if in block the values are the same -func (rtxh *rewardsHandler) VerifyCreatedUTxs() error { - calculatedFeeTxs := rtxh.CreateAllUTxs() +// VerifyCreatedRewardsTxs verifies if the calculated rewards transactions and the block reward transactions are the same +func (rtxh *rewardsHandler) verifyCreatedRewardsTxs() error { + calculatedRewardTxs := make([]data.TransactionHandler, 0) + rewardsFromFees := rtxh.createRewardFromFees() + calculatedRewardTxs = append(calculatedRewardTxs, rtxh.protocolRewards...) + calculatedRewardTxs = append(calculatedRewardTxs, rewardsFromFees...) 
rtxh.mut.Lock() defer rtxh.mut.Unlock() totalFeesFromBlock := big.NewInt(0) - for _, value := range rtxh.rewardTxsFromBlock { - totalFeesFromBlock = totalFeesFromBlock.Add(totalFeesFromBlock, value.Value) + for _, rTx := range rtxh.rewardTxsForBlock { + totalFeesFromBlock = totalFeesFromBlock.Add(totalFeesFromBlock, rTx.GetValue()) } totalCalculatedFees := big.NewInt(0) - for _, value := range calculatedFeeTxs { + for _, value := range calculatedRewardTxs { totalCalculatedFees = totalCalculatedFees.Add(totalCalculatedFees, value.GetValue()) - txFromBlock, ok := rtxh.rewardTxsFromBlock[string(value.GetRecvAddress())] + rewardTxHash, err := core.CalculateHash(rtxh.marshalizer, rtxh.hasher, value) + if err != nil { + return err + } + + txFromBlock, ok := rtxh.rewardTxsForBlock[string(rewardTxHash)] if !ok { - return process.ErrTxsFeesNotFound + return process.ErrRewardTxNotFound } - if txFromBlock.Value.Cmp(value.GetValue()) != 0 { - return process.ErrTxsFeesDoNotMatch + if txFromBlock.GetValue().Cmp(value.GetValue()) != 0 { + return process.ErrRewardTxsDoNotMatch } } @@ -282,10 +385,3 @@ func (rtxh *rewardsHandler) VerifyCreatedUTxs() error { return nil } - -// CreateMarshalizedData creates the marshalized data for broadcasting purposes -func (rtxh *rewardsHandler) CreateMarshalizedData(txHashes [][]byte) ([][]byte, error) { - // TODO: implement me - - return make([][]byte, 0), nil -} diff --git a/process/block/preprocess/rewardsHandler_test.go b/process/block/preprocess/rewardsHandler_test.go index b16f5272c44..b3c3fe7f932 100644 --- a/process/block/preprocess/rewardsHandler_test.go +++ b/process/block/preprocess/rewardsHandler_test.go @@ -1,11 +1,11 @@ package preprocess import ( + "github.com/ElrondNetwork/elrond-go/data" "math/big" "testing" "github.com/ElrondNetwork/elrond-go/data/rewardTx" - "github.com/ElrondNetwork/elrond-go/data/transaction" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/mock" "github.com/stretchr/testify/assert" @@ -14,11 +14,15 @@ import ( func TestNewRewardTxHandler_NilSpecialAddress(t *testing.T) { t.Parallel() + tdp := initDataPool() th, err := NewRewardTxHandler( nil, - mock.NewMultiShardsCoordinatorMock(3), &mock.HasherMock{}, &mock.MarshalizerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AddressConverterMock{}, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), ) assert.Nil(t, th) @@ -28,11 +32,15 @@ func TestNewRewardTxHandler_NilSpecialAddress(t *testing.T) { func TestNewRewardTxHandler_NilShardCoordinator(t *testing.T) { t.Parallel() + tdp := initDataPool() th, err := NewRewardTxHandler( &mock.SpecialAddressHandlerMock{}, - nil, &mock.HasherMock{}, &mock.MarshalizerMock{}, + nil, + &mock.AddressConverterMock{}, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), ) assert.Nil(t, th) @@ -42,11 +50,15 @@ func TestNewRewardTxHandler_NilShardCoordinator(t *testing.T) { func TestNewRewardTxHandler_NilHasher(t *testing.T) { t.Parallel() + tdp := initDataPool() th, err := NewRewardTxHandler( &mock.SpecialAddressHandlerMock{}, - mock.NewMultiShardsCoordinatorMock(3), nil, &mock.MarshalizerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AddressConverterMock{}, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), ) assert.Nil(t, th) @@ -56,11 +68,15 @@ func TestNewRewardTxHandler_NilHasher(t *testing.T) { func TestNewRewardTxHandler_NilMarshalizer(t *testing.T) { t.Parallel() + tdp := initDataPool() th, err := NewRewardTxHandler( &mock.SpecialAddressHandlerMock{}, - 
mock.NewMultiShardsCoordinatorMock(3), &mock.HasherMock{}, nil, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AddressConverterMock{}, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), ) assert.Nil(t, th) @@ -70,11 +86,15 @@ func TestNewRewardTxHandler_NilMarshalizer(t *testing.T) { func TestNewRewardTxHandler_ValsOk(t *testing.T) { t.Parallel() + tdp := initDataPool() th, err := NewRewardTxHandler( &mock.SpecialAddressHandlerMock{}, - mock.NewMultiShardsCoordinatorMock(3), &mock.HasherMock{}, &mock.MarshalizerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AddressConverterMock{}, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), ) assert.Nil(t, err) @@ -84,11 +104,15 @@ func TestNewRewardTxHandler_ValsOk(t *testing.T) { func TestRewardTxHandlerAddIntermediateTransactions(t *testing.T) { t.Parallel() + tdp := initDataPool() th, err := NewRewardTxHandler( &mock.SpecialAddressHandlerMock{}, - mock.NewMultiShardsCoordinatorMock(3), &mock.HasherMock{}, &mock.MarshalizerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AddressConverterMock{}, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), ) assert.Nil(t, err) @@ -101,11 +125,15 @@ func TestRewardTxHandlerAddIntermediateTransactions(t *testing.T) { func TestRewardTxHandlerProcessTransactionFee(t *testing.T) { t.Parallel() + tdp := initDataPool() th, err := NewRewardTxHandler( &mock.SpecialAddressHandlerMock{}, - mock.NewMultiShardsCoordinatorMock(3), &mock.HasherMock{}, &mock.MarshalizerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AddressConverterMock{}, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), ) assert.Nil(t, err) @@ -121,72 +149,57 @@ func TestRewardTxHandlerProcessTransactionFee(t *testing.T) { assert.Equal(t, big.NewInt(110), th.accumulatedFees) } -func TestRewardTxHandlerAddTxFeeFromBlock(t *testing.T) { - t.Parallel() - - th, err := NewRewardTxHandler( - &mock.SpecialAddressHandlerMock{}, - mock.NewMultiShardsCoordinatorMock(3), - &mock.HasherMock{}, - &mock.MarshalizerMock{}, - ) - - assert.Nil(t, err) - assert.NotNil(t, th) - - th.AddRewardTxFromBlock(nil) - assert.Equal(t, 0, len(th.rewardTxsFromBlock)) - - th.AddRewardTxFromBlock(&transaction.Transaction{}) - assert.Equal(t, 0, len(th.rewardTxsFromBlock)) - - th.AddRewardTxFromBlock(&rewardTx.RewardTx{}) - assert.Equal(t, 1, len(th.rewardTxsFromBlock)) -} - func TestRewardTxHandlerCleanProcessedUTxs(t *testing.T) { t.Parallel() + tdp := initDataPool() th, err := NewRewardTxHandler( &mock.SpecialAddressHandlerMock{}, - mock.NewMultiShardsCoordinatorMock(3), &mock.HasherMock{}, &mock.MarshalizerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AddressConverterMock{}, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), ) assert.Nil(t, err) assert.NotNil(t, th) th.ProcessTransactionFee(big.NewInt(10)) - th.AddRewardTxFromBlock(&rewardTx.RewardTx{}) + _ = th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{}}) assert.Equal(t, big.NewInt(10), th.accumulatedFees) - assert.Equal(t, 1, len(th.rewardTxsFromBlock)) + assert.Equal(t, 1, len(th.rewardTxsForBlock)) - th.CleanProcessedUTxs() + th.cleanCachedData() assert.Equal(t, big.NewInt(0), th.accumulatedFees) - assert.Equal(t, 0, len(th.rewardTxsFromBlock)) + assert.Equal(t, 0, len(th.rewardTxsForBlock)) } func TestRewardTxHandlerCreateAllUTxs(t *testing.T) { t.Parallel() + tdp := initDataPool() th, err := NewRewardTxHandler( &mock.SpecialAddressHandlerMock{}, - mock.NewMultiShardsCoordinatorMock(3), &mock.HasherMock{}, &mock.MarshalizerMock{}, + 
mock.NewMultiShardsCoordinatorMock(3), + &mock.AddressConverterMock{}, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), ) assert.Nil(t, err) assert.NotNil(t, th) - txs := th.CreateAllUTxs() + txs := th.createRewardFromFees() assert.Equal(t, 0, len(txs)) currTxFee := big.NewInt(50) th.ProcessTransactionFee(currTxFee) - txs = th.CreateAllUTxs() + txs = th.createRewardFromFees() assert.Equal(t, 3, len(txs)) totalSum := txs[0].GetValue().Uint64() @@ -196,65 +209,73 @@ func TestRewardTxHandlerCreateAllUTxs(t *testing.T) { assert.Equal(t, currTxFee.Uint64(), totalSum) } -func TestRewardTxHandlerVerifyCreatedUTxs(t *testing.T) { +func TestRewardTxHandlerVerifyCreatedRewardsTxs(t *testing.T) { t.Parallel() + tdp := initDataPool() addr := &mock.SpecialAddressHandlerMock{} th, err := NewRewardTxHandler( addr, - mock.NewMultiShardsCoordinatorMock(3), &mock.HasherMock{}, &mock.MarshalizerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AddressConverterMock{}, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), ) assert.Nil(t, err) assert.NotNil(t, th) - err = th.VerifyCreatedUTxs() + err = th.verifyCreatedRewardsTxs() assert.Nil(t, err) currTxFee := big.NewInt(50) th.ProcessTransactionFee(currTxFee) - err = th.VerifyCreatedUTxs() - assert.Equal(t, process.ErrTxsFeesNotFound, err) + err = th.verifyCreatedRewardsTxs() + assert.Equal(t, process.ErrRewardTxNotFound, err) badValue := big.NewInt(100) - th.AddRewardTxFromBlock(&rewardTx.RewardTx{Value: badValue}) + _ = th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{Value: badValue}}) - err = th.VerifyCreatedUTxs() + err = th.verifyCreatedRewardsTxs() assert.Equal(t, process.ErrTotalTxsFeesDoNotMatch, err) - th.CleanProcessedUTxs() + th.cleanCachedData() currTxFee = big.NewInt(50) halfCurrTxFee := big.NewInt(25) th.ProcessTransactionFee(currTxFee) - th.AddRewardTxFromBlock(&rewardTx.RewardTx{Value: halfCurrTxFee}) + _ = th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{Value: halfCurrTxFee}}) - err = th.VerifyCreatedUTxs() - assert.Equal(t, process.ErrTxsFeesNotFound, err) + err = th.verifyCreatedRewardsTxs() + assert.Equal(t, process.ErrRewardTxNotFound, err) - th.CleanProcessedUTxs() + th.cleanCachedData() currTxFee = big.NewInt(50) th.ProcessTransactionFee(currTxFee) - th.AddRewardTxFromBlock(&rewardTx.RewardTx{Value: big.NewInt(5), RcvAddr: addr.ElrondCommunityAddress()}) - th.AddRewardTxFromBlock(&rewardTx.RewardTx{Value: big.NewInt(20), RcvAddr: addr.LeaderAddress()}) - th.AddRewardTxFromBlock(&rewardTx.RewardTx{Value: big.NewInt(25), RcvAddr: addr.BurnAddress()}) + _ = th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{Value: big.NewInt(5), RcvAddr: addr.ElrondCommunityAddress()}}) + _ = th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{Value: big.NewInt(20), RcvAddr: addr.LeaderAddress()}}) + _ = th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{Value: big.NewInt(25), RcvAddr: addr.BurnAddress()}}) - err = th.VerifyCreatedUTxs() + err = th.verifyCreatedRewardsTxs() assert.Nil(t, err) } func TestRewardTxHandlerCreateAllInterMiniBlocks(t *testing.T) { t.Parallel() + tdp := initDataPool() th, err := NewRewardTxHandler( &mock.SpecialAddressHandlerMock{}, - mock.NewMultiShardsCoordinatorMock(3), &mock.HasherMock{}, &mock.MarshalizerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AddressConverterMock{}, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), ) assert.Nil(t, err) @@ -273,12 +294,16 @@ 
func TestRewardTxHandlerCreateAllInterMiniBlocks(t *testing.T) { func TestRewardTxHandlerVerifyInterMiniBlocks(t *testing.T) { t.Parallel() + tdp := initDataPool() addr := &mock.SpecialAddressHandlerMock{} th, err := NewRewardTxHandler( addr, - mock.NewMultiShardsCoordinatorMock(3), &mock.HasherMock{}, &mock.MarshalizerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AddressConverterMock{}, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), ) assert.Nil(t, err) @@ -291,28 +316,28 @@ func TestRewardTxHandlerVerifyInterMiniBlocks(t *testing.T) { th.ProcessTransactionFee(currTxFee) err = th.VerifyInterMiniBlocks(nil) - assert.Equal(t, process.ErrTxsFeesNotFound, err) + assert.Equal(t, process.ErrRewardTxNotFound, err) badValue := big.NewInt(100) - th.AddRewardTxFromBlock(&rewardTx.RewardTx{Value: badValue}) + _ = th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{Value: badValue}}) err = th.VerifyInterMiniBlocks(nil) assert.Equal(t, process.ErrTotalTxsFeesDoNotMatch, err) - th.CleanProcessedUTxs() + th.cleanCachedData() currTxFee = big.NewInt(50) halfCurrTxFee := big.NewInt(25) th.ProcessTransactionFee(currTxFee) - th.AddRewardTxFromBlock(&rewardTx.RewardTx{Value: halfCurrTxFee}) + _ = th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{Value: halfCurrTxFee}}) err = th.VerifyInterMiniBlocks(nil) - assert.Equal(t, process.ErrTxsFeesNotFound, err) + assert.Equal(t, process.ErrRewardTxNotFound, err) - th.CleanProcessedUTxs() + th.cleanCachedData() currTxFee = big.NewInt(50) th.ProcessTransactionFee(currTxFee) - th.AddRewardTxFromBlock(&rewardTx.RewardTx{Value: big.NewInt(5), RcvAddr: addr.ElrondCommunityAddress()}) - th.AddRewardTxFromBlock(&rewardTx.RewardTx{Value: big.NewInt(20), RcvAddr: addr.LeaderAddress()}) + _ = th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{Value: big.NewInt(5), RcvAddr: addr.ElrondCommunityAddress()}}) + _ = th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{Value: big.NewInt(20), RcvAddr: addr.LeaderAddress()}}) } diff --git a/process/block/preprocess/transactions_test.go b/process/block/preprocess/transactions_test.go index 62cc1a56b91..4844f58b441 100644 --- a/process/block/preprocess/transactions_test.go +++ b/process/block/preprocess/transactions_test.go @@ -4,6 +4,8 @@ import ( "bytes" "encoding/hex" "fmt" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" + "math/big" "math/rand" "reflect" "sync" @@ -46,6 +48,7 @@ func initDataPool() *mock.PoolsHolderStub { }, } }, + AddDataCalled: func(key []byte, data interface{}, cacheId string) {}, RemoveSetOfDataFromPoolCalled: func(keys [][]byte, id string) {}, SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { if reflect.DeepEqual(key, []byte("tx1_hash")) { @@ -74,6 +77,7 @@ func initDataPool() *mock.PoolsHolderStub { }, } }, + AddDataCalled: func(key []byte, data interface{}, cacheId string) {}, RemoveSetOfDataFromPoolCalled: func(keys [][]byte, id string) {}, SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { if reflect.DeepEqual(key, []byte("tx1_hash")) { @@ -83,6 +87,35 @@ func initDataPool() *mock.PoolsHolderStub { }, } }, + RewardTransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{ + RegisterHandlerCalled: func(i func(key []byte)) {}, + ShardDataStoreCalled: func(id string) (c storage.Cacher) { + return &mock.CacherStub{ + PeekCalled: func(key []byte) (value interface{}, ok bool) { + if reflect.DeepEqual(key, 
[]byte("tx1_hash")) { + return &rewardTx.RewardTx{Value: big.NewInt(100)}, true + } + return nil, false + }, + KeysCalled: func() [][]byte { + return [][]byte{[]byte("key1"), []byte("key2")} + }, + LenCalled: func() int { + return 0 + }, + } + }, + AddDataCalled: func(key []byte, data interface{}, cacheId string) {}, + RemoveSetOfDataFromPoolCalled: func(keys [][]byte, id string) {}, + SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { + if reflect.DeepEqual(key, []byte("tx1_hash")) { + return &rewardTx.RewardTx{Value: big.NewInt(100)}, true + } + return nil, false + }, + } + }, HeadersNoncesCalled: func() dataRetriever.Uint64SyncMapCacher { return &mock.Uint64SyncMapCacherStub{} }, diff --git a/process/block/shardblock_test.go b/process/block/shardblock_test.go index 611287cf46d..107cde5d70c 100644 --- a/process/block/shardblock_test.go +++ b/process/block/shardblock_test.go @@ -4606,7 +4606,7 @@ func TestShardProcessor_GetHighestHdrForOwnShardFromMetachainNothingToProcess(t func TestShardProcessor_GetHighestHdrForOwnShardFromMetachaiMetaHdrsWithoutOwnHdr(t *testing.T) { t.Parallel() - dataPool := integrationTests.CreateTestShardDataPool(nil) + dataPool := integrationTests.CreateTestShardDataPool(nil, 3) store := initStore() hasher := &mock.HasherMock{} marshalizer := &mock.MarshalizerMock{} @@ -4672,7 +4672,7 @@ func TestShardProcessor_GetHighestHdrForOwnShardFromMetachaiMetaHdrsWithoutOwnHd func TestShardProcessor_GetHighestHdrForOwnShardFromMetachaiMetaHdrsWithOwnHdrButNotStored(t *testing.T) { t.Parallel() - dataPool := integrationTests.CreateTestShardDataPool(nil) + dataPool := integrationTests.CreateTestShardDataPool(nil, 3) store := initStore() hasher := &mock.HasherMock{} marshalizer := &mock.MarshalizerMock{} @@ -4737,7 +4737,7 @@ func TestShardProcessor_GetHighestHdrForOwnShardFromMetachaiMetaHdrsWithOwnHdrBu func TestShardProcessor_GetHighestHdrForOwnShardFromMetachaiMetaHdrsWithOwnHdrStored(t *testing.T) { t.Parallel() - dataPool := integrationTests.CreateTestShardDataPool(nil) + dataPool := integrationTests.CreateTestShardDataPool(nil, 3) store := initStore() hasher := &mock.HasherMock{} marshalizer := &mock.MarshalizerMock{} diff --git a/process/coordinator/process.go b/process/coordinator/process.go index d1005c39d85..6a545e401c9 100644 --- a/process/coordinator/process.go +++ b/process/coordinator/process.go @@ -1,6 +1,7 @@ package coordinator import ( + "fmt" "sort" "sync" "time" @@ -74,6 +75,7 @@ func NewTransactionCoordinator( if tc.miniBlockPool == nil { return nil, process.ErrNilMiniBlockPool } + tc.miniBlockPool.RegisterHandler(tc.receivedMiniBlock) tc.onRequestMiniBlock = requestHandler.RequestMiniBlock @@ -503,26 +505,40 @@ func (tc *transactionCoordinator) CreateMbsAndProcessTransactionsFromMe( miniBlocks = append(miniBlocks, interMBs...) } - tc.addRewardsMiniBlocks(&miniBlocks) + rewardMb := tc.createRewardsMiniBlocks() + if len(rewardMb) == 0 { + log.Error("could not create reward mini-blocks") + } + + rewardsPreProc := tc.getPreProcessor(block.RewardsBlockType) + for _, mb := range rewardMb { + err := tc.processCompleteMiniBlock(rewardsPreProc, mb, round, haveTime) + if err != nil { + log.Error(fmt.Sprintf("could not process created reward miniblock: %s", err.Error())) + } + } + miniBlocks = append(miniBlocks, rewardMb...) 
return miniBlocks } -func (tc *transactionCoordinator) addRewardsMiniBlocks(miniBlocks *block.MiniBlockSlice) { +func (tc *transactionCoordinator) createRewardsMiniBlocks() block.MiniBlockSlice { // add rewards transactions to separate miniBlocks interimProc := tc.getInterimProcessor(block.RewardsBlockType) if interimProc == nil { - return + return nil } + miniBlocks := make(block.MiniBlockSlice, 0) rewardsMbs := interimProc.CreateAllInterMiniBlocks() for key, mb := range rewardsMbs { - mb.ReceiverShardID = key - mb.SenderShardID = tc.shardCoordinator.SelfId() - mb.Type = block.RewardsBlockType - - *miniBlocks = append(*miniBlocks, mb) + mb.ReceiverShardID = key + mb.SenderShardID = tc.shardCoordinator.SelfId() + mb.Type = block.RewardsBlockType + miniBlocks = append(miniBlocks, mb) } + + return miniBlocks } func (tc *transactionCoordinator) processAddedInterimTransactions() block.MiniBlockSlice { diff --git a/process/coordinator/process_test.go b/process/coordinator/process_test.go index 0d5dd2160da..c806e3fd9da 100644 --- a/process/coordinator/process_test.go +++ b/process/coordinator/process_test.go @@ -382,6 +382,7 @@ func createInterimProcessorContainer() process.IntermediateProcessorContainer { &mock.AddressConverterMock{}, &mock.SpecialAddressHandlerMock{}, initStore(), + initDataPool([]byte("test_hash1")), ) container, _ := preFactory.Create() @@ -883,12 +884,12 @@ func TestTransactionCoordinator_CreateMbsAndProcessTransactionsFromMe(t *testing func TestTransactionCoordinator_CreateMbsAndProcessTransactionsFromMeMultipleMiniblocks(t *testing.T) { t.Parallel() - txPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache}) + nrShards := uint32(5) + txPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: nrShards}) tdp := initDataPool([]byte("tx_hash1")) tdp.TransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { return txPool } - nrShards := uint32(5) tc, err := NewTransactionCoordinator( mock.NewMultiShardsCoordinatorMock(nrShards), @@ -1660,6 +1661,7 @@ func TestTransactionCoordinator_VerifyCreatedBlockTransactionsNilOrMiss(t *testi adrConv, &mock.SpecialAddressHandlerMock{}, &mock.ChainStorerMock{}, + tdp, ) container, _ := preFactory.Create() @@ -1704,6 +1706,7 @@ func TestTransactionCoordinator_VerifyCreatedBlockTransactionsOk(t *testing.T) { adrConv, &mock.SpecialAddressHandlerMock{}, &mock.ChainStorerMock{}, + tdp, ) container, _ := preFactory.Create() diff --git a/process/errors.go b/process/errors.go index 130568c1c96..9f32c03795a 100644 --- a/process/errors.go +++ b/process/errors.go @@ -445,11 +445,11 @@ var ErrNotEnoughFeeInTransactions = errors.New("transaction fee is not enough") // ErrNilUnsignedTxHandler signals that the unsigned tx handler is nil var ErrNilUnsignedTxHandler = errors.New("nil unsigned tx handler") -// ErrTxsFeesDoNotMatch signals that txs fees do not match -var ErrTxsFeesDoNotMatch = errors.New("calculated tx fees with block tx fee does not match") +// ErrRewardTxsDoNotMatch signals that reward txs do not match +var ErrRewardTxsDoNotMatch = errors.New("calculated reward tx with block reward tx does not match") -// ErrTxsFeesNotFound signals that the tx fee not found -var ErrTxsFeesNotFound = errors.New("tx fees not found") +// ErrRewardTxNotFound signals that the reward transaction was not found +var ErrRewardTxNotFound = errors.New("reward transaction not found") // ErrTotalTxsFeesDoNotMatch signals that the total tx fee do not match 
var ErrTotalTxsFeesDoNotMatch = errors.New("total tx fees do not match") diff --git a/process/factory/shard/interceptorsContainerFactory.go b/process/factory/shard/interceptorsContainerFactory.go index a1e5a2b1dee..11e050b489a 100644 --- a/process/factory/shard/interceptorsContainerFactory.go +++ b/process/factory/shard/interceptorsContainerFactory.go @@ -119,6 +119,16 @@ func (icf *interceptorsContainerFactory) Create() (process.InterceptorsContainer return nil, err } + keys, interceptorSlice, err = icf.generateRewardTxInterceptors() + if err != nil { + return nil, err + } + + err = container.AddMultiple(keys, interceptorSlice) + if err != nil { + return nil, err + } + keys, interceptorSlice, err = icf.generateHdrInterceptor() if err != nil { return nil, err diff --git a/process/factory/shard/interceptorsContainerFactory_test.go b/process/factory/shard/interceptorsContainerFactory_test.go index 1bd7734935e..9d762fbdf14 100644 --- a/process/factory/shard/interceptorsContainerFactory_test.go +++ b/process/factory/shard/interceptorsContainerFactory_test.go @@ -65,6 +65,9 @@ func createDataPools() dataRetriever.PoolsHolder { pools.UnsignedTransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { return &mock.ShardedDataStub{} } + pools.RewardTransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{} + } return pools } @@ -632,12 +635,15 @@ func TestInterceptorsContainerFactory_With4ShardsShouldWork(t *testing.T) { container, _ := icf.Create() numInterceptorTxs := noOfShards + 1 + numInterceptorsUnsignedTxs := numInterceptorTxs + numInterceptorsRewardTxs := numInterceptorTxs numInterceptorHeaders := 1 numInterceptorMiniBlocks := noOfShards numInterceptorPeerChanges := 1 numInterceptorMetachainHeaders := 1 totalInterceptors := numInterceptorTxs + numInterceptorHeaders + numInterceptorMiniBlocks + - numInterceptorPeerChanges + numInterceptorMetachainHeaders + numInterceptorTxs + numInterceptorPeerChanges + numInterceptorMetachainHeaders + numInterceptorsUnsignedTxs + + numInterceptorsRewardTxs assert.Equal(t, totalInterceptors, container.Len()) } diff --git a/process/factory/shard/intermediateProcessorsContainerFactory.go b/process/factory/shard/intermediateProcessorsContainerFactory.go index 8666b0f66fb..8d945638c4d 100644 --- a/process/factory/shard/intermediateProcessorsContainerFactory.go +++ b/process/factory/shard/intermediateProcessorsContainerFactory.go @@ -19,6 +19,7 @@ type intermediateProcessorsContainerFactory struct { addrConverter state.AddressConverter specialAddressHandler process.SpecialAddressHandler store dataRetriever.StorageService + poolsHolder dataRetriever.PoolsHolder } // NewIntermediateProcessorsContainerFactory is responsible for creating a new intermediate processors factory object @@ -29,6 +30,7 @@ func NewIntermediateProcessorsContainerFactory( addrConverter state.AddressConverter, specialAddressHandler process.SpecialAddressHandler, store dataRetriever.StorageService, + poolsHolder dataRetriever.PoolsHolder, ) (*intermediateProcessorsContainerFactory, error) { if shardCoordinator == nil { @@ -49,6 +51,9 @@ func NewIntermediateProcessorsContainerFactory( if store == nil { return nil, process.ErrNilStorage } + if poolsHolder == nil { + return nil, process.ErrNilPoolsHolder + } return &intermediateProcessorsContainerFactory{ shardCoordinator: shardCoordinator, @@ -57,6 +62,7 @@ func NewIntermediateProcessorsContainerFactory( addrConverter: addrConverter, specialAddressHandler: specialAddressHandler, store: store, 
+ poolsHolder: poolsHolder, }, nil } @@ -103,9 +109,12 @@ func (ppcm *intermediateProcessorsContainerFactory) createSmartContractResultsIn func (ppcm *intermediateProcessorsContainerFactory) createRewardsTxIntermediateProcessor() (process.IntermediateTransactionHandler, error) { irp, err := preprocess.NewRewardTxHandler( ppcm.specialAddressHandler, - ppcm.shardCoordinator, ppcm.hasher, ppcm.marshalizer, + ppcm.shardCoordinator, + ppcm.addrConverter, + ppcm.store, + ppcm.poolsHolder.RewardTransactions(), ) return irp, err diff --git a/process/factory/shard/intermediateProcessorsContainerFactory_test.go b/process/factory/shard/intermediateProcessorsContainerFactory_test.go index f002f83dfe5..3c596728885 100644 --- a/process/factory/shard/intermediateProcessorsContainerFactory_test.go +++ b/process/factory/shard/intermediateProcessorsContainerFactory_test.go @@ -1,7 +1,8 @@ -package shard +package shard_test import ( "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/factory/shard" "github.com/ElrondNetwork/elrond-go/process/mock" "github.com/stretchr/testify/assert" "testing" @@ -10,13 +11,15 @@ import ( func TestNewIntermediateProcessorsContainerFactory_NilShardCoord(t *testing.T) { t.Parallel() - ipcf, err := NewIntermediateProcessorsContainerFactory( + dPool := createDataPools() + ipcf, err := shard.NewIntermediateProcessorsContainerFactory( nil, &mock.MarshalizerMock{}, &mock.HasherMock{}, &mock.AddressConverterMock{}, &mock.SpecialAddressHandlerMock{}, &mock.ChainStorerMock{}, + dPool, ) assert.Nil(t, ipcf) @@ -26,13 +29,15 @@ func TestNewIntermediateProcessorsContainerFactory_NilShardCoord(t *testing.T) { func TestNewIntermediateProcessorsContainerFactory_NilMarshalizer(t *testing.T) { t.Parallel() - ipcf, err := NewIntermediateProcessorsContainerFactory( + dPool := createDataPools() + ipcf, err := shard.NewIntermediateProcessorsContainerFactory( mock.NewMultiShardsCoordinatorMock(3), nil, &mock.HasherMock{}, &mock.AddressConverterMock{}, &mock.SpecialAddressHandlerMock{}, &mock.ChainStorerMock{}, + dPool, ) assert.Nil(t, ipcf) @@ -42,13 +47,15 @@ func TestNewIntermediateProcessorsContainerFactory_NilMarshalizer(t *testing.T) func TestNewIntermediateProcessorsContainerFactory_NilHasher(t *testing.T) { t.Parallel() - ipcf, err := NewIntermediateProcessorsContainerFactory( + dPool := createDataPools() + ipcf, err := shard.NewIntermediateProcessorsContainerFactory( mock.NewMultiShardsCoordinatorMock(3), &mock.MarshalizerMock{}, nil, &mock.AddressConverterMock{}, &mock.SpecialAddressHandlerMock{}, &mock.ChainStorerMock{}, + dPool, ) assert.Nil(t, ipcf) @@ -58,13 +65,15 @@ func TestNewIntermediateProcessorsContainerFactory_NilHasher(t *testing.T) { func TestNewIntermediateProcessorsContainerFactory_NilAdrConv(t *testing.T) { t.Parallel() - ipcf, err := NewIntermediateProcessorsContainerFactory( + dPool := createDataPools() + ipcf, err := shard.NewIntermediateProcessorsContainerFactory( mock.NewMultiShardsCoordinatorMock(3), &mock.MarshalizerMock{}, &mock.HasherMock{}, nil, &mock.SpecialAddressHandlerMock{}, &mock.ChainStorerMock{}, + dPool, ) assert.Nil(t, ipcf) @@ -74,13 +83,15 @@ func TestNewIntermediateProcessorsContainerFactory_NilAdrConv(t *testing.T) { func TestNewIntermediateProcessorsContainerFactory_NilStorer(t *testing.T) { t.Parallel() - ipcf, err := NewIntermediateProcessorsContainerFactory( + dPool := createDataPools() + ipcf, err := shard.NewIntermediateProcessorsContainerFactory( mock.NewMultiShardsCoordinatorMock(3), 
&mock.MarshalizerMock{}, &mock.HasherMock{}, &mock.AddressConverterMock{}, &mock.SpecialAddressHandlerMock{}, nil, + dPool, ) assert.Nil(t, ipcf) @@ -90,13 +101,15 @@ func TestNewIntermediateProcessorsContainerFactory_NilStorer(t *testing.T) { func TestNewIntermediateProcessorsContainerFactory(t *testing.T) { t.Parallel() - ipcf, err := NewIntermediateProcessorsContainerFactory( + dPool := createDataPools() + ipcf, err := shard.NewIntermediateProcessorsContainerFactory( mock.NewMultiShardsCoordinatorMock(3), &mock.MarshalizerMock{}, &mock.HasherMock{}, &mock.AddressConverterMock{}, &mock.SpecialAddressHandlerMock{}, &mock.ChainStorerMock{}, + dPool, ) assert.Nil(t, err) @@ -106,13 +119,15 @@ func TestNewIntermediateProcessorsContainerFactory(t *testing.T) { func TestIntermediateProcessorsContainerFactory_Create(t *testing.T) { t.Parallel() - ipcf, err := NewIntermediateProcessorsContainerFactory( + dPool := createDataPools() + ipcf, err := shard.NewIntermediateProcessorsContainerFactory( mock.NewMultiShardsCoordinatorMock(3), &mock.MarshalizerMock{}, &mock.HasherMock{}, &mock.AddressConverterMock{}, &mock.SpecialAddressHandlerMock{}, &mock.ChainStorerMock{}, + dPool, ) assert.Nil(t, err) diff --git a/process/interface.go b/process/interface.go index a8a67829cd1..1fa4ae9e6f2 100644 --- a/process/interface.go +++ b/process/interface.go @@ -24,6 +24,7 @@ type TransactionProcessor interface { // RewardTransactionProcessor is the interface for reward transaction execution engine type RewardTransactionProcessor interface { + ProcessCreatedRewardTransaction(reward *rewardTx.RewardTx) error ProcessRewardTransaction(rewardTx *rewardTx.RewardTx) error } @@ -92,12 +93,8 @@ type TransactionVerifier interface { } // UnsignedTxHandler creates and verifies unsigned transactions for current round -type UnsignedTxHandler interface { - CleanProcessedUTxs() +type TransactionFeeHandler interface { ProcessTransactionFee(cost *big.Int) - CreateAllUTxs() []data.TransactionHandler - VerifyCreatedUTxs() error - AddRewardTxFromBlock(tx data.TransactionHandler) } // SpecialAddressHandler responds with needed special addresses diff --git a/process/mock/rewardTxProcessorMock.go b/process/mock/rewardTxProcessorMock.go index 737fa2f7b38..447e68129b8 100644 --- a/process/mock/rewardTxProcessorMock.go +++ b/process/mock/rewardTxProcessorMock.go @@ -6,6 +6,7 @@ import ( type RewardTxProcessorMock struct { ProcessRewardTransactionCalled func(rTx *rewardTx.RewardTx) error + ProcessCreatedRewardTransactionCalled func(rTx *rewardTx.RewardTx) error } func (scrp *RewardTxProcessorMock) ProcessRewardTransaction(rTx *rewardTx.RewardTx) error { @@ -15,3 +16,11 @@ func (scrp *RewardTxProcessorMock) ProcessRewardTransaction(rTx *rewardTx.Reward return scrp.ProcessRewardTransactionCalled(rTx) } + +func (scrp *RewardTxProcessorMock) ProcessCreatedRewardTransaction(rTx *rewardTx.RewardTx) error { + if scrp.ProcessCreatedRewardTransactionCalled == nil { + return nil + } + + return scrp.ProcessCreatedRewardTransactionCalled(rTx) +} \ No newline at end of file diff --git a/process/rewardTransaction/intercepteRewardTransaction.go b/process/rewardTransaction/interceptedRewardTransaction.go similarity index 100% rename from process/rewardTransaction/intercepteRewardTransaction.go rename to process/rewardTransaction/interceptedRewardTransaction.go diff --git a/process/rewardTransaction/process.go b/process/rewardTransaction/process.go index a8773f3aa89..d2356288410 100644 --- a/process/rewardTransaction/process.go +++ 
b/process/rewardTransaction/process.go @@ -1,7 +1,9 @@ package rewardTransaction import ( + "github.com/ElrondNetwork/elrond-go/data" "math/big" + "sync" "github.com/ElrondNetwork/elrond-go/data/rewardTx" "github.com/ElrondNetwork/elrond-go/data/state" @@ -13,12 +15,16 @@ type rewardTxProcessor struct { accounts state.AccountsAdapter adrConv state.AddressConverter shardCoordinator sharding.Coordinator + + mutRewardsForwarder sync.Mutex + rewardTxForwarder process.IntermediateTransactionHandler } func NewRewardTxProcessor( accountsDB state.AccountsAdapter, adrConv state.AddressConverter, coordinator sharding.Coordinator, + rewardTxForwarder process.IntermediateTransactionHandler, ) (*rewardTxProcessor, error) { if accountsDB == nil { return nil, process.ErrNilAccountsAdapter @@ -31,25 +37,26 @@ func NewRewardTxProcessor( } return &rewardTxProcessor{ - accounts: accountsDB, - adrConv: adrConv, - shardCoordinator: coordinator, + accounts: accountsDB, + adrConv: adrConv, + shardCoordinator: coordinator, + rewardTxForwarder: rewardTxForwarder, }, nil } func (rtp *rewardTxProcessor) getAccountFromAddress(address []byte) (state.AccountHandler, error) { - adrSrc, err := rtp.adrConv.CreateAddressFromPublicKeyBytes(address) + addr, err := rtp.adrConv.CreateAddressFromPublicKeyBytes(address) if err != nil { return nil, err } shardForCurrentNode := rtp.shardCoordinator.SelfId() - shardForSrc := rtp.shardCoordinator.ComputeId(adrSrc) - if shardForCurrentNode != shardForSrc { + shardForAddr := rtp.shardCoordinator.ComputeId(addr) + if shardForCurrentNode != shardForAddr { return nil, nil } - acnt, err := rtp.accounts.GetAccountWithJournal(adrSrc) + acnt, err := rtp.accounts.GetAccountWithJournal(addr) if err != nil { return nil, err } @@ -57,18 +64,31 @@ func (rtp *rewardTxProcessor) getAccountFromAddress(address []byte) (state.Accou return acnt, nil } +// ProcessCreatedRewardTransaction updates the account state from the reward transaction +func (rtp *rewardTxProcessor) ProcessCreatedRewardTransaction(rTx *rewardTx.RewardTx) error { + return rtp.ProcessRewardTransaction(rTx) +} + // ProcessRewardTransaction updates the account state from the reward transaction func (rtp *rewardTxProcessor) ProcessRewardTransaction(rTx *rewardTx.RewardTx) error { if rTx == nil { return process.ErrNilRewardTransaction } + if rTx.Value == nil { + return process.ErrNilValueFromRewardTransaction + } accHandler, err := rtp.getAccountFromAddress(rTx.RcvAddr) if err != nil { return err } + if accHandler == nil || accHandler.IsInterfaceNil() { - return process.ErrNilSCDestAccount + rtp.mutRewardsForwarder.Lock() + err = rtp.rewardTxForwarder.AddIntermediateTransactions([]data.TransactionHandler{rTx}) + rtp.mutRewardsForwarder.Unlock() + + return err } rewardAcc, ok := accHandler.(*state.Account) @@ -76,10 +96,6 @@ func (rtp *rewardTxProcessor) ProcessRewardTransaction(rTx *rewardTx.RewardTx) e return process.ErrWrongTypeAssertion } - if rTx.Value == nil { - return process.ErrNilValueFromRewardTransaction - } - operation := big.NewInt(0) operation = operation.Add(rTx.Value, rewardAcc.Balance) err = rewardAcc.SetBalanceWithJournal(operation) @@ -87,5 +103,9 @@ func (rtp *rewardTxProcessor) ProcessRewardTransaction(rTx *rewardTx.RewardTx) e return err } - return nil + rtp.mutRewardsForwarder.Lock() + err = rtp.rewardTxForwarder.AddIntermediateTransactions([]data.TransactionHandler{rTx}) + rtp.mutRewardsForwarder.Unlock() + + return err } diff --git a/process/smartContract/process.go b/process/smartContract/process.go index 
922fe367c21..1128b022849 100644 --- a/process/smartContract/process.go +++ b/process/smartContract/process.go @@ -41,7 +41,7 @@ type scProcessor struct { mapExecState map[uint64]scExecutionState scrForwarder process.IntermediateTransactionHandler - txFeeHandler process.UnsignedTxHandler + txFeeHandler process.TransactionFeeHandler } var log = logger.DefaultLogger() @@ -57,7 +57,7 @@ func NewSmartContractProcessor( adrConv state.AddressConverter, coordinator sharding.Coordinator, scrForwarder process.IntermediateTransactionHandler, - txFeeHandler process.UnsignedTxHandler, + txFeeHandler process.TransactionFeeHandler, ) (*scProcessor, error) { if vmContainer == nil { return nil, process.ErrNoVM diff --git a/process/transaction/process.go b/process/transaction/process.go index 0a1d6abaae3..90b20796940 100644 --- a/process/transaction/process.go +++ b/process/transaction/process.go @@ -2,12 +2,10 @@ package transaction import ( "bytes" - "github.com/ElrondNetwork/elrond-go/data/rewardTx" "github.com/ElrondNetwork/elrond-go/process/block/preprocess" "math/big" "github.com/ElrondNetwork/elrond-go/core/logger" - "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/data/transaction" "github.com/ElrondNetwork/elrond-go/hashing" @@ -25,7 +23,7 @@ type txProcessor struct { hasher hashing.Hasher scProcessor process.SmartContractProcessor marshalizer marshal.Marshalizer - rewardTxHandler process.UnsignedTxHandler + txFeeHandler process.TransactionFeeHandler shardCoordinator sharding.Coordinator txTypeHandler process.TxTypeHandler } @@ -38,7 +36,7 @@ func NewTxProcessor( marshalizer marshal.Marshalizer, shardCoordinator sharding.Coordinator, scProcessor process.SmartContractProcessor, - rewardTxHandler process.UnsignedTxHandler, + txFeeHandler process.TransactionFeeHandler, txTypeHandler process.TxTypeHandler, ) (*txProcessor, error) { @@ -60,7 +58,7 @@ func NewTxProcessor( if scProcessor == nil { return nil, process.ErrNilSmartContractProcessor } - if rewardTxHandler == nil { + if txFeeHandler == nil { return nil, process.ErrNilUnsignedTxHandler } if txTypeHandler == nil { @@ -74,7 +72,7 @@ func NewTxProcessor( marshalizer: marshalizer, shardCoordinator: shardCoordinator, scProcessor: scProcessor, - rewardTxHandler: rewardTxHandler, + txFeeHandler: txFeeHandler, txTypeHandler: txTypeHandler, }, nil } @@ -112,8 +110,6 @@ func (txProc *txProcessor) ProcessTransaction(tx *transaction.Transaction, round return txProc.processSCDeployment(tx, adrSrc, roundIndex) case process.SCInvoking: return txProc.processSCInvoking(tx, adrSrc, adrDst, roundIndex) - case process.RewardTx: - return txProc.processRewardTx(tx, adrSrc) } return process.ErrWrongTransaction @@ -149,36 +145,6 @@ func (txProc *txProcessor) processTxFee(tx *transaction.Transaction, acntSnd *st return cost, nil } -func (txProc *txProcessor) processRewardTx( - tx data.TransactionHandler, - adrSrc state.AddressContainer, -) error { - rTx, ok := tx.(*rewardTx.RewardTx) - if !ok { - return process.ErrWrongTypeAssertion - } - - acntSrc, _, err := txProc.getAccounts(adrSrc, adrSrc) - if err != nil { - return err - } - - // is sender address in node shard - if acntSrc != nil { - op := big.NewInt(0) - err := acntSrc.SetBalanceWithJournal(op.Add(acntSrc.Balance, rTx.Value)) - if err != nil { - return err - } - } - - if rTx.ShardId == txProc.shardCoordinator.SelfId() { - txProc.rewardTxHandler.AddRewardTxFromBlock(rTx) - } - - return nil -} - func (txProc *txProcessor) 
processMoveBalance( tx *transaction.Transaction, adrSrc, adrDst state.AddressContainer, @@ -211,7 +177,7 @@ func (txProc *txProcessor) processMoveBalance( } } - txProc.rewardTxHandler.ProcessTransactionFee(txFee) + txProc.txFeeHandler.ProcessTransactionFee(txFee) return nil } From 6aa8ae70fe17d0d94e44132078ad5b358d997352 Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Tue, 3 Sep 2019 08:27:45 +0300 Subject: [PATCH 075/234] process, integrationTests: remove unused interface methods, reward tx from fees needs to be processed last --- integrationTests/testProcessorNode.go | 10 +++---- .../block/preprocess/rewardTxPreProcessor.go | 23 +++++++++------- process/coordinator/process.go | 11 ++++++++ process/interface.go | 1 - process/mock/rewardTxProcessorMock.go | 9 ------- process/rewardTransaction/process.go | 26 +++++++------------ 6 files changed, 38 insertions(+), 42 deletions(-) diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index 3cbb47e55a4..f9247252cec 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -579,11 +579,11 @@ func (tpn *TestProcessorNode) LoadTxSignSkBytes(skBytes []byte) { func (tpn *TestProcessorNode) ProposeBlock(round uint64, nonce uint64) (data.BodyHandler, data.HeaderHandler, [][]byte) { haveTime := func() bool { return true } - addresses := []string{ - "rewardAddr0000000000000000000000", - "rewardAddr0000000000000000000001", - } - tpn.BlockProcessor.SetConsensusRewardAddresses(addresses) + //addresses := []string{ + // "rewardAddr0000000000000000000000", + // "rewardAddr0000000000000000000001", + //} + //tpn.BlockProcessor.SetConsensusRewardAddresses(addresses) blockBody, err := tpn.BlockProcessor.CreateBlockBody(round, haveTime) if err != nil { diff --git a/process/block/preprocess/rewardTxPreProcessor.go b/process/block/preprocess/rewardTxPreProcessor.go index b794a8357d6..7bbd9e12721 100644 --- a/process/block/preprocess/rewardTxPreProcessor.go +++ b/process/block/preprocess/rewardTxPreProcessor.go @@ -177,10 +177,13 @@ func (rtp *rewardTxPreprocessor) ProcessBlockTransactions(body block.Body, round if miniBlock.Type != block.RewardsBlockType { continue } - if miniBlock.SenderShardID == rtp.shardCoordinator.SelfId() { - // if sender is the shard, then do this later when reward txs from fee are generated inside - continue - } + //if miniBlock.ReceiverShardID != rtp.shardCoordinator.SelfId() { + // continue + //} + //if miniBlock.SenderShardID == rtp.shardCoordinator.SelfId() { + // // if sender is the shard, then do this later when reward txs from fee are generated + // continue + //} for j := 0; j < len(miniBlock.TxHashes); j++ { if haveTime() < 0 { @@ -284,9 +287,9 @@ func (rtp *rewardTxPreprocessor) computeMissingAndExistingRewardTxsForShards(bod if mb.Type != block.RewardsBlockType { continue } - if mb.SenderShardID == rtp.shardCoordinator.SelfId() { - continue - } + //if mb.SenderShardID == rtp.shardCoordinator.SelfId() { + // continue + //} rewardTxs = append(rewardTxs, mb) } @@ -339,9 +342,9 @@ func (rtp *rewardTxPreprocessor) computeMissingRewardTxsForMiniBlock(mb block.Mi return missingRewardTxs } - if mb.SenderShardID == rtp.shardCoordinator.SelfId() { - return missingRewardTxs - } + //if mb.SenderShardID == rtp.shardCoordinator.SelfId() { + // return missingRewardTxs + //} for _, txHash := range mb.TxHashes { tx, _ := process.GetTransactionHandlerFromPool( diff --git a/process/coordinator/process.go b/process/coordinator/process.go index 6a545e401c9..58050f85815 
100644 --- a/process/coordinator/process.go +++ b/process/coordinator/process.go @@ -362,6 +362,9 @@ func (tc *transactionCoordinator) ProcessBlockTransaction( if separatedBodies[blockType] == nil { continue } + if blockType == block.RewardsBlockType { + continue + } preproc := tc.getPreProcessor(blockType) if preproc == nil { @@ -374,6 +377,14 @@ func (tc *transactionCoordinator) ProcessBlockTransaction( } } + // create the reward txs and make them available for processing + _ = tc.createRewardsMiniBlocks() + rewardsPreProc := tc.getPreProcessor(block.RewardsBlockType) + err := rewardsPreProc.ProcessBlockTransactions(separatedBodies[block.RewardsBlockType], round, haveTime) + if err != nil { + return err + } + return nil } diff --git a/process/interface.go b/process/interface.go index 1fa4ae9e6f2..70e388bf4cc 100644 --- a/process/interface.go +++ b/process/interface.go @@ -24,7 +24,6 @@ type TransactionProcessor interface { // RewardTransactionProcessor is the interface for reward transaction execution engine type RewardTransactionProcessor interface { - ProcessCreatedRewardTransaction(reward *rewardTx.RewardTx) error ProcessRewardTransaction(rewardTx *rewardTx.RewardTx) error } diff --git a/process/mock/rewardTxProcessorMock.go b/process/mock/rewardTxProcessorMock.go index 447e68129b8..737fa2f7b38 100644 --- a/process/mock/rewardTxProcessorMock.go +++ b/process/mock/rewardTxProcessorMock.go @@ -6,7 +6,6 @@ import ( type RewardTxProcessorMock struct { ProcessRewardTransactionCalled func(rTx *rewardTx.RewardTx) error - ProcessCreatedRewardTransactionCalled func(rTx *rewardTx.RewardTx) error } func (scrp *RewardTxProcessorMock) ProcessRewardTransaction(rTx *rewardTx.RewardTx) error { @@ -16,11 +15,3 @@ func (scrp *RewardTxProcessorMock) ProcessRewardTransaction(rTx *rewardTx.Reward return scrp.ProcessRewardTransactionCalled(rTx) } - -func (scrp *RewardTxProcessorMock) ProcessCreatedRewardTransaction(rTx *rewardTx.RewardTx) error { - if scrp.ProcessCreatedRewardTransactionCalled == nil { - return nil - } - - return scrp.ProcessCreatedRewardTransactionCalled(rTx) -} \ No newline at end of file diff --git a/process/rewardTransaction/process.go b/process/rewardTransaction/process.go index d2356288410..f71639cfbec 100644 --- a/process/rewardTransaction/process.go +++ b/process/rewardTransaction/process.go @@ -64,11 +64,6 @@ func (rtp *rewardTxProcessor) getAccountFromAddress(address []byte) (state.Accou return acnt, nil } -// ProcessCreatedRewardTransaction updates the account state from the reward transaction -func (rtp *rewardTxProcessor) ProcessCreatedRewardTransaction(rTx *rewardTx.RewardTx) error { - return rtp.ProcessRewardTransaction(rTx) -} - // ProcessRewardTransaction updates the account state from the reward transaction func (rtp *rewardTxProcessor) ProcessRewardTransaction(rTx *rewardTx.RewardTx) error { if rTx == nil { @@ -78,17 +73,21 @@ func (rtp *rewardTxProcessor) ProcessRewardTransaction(rTx *rewardTx.RewardTx) e return process.ErrNilValueFromRewardTransaction } + rtp.mutRewardsForwarder.Lock() + err := rtp.rewardTxForwarder.AddIntermediateTransactions([]data.TransactionHandler{rTx}) + rtp.mutRewardsForwarder.Unlock() + if err != nil { + return err + } + accHandler, err := rtp.getAccountFromAddress(rTx.RcvAddr) if err != nil { return err } if accHandler == nil || accHandler.IsInterfaceNil() { - rtp.mutRewardsForwarder.Lock() - err = rtp.rewardTxForwarder.AddIntermediateTransactions([]data.TransactionHandler{rTx}) - rtp.mutRewardsForwarder.Unlock() - - return err + // address 
from different shard + return nil } rewardAcc, ok := accHandler.(*state.Account) @@ -99,13 +98,6 @@ func (rtp *rewardTxProcessor) ProcessRewardTransaction(rTx *rewardTx.RewardTx) e operation := big.NewInt(0) operation = operation.Add(rTx.Value, rewardAcc.Balance) err = rewardAcc.SetBalanceWithJournal(operation) - if err != nil { - return err - } - - rtp.mutRewardsForwarder.Lock() - err = rtp.rewardTxForwarder.AddIntermediateTransactions([]data.TransactionHandler{rTx}) - rtp.mutRewardsForwarder.Unlock() return err } From 679e9a1550c21f78d70fdd5b9b344e8a5c10d4ac Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Tue, 3 Sep 2019 12:03:02 +0300 Subject: [PATCH 076/234] process: do not request reward transactions from fees that can be generated --- .../block/preprocess/rewardTxPreProcessor.go | 30 +++++++++++++++++-- process/coordinator/process.go | 13 ++++++-- process/interface.go | 5 ++++ 3 files changed, 42 insertions(+), 6 deletions(-) diff --git a/process/block/preprocess/rewardTxPreProcessor.go b/process/block/preprocess/rewardTxPreProcessor.go index 7bbd9e12721..e7d129fccc4 100644 --- a/process/block/preprocess/rewardTxPreProcessor.go +++ b/process/block/preprocess/rewardTxPreProcessor.go @@ -218,6 +218,30 @@ func (rtp *rewardTxPreprocessor) ProcessBlockTransactions(body block.Body, round return nil } +func (rtp *rewardTxPreprocessor) AddComputedRewardMiniBlocks(computedRewardMiniblocks block.MiniBlockSlice) { + + for _, rewardMb := range computedRewardMiniblocks { + txShardInfo := &txShardInfo{senderShardID: rewardMb.SenderShardID, receiverShardID: rewardMb.ReceiverShardID} + for _, txHash := range rewardMb.TxHashes { + tx, ok := rtp.rewardTxPool.SearchFirstData(txHash) + if !ok { + log.Error("reward transaction should be in pool but not found") + continue + } + + rTx, ok := tx.(*rewardTx.RewardTx) + if !ok { + log.Error("wrong type in reward transactions pool") + } + + rtp.rewardTxsForBlock.txHashAndInfo[string(txHash)] = &txInfo{ + tx: rTx, + txShardInfo: txShardInfo, + } + } + } +} + // SaveTxBlockToStorage saves the reward transactions from body into storage func (rtp *rewardTxPreprocessor) SaveTxBlockToStorage(body block.Body) error { for i := 0; i < len(body); i++ { @@ -287,9 +311,9 @@ func (rtp *rewardTxPreprocessor) computeMissingAndExistingRewardTxsForShards(bod if mb.Type != block.RewardsBlockType { continue } - //if mb.SenderShardID == rtp.shardCoordinator.SelfId() { - // continue - //} + if mb.SenderShardID == rtp.shardCoordinator.SelfId() { + continue + } rewardTxs = append(rewardTxs, mb) } diff --git a/process/coordinator/process.go b/process/coordinator/process.go index 58050f85815..73d2682aeb1 100644 --- a/process/coordinator/process.go +++ b/process/coordinator/process.go @@ -378,9 +378,16 @@ func (tc *transactionCoordinator) ProcessBlockTransaction( } // create the reward txs and make them available for processing - _ = tc.createRewardsMiniBlocks() - rewardsPreProc := tc.getPreProcessor(block.RewardsBlockType) - err := rewardsPreProc.ProcessBlockTransactions(separatedBodies[block.RewardsBlockType], round, haveTime) + mbRewards := tc.createRewardsMiniBlocks() + preproc := tc.getPreProcessor(block.RewardsBlockType) + rewardsPreProc, ok := preproc.(process.RewardTransactionPreProcessor) + if !ok { + return process.ErrWrongTypeAssertion + } + + rewardsPreProc.AddComputedRewardMiniBlocks(mbRewards) + + err := preproc.ProcessBlockTransactions(separatedBodies[block.RewardsBlockType], round, haveTime) if err != nil { return err } diff --git a/process/interface.go 
b/process/interface.go index 70e388bf4cc..c08f5bbd6d1 100644 --- a/process/interface.go +++ b/process/interface.go @@ -27,6 +27,11 @@ type RewardTransactionProcessor interface { ProcessRewardTransaction(rewardTx *rewardTx.RewardTx) error } +// RewardTransactionPreProcessor prepares the processing of reward transactions +type RewardTransactionPreProcessor interface{ + AddComputedRewardMiniBlocks(computedRewardMiniblocks block.MiniBlockSlice) +} + // SmartContractResultProcessor is the main interface for smart contract result execution engine type SmartContractResultProcessor interface { ProcessSmartContractResult(scr *smartContractResult.SmartContractResult) error From f36aa49a9ec1f7fc1e1d8b608d34292d3801e49d Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Tue, 3 Sep 2019 14:57:20 +0300 Subject: [PATCH 077/234] consensus, process, data, sharding, integrationTests: fix after merge --- cmd/node/factory/structs.go | 2 +- cmd/node/main.go | 6 - consensus/mock/mockTestInitializer.go | 2 +- consensus/mock/nodesCoordinatorMock.go | 22 ++- .../commonSubround/subroundStartRound_test.go | 1 - data/address/specialAddresses.go | 8 + .../factory/accountCreatorFactory_test.go | 28 +--- data/state/factory/accountCreator_test.go | 18 +- data/state/factory/peerAccountCreator.go | 8 + data/state/peerJournalEntries.go | 154 ++++++++++++++---- integrationTests/longTests/storage_test.go | 12 +- integrationTests/mock/keyMock.go | 23 +++ integrationTests/mock/nodesCoordinatorMock.go | 18 +- .../mock/specialAddressHandlerMock.go | 8 + integrationTests/mock/txTypeHandlerMock.go | 7 + node/mock/nodesCoordinatorMock.go | 22 ++- process/interface.go | 1 + process/mock/nodesCoordinatorMock.go | 7 + process/mock/specialAddressHandlerMock.go | 8 + process/mock/txTypeHandlerMock.go | 8 + process/unsigned/feeTxHandler.go | 8 + sharding/indexHashedNodesCoordinator.go | 8 + sharding/indexHashedNodesCoordinator_test.go | 26 +-- sharding/interface.go | 2 + sharding/mock/hasherMock.go | 14 +- sharding/mock/hasherStub.go | 8 + sharding/nodesSetup_test.go | 32 ++-- 27 files changed, 318 insertions(+), 143 deletions(-) diff --git a/cmd/node/factory/structs.go b/cmd/node/factory/structs.go index a0a38c2738a..783796f8c03 100644 --- a/cmd/node/factory/structs.go +++ b/cmd/node/factory/structs.go @@ -1340,7 +1340,7 @@ func createInMemoryShardCoordinatorAndAccount( return nil, nil, err } - accountFactory, err := factoryState.NewAccountFactoryCreator(newShardCoordinator) + accountFactory, err := factoryState.NewAccountFactoryCreator(factoryState.UserAccount) if err != nil { return nil, nil, err } diff --git a/cmd/node/main.go b/cmd/node/main.go index 993686216c4..848265fe7a6 100644 --- a/cmd/node/main.go +++ b/cmd/node/main.go @@ -1160,16 +1160,10 @@ func createNode( if err != nil { return nil, errors.New("error creating node: " + err.Error()) } - err = nd.CreateShardedStores() if err != nil { return nil, err } - - err = nd.CreateShardGenesisBlock() - if err != nil { - return nil, err - } } if shardCoordinator.SelfId() == sharding.MetachainShardId { err = nd.ApplyOptions(node.WithMetaDataPool(data.MetaDatapool)) diff --git a/consensus/mock/mockTestInitializer.go b/consensus/mock/mockTestInitializer.go index f738bf6582c..e45adea2759 100644 --- a/consensus/mock/mockTestInitializer.go +++ b/consensus/mock/mockTestInitializer.go @@ -122,7 +122,7 @@ func InitConsensusCore() *ConsensusCoreMock { rounderMock := &RounderMock{} shardCoordinatorMock := ShardCoordinatorMock{} syncTimerMock := &SyncTimerMock{} - validatorGroupSelector := 
NodesCoordinatorMock{} + validatorGroupSelector := &NodesCoordinatorMock{} container := &ConsensusCoreMock{ blockChain, diff --git a/consensus/mock/nodesCoordinatorMock.go b/consensus/mock/nodesCoordinatorMock.go index 420b4bedab8..19a37d41a6c 100644 --- a/consensus/mock/nodesCoordinatorMock.go +++ b/consensus/mock/nodesCoordinatorMock.go @@ -11,7 +11,7 @@ type NodesCoordinatorMock struct { GetValidatorsPublicKeysCalled func(randomness []byte, round uint64, shardId uint32) ([]string, error) } -func (ncm NodesCoordinatorMock) ComputeValidatorsGroup( +func (ncm *NodesCoordinatorMock) ComputeValidatorsGroup( randomness []byte, round uint64, shardId uint32, @@ -36,7 +36,7 @@ func (ncm NodesCoordinatorMock) ComputeValidatorsGroup( return list, nil } -func (ncm NodesCoordinatorMock) GetValidatorsPublicKeys(randomness []byte, round uint64, shardId uint32) ([]string, error) { +func (ncm *NodesCoordinatorMock) GetValidatorsPublicKeys(randomness []byte, round uint64, shardId uint32) ([]string, error) { if ncm.GetValidatorsPublicKeysCalled != nil { return ncm.GetValidatorsPublicKeysCalled(randomness, round, shardId) } @@ -55,22 +55,30 @@ func (ncm NodesCoordinatorMock) GetValidatorsPublicKeys(randomness []byte, round return pubKeys, nil } -func (ncm NodesCoordinatorMock) ConsensusGroupSize(shardId uint32) int { +func (ncm *NodesCoordinatorMock) ConsensusGroupSize(shardId uint32) int { panic("implement me") } -func (ncm NodesCoordinatorMock) SetNodesPerShards(map[uint32][]sharding.Validator) error { +func (ncm *NodesCoordinatorMock) SetNodesPerShards(map[uint32][]sharding.Validator) error { return nil } -func (ncm NodesCoordinatorMock) SetConsensusGroupSize(int) error { +func (ncm *NodesCoordinatorMock) SetConsensusGroupSize(int) error { panic("implement me") } -func (ncm NodesCoordinatorMock) GetSelectedPublicKeys(selection []byte, shardId uint32) (publicKeys []string, err error) { +func (ncm *NodesCoordinatorMock) GetSelectedPublicKeys(selection []byte, shardId uint32) (publicKeys []string, err error) { panic("implement me") } -func (ncm NodesCoordinatorMock) GetValidatorWithPublicKey(publicKey []byte) (sharding.Validator, uint32, error) { +func (ncm *NodesCoordinatorMock) GetValidatorWithPublicKey(publicKey []byte) (sharding.Validator, uint32, error) { panic("implement me") } + +// IsInterfaceNil returns true if there is no value under the interface +func (ncm *NodesCoordinatorMock) IsInterfaceNil() bool { + if ncm == nil { + return true + } + return false +} diff --git a/consensus/spos/commonSubround/subroundStartRound_test.go b/consensus/spos/commonSubround/subroundStartRound_test.go index ec3b7021243..77ecb1d5729 100644 --- a/consensus/spos/commonSubround/subroundStartRound_test.go +++ b/consensus/spos/commonSubround/subroundStartRound_test.go @@ -5,7 +5,6 @@ import ( "testing" "time" - "github.com/ElrondNetwork/elrond-go/consensus" "github.com/ElrondNetwork/elrond-go/consensus/mock" "github.com/ElrondNetwork/elrond-go/consensus/spos" "github.com/ElrondNetwork/elrond-go/consensus/spos/commonSubround" diff --git a/data/address/specialAddresses.go b/data/address/specialAddresses.go index 10d4aa6dee5..ff6ba1fee48 100644 --- a/data/address/specialAddresses.go +++ b/data/address/specialAddresses.go @@ -78,3 +78,11 @@ func (sp *specialAddresses) ShardIdForAddress(pubKey []byte) (uint32, error) { return sp.shardCoordinator.ComputeId(convAdr), nil } + +// IsInterfaceNil returns true if there is no value under the interface +func (sp *specialAddresses) IsInterfaceNil() bool { + if sp == nil { + return 
true + } + return false +} \ No newline at end of file diff --git a/data/state/factory/accountCreatorFactory_test.go b/data/state/factory/accountCreatorFactory_test.go index 2b45e583a86..d2852ff5a7c 100644 --- a/data/state/factory/accountCreatorFactory_test.go +++ b/data/state/factory/accountCreatorFactory_test.go @@ -6,27 +6,13 @@ import ( "github.com/ElrondNetwork/elrond-go/data/mock" "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/data/state/factory" - "github.com/ElrondNetwork/elrond-go/sharding" "github.com/stretchr/testify/assert" ) -func TestNewAccountFactoryCreator_NilShardCoordinator(t *testing.T) { - t.Parallel() - - accF, err := factory.NewAccountFactoryCreator(nil) - - assert.Equal(t, err, state.ErrNilShardCoordinator) - assert.Nil(t, accF) -} - func TestNewAccountFactoryCreator_NormalAccount(t *testing.T) { t.Parallel() - shardC := &mock.ShardCoordinatorMock{ - SelfID: 0, - NrOfShards: 1, - } - accF, err := factory.NewAccountFactoryCreator(shardC) + accF, err := factory.NewAccountFactoryCreator(factory.UserAccount) assert.Nil(t, err) accWrp, err := accF.CreateAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) @@ -40,11 +26,7 @@ func TestNewAccountFactoryCreator_NormalAccount(t *testing.T) { func TestNewAccountFactoryCreator_MetaAccount(t *testing.T) { t.Parallel() - shardC := &mock.ShardCoordinatorMock{ - SelfID: sharding.MetachainShardId, - NrOfShards: 1, - } - accF, err := factory.NewAccountFactoryCreator(shardC) + accF, err := factory.NewAccountFactoryCreator(factory.ShardStatistics) assert.Nil(t, err) accWrp, err := accF.CreateAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) @@ -72,11 +54,7 @@ func TestNewAccountFactoryCreator_PeerAccount(t *testing.T) { func TestNewAccountFactoryCreator_UnknownType(t *testing.T) { t.Parallel() - shardC := &mock.ShardCoordinatorMock{ - SelfID: 10, - NrOfShards: 5, - } - accF, err := factory.NewAccountFactoryCreator(shardC) + accF, err := factory.NewAccountFactoryCreator(10) assert.Nil(t, accF) assert.Equal(t, state.ErrUnknownAccountType, err) } diff --git a/data/state/factory/accountCreator_test.go b/data/state/factory/accountCreator_test.go index cf63e6219a0..1ffc6d27a7e 100644 --- a/data/state/factory/accountCreator_test.go +++ b/data/state/factory/accountCreator_test.go @@ -12,11 +12,7 @@ import ( func TestAccountCreator_CreateAccountNilAddress(t *testing.T) { t.Parallel() - shardC := &mock.ShardCoordinatorMock{ - SelfID: 0, - NrOfShards: 1, - } - accF, err := factory.NewAccountFactoryCreator(shardC) + accF, err := factory.NewAccountFactoryCreator(factory.UserAccount) assert.Nil(t, err) _, ok := accF.(*factory.AccountCreator) @@ -31,11 +27,7 @@ func TestAccountCreator_CreateAccountNilAddress(t *testing.T) { func TestAccountCreator_CreateAccountNilAccountTraccer(t *testing.T) { t.Parallel() - shardC := &mock.ShardCoordinatorMock{ - SelfID: 0, - NrOfShards: 1, - } - accF, err := factory.NewAccountFactoryCreator(shardC) + accF, err := factory.NewAccountFactoryCreator(factory.UserAccount) assert.Nil(t, err) _, ok := accF.(*factory.AccountCreator) @@ -50,11 +42,7 @@ func TestAccountCreator_CreateAccountNilAccountTraccer(t *testing.T) { func TestAccountCreator_CreateAccountOk(t *testing.T) { t.Parallel() - shardC := &mock.ShardCoordinatorMock{ - SelfID: 0, - NrOfShards: 1, - } - accF, err := factory.NewAccountFactoryCreator(shardC) + accF, err := factory.NewAccountFactoryCreator(factory.UserAccount) assert.Nil(t, err) _, ok := accF.(*factory.AccountCreator) diff --git 
a/data/state/factory/peerAccountCreator.go b/data/state/factory/peerAccountCreator.go index 12c4fd510ce..a1edba4e880 100644 --- a/data/state/factory/peerAccountCreator.go +++ b/data/state/factory/peerAccountCreator.go @@ -20,3 +20,11 @@ func (c *PeerAccountCreator) CreateAccount(address state.AddressContainer, track return account, nil } + +// IsInterfaceNil returns true if there is no value under the interface +func (c *PeerAccountCreator) IsInterfaceNil() bool { + if c == nil { + return true + } + return false +} diff --git a/data/state/peerJournalEntries.go b/data/state/peerJournalEntries.go index 583995d0be2..84251b32aee 100644 --- a/data/state/peerJournalEntries.go +++ b/data/state/peerJournalEntries.go @@ -23,10 +23,18 @@ func NewPeerJournalEntryAddress(account *PeerAccount, oldAddress []byte) (*PeerJ } // Revert applies undo operation -func (jen *PeerJournalEntryAddress) Revert() (AccountHandler, error) { - jen.account.Address = jen.oldAddress +func (pje *PeerJournalEntryAddress) Revert() (AccountHandler, error) { + pje.account.Address = pje.oldAddress - return jen.account, nil + return pje.account, nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (pje *PeerJournalEntryAddress) IsInterfaceNil() bool { + if pje == nil { + return true + } + return false } //------- PeerJournalEntrySchnorrPublicKey @@ -53,10 +61,18 @@ func NewPeerJournalEntrySchnorrPublicKey( } // Revert applies undo operation -func (jen *PeerJournalEntrySchnorrPublicKey) Revert() (AccountHandler, error) { - jen.account.SchnorrPublicKey = jen.oldSchnorrPubKey +func (jens *PeerJournalEntrySchnorrPublicKey) Revert() (AccountHandler, error) { + jens.account.SchnorrPublicKey = jens.oldSchnorrPubKey - return jen.account, nil + return jens.account, nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (jens *PeerJournalEntrySchnorrPublicKey) IsInterfaceNil() bool { + if jens == nil { + return true + } + return false } //------- PeerJournalEntryBLSPublicKey @@ -80,10 +96,18 @@ func NewPeerJournalEntryBLSPublicKey(account *PeerAccount, oldBLSPubKey []byte) } // Revert applies undo operation -func (jen *PeerJournalEntryBLSPublicKey) Revert() (AccountHandler, error) { - jen.account.BLSPublicKey = jen.oldBLSPubKey +func (pjeb *PeerJournalEntryBLSPublicKey) Revert() (AccountHandler, error) { + pjeb.account.BLSPublicKey = pjeb.oldBLSPubKey + + return pjeb.account, nil +} - return jen.account, nil +// IsInterfaceNil returns true if there is no value under the interface +func (pjeb *PeerJournalEntryBLSPublicKey) IsInterfaceNil() bool { + if pjeb == nil { + return true + } + return false } //------- PeerJournalEntryStake @@ -107,10 +131,18 @@ func NewPeerJournalEntryStake(account *PeerAccount, oldStake *big.Int) (*PeerJou } // Revert applies undo operation -func (jeb *PeerJournalEntryStake) Revert() (AccountHandler, error) { - jeb.account.Stake = jeb.oldStake +func (pjes *PeerJournalEntryStake) Revert() (AccountHandler, error) { + pjes.account.Stake = pjes.oldStake - return jeb.account, nil + return pjes.account, nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (pjes *PeerJournalEntryStake) IsInterfaceNil() bool { + if pjes == nil { + return true + } + return false } // PeerJournalEntryJailTime is used to revert a balance change @@ -132,10 +164,18 @@ func NewPeerJournalEntryJailTime(account *PeerAccount, oldJailTime TimePeriod) ( } // Revert applies undo operation -func (jeb *PeerJournalEntryJailTime) Revert() 
(AccountHandler, error) { - jeb.account.JailTime = jeb.oldJailTime +func (pjej *PeerJournalEntryJailTime) Revert() (AccountHandler, error) { + pjej.account.JailTime = pjej.oldJailTime + + return pjej.account, nil +} - return jeb.account, nil +// IsInterfaceNil returns true if there is no value under the interface +func (pjej *PeerJournalEntryJailTime) IsInterfaceNil() bool { + if pjej == nil { + return true + } + return false } // PeerJournalEntryCurrentShardId is used to revert a shardId change @@ -157,10 +197,18 @@ func NewPeerJournalEntryCurrentShardId(account *PeerAccount, oldShardId uint32) } // Revert applies undo operation -func (jeb *PeerJournalEntryCurrentShardId) Revert() (AccountHandler, error) { - jeb.account.CurrentShardId = jeb.oldShardId +func (pjec *PeerJournalEntryCurrentShardId) Revert() (AccountHandler, error) { + pjec.account.CurrentShardId = pjec.oldShardId - return jeb.account, nil + return pjec.account, nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (pjec *PeerJournalEntryCurrentShardId) IsInterfaceNil() bool { + if pjec == nil { + return true + } + return false } // PeerJournalEntryNextShardId is used to revert a shardId change @@ -182,10 +230,18 @@ func NewPeerJournalEntryNextShardId(account *PeerAccount, oldShardId uint32) (*P } // Revert applies undo operation -func (jeb *PeerJournalEntryNextShardId) Revert() (AccountHandler, error) { - jeb.account.NextShardId = jeb.oldShardId +func (pjen *PeerJournalEntryNextShardId) Revert() (AccountHandler, error) { + pjen.account.NextShardId = pjen.oldShardId + + return pjen.account, nil +} - return jeb.account, nil +// IsInterfaceNil returns true if there is no value under the interface +func (pjen *PeerJournalEntryNextShardId) IsInterfaceNil() bool { + if pjen == nil { + return true + } + return false } // PeerJournalEntryInWaitingList is used to revert a shardId change @@ -210,10 +266,18 @@ func NewPeerJournalEntryInWaitingList( } // Revert applies undo operation -func (jeb *PeerJournalEntryInWaitingList) Revert() (AccountHandler, error) { - jeb.account.NodeInWaitingList = jeb.oldNodeInWaitingList +func (pjew *PeerJournalEntryInWaitingList) Revert() (AccountHandler, error) { + pjew.account.NodeInWaitingList = pjew.oldNodeInWaitingList - return jeb.account, nil + return pjew.account, nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (pjew *PeerJournalEntryInWaitingList) IsInterfaceNil() bool { + if pjew == nil { + return true + } + return false } // PeerJournalEntryValidatorSuccessRate is used to revert a success rate change @@ -238,10 +302,18 @@ func NewPeerJournalEntryValidatorSuccessRate( } // Revert applies undo operation -func (jeb *PeerJournalEntryValidatorSuccessRate) Revert() (AccountHandler, error) { - jeb.account.ValidatorSuccessRate = jeb.oldValidatorSuccessRate +func (pjev *PeerJournalEntryValidatorSuccessRate) Revert() (AccountHandler, error) { + pjev.account.ValidatorSuccessRate = pjev.oldValidatorSuccessRate + + return pjev.account, nil +} - return jeb.account, nil +// IsInterfaceNil returns true if there is no value under the interface +func (pjev *PeerJournalEntryValidatorSuccessRate) IsInterfaceNil() bool { + if pjev == nil { + return true + } + return false } // PeerJournalEntryLeaderSuccessRate is used to revert a success rate change @@ -266,10 +338,18 @@ func NewPeerJournalEntryLeaderSuccessRate( } // Revert applies undo operation -func (jeb *PeerJournalEntryLeaderSuccessRate) Revert() (AccountHandler, error) { - 
jeb.account.LeaderSuccessRate = jeb.oldLeaderSuccessRate +func (pjel *PeerJournalEntryLeaderSuccessRate) Revert() (AccountHandler, error) { + pjel.account.LeaderSuccessRate = pjel.oldLeaderSuccessRate + + return pjel.account, nil +} - return jeb.account, nil +// IsInterfaceNil returns true if there is no value under the interface +func (pjel *PeerJournalEntryLeaderSuccessRate) IsInterfaceNil() bool { + if pjel == nil { + return true + } + return false } // PeerJournalEntryRating is used to revert a rating change @@ -291,8 +371,16 @@ func NewPeerJournalEntryRating(account *PeerAccount, oldRating uint32) (*PeerJou } // Revert applies undo operation -func (jeb *PeerJournalEntryRating) Revert() (AccountHandler, error) { - jeb.account.Rating = jeb.oldRating +func (pjer *PeerJournalEntryRating) Revert() (AccountHandler, error) { + pjer.account.Rating = pjer.oldRating + + return pjer.account, nil +} - return jeb.account, nil +// IsInterfaceNil returns true if there is no value under the interface +func (pjer *PeerJournalEntryRating) IsInterfaceNil() bool { + if pjer == nil { + return true + } + return false } diff --git a/integrationTests/longTests/storage_test.go b/integrationTests/longTests/storage_test.go index 09ca8f3fb73..707bc350999 100644 --- a/integrationTests/longTests/storage_test.go +++ b/integrationTests/longTests/storage_test.go @@ -11,9 +11,7 @@ import ( ) func TestWriteContinuously(t *testing.T) { - if testing.Short() { - t.Skip("this is not a short test") - } + t.Skip("this is not a short test") nbTxsWrite := 1000000 testStorage := integrationTests.NewTestStorage() @@ -41,9 +39,7 @@ func TestWriteContinuously(t *testing.T) { } func TestWriteReadDeleteLevelDB(t *testing.T) { - if testing.Short() { - t.Skip("this is not a short test") - } + t.Skip("this is not a short test") maxWritten := uint64(0) mapRemovedKeys := sync.Map{} @@ -71,9 +67,7 @@ func TestWriteReadDeleteLevelDB(t *testing.T) { } func TestWriteReadDeleteLevelDBSerial(t *testing.T) { - if testing.Short() { - t.Skip("this is not a short test") - } + t.Skip("this is not a short test") maxWritten := uint64(0) mapRemovedKeys := sync.Map{} diff --git a/integrationTests/mock/keyMock.go b/integrationTests/mock/keyMock.go index eea03d6b37a..6e22e985a10 100644 --- a/integrationTests/mock/keyMock.go +++ b/integrationTests/mock/keyMock.go @@ -27,6 +27,14 @@ func (sspk *PublicKeyMock) Point() crypto.Point { return nil } +// IsInterfaceNil returns true if there is no value under the interface +func (sspk *PublicKeyMock) IsInterfaceNil() bool { + if sspk == nil { + return true + } + return false +} + //------- PrivateKeyMock func (sk *PrivateKeyMock) ToByteArray() ([]byte, error) { @@ -45,6 +53,13 @@ func (sk *PrivateKeyMock) Scalar() crypto.Scalar { return nil } +// IsInterfaceNil returns true if there is no value under the interface +func (sk *PrivateKeyMock) IsInterfaceNil() bool { + if sk == nil { + return true + } + return false +} //------KeyGenMock func (keyGen *KeyGenMock) GeneratePair() (crypto.PrivateKey, crypto.PublicKey) { @@ -62,3 +77,11 @@ func (keyGen *KeyGenMock) PublicKeyFromByteArray(b []byte) (crypto.PublicKey, er func (keyGen *KeyGenMock) Suite() crypto.Suite { return nil } + +// IsInterfaceNil returns true if there is no value under the interface +func (keyGen *KeyGenMock) IsInterfaceNil() bool { + if keyGen == nil { + return true + } + return false +} diff --git a/integrationTests/mock/nodesCoordinatorMock.go b/integrationTests/mock/nodesCoordinatorMock.go index bb3c681e0cc..9e7b5a18b5a 100644 --- 
a/integrationTests/mock/nodesCoordinatorMock.go +++ b/integrationTests/mock/nodesCoordinatorMock.go @@ -9,7 +9,7 @@ type NodesCoordinatorMock struct { GetValidatorsPublicKeysCalled func(randomness []byte, round uint64, shardId uint32) ([]string, error) } -func (ncm NodesCoordinatorMock) ComputeValidatorsGroup( +func (ncm *NodesCoordinatorMock) ComputeValidatorsGroup( randomness []byte, round uint64, shardId uint32, @@ -24,7 +24,7 @@ func (ncm NodesCoordinatorMock) ComputeValidatorsGroup( return list, nil } -func (ncm NodesCoordinatorMock) GetValidatorsPublicKeys( +func (ncm *NodesCoordinatorMock) GetValidatorsPublicKeys( randomness []byte, round uint64, shardId uint32, @@ -47,14 +47,22 @@ func (ncm NodesCoordinatorMock) GetValidatorsPublicKeys( return pubKeys, nil } -func (ncm NodesCoordinatorMock) SetNodesPerShards(map[uint32][]sharding.Validator) error { +func (ncm *NodesCoordinatorMock) SetNodesPerShards(map[uint32][]sharding.Validator) error { return nil } -func (ncm NodesCoordinatorMock) GetSelectedPublicKeys(selection []byte, shardId uint32) (publicKeys []string, err error) { +func (ncm *NodesCoordinatorMock) GetSelectedPublicKeys(selection []byte, shardId uint32) (publicKeys []string, err error) { panic("implement me") } -func (ncm NodesCoordinatorMock) GetValidatorWithPublicKey(publicKey []byte) (sharding.Validator, uint32, error) { +func (ncm *NodesCoordinatorMock) GetValidatorWithPublicKey(publicKey []byte) (sharding.Validator, uint32, error) { panic("implement me") } + +// IsInterfaceNil returns true if there is no value under the interface +func (ncm *NodesCoordinatorMock) IsInterfaceNil() bool { + if ncm == nil { + return true + } + return false +} diff --git a/integrationTests/mock/specialAddressHandlerMock.go b/integrationTests/mock/specialAddressHandlerMock.go index 2c6f4207c50..1c752776c73 100644 --- a/integrationTests/mock/specialAddressHandlerMock.go +++ b/integrationTests/mock/specialAddressHandlerMock.go @@ -44,3 +44,11 @@ func (sh *SpecialAddressHandlerMock) ShardIdForAddress(addr []byte) (uint32, err return sh.ShardIdForAddressCalled(addr) } + +// IsInterfaceNil returns true if there is no value under the interface +func (sh *SpecialAddressHandlerMock) IsInterfaceNil() bool { + if sh == nil { + return true + } + return false +} \ No newline at end of file diff --git a/integrationTests/mock/txTypeHandlerMock.go b/integrationTests/mock/txTypeHandlerMock.go index 2fcaeaf25d3..42b6460b56f 100644 --- a/integrationTests/mock/txTypeHandlerMock.go +++ b/integrationTests/mock/txTypeHandlerMock.go @@ -16,3 +16,10 @@ func (th *TxTypeHandlerMock) ComputeTransactionType(tx data.TransactionHandler) return th.ComputeTransactionTypeCalled(tx) } + +func (th *TxTypeHandlerMock) IsInterfaceNil() bool { + if th == nil { + return true + } + return false +} diff --git a/node/mock/nodesCoordinatorMock.go b/node/mock/nodesCoordinatorMock.go index b3d1f307dea..289f2efeea1 100644 --- a/node/mock/nodesCoordinatorMock.go +++ b/node/mock/nodesCoordinatorMock.go @@ -11,7 +11,7 @@ type NodesCoordinatorMock struct { GetValidatorsPublicKeysCalled func(randomness []byte, round uint64, shardId uint32) ([]string, error) } -func (ncm NodesCoordinatorMock) ComputeValidatorsGroup( +func (ncm *NodesCoordinatorMock) ComputeValidatorsGroup( randomness []byte, round uint64, shardId uint32, @@ -36,7 +36,7 @@ func (ncm NodesCoordinatorMock) ComputeValidatorsGroup( return list, nil } -func (ncm NodesCoordinatorMock) GetValidatorsPublicKeys( +func (ncm *NodesCoordinatorMock) GetValidatorsPublicKeys( randomness 
[]byte, round uint64, shardId uint32, @@ -59,22 +59,30 @@ func (ncm NodesCoordinatorMock) GetValidatorsPublicKeys( return pubKeys, nil } -func (ncm NodesCoordinatorMock) ConsensusGroupSize(shardId uint32) int { +func (ncm *NodesCoordinatorMock) ConsensusGroupSize(shardId uint32) int { panic("implement me") } -func (ncm NodesCoordinatorMock) SetNodesPerShards(map[uint32][]sharding.Validator) error { +func (ncm *NodesCoordinatorMock) SetNodesPerShards(map[uint32][]sharding.Validator) error { return nil } -func (ncm NodesCoordinatorMock) SetConsensusGroupSize(int) error { +func (ncm *NodesCoordinatorMock) SetConsensusGroupSize(int) error { panic("implement me") } -func (ncm NodesCoordinatorMock) GetSelectedPublicKeys(selection []byte, shardId uint32) (publicKeys []string, err error) { +func (ncm *NodesCoordinatorMock) GetSelectedPublicKeys(selection []byte, shardId uint32) (publicKeys []string, err error) { panic("implement me") } -func (ncm NodesCoordinatorMock) GetValidatorWithPublicKey(publicKey []byte) (sharding.Validator, uint32, error) { +func (ncm *NodesCoordinatorMock) GetValidatorWithPublicKey(publicKey []byte) (sharding.Validator, uint32, error) { panic("implement me") } + +// IsInterfaceNil returns true if there is no value under the interface +func (ncm *NodesCoordinatorMock) IsInterfaceNil() bool { + if ncm == nil { + return true + } + return false +} diff --git a/process/interface.go b/process/interface.go index 0d5ebb63aa9..446b8aadfb5 100644 --- a/process/interface.go +++ b/process/interface.go @@ -111,6 +111,7 @@ type SpecialAddressHandler interface { LeaderAddress() []byte BurnAddress() []byte ShardIdForAddress([]byte) (uint32, error) + IsInterfaceNil() bool } // PreProcessor is an interface used to prepare and process transaction data diff --git a/process/mock/nodesCoordinatorMock.go b/process/mock/nodesCoordinatorMock.go index 324cd59e858..48edff479e6 100644 --- a/process/mock/nodesCoordinatorMock.go +++ b/process/mock/nodesCoordinatorMock.go @@ -134,3 +134,10 @@ func (ncm *NodesCoordinatorMock) GetValidatorWithPublicKey(publicKey []byte) (sh return nil, 0, sharding.ErrValidatorNotFound } + +func (ncm *NodesCoordinatorMock) IsInterfaceNil() bool { + if ncm == nil { + return true + } + return false +} diff --git a/process/mock/specialAddressHandlerMock.go b/process/mock/specialAddressHandlerMock.go index 2c6f4207c50..1c752776c73 100644 --- a/process/mock/specialAddressHandlerMock.go +++ b/process/mock/specialAddressHandlerMock.go @@ -44,3 +44,11 @@ func (sh *SpecialAddressHandlerMock) ShardIdForAddress(addr []byte) (uint32, err return sh.ShardIdForAddressCalled(addr) } + +// IsInterfaceNil returns true if there is no value under the interface +func (sh *SpecialAddressHandlerMock) IsInterfaceNil() bool { + if sh == nil { + return true + } + return false +} \ No newline at end of file diff --git a/process/mock/txTypeHandlerMock.go b/process/mock/txTypeHandlerMock.go index 2fcaeaf25d3..06b13d78677 100644 --- a/process/mock/txTypeHandlerMock.go +++ b/process/mock/txTypeHandlerMock.go @@ -16,3 +16,11 @@ func (th *TxTypeHandlerMock) ComputeTransactionType(tx data.TransactionHandler) return th.ComputeTransactionTypeCalled(tx) } + +// IsInterfaceNil returns true if there is no value under the interface +func (th *TxTypeHandlerMock) IsInterfaceNil() bool { + if th == nil { + return true + } + return false +} \ No newline at end of file diff --git a/process/unsigned/feeTxHandler.go b/process/unsigned/feeTxHandler.go index 5dd89db7b15..9ea6537011d 100644 --- 
a/process/unsigned/feeTxHandler.go +++ b/process/unsigned/feeTxHandler.go @@ -255,3 +255,11 @@ func (ftxh *feeTxHandler) CreateMarshalizedData(txHashes [][]byte) ([][]byte, er return make([][]byte, 0), nil } + +// IsInterfaceNil returns true if there is no value under the interface +func (ftxh *feeTxHandler) IsInterfaceNil() bool { + if ftxh == nil { + return true + } + return false +} diff --git a/sharding/indexHashedNodesCoordinator.go b/sharding/indexHashedNodesCoordinator.go index 58fc4dded67..617f1733c64 100644 --- a/sharding/indexHashedNodesCoordinator.go +++ b/sharding/indexHashedNodesCoordinator.go @@ -262,3 +262,11 @@ func (ihgs *indexHashedNodesCoordinator) consensusGroupSize(shardId uint32) int return ihgs.shardConsensusGroupSize } + +// IsInterfaceNil returns true if there is no value under the interface +func (ihgs *indexHashedNodesCoordinator) IsInterfaceNil() bool { + if ihgs == nil { + return true + } + return false +} diff --git a/sharding/indexHashedNodesCoordinator_test.go b/sharding/indexHashedNodesCoordinator_test.go index 4f8745b8af6..44cc4f0f587 100644 --- a/sharding/indexHashedNodesCoordinator_test.go +++ b/sharding/indexHashedNodesCoordinator_test.go @@ -73,7 +73,7 @@ func TestNewIndexHashedGroupSelector_InvalidConsensusGroupSizeShouldErr(t *testi ihgs, err := sharding.NewIndexHashedNodesCoordinator( 0, 1, - mock.HasherMock{}, + &mock.HasherMock{}, 0, 1, nodesMap, @@ -90,7 +90,7 @@ func TestNewIndexHashedGroupSelector_OkValsShouldWork(t *testing.T) { ihgs, err := sharding.NewIndexHashedNodesCoordinator( 1, 1, - mock.HasherMock{}, + &mock.HasherMock{}, 0, 1, nodesMap, @@ -109,7 +109,7 @@ func TestIndexHashedGroupSelector_SetNilNodesMapShouldErr(t *testing.T) { ihgs, _ := sharding.NewIndexHashedNodesCoordinator( 2, 1, - mock.HasherMock{}, + &mock.HasherMock{}, 0, 1, nodesMap, @@ -125,7 +125,7 @@ func TestIndexHashedGroupSelector_OkValShouldWork(t *testing.T) { ihgs, err := sharding.NewIndexHashedNodesCoordinator( 2, 1, - mock.HasherMock{}, + &mock.HasherMock{}, 0, 1, nodesMap, @@ -144,7 +144,7 @@ func TestIndexHashedGroupSelector_NewCoordinatorGroup0SizeShouldErr(t *testing.T ihgs, err := sharding.NewIndexHashedNodesCoordinator( 0, 1, - mock.HasherMock{}, + &mock.HasherMock{}, 0, 1, nodesMap, @@ -161,7 +161,7 @@ func TestIndexHashedGroupSelector_NewCoordinatorTooFewNodesShouldErr(t *testing. ihgs, err := sharding.NewIndexHashedNodesCoordinator( 10, 1, - mock.HasherMock{}, + &mock.HasherMock{}, 0, 1, nodesMap, @@ -178,7 +178,7 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroupNilRandomnessShouldErr(t ihgs, _ := sharding.NewIndexHashedNodesCoordinator( 2, 1, - mock.HasherMock{}, + &mock.HasherMock{}, 0, 1, nodesMap, @@ -197,7 +197,7 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroupInvalidShardIdShouldErr( ihgs, _ := sharding.NewIndexHashedNodesCoordinator( 2, 1, - mock.HasherMock{}, + &mock.HasherMock{}, 0, 1, nodesMap, @@ -223,7 +223,7 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroup1ValidatorShouldReturnSa ihgs, _ := sharding.NewIndexHashedNodesCoordinator( 1, 1, - mock.HasherMock{}, + &mock.HasherMock{}, 0, 1, nodesMap, @@ -461,7 +461,7 @@ func BenchmarkIndexHashedGroupSelector_ComputeValidatorsGroup21of400(b *testing. 
ihgs, _ := sharding.NewIndexHashedNodesCoordinator( consensusGroupSize, 1, - mock.HasherMock{}, + &mock.HasherMock{}, 0, 1, nodesMap, @@ -489,7 +489,7 @@ func TestIndexHashedGroupSelector_GetValidatorWithPublicKeyShouldReturnErrNilPub ihgs, _ := sharding.NewIndexHashedNodesCoordinator( 1, 1, - mock.HasherMock{}, + &mock.HasherMock{}, 0, 1, nodesMap, @@ -512,7 +512,7 @@ func TestIndexHashedGroupSelector_GetValidatorWithPublicKeyShouldReturnErrValida ihgs, _ := sharding.NewIndexHashedNodesCoordinator( 1, 1, - mock.HasherMock{}, + &mock.HasherMock{}, 0, 1, nodesMap, @@ -550,7 +550,7 @@ func TestIndexHashedGroupSelector_GetValidatorWithPublicKeyShouldWork(t *testing ihgs, _ := sharding.NewIndexHashedNodesCoordinator( 1, 1, - mock.HasherMock{}, + &mock.HasherMock{}, 0, 2, nodesMap, diff --git a/sharding/interface.go b/sharding/interface.go index 41ca3e34442..edace72472f 100644 --- a/sharding/interface.go +++ b/sharding/interface.go @@ -16,6 +16,7 @@ type Coordinator interface { SelfId() uint32 SameShard(firstAddress, secondAddress state.AddressContainer) bool CommunicationIdentifier(destShardID uint32) string + IsInterfaceNil() bool } // Validator defines a node that can be allocated to a shard for participation in a consensus group as validator @@ -33,6 +34,7 @@ type NodesCoordinator interface { SetNodesPerShards(nodes map[uint32][]Validator) error ComputeValidatorsGroup(randomness []byte, round uint64, shardId uint32) (validatorsGroup []Validator, err error) GetValidatorWithPublicKey(publicKey []byte) (validator Validator, shardId uint32, err error) + IsInterfaceNil() bool } // PublicKeysSelector allows retrieval of eligible validators public keys diff --git a/sharding/mock/hasherMock.go b/sharding/mock/hasherMock.go index 0120c31c30f..2a917bdb13e 100644 --- a/sharding/mock/hasherMock.go +++ b/sharding/mock/hasherMock.go @@ -9,14 +9,14 @@ type HasherMock struct { } // Compute will output the SHA's equivalent of the input string -func (sha HasherMock) Compute(s string) []byte { +func (sha *HasherMock) Compute(s string) []byte { h := sha256.New() h.Write([]byte(s)) return h.Sum(nil) } // EmptyHash will return the equivalent of empty string SHA's -func (sha HasherMock) EmptyHash() []byte { +func (sha *HasherMock) EmptyHash() []byte { if len(sha256EmptyHash) == 0 { sha256EmptyHash = sha.Compute("") } @@ -24,6 +24,14 @@ func (sha HasherMock) EmptyHash() []byte { } // Size return the required size in bytes -func (HasherMock) Size() int { +func (sha *HasherMock) Size() int { return sha256.Size } + +// IsInterfaceNil returns true if there is no value under the interface +func (sha *HasherMock) IsInterfaceNil() bool { + if sha == nil { + return true + } + return false +} \ No newline at end of file diff --git a/sharding/mock/hasherStub.go b/sharding/mock/hasherStub.go index 8684b95ecb2..9589b18a98e 100644 --- a/sharding/mock/hasherStub.go +++ b/sharding/mock/hasherStub.go @@ -20,3 +20,11 @@ func (hs *HasherStub) EmptyHash() []byte { func (hs *HasherStub) Size() int { return hs.SizeCalled() } + +// IsInterfaceNil returns true if there is no value under the interface +func (hs *HasherStub) IsInterfaceNil() bool { + if hs == nil { + return true + } + return false +} \ No newline at end of file diff --git a/sharding/nodesSetup_test.go b/sharding/nodesSetup_test.go index 6e1888bd336..9ee30a85038 100644 --- a/sharding/nodesSetup_test.go +++ b/sharding/nodesSetup_test.go @@ -28,16 +28,20 @@ var ( } ) -func createNodesSetupOneShardOneNode() *sharding.NodesSetup { - noOfInitialNodes := 1 +func 
createNodesSetupOneShardOneNodeWithOneMeta() *sharding.NodesSetup { + noOfInitialNodes := 2 ns := &sharding.NodesSetup{} ns.ConsensusGroupSize = 1 ns.MinNodesPerShard = 1 + ns.MetaChainConsensusGroupSize = 1 + ns.MetaChainMinNodes = 1 ns.InitialNodes = make([]*sharding.InitialNode, noOfInitialNodes) ns.InitialNodes[0] = &sharding.InitialNode{} ns.InitialNodes[0].PubKey = PubKeys[0] ns.InitialNodes[0].Address = Address[0] - + ns.InitialNodes[1] = &sharding.InitialNode{} + ns.InitialNodes[1].PubKey = PubKeys[1] + ns.InitialNodes[1].Address = Address[1] err := ns.ProcessConfig() if err != nil { return nil @@ -51,10 +55,12 @@ func createNodesSetupOneShardOneNode() *sharding.NodesSetup { } func createNodesSetupTwoShardTwoNodesWithOneMeta() *sharding.NodesSetup { - noOfInitialNodes := 4 + noOfInitialNodes := 6 ns := &sharding.NodesSetup{} ns.ConsensusGroupSize = 1 ns.MinNodesPerShard = 2 + ns.MetaChainConsensusGroupSize = 1 + ns.MetaChainMinNodes = 2 ns.InitialNodes = make([]*sharding.InitialNode, noOfInitialNodes) for i := 0; i < noOfInitialNodes; i++ { @@ -80,6 +86,8 @@ func createNodesSetupTwoShard5NodesWithMeta() *sharding.NodesSetup { ns := &sharding.NodesSetup{} ns.ConsensusGroupSize = 1 ns.MinNodesPerShard = 2 + ns.MetaChainConsensusGroupSize = 1 + ns.MetaChainMinNodes = 1 ns.InitialNodes = make([]*sharding.InitialNode, noOfInitialNodes) for i := 0; i < noOfInitialNodes; i++ { @@ -383,27 +391,17 @@ func TestNodesSetup_InitialNodesPubKeysForShardGood(t *testing.T) { inPK, err := ns.InitialNodesInfoForShard(1) assert.NotNil(t, ns) - assert.Equal(t, len(inPK), 2) + assert.Equal(t, 2, len(inPK)) assert.Nil(t, err) } -func TestNodesSetup_InitialNodesPubKeysForShardWrongMeta(t *testing.T) { - ns := createNodesSetupTwoShard6NodesMeta() - metaId := sharding.MetachainShardId - inPK, err := ns.InitialNodesInfoForShard(metaId) - - assert.NotNil(t, ns) - assert.Nil(t, inPK) - assert.NotNil(t, err) -} - func TestNodesSetup_InitialNodesPubKeysForShardGoodMeta(t *testing.T) { ns := createNodesSetupTwoShard6NodesMeta() metaId := sharding.MetachainShardId inPK, err := ns.InitialNodesInfoForShard(metaId) assert.NotNil(t, ns) - assert.Equal(t, len(inPK), 2) + assert.Equal(t, 2, len(inPK)) assert.Nil(t, err) } @@ -424,7 +422,7 @@ func TestNodesSetup_PublicKeyGood(t *testing.T) { assert.NotNil(t, ns) assert.Nil(t, err) - assert.Equal(t, uint32(1), selfId) + assert.Equal(t, uint32(0), selfId) } func TestNodesSetup_ShardPublicKeyGoodMeta(t *testing.T) { From 8767c92e94be935000f54e0d5d5e98b214ca8b50 Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Tue, 3 Sep 2019 18:51:32 +0300 Subject: [PATCH 078/234] process: cache generated reward txs for reuse --- process/block/preprocess/rewardsHandler.go | 32 +++++++++++++++------- 1 file changed, 22 insertions(+), 10 deletions(-) diff --git a/process/block/preprocess/rewardsHandler.go b/process/block/preprocess/rewardsHandler.go index 3f7a49c1ad0..ae970ab4c84 100644 --- a/process/block/preprocess/rewardsHandler.go +++ b/process/block/preprocess/rewardsHandler.go @@ -38,7 +38,10 @@ type rewardsHandler struct { adrConv state.AddressConverter store dataRetriever.StorageService rewardTxPool dataRetriever.ShardedDataCacherNotifier - protocolRewards []data.TransactionHandler + + mutGenRewardTxs sync.RWMutex + protocolRewards []data.TransactionHandler + feeRewards []data.TransactionHandler mut sync.Mutex accumulatedFees *big.Int @@ -145,11 +148,13 @@ func (rtxh *rewardsHandler) getShardIdsFromAddress(addr []byte) (uint32, error) // CreateAllInterMiniBlocks creates miniblocks from 
process transactions func (rtxh *rewardsHandler) CreateAllInterMiniBlocks() map[uint32]*block.MiniBlock { + rtxh.mutGenRewardTxs.Lock() calculatedRewardTxs := make([]data.TransactionHandler, 0) - rewardsFromFees := rtxh.createRewardFromFees() - rtxh.addTransactionsToPool(rewardsFromFees) + rtxh.feeRewards = rtxh.createRewardFromFees() + rtxh.addTransactionsToPool(rtxh.feeRewards) calculatedRewardTxs = append(calculatedRewardTxs, rtxh.protocolRewards...) - calculatedRewardTxs = append(calculatedRewardTxs, rewardsFromFees...) + calculatedRewardTxs = append(calculatedRewardTxs, rtxh.feeRewards...) + rtxh.mutGenRewardTxs.Unlock() miniBlocks := rtxh.miniblocksFromRewardTxs(calculatedRewardTxs) @@ -210,8 +215,6 @@ func (rtxh *rewardsHandler) miniblocksFromRewardTxs( // VerifyInterMiniBlocks verifies if transaction fees were correctly handled for the block func (rtxh *rewardsHandler) VerifyInterMiniBlocks(body block.Body) error { err := rtxh.verifyCreatedRewardsTxs() - rtxh.cleanCachedData() - return err } @@ -262,6 +265,11 @@ func (rtxh *rewardsHandler) cleanCachedData() { rtxh.accumulatedFees = big.NewInt(0) rtxh.rewardTxsForBlock = make(map[string]*rewardTx.RewardTx) rtxh.mut.Unlock() + + rtxh.mutGenRewardTxs.Lock() + rtxh.feeRewards = make([]data.TransactionHandler, 0) + rtxh.protocolRewards = make([]data.TransactionHandler, 0) + rtxh.mutGenRewardTxs.Unlock() } func getPercentageOfValue(value *big.Int, percentage float64) *big.Int { @@ -281,6 +289,7 @@ func (rtxh *rewardsHandler) createLeaderTx() *rewardTx.RewardTx { currTx.Value = getPercentageOfValue(rtxh.accumulatedFees, leaderPercentage) currTx.RcvAddr = rtxh.address.LeaderAddress() + currTx.ShardId = rtxh.shardCoordinator.SelfId() return currTx } @@ -290,6 +299,7 @@ func (rtxh *rewardsHandler) createBurnTx() *rewardTx.RewardTx { currTx.Value = getPercentageOfValue(rtxh.accumulatedFees, burnPercentage) currTx.RcvAddr = rtxh.address.BurnAddress() + currTx.ShardId = rtxh.shardCoordinator.SelfId() return currTx } @@ -299,6 +309,7 @@ func (rtxh *rewardsHandler) createCommunityTx() *rewardTx.RewardTx { currTx.Value = getPercentageOfValue(rtxh.accumulatedFees, communityPercentage) currTx.RcvAddr = rtxh.address.ElrondCommunityAddress() + currTx.ShardId = rtxh.shardCoordinator.SelfId() return currTx } @@ -322,8 +333,6 @@ func (rtxh *rewardsHandler) createRewardFromFees() []data.TransactionHandler { currFeeTxs := make([]data.TransactionHandler, 0) currFeeTxs = append(currFeeTxs, leaderTx, communityTx, burnTx) - rtxh.accumulatedFees = big.NewInt(0) - return currFeeTxs } @@ -341,7 +350,9 @@ func (rtxh *rewardsHandler) createProtocolRewards() []data.TransactionHandler { consensusRewardTxs = append(consensusRewardTxs, rTx) } + rtxh.mutGenRewardTxs.Lock() rtxh.protocolRewards = consensusRewardTxs + rtxh.mutGenRewardTxs.Unlock() return consensusRewardTxs } @@ -349,9 +360,10 @@ func (rtxh *rewardsHandler) createProtocolRewards() []data.TransactionHandler { // VerifyCreatedRewardsTxs verifies if the calculated rewards transactions and the block reward transactions are the same func (rtxh *rewardsHandler) verifyCreatedRewardsTxs() error { calculatedRewardTxs := make([]data.TransactionHandler, 0) - rewardsFromFees := rtxh.createRewardFromFees() + rtxh.mutGenRewardTxs.RLock() calculatedRewardTxs = append(calculatedRewardTxs, rtxh.protocolRewards...) - calculatedRewardTxs = append(calculatedRewardTxs, rewardsFromFees...) + calculatedRewardTxs = append(calculatedRewardTxs, rtxh.feeRewards...) 
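
This is the heart of the "cache generated reward txs for reuse" change: CreateAllInterMiniBlocks stores the fee rewards it generates under the new mutGenRewardTxs lock, and verifyCreatedRewardsTxs reads that cache instead of regenerating the transactions. A minimal, self-contained sketch of the pattern follows; the type and names are simplified stand-ins, not the actual rewardsHandler API.

package main

import (
	"fmt"
	"sync"
)

// rewardCache sketches the create-once / verify-later flow: generation writes
// the reward txs under a write lock, verification only reads the cached slice.
type rewardCache struct {
	mutGen     sync.RWMutex
	feeRewards []string
}

func (rc *rewardCache) createAll(fees []string) []string {
	rc.mutGen.Lock()
	rc.feeRewards = append([]string(nil), fees...) // keep a copy for verification
	rc.mutGen.Unlock()
	return fees
}

func (rc *rewardCache) verify(fromBlock []string) error {
	rc.mutGen.RLock()
	defer rc.mutGen.RUnlock()

	if len(rc.feeRewards) != len(fromBlock) {
		return fmt.Errorf("reward txs mismatch: created %d, block has %d",
			len(rc.feeRewards), len(fromBlock))
	}
	return nil
}

func main() {
	rc := &rewardCache{}
	created := rc.createAll([]string{"leaderTx", "communityTx", "burnTx"})
	fmt.Println(rc.verify(created)) // <nil>
}

The read/write split matters here: only the creation path takes the full lock, so concurrent verifications do not serialize on each other.
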
+ rtxh.mutGenRewardTxs.RUnlock() rtxh.mut.Lock() defer rtxh.mut.Unlock() From f03f2f381612d7594653c51d20e144fb5513e65f Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Tue, 3 Sep 2019 19:33:16 +0300 Subject: [PATCH 079/234] process: fixes after merge --- process/block/metablock_test.go | 2 ++ process/block/preprocess/rewardTxPreProcessor.go | 8 ++++++++ process/block/preprocess/rewardsHandler.go | 8 ++++++++ process/factory/shard/preProcessorsContainerFactory.go | 1 + process/rewardTransaction/interceptor.go | 8 ++++++++ 5 files changed, 27 insertions(+) diff --git a/process/block/metablock_test.go b/process/block/metablock_test.go index f8b7b27686d..cde408dd849 100644 --- a/process/block/metablock_test.go +++ b/process/block/metablock_test.go @@ -2536,6 +2536,8 @@ func TestMetaProcessor_UpdateShardsHeadersNonce_ShouldWork(t *testing.T) { mdp, &mock.ForkDetectorMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.HasherStub{}, marshalizerMock, &mock.ChainStorerMock{}, diff --git a/process/block/preprocess/rewardTxPreProcessor.go b/process/block/preprocess/rewardTxPreProcessor.go index e7d129fccc4..b24eb90b2cb 100644 --- a/process/block/preprocess/rewardTxPreProcessor.go +++ b/process/block/preprocess/rewardTxPreProcessor.go @@ -485,3 +485,11 @@ func (rtp *rewardTxPreprocessor) GetAllCurrentUsedTxs() map[string]data.Transact return rewardTxPool } + +// IsInterfaceNil returns true if there is no value under the interface +func (rtp *rewardTxPreprocessor) IsInterfaceNil() bool { + if rtp == nil { + return true + } + return false +} diff --git a/process/block/preprocess/rewardsHandler.go b/process/block/preprocess/rewardsHandler.go index ae970ab4c84..00dde118180 100644 --- a/process/block/preprocess/rewardsHandler.go +++ b/process/block/preprocess/rewardsHandler.go @@ -397,3 +397,11 @@ func (rtxh *rewardsHandler) verifyCreatedRewardsTxs() error { return nil } + +// IsInterfaceNil returns true if there is no value under the interface +func (rtxh *rewardsHandler) IsInterfaceNil() bool { + if rtxh == nil { + return true + } + return false +} diff --git a/process/factory/shard/preProcessorsContainerFactory.go b/process/factory/shard/preProcessorsContainerFactory.go index 9f3bc450976..28d4ad3f01c 100644 --- a/process/factory/shard/preProcessorsContainerFactory.go +++ b/process/factory/shard/preProcessorsContainerFactory.go @@ -185,3 +185,4 @@ func (ppcm *preProcessorsContainerFactory) IsInterfaceNil() bool { } return false } + diff --git a/process/rewardTransaction/interceptor.go b/process/rewardTransaction/interceptor.go index a93ab345953..58d87319a47 100644 --- a/process/rewardTransaction/interceptor.go +++ b/process/rewardTransaction/interceptor.go @@ -149,3 +149,11 @@ func (rti *RewardTxInterceptor) processRewardTransaction(rTx *InterceptedRewardT cacherIdentifier, ) } + +// IsInterfaceNil returns true if there is no value under the interface +func (rti *RewardTxInterceptor) IsInterfaceNil() bool { + if rti == nil { + return true + } + return false +} From da211143774b674c6632902d2d834be6c999d610 Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Tue, 3 Sep 2019 22:00:52 +0300 Subject: [PATCH 080/234] cmd: missed config parameter for rewards storage --- cmd/node/config/config.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/cmd/node/config/config.toml b/cmd/node/config/config.toml index 8595b1136b5..fd1d304947a 100644 --- a/cmd/node/config/config.toml +++ b/cmd/node/config/config.toml @@ -114,6 +114,7 @@ Type = "LvlDBSerial" 
BatchDelaySeconds = 15 MaxBatchSize = 500 + MaxOpenFiles = 10 [ShardHdrNonceHashStorage] [ShardHdrNonceHashStorage.Cache] From 2530f76b7e1fdfe669bcd4d30d2a621f7c0339d6 Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Thu, 5 Sep 2019 17:32:18 +0300 Subject: [PATCH 081/234] process, consensus, integrationTests: fix cross shard rewards --- consensus/mock/blockProcessorMock.go | 2 +- .../spos/commonSubround/subroundStartRound.go | 2 +- data/address/specialAddresses.go | 21 ++++++++-- integrationTests/mock/blockProcessorMock.go | 4 +- .../mock/specialAddressHandlerMock.go | 27 ++++++++++-- .../smartContract/testInitilalizer.go | 15 +++++-- integrationTests/testInitializer.go | 41 ++++++++++++++++++- integrationTests/testProcessorNode.go | 13 +++--- .../testProcessorNodeWithMultisigner.go | 4 +- node/mock/blockProcessorStub.go | 2 +- process/block/displayBlock.go | 2 +- process/block/metablock.go | 6 ++- .../block/preprocess/rewardTxPreProcessor.go | 9 +--- process/block/preprocess/rewardsHandler.go | 8 ++++ .../block/preprocess/rewardsHandler_test.go | 7 +++- process/block/shardblock.go | 6 +-- process/interface.go | 8 ++-- process/mock/blockProcessorMock.go | 2 +- process/mock/specialAddressHandlerMock.go | 30 +++++++++++--- process/rewardTransaction/interceptor.go | 8 ---- 20 files changed, 157 insertions(+), 60 deletions(-) diff --git a/consensus/mock/blockProcessorMock.go b/consensus/mock/blockProcessorMock.go index f472b4a3b0d..6b8333c784c 100644 --- a/consensus/mock/blockProcessorMock.go +++ b/consensus/mock/blockProcessorMock.go @@ -67,7 +67,7 @@ func (blProcMock BlockProcessorMock) AddLastNotarizedHdr(shardId uint32, process blProcMock.AddLastNotarizedHdrCalled(shardId, processedHdr) } -func (blProcMock BlockProcessorMock) SetConsensusRewardAddresses(addresses []string) { +func (blProcMock BlockProcessorMock) SetConsensusRewardAddresses(consensusRewardAddresses []string, round uint64) { } // IsInterfaceNil returns true if there is no value under the interface diff --git a/consensus/spos/commonSubround/subroundStartRound.go b/consensus/spos/commonSubround/subroundStartRound.go index d89704d8aea..4c68e915350 100644 --- a/consensus/spos/commonSubround/subroundStartRound.go +++ b/consensus/spos/commonSubround/subroundStartRound.go @@ -233,7 +233,7 @@ func (sr *SubroundStartRound) generateNextConsensusGroup(roundIndex int64) error sr.SetConsensusGroup(nextConsensusGroup) - sr.BlockProcessor().SetConsensusRewardAddresses(rewardsAddresses) + sr.BlockProcessor().SetConsensusRewardAddresses(rewardsAddresses, uint64(sr.RoundIndex)) return nil } diff --git a/data/address/specialAddresses.go b/data/address/specialAddresses.go index 0c901211823..d19d13d0037 100644 --- a/data/address/specialAddresses.go +++ b/data/address/specialAddresses.go @@ -10,8 +10,11 @@ type specialAddresses struct { elrond []byte consensusRewardAddresses []string burnAddress []byte - adrConv state.AddressConverter - shardCoordinator sharding.Coordinator + + epoch uint32 + round uint64 + adrConv state.AddressConverter + shardCoordinator sharding.Coordinator } // NewSpecialAddressHolder creates a special address holder @@ -60,8 +63,10 @@ func (sp *specialAddresses) BurnAddress() []byte { } // SetConsensusRewardAddresses sets the consensus rewards addresses for the round -func (sp *specialAddresses) SetConsensusRewardAddresses(consensusRewardAddresses []string) { +func (sp *specialAddresses) SetConsensusData(consensusRewardAddresses []string, round uint64, epoch uint32) { sp.consensusRewardAddresses = consensusRewardAddresses + 
sp.round = round + sp.epoch = epoch } // LeaderAddress provides leader address @@ -78,6 +83,14 @@ func (sp *specialAddresses) ConsensusRewardAddresses() []string { return sp.consensusRewardAddresses } +func (sp *specialAddresses) Round() uint64 { + return sp.round +} + +func (sp *specialAddresses) Epoch() uint32 { + return sp.epoch +} + // ShardIdForAddress calculates shard id for address func (sp *specialAddresses) ShardIdForAddress(pubKey []byte) (uint32, error) { convAdr, err := sp.adrConv.CreateAddressFromPublicKeyBytes(pubKey) @@ -94,4 +107,4 @@ func (sp *specialAddresses) IsInterfaceNil() bool { return true } return false -} \ No newline at end of file +} diff --git a/integrationTests/mock/blockProcessorMock.go b/integrationTests/mock/blockProcessorMock.go index 0a15b3aab12..67338af603c 100644 --- a/integrationTests/mock/blockProcessorMock.go +++ b/integrationTests/mock/blockProcessorMock.go @@ -93,9 +93,9 @@ func (blProcMock BlockProcessorMock) AddLastNotarizedHdr(shardId uint32, process blProcMock.AddLastNotarizedHdrCalled(shardId, processedHdr) } -func (blProcMock BlockProcessorMock) SetConsensusRewardAddresses(address []string) { +func (blProcMock BlockProcessorMock) SetConsensusRewardAddresses(consensusRewardAddresses []string, round uint64) { if blProcMock.SetConsensusRewardAddressesCalled != nil { - blProcMock.SetConsensusRewardAddressesCalled(address) + blProcMock.SetConsensusRewardAddressesCalled(consensusRewardAddresses) } } diff --git a/integrationTests/mock/specialAddressHandlerMock.go b/integrationTests/mock/specialAddressHandlerMock.go index 32dc02aad8a..f366ba37582 100644 --- a/integrationTests/mock/specialAddressHandlerMock.go +++ b/integrationTests/mock/specialAddressHandlerMock.go @@ -1,18 +1,28 @@ package mock +import ( + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/sharding" +) + type SpecialAddressHandlerMock struct { ElrondCommunityAddressCalled func() []byte LeaderAddressCalled func() []byte BurnAddressCalled func() []byte ShardIdForAddressCalled func([]byte) (uint32, error) + AdrConv state.AddressConverter + ShardCoordinator sharding.Coordinator + addresses []string + epoch uint32 + round uint64 } func (sh *SpecialAddressHandlerMock) SetElrondCommunityAddress(elrond []byte) { } -func (sh *SpecialAddressHandlerMock) SetConsensusRewardAddresses(consensusRewardAddresses []string) { +func (sh *SpecialAddressHandlerMock) SetConsensusData(consensusRewardAddresses []string, round uint64, epoch uint32) { sh.addresses = consensusRewardAddresses } @@ -44,12 +54,21 @@ func (sh *SpecialAddressHandlerMock) LeaderAddress() []byte { return sh.LeaderAddressCalled() } +func (sh *SpecialAddressHandlerMock) Round() uint64 { + return sh.round +} + +func (sh *SpecialAddressHandlerMock) Epoch() uint32 { + return sh.epoch +} + func (sh *SpecialAddressHandlerMock) ShardIdForAddress(addr []byte) (uint32, error) { - if sh.ShardIdForAddressCalled == nil { - return 0, nil + convAdr, err := sh.AdrConv.CreateAddressFromPublicKeyBytes(addr) + if err != nil { + return 0, err } - return sh.ShardIdForAddressCalled(addr) + return sh.ShardCoordinator.ComputeId(convAdr), nil } // IsInterfaceNil returns true if there is no value under the interface diff --git a/integrationTests/multiShard/smartContract/testInitilalizer.go b/integrationTests/multiShard/smartContract/testInitilalizer.go index bc86249d949..fb4ebfb974b 100644 --- a/integrationTests/multiShard/smartContract/testInitilalizer.go +++ b/integrationTests/multiShard/smartContract/testInitilalizer.go 
@@ -339,7 +339,10 @@ func createNetNode( testMarshalizer, testHasher, testAddressConverter, - &mock.SpecialAddressHandlerMock{}, + &mock.SpecialAddressHandlerMock{ + ShardCoordinator: shardCoordinator, + AdrConv: testAddressConverter, + }, store, dPool, ) @@ -420,7 +423,10 @@ func createNetNode( accntAdapter, shardCoordinator, nodesCoordinator, - &mock.SpecialAddressHandlerMock{}, + &mock.SpecialAddressHandlerMock{ + ShardCoordinator: shardCoordinator, + AdrConv: testAddressConverter, + }, &mock.ForkDetectorMock{ AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeader data.HeaderHandler, finalHeaderHash []byte) error { return nil @@ -760,7 +766,10 @@ func createMetaNetNode( }, shardCoordinator, nodesCoordinator, - &mock.SpecialAddressHandlerMock{}, + &mock.SpecialAddressHandlerMock{ + ShardCoordinator: shardCoordinator, + AdrConv: testAddressConverter, + }, testHasher, testMarshalizer, store, diff --git a/integrationTests/testInitializer.go b/integrationTests/testInitializer.go index e80970c06fa..8e89cfbba7c 100644 --- a/integrationTests/testInitializer.go +++ b/integrationTests/testInitializer.go @@ -905,6 +905,22 @@ func ComputeAndRequestMissingTransactions( } } +func ComputeAndRequestMissingRewardTxs( + nodes []*TestProcessorNode, + generatedDataHashes [][]byte, + shardResolver uint32, + shardRequesters ...uint32, +) { + for _, n := range nodes { + if !Uint32InSlice(n.ShardCoordinator.SelfId(), shardRequesters) { + continue + } + + neededData := getMissingRewardTxsForNode(n, generatedDataHashes) + requestMissingRewardTxs(n, shardResolver, neededData) + } +} + func getMissingTxsForNode(n *TestProcessorNode, generatedTxHashes [][]byte) [][]byte { neededTxs := make([][]byte, 0) @@ -919,6 +935,20 @@ func getMissingTxsForNode(n *TestProcessorNode, generatedTxHashes [][]byte) [][] return neededTxs } +func getMissingRewardTxsForNode(n *TestProcessorNode, generatedTxHashes [][]byte) [][]byte { + neededTxs := make([][]byte, 0) + + for i := 0; i < len(generatedTxHashes); i++ { + _, ok := n.ShardDataPool.RewardTransactions().SearchFirstData(generatedTxHashes[i]) + if !ok { + //tx is still missing + neededTxs = append(neededTxs, generatedTxHashes[i]) + } + } + + return neededTxs +} + func requestMissingTransactions(n *TestProcessorNode, shardResolver uint32, neededTxs [][]byte) { txResolver, _ := n.ResolverFinder.CrossShardResolver(procFactory.TransactionTopic, shardResolver) @@ -927,6 +957,14 @@ func requestMissingTransactions(n *TestProcessorNode, shardResolver uint32, need } } +func requestMissingRewardTxs(n *TestProcessorNode, shardResolver uint32, neededData [][]byte) { + dataResolver, _ := n.ResolverFinder.CrossShardResolver(procFactory.RewardsTransactionTopic, shardResolver) + + for i := 0; i < len(neededData); i++ { + _ = dataResolver.RequestDataFromHash(neededData[i]) + } +} + // CreateRequesterDataPool creates a datapool with a mock txPool func CreateRequesterDataPool( t *testing.T, @@ -1135,7 +1173,8 @@ func GenValidatorsFromPubKeys(pubKeysMap map[uint32][]string) map[uint32][]shard for shardId, shardNodesPks := range pubKeysMap { shardValidators := make([]sharding.Validator, 0) for i := 0; i < len(shardNodesPks); i++ { - v, _ := sharding.NewValidator(big.NewInt(0), 1, []byte(shardNodesPks[i]), []byte(shardNodesPks[i][:32])) + address := []byte(shardNodesPks[i][:32]) + v, _ := sharding.NewValidator(big.NewInt(0), 1, []byte(shardNodesPks[i]), address) shardValidators = append(shardValidators, v) } validatorsMap[shardId] = shardValidators 
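
A recurring addition across the mocks and handlers in these commits is the IsInterfaceNil method. It guards against Go's typed-nil pitfall: an interface value that wraps a nil *T compares as non-nil, so a plain == nil check lets a broken dependency through. A self-contained illustration with purely illustrative names:

package main

import "fmt"

// NilChecker mirrors the IsInterfaceNil convention added throughout the patch.
type NilChecker interface {
	IsInterfaceNil() bool
}

type handlerMock struct{}

// IsInterfaceNil can be called even on a nil *handlerMock because the method
// only compares the receiver and never dereferences it.
func (h *handlerMock) IsInterfaceNil() bool {
	return h == nil
}

func main() {
	var h *handlerMock // typed nil pointer
	var dep NilChecker = h

	fmt.Println(dep == nil)           // false: the interface holds a (*handlerMock)(nil)
	fmt.Println(dep.IsInterfaceNil()) // true: the wrapped pointer is nil
}

The bodies added in the patch spell this out as "if x == nil { return true } return false", which is equivalent to the single return above.
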
diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index f9247252cec..16c79207a55 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -188,7 +188,10 @@ func NewTestProcessorNodeWithCustomDataPool(maxShards uint32, nodeShardId uint32 } func (tpn *TestProcessorNode) initTestNode() { - tpn.SpecialAddressHandler = &mock.SpecialAddressHandlerMock{} + tpn.SpecialAddressHandler = &mock.SpecialAddressHandlerMock{ + ShardCoordinator:tpn.ShardCoordinator, + AdrConv: TestAddressConverter, + } tpn.initStorage() tpn.AccntState, _, _ = CreateAccountsDB(0) tpn.initChainHandler() @@ -563,11 +566,11 @@ func (tpn *TestProcessorNode) addHandlersForCounters() { tpn.ShardDataPool.UnsignedTransactions().RegisterHandler(txHandler) tpn.ShardDataPool.Transactions().RegisterHandler(txHandler) + tpn.ShardDataPool.RewardTransactions().RegisterHandler(txHandler) tpn.ShardDataPool.Headers().RegisterHandler(hdrHandlers) tpn.ShardDataPool.MetaBlocks().RegisterHandler(metaHandlers) tpn.ShardDataPool.MiniBlocks().RegisterHandler(mbHandlers) } - } // LoadTxSignSkBytes alters the already generated sk/pk pair @@ -579,12 +582,6 @@ func (tpn *TestProcessorNode) LoadTxSignSkBytes(skBytes []byte) { func (tpn *TestProcessorNode) ProposeBlock(round uint64, nonce uint64) (data.BodyHandler, data.HeaderHandler, [][]byte) { haveTime := func() bool { return true } - //addresses := []string{ - // "rewardAddr0000000000000000000000", - // "rewardAddr0000000000000000000001", - //} - //tpn.BlockProcessor.SetConsensusRewardAddresses(addresses) - blockBody, err := tpn.BlockProcessor.CreateBlockBody(round, haveTime) if err != nil { fmt.Println(err.Error()) diff --git a/integrationTests/testProcessorNodeWithMultisigner.go b/integrationTests/testProcessorNodeWithMultisigner.go index 0ba2a13e912..b2853b81148 100644 --- a/integrationTests/testProcessorNodeWithMultisigner.go +++ b/integrationTests/testProcessorNodeWithMultisigner.go @@ -124,8 +124,8 @@ func ProposeBlockWithConsensusSignature( adddresses, err := nodesCoordinator.GetValidatorsRewardsAddresses(randomness, round, shardId) // set the consensus reward addresses - for _, node := range nodesMap[shardId]{ - node.BlockProcessor.SetConsensusRewardAddresses(adddresses) + for _, node := range nodesMap[shardId] { + node.BlockProcessor.SetConsensusRewardAddresses(adddresses, round) } consensusNodes := selectTestNodesForPubKeys(nodesMap[shardId], pubKeys) diff --git a/node/mock/blockProcessorStub.go b/node/mock/blockProcessorStub.go index 546057a7d05..64560dbc1e7 100644 --- a/node/mock/blockProcessorStub.go +++ b/node/mock/blockProcessorStub.go @@ -72,7 +72,7 @@ func (blProcMock BlockProcessorStub) AddLastNotarizedHdr(shardId uint32, process blProcMock.AddLastNotarizedHdrCalled(shardId, processedHdr) } -func (blProcMock BlockProcessorStub) SetConsensusRewardAddresses([]string) { +func (blProcMock BlockProcessorStub) SetConsensusRewardAddresses([]string, uint64) { panic("implement me") } diff --git a/process/block/displayBlock.go b/process/block/displayBlock.go index 5771eea6649..f0d3329c7d6 100644 --- a/process/block/displayBlock.go +++ b/process/block/displayBlock.go @@ -176,7 +176,7 @@ func (txc *transactionCounter) displayTxBlockBody(lines []*display.LineData, bod for i := 0; i < len(body); i++ { miniBlock := body[i] - part := fmt.Sprintf("MiniBlock_%d", miniBlock.ReceiverShardID) + part := fmt.Sprintf("MiniBlock_%d_%d", miniBlock.SenderShardID, miniBlock.ReceiverShardID) if miniBlock.TxHashes == nil || 
len(miniBlock.TxHashes) == 0 { lines = append(lines, display.NewLineData(false, []string{ diff --git a/process/block/metablock.go b/process/block/metablock.go index 95a2c0ec8ad..b7733bec771 100644 --- a/process/block/metablock.go +++ b/process/block/metablock.go @@ -218,7 +218,7 @@ func (mp *metaProcessor) ProcessBlock( } // SetConsensusRewardAddresses - sets the reward addresses for the current consensus group -func (mp *metaProcessor) SetConsensusRewardAddresses(consensusRewardAddresses []string) { +func (mp *metaProcessor) SetConsensusRewardAddresses(consensusRewardAddresses []string, round uint64) { // TODO set the reward addresses for metachain consensus nodes } @@ -1239,9 +1239,11 @@ func displayShardInfo(lines []*display.LineData, header *block.MetaBlock) []*dis for j := 0; j < len(shardData.ShardMiniBlockHeaders); j++ { if j == 0 || j >= len(shardData.ShardMiniBlockHeaders)-1 { + senderShard := shardData.ShardMiniBlockHeaders[j].SenderShardId + receiverShard := shardData.ShardMiniBlockHeaders[j].ReceiverShardId lines = append(lines, display.NewLineData(false, []string{ "", - fmt.Sprintf("ShardMiniBlockHeaderHash_%d", j+1), + fmt.Sprintf("%d ShardMiniBlockHeaderHash_%d_%d", j+1, senderShard, receiverShard), core.ToB64(shardData.ShardMiniBlockHeaders[j].Hash)})) } else if j == 1 { lines = append(lines, display.NewLineData(false, []string{ diff --git a/process/block/preprocess/rewardTxPreProcessor.go b/process/block/preprocess/rewardTxPreProcessor.go index b24eb90b2cb..eaaeae1cda7 100644 --- a/process/block/preprocess/rewardTxPreProcessor.go +++ b/process/block/preprocess/rewardTxPreProcessor.go @@ -177,13 +177,6 @@ func (rtp *rewardTxPreprocessor) ProcessBlockTransactions(body block.Body, round if miniBlock.Type != block.RewardsBlockType { continue } - //if miniBlock.ReceiverShardID != rtp.shardCoordinator.SelfId() { - // continue - //} - //if miniBlock.SenderShardID == rtp.shardCoordinator.SelfId() { - // // if sender is the shard, then do this later when reward txs from fee are generated - // continue - //} for j := 0; j < len(miniBlock.TxHashes); j++ { if haveTime() < 0 { @@ -246,7 +239,7 @@ func (rtp *rewardTxPreprocessor) AddComputedRewardMiniBlocks(computedRewardMinib func (rtp *rewardTxPreprocessor) SaveTxBlockToStorage(body block.Body) error { for i := 0; i < len(body); i++ { miniBlock := (body)[i] - if miniBlock.Type != block.RewardsBlockType || miniBlock.ReceiverShardID != rtp.shardCoordinator.SelfId() { + if miniBlock.Type != block.RewardsBlockType { continue } diff --git a/process/block/preprocess/rewardsHandler.go b/process/block/preprocess/rewardsHandler.go index 00dde118180..3182b4e818d 100644 --- a/process/block/preprocess/rewardsHandler.go +++ b/process/block/preprocess/rewardsHandler.go @@ -290,6 +290,8 @@ func (rtxh *rewardsHandler) createLeaderTx() *rewardTx.RewardTx { currTx.Value = getPercentageOfValue(rtxh.accumulatedFees, leaderPercentage) currTx.RcvAddr = rtxh.address.LeaderAddress() currTx.ShardId = rtxh.shardCoordinator.SelfId() + currTx.Epoch = rtxh.address.Epoch() + currTx.Round = rtxh.address.Round() return currTx } @@ -300,6 +302,8 @@ func (rtxh *rewardsHandler) createBurnTx() *rewardTx.RewardTx { currTx.Value = getPercentageOfValue(rtxh.accumulatedFees, burnPercentage) currTx.RcvAddr = rtxh.address.BurnAddress() currTx.ShardId = rtxh.shardCoordinator.SelfId() + currTx.Epoch = rtxh.address.Epoch() + currTx.Round = rtxh.address.Round() return currTx } @@ -310,6 +314,8 @@ func (rtxh *rewardsHandler) createCommunityTx() *rewardTx.RewardTx { currTx.Value = 
getPercentageOfValue(rtxh.accumulatedFees, communityPercentage) currTx.RcvAddr = rtxh.address.ElrondCommunityAddress() currTx.ShardId = rtxh.shardCoordinator.SelfId() + currTx.Epoch = rtxh.address.Epoch() + currTx.Round = rtxh.address.Round() return currTx } @@ -346,6 +352,8 @@ func (rtxh *rewardsHandler) createProtocolRewards() []data.TransactionHandler { rTx.Value = rewardValue rTx.RcvAddr = []byte(address) rTx.ShardId = rtxh.shardCoordinator.SelfId() + rTx.Epoch = rtxh.address.Epoch() + rTx.Round = rtxh.address.Round() consensusRewardTxs = append(consensusRewardTxs, rTx) } diff --git a/process/block/preprocess/rewardsHandler_test.go b/process/block/preprocess/rewardsHandler_test.go index b3c3fe7f932..81cc957d4d5 100644 --- a/process/block/preprocess/rewardsHandler_test.go +++ b/process/block/preprocess/rewardsHandler_test.go @@ -267,12 +267,15 @@ func TestRewardTxHandlerVerifyCreatedRewardsTxs(t *testing.T) { func TestRewardTxHandlerCreateAllInterMiniBlocks(t *testing.T) { t.Parallel() + shardCoordinator := mock.NewMultiShardsCoordinatorMock(3) tdp := initDataPool() th, err := NewRewardTxHandler( - &mock.SpecialAddressHandlerMock{}, + &mock.SpecialAddressHandlerMock{ + AdrConv: &mock.AddressConverterMock{}, + ShardCoordinator: shardCoordinator}, &mock.HasherMock{}, &mock.MarshalizerMock{}, - mock.NewMultiShardsCoordinatorMock(3), + shardCoordinator, &mock.AddressConverterMock{}, &mock.ChainStorerMock{}, tdp.RewardTransactions(), diff --git a/process/block/shardblock.go b/process/block/shardblock.go index b22cbbc334c..f8da0f06831 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -202,7 +202,7 @@ func (sp *shardProcessor) ProcessBlock( sp.shardCoordinator.SelfId(), ) - sp.SetConsensusRewardAddresses(consensusAddresses) + sp.SetConsensusRewardAddresses(consensusAddresses, headerHandler.GetRound()) sp.txCoordinator.CreateBlockStarted() sp.txCoordinator.RequestBlockTransactions(body) requestedMetaHdrs, requestedFinalMetaHdrs := sp.requestMetaHeaders(header) @@ -272,8 +272,8 @@ func (sp *shardProcessor) ProcessBlock( } // SetConsensusRewardAddresses - sets the reward addresses for the current consensus group -func (sp *shardProcessor) SetConsensusRewardAddresses(consensusRewardAddresses []string) { - sp.specialAddressHandler.SetConsensusRewardAddresses(consensusRewardAddresses) +func (sp *shardProcessor) SetConsensusRewardAddresses(consensusRewardAddresses []string, round uint64) { + sp.specialAddressHandler.SetConsensusData(consensusRewardAddresses, round, 0) } // checkMetaHeadersValidity - checks if listed metaheaders are valid as construction diff --git a/process/interface.go b/process/interface.go index 88d10604ff1..ef59d2b67d0 100644 --- a/process/interface.go +++ b/process/interface.go @@ -29,7 +29,7 @@ type RewardTransactionProcessor interface { } // RewardTransactionPreProcessor prepares the processing of reward transactions -type RewardTransactionPreProcessor interface{ +type RewardTransactionPreProcessor interface { AddComputedRewardMiniBlocks(computedRewardMiniblocks block.MiniBlockSlice) } @@ -114,11 +114,13 @@ type TransactionFeeHandler interface { type SpecialAddressHandler interface { SetElrondCommunityAddress(elrond []byte) ElrondCommunityAddress() []byte - SetConsensusRewardAddresses(consensusRewardAddresses []string) + SetConsensusData(consensusRewardAddresses []string, round uint64, epoch uint32) ConsensusRewardAddresses() []string LeaderAddress() []byte BurnAddress() []byte ShardIdForAddress([]byte) (uint32, error) + Round() uint64 + Epoch() 
uint32 IsInterfaceNil() bool } @@ -156,7 +158,7 @@ type BlockProcessor interface { DecodeBlockBody(dta []byte) data.BodyHandler DecodeBlockHeader(dta []byte) data.HeaderHandler AddLastNotarizedHdr(shardId uint32, processedHdr data.HeaderHandler) - SetConsensusRewardAddresses(consensusRewardAddresses []string) + SetConsensusRewardAddresses(consensusRewardAddresses []string, round uint64) IsInterfaceNil() bool } diff --git a/process/mock/blockProcessorMock.go b/process/mock/blockProcessorMock.go index 4c46121639b..3e3687761d0 100644 --- a/process/mock/blockProcessorMock.go +++ b/process/mock/blockProcessorMock.go @@ -67,7 +67,7 @@ func (blProcMock BlockProcessorMock) AddLastNotarizedHdr(shardId uint32, process blProcMock.AddLastNotarizedHdrCalled(shardId, processedHdr) } -func (blProcMock BlockProcessorMock) SetConsensusRewardAddresses([]string) { +func (blProcMock BlockProcessorMock) SetConsensusRewardAddresses([]string, uint64) { panic("implement me") } diff --git a/process/mock/specialAddressHandlerMock.go b/process/mock/specialAddressHandlerMock.go index 1b59991984d..597e4a3364d 100644 --- a/process/mock/specialAddressHandlerMock.go +++ b/process/mock/specialAddressHandlerMock.go @@ -1,19 +1,30 @@ package mock +import ( + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/sharding" +) + type SpecialAddressHandlerMock struct { ElrondCommunityAddressCalled func() []byte LeaderAddressCalled func() []byte BurnAddressCalled func() []byte ShardIdForAddressCalled func([]byte) (uint32, error) + AdrConv state.AddressConverter + ShardCoordinator sharding.Coordinator addresses []string + epoch uint32 + round uint64 } func (sh *SpecialAddressHandlerMock) SetElrondCommunityAddress(elrond []byte) { } -func (sh *SpecialAddressHandlerMock) SetConsensusRewardAddresses(consensusRewardAddresses []string) { +func (sh *SpecialAddressHandlerMock) SetConsensusData(consensusRewardAddresses []string, round uint64, epoch uint32) { sh.addresses = consensusRewardAddresses + sh.epoch = epoch + sh.round = round } func (sh *SpecialAddressHandlerMock) ConsensusRewardAddresses() []string { @@ -36,6 +47,14 @@ func (sh *SpecialAddressHandlerMock) ElrondCommunityAddress() []byte { return sh.ElrondCommunityAddressCalled() } +func (sh *SpecialAddressHandlerMock) Round() uint64 { + return sh.round +} + +func (sh *SpecialAddressHandlerMock) Epoch() uint32 { + return sh.epoch +} + func (sh *SpecialAddressHandlerMock) LeaderAddress() []byte { if sh.LeaderAddressCalled == nil { return []byte("leader") @@ -45,11 +64,12 @@ func (sh *SpecialAddressHandlerMock) LeaderAddress() []byte { } func (sh *SpecialAddressHandlerMock) ShardIdForAddress(addr []byte) (uint32, error) { - if sh.ShardIdForAddressCalled == nil { - return 0, nil + convAdr, err := sh.AdrConv.CreateAddressFromPublicKeyBytes(addr) + if err != nil { + return 0, err } - return sh.ShardIdForAddressCalled(addr) + return sh.ShardCoordinator.ComputeId(convAdr), nil } // IsInterfaceNil returns true if there is no value under the interface @@ -58,4 +78,4 @@ func (sh *SpecialAddressHandlerMock) IsInterfaceNil() bool { return true } return false -} \ No newline at end of file +} diff --git a/process/rewardTransaction/interceptor.go b/process/rewardTransaction/interceptor.go index 58d87319a47..a0b855818fb 100644 --- a/process/rewardTransaction/interceptor.go +++ b/process/rewardTransaction/interceptor.go @@ -134,14 +134,6 @@ func (rti *RewardTxInterceptor) SetBroadcastCallback(callback func(buffToSend [] } func (rti *RewardTxInterceptor) 
processRewardTransaction(rTx *InterceptedRewardTransaction) { - //TODO should remove this as it is expensive - err := rti.rewardTxStorer.Has(rTx.Hash()) - isRTxInStorage := err == nil - if isRTxInStorage { - log.Debug("intercepted reward tx already processed") - return - } - cacherIdentifier := process.ShardCacherIdentifier(rTx.SndShard(), rTx.RcvShard()) rti.rewardTxPool.AddData( rTx.Hash(), From 61166bff5f0231e79e8f8fdf444096c2d5d5e264 Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Thu, 5 Sep 2019 18:37:12 +0300 Subject: [PATCH 082/234] process: fix after merge --- .../intermediateProcessorsContainerFactory.go | 1 + process/unsigned/feeTxHandler.go | 48 +++++++++++++++---- process/unsigned/feeTxHandler_test.go | 12 +++++ 3 files changed, 53 insertions(+), 8 deletions(-) diff --git a/process/factory/shard/intermediateProcessorsContainerFactory.go b/process/factory/shard/intermediateProcessorsContainerFactory.go index 4a2bf0f36d0..a4525ea2428 100644 --- a/process/factory/shard/intermediateProcessorsContainerFactory.go +++ b/process/factory/shard/intermediateProcessorsContainerFactory.go @@ -104,6 +104,7 @@ func (ppcm *intermediateProcessorsContainerFactory) createSmartContractResultsIn func (ppcm *intermediateProcessorsContainerFactory) createTxFeeIntermediateProcessor() (process.IntermediateTransactionHandler, error) { irp, err := unsigned.NewFeeTxHandler( ppcm.specialAddressHandler, + ppcm.shardCoordinator, ppcm.hasher, ppcm.marshalizer, ) diff --git a/process/unsigned/feeTxHandler.go b/process/unsigned/feeTxHandler.go index 9ea6537011d..243c6079b51 100644 --- a/process/unsigned/feeTxHandler.go +++ b/process/unsigned/feeTxHandler.go @@ -8,6 +8,7 @@ import ( "github.com/ElrondNetwork/elrond-go/hashing" "github.com/ElrondNetwork/elrond-go/marshal" "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/sharding" "math/big" "sync" ) @@ -24,11 +25,12 @@ const leaderPercentage = 0.4 // 1 = 100%, 0 = 0% const burnPercentage = 0.5 // 1 = 100%, 0 = 0% type feeTxHandler struct { - address process.SpecialAddressHandler - hasher hashing.Hasher - marshalizer marshal.Marshalizer - mutTxs sync.Mutex - feeTxs []*feeTx.FeeTx + address process.SpecialAddressHandler + shardCoordinator sharding.Coordinator + hasher hashing.Hasher + marshalizer marshal.Marshalizer + mutTxs sync.Mutex + feeTxs []*feeTx.FeeTx feeTxsFromBlock map[string]*feeTx.FeeTx } @@ -36,12 +38,16 @@ type feeTxHandler struct { // NewFeeTxHandler constructor for the fx tee handler func NewFeeTxHandler( address process.SpecialAddressHandler, + shardCoordinator sharding.Coordinator, hasher hashing.Hasher, marshalizer marshal.Marshalizer, ) (*feeTxHandler, error) { if address == nil { return nil, process.ErrNilSpecialAddressHandler } + if shardCoordinator == nil { + return nil, process.ErrNilShardCoordinator + } if hasher == nil { return nil, process.ErrNilHasher } @@ -50,9 +56,10 @@ func NewFeeTxHandler( } ftxh := &feeTxHandler{ - address: address, - hasher: hasher, - marshalizer: marshalizer, + address: address, + shardCoordinator: shardCoordinator, + hasher: hasher, + marshalizer: marshalizer, } ftxh.feeTxs = make([]*feeTx.FeeTx, 0) ftxh.feeTxsFromBlock = make(map[string]*feeTx.FeeTx) @@ -256,6 +263,31 @@ func (ftxh *feeTxHandler) CreateMarshalizedData(txHashes [][]byte) ([][]byte, er return make([][]byte, 0), nil } +// GetAllCurrentFinishedTxs returns the cached finalized transactions for current round +func (ftxh *feeTxHandler) GetAllCurrentFinishedTxs() map[string]data.TransactionHandler { + ftxh.mutTxs.Lock() 
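+	// Only fee transactions that are intra-shard from this node's point of view
+	// are returned: the sender shard is read from the cached tx itself, the
+	// receiver shard is resolved through the special address handler, and both
+	// must match shardCoordinator.SelfId().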
+ + txFeePool := make(map[string]data.TransactionHandler) + for txHash, txInfo := range ftxh.feeTxsFromBlock { + + senderShard := txInfo.ShardId + receiverShard, err := ftxh.address.ShardIdForAddress(txInfo.RcvAddr) + if err != nil { + continue + } + if receiverShard != ftxh.shardCoordinator.SelfId() { + continue + } + if senderShard != ftxh.shardCoordinator.SelfId() { + continue + } + txFeePool[txHash] = txInfo + } + ftxh.mutTxs.Unlock() + + return txFeePool +} + // IsInterfaceNil returns true if there is no value under the interface func (ftxh *feeTxHandler) IsInterfaceNil() bool { if ftxh == nil { diff --git a/process/unsigned/feeTxHandler_test.go b/process/unsigned/feeTxHandler_test.go index dc918403543..99a6d0790b8 100644 --- a/process/unsigned/feeTxHandler_test.go +++ b/process/unsigned/feeTxHandler_test.go @@ -15,6 +15,7 @@ func TestNewFeeTxHandler_NilSpecialAddress(t *testing.T) { th, err := NewFeeTxHandler( nil, + mock.NewMultipleShardsCoordinatorMock(), &mock.HasherMock{}, &mock.MarshalizerMock{}, ) @@ -28,6 +29,7 @@ func TestNewFeeTxHandler_NilHasher(t *testing.T) { th, err := NewFeeTxHandler( &mock.SpecialAddressHandlerMock{}, + mock.NewMultipleShardsCoordinatorMock(), nil, &mock.MarshalizerMock{}, ) @@ -41,6 +43,7 @@ func TestNewFeeTxHandler_NilMarshalizer(t *testing.T) { th, err := NewFeeTxHandler( &mock.SpecialAddressHandlerMock{}, + mock.NewMultipleShardsCoordinatorMock(), &mock.HasherMock{}, nil, ) @@ -54,6 +57,7 @@ func TestNewFeeTxHandler_ValsOk(t *testing.T) { th, err := NewFeeTxHandler( &mock.SpecialAddressHandlerMock{}, + mock.NewMultipleShardsCoordinatorMock(), &mock.HasherMock{}, &mock.MarshalizerMock{}, ) @@ -67,6 +71,7 @@ func TestFeeTxHandler_AddIntermediateTransactions(t *testing.T) { th, err := NewFeeTxHandler( &mock.SpecialAddressHandlerMock{}, + mock.NewMultipleShardsCoordinatorMock(), &mock.HasherMock{}, &mock.MarshalizerMock{}, ) @@ -83,6 +88,7 @@ func TestFeeTxHandler_AddProcessedUTx(t *testing.T) { th, err := NewFeeTxHandler( &mock.SpecialAddressHandlerMock{}, + mock.NewMultipleShardsCoordinatorMock(), &mock.HasherMock{}, &mock.MarshalizerMock{}, ) @@ -105,6 +111,7 @@ func TestFeeTxHandler_AddTxFeeFromBlock(t *testing.T) { th, err := NewFeeTxHandler( &mock.SpecialAddressHandlerMock{}, + mock.NewMultipleShardsCoordinatorMock(), &mock.HasherMock{}, &mock.MarshalizerMock{}, ) @@ -127,6 +134,7 @@ func TestFeeTxHandler_CleanProcessedUTxs(t *testing.T) { th, err := NewFeeTxHandler( &mock.SpecialAddressHandlerMock{}, + mock.NewMultipleShardsCoordinatorMock(), &mock.HasherMock{}, &mock.MarshalizerMock{}, ) @@ -149,6 +157,7 @@ func TestFeeTxHandler_CreateAllUTxs(t *testing.T) { th, err := NewFeeTxHandler( &mock.SpecialAddressHandlerMock{}, + mock.NewMultipleShardsCoordinatorMock(), &mock.HasherMock{}, &mock.MarshalizerMock{}, ) @@ -178,6 +187,7 @@ func TestFeeTxHandler_VerifyCreatedUTxs(t *testing.T) { addr := &mock.SpecialAddressHandlerMock{} th, err := NewFeeTxHandler( addr, + mock.NewMultipleShardsCoordinatorMock(), &mock.HasherMock{}, &mock.MarshalizerMock{}, ) @@ -227,6 +237,7 @@ func TestFeeTxHandler_CreateAllInterMiniBlocks(t *testing.T) { th, err := NewFeeTxHandler( &mock.SpecialAddressHandlerMock{}, + mock.NewMultipleShardsCoordinatorMock(), &mock.HasherMock{}, &mock.MarshalizerMock{}, ) @@ -250,6 +261,7 @@ func TestFeeTxHandler_VerifyInterMiniBlocks(t *testing.T) { addr := &mock.SpecialAddressHandlerMock{} th, err := NewFeeTxHandler( addr, + mock.NewMultipleShardsCoordinatorMock(), &mock.HasherMock{}, &mock.MarshalizerMock{}, ) From 
7a44f39659cb5942a7024f743e4949d9e195076a Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Fri, 6 Sep 2019 10:17:31 +0300 Subject: [PATCH 083/234] process, core: elastic indexing rewards --- core/indexer/elasticsearch.go | 34 +++++++++++++++++++ process/block/preprocess/rewardsHandler.go | 26 ++++++++++++++ .../intermediateProcessorsContainerFactory.go | 1 - 3 files changed, 60 insertions(+), 1 deletion(-) diff --git a/core/indexer/elasticsearch.go b/core/indexer/elasticsearch.go index a6c126cb2cb..6dbd8ce6656 100644 --- a/core/indexer/elasticsearch.go +++ b/core/indexer/elasticsearch.go @@ -5,9 +5,11 @@ import ( "context" "encoding/hex" "fmt" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" "io" "math/big" "net/http" + "strconv" "strings" "time" @@ -505,6 +507,11 @@ func getTransactionByType( return buildSmartContractResult(currentSc, txHash, mbHash, blockHash, mb, header) } + currentReward, ok := tx.(*rewardTx.RewardTx) + if ok && currentReward != nil { + return buildRewardTransaction(currentReward, txHash, mbHash, blockHash, mb, header) + } + return nil } @@ -562,3 +569,30 @@ func buildSmartContractResult( Status: "Success", } } + +func buildRewardTransaction( + rTx *rewardTx.RewardTx, + txHash []byte, + mbHash []byte, + blockHash []byte, + mb *block.MiniBlock, + header data.HeaderHandler, +) *Transaction { + return &Transaction{ + Hash: hex.EncodeToString(txHash), + MBHash: hex.EncodeToString(mbHash), + BlockHash: hex.EncodeToString(blockHash), + Nonce: 0, + Value: rTx.Value, + Receiver: hex.EncodeToString(rTx.RcvAddr), + Sender: hex.EncodeToString([]byte(strconv.Itoa(int(rTx.ShardId)))), + ReceiverShard: mb.ReceiverShardID, + SenderShard: mb.SenderShardID, + GasPrice: 0, + GasLimit: 0, + Data: "", + Signature: "", + Timestamp: time.Duration(header.GetTimeStamp()), + Status: "Success", + } +} diff --git a/process/block/preprocess/rewardsHandler.go b/process/block/preprocess/rewardsHandler.go index 3182b4e818d..19aa5624463 100644 --- a/process/block/preprocess/rewardsHandler.go +++ b/process/block/preprocess/rewardsHandler.go @@ -406,6 +406,32 @@ func (rtxh *rewardsHandler) verifyCreatedRewardsTxs() error { return nil } + +// GetAllCurrentFinishedTxs returns the cached finalized transactions for current round +func (rtxh *rewardsHandler) GetAllCurrentFinishedTxs() map[string]data.TransactionHandler { + rtxh.mut.Lock() + + rewardTxPool := make(map[string]data.TransactionHandler) + for txHash, txInfo := range rtxh.rewardTxsForBlock { + + senderShard := txInfo.ShardId + receiverShard, err := rtxh.address.ShardIdForAddress(txInfo.RcvAddr) + if err != nil { + continue + } + if receiverShard != rtxh.shardCoordinator.SelfId() { + continue + } + if senderShard != rtxh.shardCoordinator.SelfId() { + continue + } + rewardTxPool[txHash] = txInfo + } + rtxh.mut.Unlock() + + return rewardTxPool +} + // IsInterfaceNil returns true if there is no value under the interface func (rtxh *rewardsHandler) IsInterfaceNil() bool { if rtxh == nil { diff --git a/process/factory/shard/intermediateProcessorsContainerFactory.go b/process/factory/shard/intermediateProcessorsContainerFactory.go index d55f5262c94..e9fa4b91f81 100644 --- a/process/factory/shard/intermediateProcessorsContainerFactory.go +++ b/process/factory/shard/intermediateProcessorsContainerFactory.go @@ -109,7 +109,6 @@ func (ppcm *intermediateProcessorsContainerFactory) createSmartContractResultsIn func (ppcm *intermediateProcessorsContainerFactory) createRewardsTxIntermediateProcessor() (process.IntermediateTransactionHandler, error) { irp, err 
:= preprocess.NewRewardTxHandler( ppcm.specialAddressHandler, - ppcm.shardCoordinator, ppcm.hasher, ppcm.marshalizer, ppcm.shardCoordinator, From 8b8cfab7cbd9e823bf64b111e1ace9c34bef7057 Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Fri, 6 Sep 2019 11:40:40 +0300 Subject: [PATCH 084/234] core: indexer make sender address for reward transactions user friendly --- core/indexer/data.go | 1 + core/indexer/elasticsearch.go | 14 ++++++++++++-- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/core/indexer/data.go b/core/indexer/data.go index 21917298663..a3ac6d48bbe 100644 --- a/core/indexer/data.go +++ b/core/indexer/data.go @@ -13,6 +13,7 @@ type Transaction struct { MBHash string `json:"miniBlockHash"` BlockHash string `json:"blockHash"` Nonce uint64 `json:"nonce"` + Round uint64 `json:"round"` Value *big.Int `json:"value"` Receiver string `json:"receiver"` Sender string `json:"sender"` diff --git a/core/indexer/elasticsearch.go b/core/indexer/elasticsearch.go index 6dbd8ce6656..3acd0dafcfd 100644 --- a/core/indexer/elasticsearch.go +++ b/core/indexer/elasticsearch.go @@ -9,7 +9,6 @@ import ( "io" "math/big" "net/http" - "strconv" "strings" "time" @@ -529,6 +528,7 @@ func buildTransaction( MBHash: hex.EncodeToString(mbHash), BlockHash: hex.EncodeToString(blockHash), Nonce: tx.Nonce, + Round: header.GetRound(), Value: tx.Value, Receiver: hex.EncodeToString(tx.RcvAddr), Sender: hex.EncodeToString(tx.SndAddr), @@ -556,6 +556,7 @@ func buildSmartContractResult( MBHash: hex.EncodeToString(mbHash), BlockHash: hex.EncodeToString(blockHash), Nonce: scr.Nonce, + Round: header.GetRound(), Value: scr.Value, Receiver: hex.EncodeToString(scr.RcvAddr), Sender: hex.EncodeToString(scr.SndAddr), @@ -578,14 +579,23 @@ func buildRewardTransaction( mb *block.MiniBlock, header data.HeaderHandler, ) *Transaction { + + shardIdStr := []byte(fmt.Sprintf("Shard%d", rTx.ShardId)) + lenShardIdStr := len(shardIdStr) + // address is 32 size + addressFrom := make([]byte, 32) + lenKeep := len(addressFrom) - lenShardIdStr + addressFrom = append(addressFrom[:lenKeep], shardIdStr...) + return &Transaction{ Hash: hex.EncodeToString(txHash), MBHash: hex.EncodeToString(mbHash), BlockHash: hex.EncodeToString(blockHash), Nonce: 0, + Round: rTx.Round, Value: rTx.Value, Receiver: hex.EncodeToString(rTx.RcvAddr), - Sender: hex.EncodeToString([]byte(strconv.Itoa(int(rTx.ShardId)))), + Sender: string(addressFrom), ReceiverShard: mb.ReceiverShardID, SenderShard: mb.SenderShardID, GasPrice: 0, From c5dc15f8b785b5c2d2b626a998858da07dbfdee7 Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Fri, 6 Sep 2019 11:44:45 +0300 Subject: [PATCH 085/234] core: no need for 32 byte long address for reward transaction sender --- core/indexer/elasticsearch.go | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/core/indexer/elasticsearch.go b/core/indexer/elasticsearch.go index 3acd0dafcfd..e11974d95ac 100644 --- a/core/indexer/elasticsearch.go +++ b/core/indexer/elasticsearch.go @@ -580,12 +580,7 @@ func buildRewardTransaction( header data.HeaderHandler, ) *Transaction { - shardIdStr := []byte(fmt.Sprintf("Shard%d", rTx.ShardId)) - lenShardIdStr := len(shardIdStr) - // address is 32 size - addressFrom := make([]byte, 32) - lenKeep := len(addressFrom) - lenShardIdStr - addressFrom = append(addressFrom[:lenKeep], shardIdStr...) 
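+	// The sender of a protocol reward is not a real account, so a short,
+	// human-readable "Shard<id>" label is used instead of a zero-padded
+	// 32-byte pseudo-address.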
+ shardIdStr := fmt.Sprintf("Shard%d", rTx.ShardId) return &Transaction{ Hash: hex.EncodeToString(txHash), @@ -595,7 +590,7 @@ func buildRewardTransaction( Round: rTx.Round, Value: rTx.Value, Receiver: hex.EncodeToString(rTx.RcvAddr), - Sender: string(addressFrom), + Sender: shardIdStr, ReceiverShard: mb.ReceiverShardID, SenderShard: mb.SenderShardID, GasPrice: 0, From 3d81cdc35200b951a39231ae71493295cf05c122 Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Mon, 9 Sep 2019 10:36:49 +0300 Subject: [PATCH 086/234] oricess, data, consensus, config, cmd: cleanup and fix review comments reward transactions --- cmd/node/config/config.toml | 2 +- cmd/node/factory/structs.go | 30 +- cmd/node/main.go | 4 +- config/config.go | 8 +- config/tomlConfig_test.go | 6 +- consensus/mock/nodesCoordinatorMock.go | 1 - .../spos/commonSubround/subroundStartRound.go | 1 - data/address/specialAddresses.go | 2 +- data/block/block.go | 6 +- data/mock/unsignedTxHandlerMock.go | 3 +- dataRetriever/errors.go | 4 +- .../requestHandlers/requestHandler.go | 26 +- integrationTests/mock/nodesCoordinatorMock.go | 1 - .../smartContract/testInitilalizer.go | 6 +- integrationTests/testProcessorNode.go | 10 +- node/mock/nodesCoordinatorMock.go | 1 - process/block/baseProcess.go | 5 + process/block/baseProcess_test.go | 8 +- process/block/metablock.go | 5 - .../block/preprocess/rewardTxPreProcessor.go | 19 +- process/block/preprocess/rewardsHandler.go | 349 +- .../preprocess/smartContractResults_test.go | 6 +- process/block/preprocess/transactions_test.go | 4 +- process/block/shardblock.go | 8 +- process/block/shardblock_test.go | 9106 +++++++++-------- process/coordinator/process.go | 32 +- process/coordinator/process_test.go | 38 +- .../intermediateProcessorsContainerFactory.go | 2 +- .../shard/preProcessorsContainerFactory.go | 2 +- .../preProcessorsContainerFactory_test.go | 24 +- process/interface.go | 4 +- process/mock/nodesCoordinatorMock.go | 232 +- ...{poolsHolderFake.go => poolsHolderMock.go} | 50 +- process/rewardTransaction/process.go | 2 + process/transaction/export_test.go | 33 +- process/transaction/process.go | 660 +- 36 files changed, 5387 insertions(+), 5313 deletions(-) rename process/mock/{poolsHolderFake.go => poolsHolderMock.go} (66%) diff --git a/cmd/node/config/config.toml b/cmd/node/config/config.toml index b1970cd0f07..5a30b6e1f42 100644 --- a/cmd/node/config/config.toml +++ b/cmd/node/config/config.toml @@ -233,6 +233,6 @@ Timeout = 0 # Setting 0 means 'use default value' Version = 0 # Setting 0 means 'use default value' -[RewardConfig] +[EconomicsConfig] CommunityAddress = "1bedf9f1db526aa98eb61f251e6eb29df64c0a4d96261b6fe9d4df1bc2cf5420" BurnAddress = "deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef" diff --git a/cmd/node/factory/structs.go b/cmd/node/factory/structs.go index 7b6d7770718..6dff22cc70b 100644 --- a/cmd/node/factory/structs.go +++ b/cmd/node/factory/structs.go @@ -403,7 +403,7 @@ func NetworkComponentsFactory(p2pConfig *config.P2PConfig, log *logger.Logger, c type processComponentsFactoryArgs struct { genesisConfig *sharding.Genesis - rewardsConfig *config.RewardConfig + economicsConfig *config.EconomicsConfig nodesConfig *sharding.NodesSetup syncer ntp.SyncTimer shardCoordinator sharding.Coordinator @@ -419,7 +419,7 @@ type processComponentsFactoryArgs struct { // NewProcessComponentsFactoryArgs initializes the arguments necessary for creating the process components func NewProcessComponentsFactoryArgs( genesisConfig *sharding.Genesis, - rewardsConfig 
*config.RewardConfig, + economicsConfig *config.EconomicsConfig, nodesConfig *sharding.NodesSetup, syncer ntp.SyncTimer, shardCoordinator sharding.Coordinator, @@ -433,7 +433,7 @@ func NewProcessComponentsFactoryArgs( ) *processComponentsFactoryArgs { return &processComponentsFactoryArgs{ genesisConfig: genesisConfig, - rewardsConfig: rewardsConfig, + economicsConfig: economicsConfig, nodesConfig: nodesConfig, syncer: syncer, shardCoordinator: shardCoordinator, @@ -501,7 +501,7 @@ func ProcessComponentsFactory(args *processComponentsFactoryArgs) (*Process, err resolversFinder, args.shardCoordinator, args.nodesCoordinator, - args.rewardsConfig, + args.economicsConfig, args.data, args.core, args.state, @@ -1098,10 +1098,6 @@ func createNetMessenger( return nm, nil } -func createRewardParametersFromConfig() { - -} - func newInterceptorAndResolverContainerFactory( shardCoordinator sharding.Coordinator, nodesCoordinator sharding.NodesCoordinator, @@ -1294,7 +1290,7 @@ func newBlockProcessorAndTracker( resolversFinder dataRetriever.ResolversFinder, shardCoordinator sharding.Coordinator, nodesCoordinator sharding.NodesCoordinator, - rewardsConfig *config.RewardConfig, + economicsConfig *config.EconomicsConfig, data *Data, core *Core, state *State, @@ -1304,14 +1300,20 @@ func newBlockProcessorAndTracker( coreServiceContainer serviceContainer.Core, ) (process.BlockProcessor, process.BlocksTracker, error) { - if rewardsConfig.CommunityAddress == "" || rewardsConfig.BurnAddress == ""{ + if economicsConfig.CommunityAddress == "" || economicsConfig.BurnAddress == "" { return nil, nil, errors.New("rewards configuration missing") } - communityAddress, _ := hex.DecodeString(rewardsConfig.CommunityAddress) - burnAddress, _ := hex.DecodeString(rewardsConfig.BurnAddress) + communityAddress, err := hex.DecodeString(economicsConfig.CommunityAddress) + if err != nil { + return nil, nil, err + } + + burnAddress, err := hex.DecodeString(economicsConfig.BurnAddress) + if err != nil { + return nil, nil, err + } - // TODO: construct this correctly on the PR specialAddressHolder, err := address.NewSpecialAddressHolder( communityAddress, burnAddress, @@ -1405,7 +1407,7 @@ func newShardBlockProcessorAndTracker( return nil, nil, err } - rewardsTxInterim, err := interimProcContainer.Get(dataBlock.RewardsBlockType) + rewardsTxInterim, err := interimProcContainer.Get(dataBlock.RewardsBlock) if err != nil { return nil, nil, err } diff --git a/cmd/node/main.go b/cmd/node/main.go index 62a9ab9f5ef..d7d1b064204 100644 --- a/cmd/node/main.go +++ b/cmd/node/main.go @@ -612,11 +612,11 @@ func startNode(ctx *cli.Context, log *logger.Logger, version string) error { } } - rewardConfig := &generalConfig.RewardConfig + economicsConfig := &generalConfig.EconomicsConfig processArgs := factory.NewProcessComponentsFactoryArgs( genesisConfig, - rewardConfig, + economicsConfig, nodesConfig, syncer, shardCoordinator, diff --git a/config/config.go b/config/config.go index 639d17eeed0..4f7fd37289b 100644 --- a/config/config.go +++ b/config/config.go @@ -55,8 +55,8 @@ type NTPConfig struct { Version int } -// RewardConfig will hold the reward configuration -type RewardConfig struct { +// EconomicsConfig will hold the reward configuration +type EconomicsConfig struct { CommunityAddress string BurnAddress string } @@ -105,8 +105,8 @@ type Config struct { Consensus TypeConfig Explorer ExplorerConfig - NTPConfig NTPConfig - RewardConfig RewardConfig + NTPConfig NTPConfig + EconomicsConfig EconomicsConfig } // NodeConfig will hold basic p2p 
settings diff --git a/config/tomlConfig_test.go b/config/tomlConfig_test.go index 11c1f9cd383..5773ddc42b5 100644 --- a/config/tomlConfig_test.go +++ b/config/tomlConfig_test.go @@ -74,7 +74,7 @@ func TestTomlParser(t *testing.T) { Consensus: TypeConfig{ Type: consensusType, }, - RewardConfig: RewardConfig{ + EconomicsConfig: EconomicsConfig{ CommunityAddress: communityAddress, BurnAddress: burnAddress, }, @@ -115,8 +115,8 @@ func TestTomlParser(t *testing.T) { [Consensus] Type = "` + consensusType + `" -[RewardConfig] - CommunityAddress = "` + communityAddress + `" +[EconomicsConfig] + CommunityAddress = "` + communityAddress + `" BurnAddress = "` + burnAddress + `" ` cfg := Config{} diff --git a/consensus/mock/nodesCoordinatorMock.go b/consensus/mock/nodesCoordinatorMock.go index 43f7f9c450f..82d3d05ddcd 100644 --- a/consensus/mock/nodesCoordinatorMock.go +++ b/consensus/mock/nodesCoordinatorMock.go @@ -71,7 +71,6 @@ func (ncm *NodesCoordinatorMock) GetValidatorsRewardsAddresses( } addresses := make([]string, 0) - for _, v := range validators { addresses = append(addresses, string(v.Address())) } diff --git a/consensus/spos/commonSubround/subroundStartRound.go b/consensus/spos/commonSubround/subroundStartRound.go index 12058c3dd06..0d64ea2a395 100644 --- a/consensus/spos/commonSubround/subroundStartRound.go +++ b/consensus/spos/commonSubround/subroundStartRound.go @@ -226,7 +226,6 @@ func (sr *SubroundStartRound) generateNextConsensusGroup(roundIndex int64) error log.Info(fmt.Sprintf("\n")) sr.SetConsensusGroup(nextConsensusGroup) - sr.BlockProcessor().SetConsensusRewardAddresses(rewardsAddresses) return nil diff --git a/data/address/specialAddresses.go b/data/address/specialAddresses.go index 910ce82b412..3044e1e0ee6 100644 --- a/data/address/specialAddresses.go +++ b/data/address/specialAddresses.go @@ -66,7 +66,7 @@ func (sp *specialAddresses) SetConsensusRewardAddresses(consensusRewardAddresses // LeaderAddress provides leader address func (sp *specialAddresses) LeaderAddress() []byte { - if sp.consensusRewardAddresses == nil { + if len(sp.consensusRewardAddresses) == 0 { return nil } diff --git a/data/block/block.go b/data/block/block.go index fc07c1f0098..dd9d0a8e394 100644 --- a/data/block/block.go +++ b/data/block/block.go @@ -34,8 +34,8 @@ const ( PeerBlock Type = 2 // SmartContractResultBlock identifies a miniblock holding smartcontractresults SmartContractResultBlock Type = 3 - // RewardsBlockType identifies a miniblock holding accumulated rewards, both system generated and from tx fees - RewardsBlockType Type = 4 + // RewardsBlock identifies a miniblock holding accumulated rewards, both system generated and from tx fees + RewardsBlock Type = 4 // InvalidBlock identifies identifies an invalid miniblock InvalidBlock Type = 5 ) @@ -51,7 +51,7 @@ func (bType Type) String() string { return "PeerBody" case SmartContractResultBlock: return "SmartContractResultBody" - case RewardsBlockType: + case RewardsBlock: return "RewardsBody" case InvalidBlock: return "InvalidBlock" diff --git a/data/mock/unsignedTxHandlerMock.go b/data/mock/unsignedTxHandlerMock.go index 128aad96e97..7097c4a31e8 100644 --- a/data/mock/unsignedTxHandlerMock.go +++ b/data/mock/unsignedTxHandlerMock.go @@ -1,8 +1,9 @@ package mock import ( - "github.com/ElrondNetwork/elrond-go/data" "math/big" + + "github.com/ElrondNetwork/elrond-go/data" ) type UnsignedTxHandlerMock struct { diff --git a/dataRetriever/errors.go b/dataRetriever/errors.go index e8f681521d9..de32e92ed53 100644 --- a/dataRetriever/errors.go +++ 
b/dataRetriever/errors.go @@ -1,7 +1,7 @@ package dataRetriever import ( - "errors" + "errors" ) // ErrNilMessage signals that a nil message has been received @@ -65,7 +65,7 @@ var ErrNilTxDataPool = errors.New("nil transaction data pool") var ErrNilUnsignedTransactionPool = errors.New("nil unsigned transactions data pool") // ErrNilRewardTransactionPool signals that a nil reward transactions pool has been provided -var ErrNilRewardTransactionPool = errors.New("nil fee transaction data pool") +var ErrNilRewardTransactionPool = errors.New("nil reward transaction data pool") // ErrNilHeadersDataPool signals that a nil header pool has been provided var ErrNilHeadersDataPool = errors.New("nil headers data pool") diff --git a/dataRetriever/requestHandlers/requestHandler.go b/dataRetriever/requestHandlers/requestHandler.go index b212e9ace89..1189290ed73 100644 --- a/dataRetriever/requestHandlers/requestHandler.go +++ b/dataRetriever/requestHandlers/requestHandler.go @@ -10,7 +10,7 @@ import ( "github.com/ElrondNetwork/elrond-go/sharding" ) -type ResolverRequestHandler struct { +type resolverRequestHandler struct { resolversFinder dataRetriever.ResolversFinder txRequestTopic string scrRequestTopic string @@ -32,7 +32,7 @@ func NewShardResolverRequestHandler( mbRequestTopic string, hdrRequestTopic string, maxTxsToRequest int, -) (*ResolverRequestHandler, error) { +) (*resolverRequestHandler, error) { if finder == nil { return nil, dataRetriever.ErrNilResolverFinder } @@ -55,7 +55,7 @@ func NewShardResolverRequestHandler( return nil, dataRetriever.ErrInvalidMaxTxRequest } - rrh := &ResolverRequestHandler{ + rrh := &resolverRequestHandler{ resolversFinder: finder, txRequestTopic: txRequestTopic, mbRequestTopic: mbRequestTopic, @@ -73,7 +73,7 @@ func NewShardResolverRequestHandler( func NewMetaResolverRequestHandler( finder dataRetriever.ResolversFinder, hdrRequestTopic string, -) (*ResolverRequestHandler, error) { +) (*resolverRequestHandler, error) { if finder == nil { return nil, dataRetriever.ErrNilResolverFinder } @@ -81,7 +81,7 @@ func NewMetaResolverRequestHandler( return nil, dataRetriever.ErrEmptyHeaderRequestTopic } - rrh := &ResolverRequestHandler{ + rrh := &resolverRequestHandler{ resolversFinder: finder, hdrRequestTopic: hdrRequestTopic, isMetaChain: true, @@ -91,11 +91,11 @@ func NewMetaResolverRequestHandler( } // RequestTransaction method asks for transactions from the connected peers -func (rrh *ResolverRequestHandler) RequestTransaction(destShardID uint32, txHashes [][]byte) { +func (rrh *resolverRequestHandler) RequestTransaction(destShardID uint32, txHashes [][]byte) { rrh.requestByHashes(destShardID, txHashes, rrh.txRequestTopic) } -func (rrh *ResolverRequestHandler) requestByHashes(destShardID uint32, hashes [][]byte, topic string) { +func (rrh *resolverRequestHandler) requestByHashes(destShardID uint32, hashes [][]byte, topic string) { log.Debug(fmt.Sprintf("Requesting %d transactions from shard %d from network on topic %s...\n", len(hashes), destShardID, topic)) resolver, err := rrh.resolversFinder.CrossShardResolver(topic, destShardID) if err != nil { @@ -127,26 +127,26 @@ func (rrh *ResolverRequestHandler) requestByHashes(destShardID uint32, hashes [] } // RequestUnsignedTransactions method asks for unsigned transactions from the connected peers -func (rrh *ResolverRequestHandler) RequestUnsignedTransactions(destShardID uint32, scrHashes [][]byte) { +func (rrh *resolverRequestHandler) RequestUnsignedTransactions(destShardID uint32, scrHashes [][]byte) { 
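	// SCR hashes are requested through the same generic requestByHashes helper
	// used for regular and reward transactions, on the dedicated scrRequestTopic.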
rrh.requestByHashes(destShardID, scrHashes, rrh.scrRequestTopic) } // RequestRewardTransactions requests for reward transactions from the connected peers -func (rrh *ResolverRequestHandler) RequestRewardTransactions(destShardId uint32, rewardTxHashes [][]byte){ +func (rrh *resolverRequestHandler) RequestRewardTransactions(destShardId uint32, rewardTxHashes [][]byte){ rrh.requestByHashes(destShardId, rewardTxHashes, rrh.rewardTxRequestTopic) } // RequestMiniBlock method asks for miniblocks from the connected peers -func (rrh *ResolverRequestHandler) RequestMiniBlock(shardId uint32, miniblockHash []byte) { +func (rrh *resolverRequestHandler) RequestMiniBlock(shardId uint32, miniblockHash []byte) { rrh.requestByHash(shardId, miniblockHash, rrh.mbRequestTopic) } // RequestHeader method asks for header from the connected peers -func (rrh *ResolverRequestHandler) RequestHeader(shardId uint32, hash []byte) { +func (rrh *resolverRequestHandler) RequestHeader(shardId uint32, hash []byte) { rrh.requestByHash(shardId, hash, rrh.hdrRequestTopic) } -func (rrh *ResolverRequestHandler) requestByHash(destShardID uint32, hash []byte, baseTopic string) { +func (rrh *resolverRequestHandler) requestByHash(destShardID uint32, hash []byte, baseTopic string) { log.Debug(fmt.Sprintf("Requesting %s from shard %d with hash %s from network\n", baseTopic, destShardID, core.ToB64(hash))) var resolver dataRetriever.Resolver @@ -170,7 +170,7 @@ func (rrh *ResolverRequestHandler) requestByHash(destShardID uint32, hash []byte } // RequestHeaderByNonce method asks for transactions from the connected peers -func (rrh *ResolverRequestHandler) RequestHeaderByNonce(destShardID uint32, nonce uint64) { +func (rrh *resolverRequestHandler) RequestHeaderByNonce(destShardID uint32, nonce uint64) { var err error var resolver dataRetriever.Resolver if rrh.isMetaChain { diff --git a/integrationTests/mock/nodesCoordinatorMock.go b/integrationTests/mock/nodesCoordinatorMock.go index 38a046473e0..f6dbe546f58 100644 --- a/integrationTests/mock/nodesCoordinatorMock.go +++ b/integrationTests/mock/nodesCoordinatorMock.go @@ -63,7 +63,6 @@ func (ncm *NodesCoordinatorMock) GetValidatorsRewardsAddresses( } addresses := make([]string, 0) - for _, v := range validators { addresses = append(addresses, string(v.Address())) } diff --git a/integrationTests/multiShard/smartContract/testInitilalizer.go b/integrationTests/multiShard/smartContract/testInitilalizer.go index 86cb1e284ec..a6c8b56d229 100644 --- a/integrationTests/multiShard/smartContract/testInitilalizer.go +++ b/integrationTests/multiShard/smartContract/testInitilalizer.go @@ -6,7 +6,6 @@ import ( "encoding/base64" "encoding/hex" "fmt" - "github.com/ElrondNetwork/elrond-go/process/rewardTransaction" "math/big" "math/rand" "strings" @@ -49,6 +48,7 @@ import ( "github.com/ElrondNetwork/elrond-go/process/factory" metaProcess "github.com/ElrondNetwork/elrond-go/process/factory/metachain" "github.com/ElrondNetwork/elrond-go/process/factory/shard" + "github.com/ElrondNetwork/elrond-go/process/rewardTransaction" "github.com/ElrondNetwork/elrond-go/process/smartContract" "github.com/ElrondNetwork/elrond-go/process/smartContract/hooks" "github.com/ElrondNetwork/elrond-go/process/transaction" @@ -56,7 +56,7 @@ import ( "github.com/ElrondNetwork/elrond-go/storage" "github.com/ElrondNetwork/elrond-go/storage/memorydb" "github.com/ElrondNetwork/elrond-go/storage/storageUnit" - "github.com/ElrondNetwork/elrond-vm-common" + vmcommon "github.com/ElrondNetwork/elrond-vm-common" 
"github.com/btcsuite/btcd/btcec" libp2pCrypto "github.com/libp2p/go-libp2p-core/crypto" ) @@ -343,7 +343,7 @@ func createNetNode( ) interimProcContainer, _ := interimProcFactory.Create() scForwarder, _ := interimProcContainer.Get(dataBlock.SmartContractResultBlock) - rewardsInter, _ := interimProcContainer.Get(dataBlock.RewardsBlockType) + rewardsInter, _ := interimProcContainer.Get(dataBlock.RewardsBlock) rewardsHandler, _ := rewardsInter.(process.UnsignedTxHandler) vm, blockChainHook := createVMAndBlockchainHook(accntAdapter) diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index 51064493e8b..f0c389d562d 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -4,7 +4,6 @@ import ( "context" "encoding/hex" "fmt" - "github.com/ElrondNetwork/elrond-go/process/rewardTransaction" "sync/atomic" "time" @@ -36,11 +35,12 @@ import ( "github.com/ElrondNetwork/elrond-go/process/factory" metaProcess "github.com/ElrondNetwork/elrond-go/process/factory/metachain" "github.com/ElrondNetwork/elrond-go/process/factory/shard" + "github.com/ElrondNetwork/elrond-go/process/rewardTransaction" "github.com/ElrondNetwork/elrond-go/process/smartContract" "github.com/ElrondNetwork/elrond-go/process/smartContract/hooks" "github.com/ElrondNetwork/elrond-go/process/transaction" "github.com/ElrondNetwork/elrond-go/sharding" - "github.com/ElrondNetwork/elrond-vm-common" + vmcommon "github.com/ElrondNetwork/elrond-vm-common" "github.com/pkg/errors" ) @@ -81,7 +81,7 @@ type TestProcessorNode struct { Messenger p2p.Messenger OwnAccount *TestWalletAccount - NodeKeys *TestKeyPair + NodeKeys *TestKeyPair ShardDataPool dataRetriever.PoolsHolder MetaDataPool dataRetriever.MetaPoolsHolder @@ -354,11 +354,11 @@ func (tpn *TestProcessorNode) initInnerProcessors() { tpn.ScrForwarder, &mock.UnsignedTxHandlerMock{}, ) - tpn.RewardsProcessor, _=rewardTransaction.NewRewardTxProcessor( + tpn.RewardsProcessor, _ = rewardTransaction.NewRewardTxProcessor( tpn.AccntState, TestAddressConverter, tpn.ShardCoordinator, - ) + ) txTypeHandler, _ := coordinator.NewTxTypeHandler(TestAddressConverter, tpn.ShardCoordinator, tpn.AccntState) diff --git a/node/mock/nodesCoordinatorMock.go b/node/mock/nodesCoordinatorMock.go index 198b38f2c32..165e54b8635 100644 --- a/node/mock/nodesCoordinatorMock.go +++ b/node/mock/nodesCoordinatorMock.go @@ -75,7 +75,6 @@ func (ncm *NodesCoordinatorMock) GetValidatorsRewardsAddresses( } addresses := make([]string, 0) - for _, v := range validators { addresses = append(addresses, string(v.Address())) } diff --git a/process/block/baseProcess.go b/process/block/baseProcess.go index 89e391337ec..22040927c1b 100644 --- a/process/block/baseProcess.go +++ b/process/block/baseProcess.go @@ -245,6 +245,11 @@ func (bp *baseProcessor) checkHeaderTypeCorrect(shardId uint32, hdr data.HeaderH return nil } +// SetConsensusRewardAddresses - sets the reward addresses for the current consensus group +func (bp *baseProcessor) SetConsensusRewardAddresses(consensusRewardAddresses []string) { + bp.specialAddressHandler.SetConsensusRewardAddresses(consensusRewardAddresses) +} + func (bp *baseProcessor) removeNotarizedHdrsBehindFinal(hdrsToAttestFinality uint32) { bp.mutNotarizedHdrs.Lock() for shardId := range bp.notarizedHdrs { diff --git a/process/block/baseProcess_test.go b/process/block/baseProcess_test.go index eae98a048ac..75bf37aaa43 100644 --- a/process/block/baseProcess_test.go +++ b/process/block/baseProcess_test.go @@ -3,7 +3,6 @@ package 
block_test import ( "bytes" "errors" - "github.com/ElrondNetwork/elrond-go/data/rewardTx" "math/big" "reflect" "testing" @@ -11,6 +10,7 @@ import ( "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" "github.com/ElrondNetwork/elrond-go/data/transaction" "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/hashing" @@ -52,7 +52,7 @@ func generateTestUnit() storage.Storer { func createShardedDataChacherNotifier( handler data.TransactionHandler, testHash []byte, -) (func() dataRetriever.ShardedDataCacherNotifier ) { +) func() dataRetriever.ShardedDataCacherNotifier { return func() dataRetriever.ShardedDataCacherNotifier { return &mock.ShardedDataStub{ RegisterHandlerCalled: func(i func(key []byte)) {}, @@ -98,9 +98,9 @@ func initDataPool(testHash []byte) *mock.PoolsHolderStub { rewardTransactionsCalled := createShardedDataChacherNotifier(rewardTx, testHash) sdp := &mock.PoolsHolderStub{ - TransactionsCalled: txCalled, + TransactionsCalled: txCalled, UnsignedTransactionsCalled: unsignedTxCalled, - RewardTransactionsCalled: rewardTransactionsCalled, + RewardTransactionsCalled: rewardTransactionsCalled, HeadersNoncesCalled: func() dataRetriever.Uint64SyncMapCacher { return &mock.Uint64SyncMapCacherStub{ MergeCalled: func(u uint64, syncMap dataRetriever.ShardIdHashMap) {}, diff --git a/process/block/metablock.go b/process/block/metablock.go index 017fca171c6..cdd41a96770 100644 --- a/process/block/metablock.go +++ b/process/block/metablock.go @@ -211,11 +211,6 @@ func (mp *metaProcessor) ProcessBlock( return nil } -// SetConsensusRewardAddresses - sets the reward addresses for the current consensus group -func (mp *metaProcessor) SetConsensusRewardAddresses(consensusRewardAddresses []string) { - // TODO set the reward addresses for metachain consensus nodes -} - func (mp *metaProcessor) checkAndRequestIfShardHeadersMissing(round uint64) { _, _, sortedHdrPerShard, err := mp.getOrderedHdrs(round) if err != nil { diff --git a/process/block/preprocess/rewardTxPreProcessor.go b/process/block/preprocess/rewardTxPreProcessor.go index 2aaae5b01ac..1d0f7907b57 100644 --- a/process/block/preprocess/rewardTxPreProcessor.go +++ b/process/block/preprocess/rewardTxPreProcessor.go @@ -118,7 +118,7 @@ func (rtp *rewardTxPreprocessor) RemoveTxBlockFromPools(body block.Body, miniBlo return process.ErrNilTxBlockBody } - err := rtp.removeDataFromPools(body, miniBlockPool, rtp.rewardTxPool, block.RewardsBlockType) + err := rtp.removeDataFromPools(body, miniBlockPool, rtp.rewardTxPool, block.RewardsBlock) return err } @@ -137,7 +137,7 @@ func (rtp *rewardTxPreprocessor) RestoreTxBlockIntoPools( rewardTxsRestored := 0 for i := 0; i < len(body); i++ { miniBlock := body[i] - if miniBlock.Type != block.RewardsBlockType { + if miniBlock.Type != block.RewardsBlock { continue } @@ -174,7 +174,7 @@ func (rtp *rewardTxPreprocessor) ProcessBlockTransactions(body block.Body, round // basic validation already done in interceptors for i := 0; i < len(body); i++ { miniBlock := body[i] - if miniBlock.Type != block.RewardsBlockType { + if miniBlock.Type != block.RewardsBlock { continue } if miniBlock.ReceiverShardID != rtp.shardCoordinator.SelfId() { @@ -218,7 +218,7 @@ func (rtp *rewardTxPreprocessor) ProcessBlockTransactions(body block.Body, round func (rtp *rewardTxPreprocessor) SaveTxBlockToStorage(body block.Body) error { for i := 0; i < len(body); i++ { miniBlock := (body)[i] - if miniBlock.Type != 
block.RewardsBlockType || miniBlock.ReceiverShardID != rtp.shardCoordinator.SelfId() { + if miniBlock.Type != block.RewardsBlock || miniBlock.ReceiverShardID != rtp.shardCoordinator.SelfId() { continue } @@ -280,7 +280,7 @@ func (rtp *rewardTxPreprocessor) RequestBlockTransactions(body block.Body) int { func (rtp *rewardTxPreprocessor) computeMissingAndExistingRewardTxsForShards(body block.Body) map[uint32]*txsHashesInfo { onlyRewardTxsFromOthersBody := block.Body{} for _, mb := range body { - if mb.Type != block.RewardsBlockType { + if mb.Type != block.RewardsBlock { continue } if mb.SenderShardID == rtp.shardCoordinator.SelfId() { @@ -294,7 +294,7 @@ func (rtp *rewardTxPreprocessor) computeMissingAndExistingRewardTxsForShards(bod onlyRewardTxsFromOthersBody, &rtp.rewardTxsForBlock, rtp.chReceivedAllRewardTxs, - block.RewardsBlockType, + block.RewardsBlock, rtp.rewardTxPool, ) @@ -334,7 +334,7 @@ func (rtp *rewardTxPreprocessor) RequestTransactionsForMiniBlock(mb block.MiniBl // computeMissingRewardTxsForMiniBlock computes missing reward transactions for a certain miniblock func (rtp *rewardTxPreprocessor) computeMissingRewardTxsForMiniBlock(mb block.MiniBlock) [][]byte { missingRewardTxs := make([][]byte, 0) - if mb.Type != block.RewardsBlockType { + if mb.Type != block.RewardsBlock { return missingRewardTxs } @@ -399,7 +399,7 @@ func (rtp *rewardTxPreprocessor) CreateAndProcessMiniBlock(sndShardId, dstShardI // ProcessMiniBlock processes all the reward transactions from a miniblock and saves the processed reward transactions // in local cache func (rtp *rewardTxPreprocessor) ProcessMiniBlock(miniBlock *block.MiniBlock, haveTime func() bool, round uint64) error { - if miniBlock.Type != block.RewardsBlockType { + if miniBlock.Type != block.RewardsBlock { return process.ErrWrongTypeInMiniBlock } @@ -410,8 +410,7 @@ func (rtp *rewardTxPreprocessor) ProcessMiniBlock(miniBlock *block.MiniBlock, ha for index := range miniBlockRewardTxs { if !haveTime() { - err = process.ErrTimeIsOut - return err + return process.ErrTimeIsOut } err = rtp.rewardsProcessor.ProcessRewardTransaction(miniBlockRewardTxs[index]) diff --git a/process/block/preprocess/rewardsHandler.go b/process/block/preprocess/rewardsHandler.go index 5172f7cf3fc..d10d274111c 100644 --- a/process/block/preprocess/rewardsHandler.go +++ b/process/block/preprocess/rewardsHandler.go @@ -1,25 +1,18 @@ package preprocess import ( - "math/big" - "sync" - - "github.com/ElrondNetwork/elrond-go/core" - "github.com/ElrondNetwork/elrond-go/data" - "github.com/ElrondNetwork/elrond-go/data/block" - "github.com/ElrondNetwork/elrond-go/data/rewardTx" - "github.com/ElrondNetwork/elrond-go/hashing" - "github.com/ElrondNetwork/elrond-go/marshal" - "github.com/ElrondNetwork/elrond-go/process" + "math/big" + "sync" + + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/process" ) -// MinGasPrice is the minimal gas price to be paid for any transaction -// TODO: Set MinGasPrice and MinTxFee to some positive value (TBD) -var MinGasPrice = uint64(0) - -// MinTxFee is the minimal fee to be paid for any transaction -var MinTxFee = uint64(0) - const communityPercentage = 0.1 // 1 = 100%, 0 = 0% const leaderPercentage = 0.4 // 1 = 100%, 0 = 0% const burnPercentage = 0.5 // 1 = 100%, 0 = 0% @@ 
-28,169 +21,169 @@ const burnPercentage = 0.5 // 1 = 100%, 0 = 0% var rewardValue = big.NewInt(1000) type rewardsHandler struct { - address process.SpecialAddressHandler - hasher hashing.Hasher - marshalizer marshal.Marshalizer - mut sync.Mutex - accumulatedFees *big.Int + address process.SpecialAddressHandler + hasher hashing.Hasher + marshalizer marshal.Marshalizer + mut sync.Mutex + accumulatedFees *big.Int - rewardTxsFromBlock map[string]*rewardTx.RewardTx + rewardTxsFromBlock map[string]*rewardTx.RewardTx } // NewRewardTxHandler constructor for the reward transaction handler func NewRewardTxHandler( - address process.SpecialAddressHandler, - hasher hashing.Hasher, - marshalizer marshal.Marshalizer, + address process.SpecialAddressHandler, + hasher hashing.Hasher, + marshalizer marshal.Marshalizer, ) (*rewardsHandler, error) { - if address == nil { - return nil, process.ErrNilSpecialAddressHandler - } - if hasher == nil { - return nil, process.ErrNilHasher - } - if marshalizer == nil { - return nil, process.ErrNilMarshalizer - } - - rtxh := &rewardsHandler{ - address: address, - hasher: hasher, - marshalizer: marshalizer, - } - rtxh.accumulatedFees = big.NewInt(0) - rtxh.rewardTxsFromBlock = make(map[string]*rewardTx.RewardTx) - - return rtxh, nil + if address == nil { + return nil, process.ErrNilSpecialAddressHandler + } + if hasher == nil { + return nil, process.ErrNilHasher + } + if marshalizer == nil { + return nil, process.ErrNilMarshalizer + } + + rtxh := &rewardsHandler{ + address: address, + hasher: hasher, + marshalizer: marshalizer, + } + rtxh.accumulatedFees = big.NewInt(0) + rtxh.rewardTxsFromBlock = make(map[string]*rewardTx.RewardTx) + + return rtxh, nil } // SaveCurrentIntermediateTxToStorage saves current cached data into storage - already saved for txs func (rtxh *rewardsHandler) SaveCurrentIntermediateTxToStorage() error { - //TODO implement me - save only created accumulatedFees - return nil + //TODO implement me - save only created accumulatedFees + return nil } // AddIntermediateTransactions adds intermediate transactions to local cache func (rtxh *rewardsHandler) AddIntermediateTransactions(txs []data.TransactionHandler) error { - return nil + return nil } // CreateAllInterMiniBlocks creates miniblocks from process transactions func (rtxh *rewardsHandler) CreateAllInterMiniBlocks() map[uint32]*block.MiniBlock { - calculatedRewardTxs := rtxh.CreateAllUTxs() - - miniBlocks := make(map[uint32]*block.MiniBlock) - for _, rTx := range calculatedRewardTxs { - dstShId, err := rtxh.address.ShardIdForAddress(rTx.GetRecvAddress()) - if err != nil { - log.Debug(err.Error()) - continue - } - - txHash, err := core.CalculateHash(rtxh.marshalizer, rtxh.hasher, rTx) - if err != nil { - log.Debug(err.Error()) - continue - } - - var ok bool - var mb *block.MiniBlock - if mb, ok = miniBlocks[dstShId]; !ok { - mb = &block.MiniBlock{ - ReceiverShardID: dstShId, - } - } - - mb.TxHashes = append(mb.TxHashes, txHash) - miniBlocks[dstShId] = mb - } - - return miniBlocks + calculatedRewardTxs := rtxh.CreateAllUTxs() + + miniBlocks := make(map[uint32]*block.MiniBlock) + for _, rTx := range calculatedRewardTxs { + dstShId, err := rtxh.address.ShardIdForAddress(rTx.GetRecvAddress()) + if err != nil { + log.Debug(err.Error()) + continue + } + + txHash, err := core.CalculateHash(rtxh.marshalizer, rtxh.hasher, rTx) + if err != nil { + log.Debug(err.Error()) + continue + } + + var ok bool + var mb *block.MiniBlock + if mb, ok = miniBlocks[dstShId]; !ok { + mb = &block.MiniBlock{ + ReceiverShardID: 
dstShId, + } + } + + mb.TxHashes = append(mb.TxHashes, txHash) + miniBlocks[dstShId] = mb + } + + return miniBlocks } // VerifyInterMiniBlocks verifies if transaction fees were correctly handled for the block func (rtxh *rewardsHandler) VerifyInterMiniBlocks(body block.Body) error { - err := rtxh.VerifyCreatedUTxs() - rtxh.CleanProcessedUTxs() + err := rtxh.VerifyCreatedUTxs() + rtxh.CleanProcessedUTxs() - return err + return err } // CreateBlockStarted does the cleanup before creating a new block func (rtxh *rewardsHandler) CreateBlockStarted() { - rtxh.CleanProcessedUTxs() + rtxh.CleanProcessedUTxs() } // CleanProcessedUTxs deletes the cached data func (rtxh *rewardsHandler) CleanProcessedUTxs() { - rtxh.mut.Lock() - rtxh.accumulatedFees = big.NewInt(0) - rtxh.rewardTxsFromBlock = make(map[string]*rewardTx.RewardTx) - rtxh.mut.Unlock() + rtxh.mut.Lock() + rtxh.accumulatedFees = big.NewInt(0) + rtxh.rewardTxsFromBlock = make(map[string]*rewardTx.RewardTx) + rtxh.mut.Unlock() } // AddRewardTxFromBlock adds an existing reward transaction from block into local cache func (rtxh *rewardsHandler) AddRewardTxFromBlock(tx data.TransactionHandler) { - currRewardTx, ok := tx.(*rewardTx.RewardTx) - if !ok { - log.Error(process.ErrWrongTypeAssertion.Error()) - return - } - - rtxh.mut.Lock() - rtxh.rewardTxsFromBlock[string(tx.GetRecvAddress())] = currRewardTx - rtxh.mut.Unlock() + currRewardTx, ok := tx.(*rewardTx.RewardTx) + if !ok { + log.Error(process.ErrWrongTypeAssertion.Error()) + return + } + + rtxh.mut.Lock() + rtxh.rewardTxsFromBlock[string(tx.GetRecvAddress())] = currRewardTx + rtxh.mut.Unlock() } // ProcessTransactionFee adds the tx cost to the accumulated amount func (rtxh *rewardsHandler) ProcessTransactionFee(cost *big.Int) { - if cost == nil { - log.Debug(process.ErrNilValue.Error()) - return - } - - rtxh.mut.Lock() - rtxh.accumulatedFees = rtxh.accumulatedFees.Add(rtxh.accumulatedFees, cost) - rtxh.mut.Unlock() + if cost == nil { + log.Debug(process.ErrNilValue.Error()) + return + } + + rtxh.mut.Lock() + rtxh.accumulatedFees = rtxh.accumulatedFees.Add(rtxh.accumulatedFees, cost) + rtxh.mut.Unlock() } func getPercentageOfValue(value *big.Int, percentage float64) *big.Int { - x := new(big.Float).SetInt(value) - y := big.NewFloat(percentage) + x := new(big.Float).SetInt(value) + y := big.NewFloat(percentage) - z := new(big.Float).Mul(x, y) + z := new(big.Float).Mul(x, y) - op := big.NewInt(0) - result, _ := z.Int(op) + op := big.NewInt(0) + result, _ := z.Int(op) - return result + return result } func (rtxh *rewardsHandler) createLeaderTx() *rewardTx.RewardTx { - currTx := &rewardTx.RewardTx{} + currTx := &rewardTx.RewardTx{} - currTx.Value = getPercentageOfValue(rtxh.accumulatedFees, leaderPercentage) - currTx.RcvAddr = rtxh.address.LeaderAddress() + currTx.Value = getPercentageOfValue(rtxh.accumulatedFees, leaderPercentage) + currTx.RcvAddr = rtxh.address.LeaderAddress() - return currTx + return currTx } func (rtxh *rewardsHandler) createBurnTx() *rewardTx.RewardTx { - currTx := &rewardTx.RewardTx{} + currTx := &rewardTx.RewardTx{} - currTx.Value = getPercentageOfValue(rtxh.accumulatedFees, burnPercentage) - currTx.RcvAddr = rtxh.address.BurnAddress() + currTx.Value = getPercentageOfValue(rtxh.accumulatedFees, burnPercentage) + currTx.RcvAddr = rtxh.address.BurnAddress() - return currTx + return currTx } func (rtxh *rewardsHandler) createCommunityTx() *rewardTx.RewardTx { - currTx := &rewardTx.RewardTx{} + currTx := &rewardTx.RewardTx{} - currTx.Value = 
getPercentageOfValue(rtxh.accumulatedFees, communityPercentage) - currTx.RcvAddr = rtxh.address.ElrondCommunityAddress() + currTx.Value = getPercentageOfValue(rtxh.accumulatedFees, communityPercentage) + currTx.RcvAddr = rtxh.address.ElrondCommunityAddress() - return currTx + return currTx } // CreateAllUTxs creates all the needed reward transactions @@ -198,86 +191,86 @@ func (rtxh *rewardsHandler) createCommunityTx() *rewardTx.RewardTx { // to Elrond community fund. Fixed rewards for every validator are func (rtxh *rewardsHandler) CreateAllUTxs() []data.TransactionHandler { - rewardTxs := make([]data.TransactionHandler, 0) - rewardsFromFees := rtxh.createRewardTxsFromFee() - rewardsForConsensus := rtxh.createRewardTxsForConsensusGroup() + rewardTxs := make([]data.TransactionHandler, 0) + rewardsFromFees := rtxh.createRewardTxsFromFee() + rewardsForConsensus := rtxh.createRewardTxsForConsensusGroup() - rewardTxs = append(rewardTxs, rewardsFromFees...) - rewardTxs = append(rewardTxs, rewardsForConsensus...) + rewardTxs = append(rewardTxs, rewardsFromFees...) + rewardTxs = append(rewardTxs, rewardsForConsensus...) - return rewardTxs + return rewardTxs } func (rtxh *rewardsHandler) createRewardTxsFromFee() []data.TransactionHandler { - rtxh.mut.Lock() - defer rtxh.mut.Unlock() + rtxh.mut.Lock() + defer rtxh.mut.Unlock() - if rtxh.accumulatedFees.Cmp(big.NewInt(1)) < 0 { - rtxh.accumulatedFees = big.NewInt(0) - return nil - } + if rtxh.accumulatedFees.Cmp(big.NewInt(1)) < 0 { + rtxh.accumulatedFees = big.NewInt(0) + return nil + } - leaderTx := rtxh.createLeaderTx() - communityTx := rtxh.createCommunityTx() - burnTx := rtxh.createBurnTx() + leaderTx := rtxh.createLeaderTx() + communityTx := rtxh.createCommunityTx() + burnTx := rtxh.createBurnTx() - currFeeTxs := make([]data.TransactionHandler, 0) - currFeeTxs = append(currFeeTxs, leaderTx, communityTx, burnTx) + currFeeTxs := make([]data.TransactionHandler, 0) + currFeeTxs = append(currFeeTxs, leaderTx, communityTx, burnTx) - rtxh.accumulatedFees = big.NewInt(0) + rtxh.accumulatedFees = big.NewInt(0) - return currFeeTxs + return currFeeTxs } func (rtxh *rewardsHandler) createRewardTxsForConsensusGroup() []data.TransactionHandler { - consensusRewardAddresses := rtxh.address.ConsensusRewardAddresses() + consensusRewardAddresses := rtxh.address.ConsensusRewardAddresses() - consensusRewardTxs := make([]data.TransactionHandler, 0) - for _, address := range consensusRewardAddresses { - rTx := &rewardTx.RewardTx{} - rTx.Value = rewardValue - rTx.RcvAddr = []byte(address) + consensusRewardTxs := make([]data.TransactionHandler, 0) + for _, address := range consensusRewardAddresses { + rTx := &rewardTx.RewardTx{} + rTx.Value = rewardValue + rTx.RcvAddr = []byte(address) - consensusRewardTxs = append(consensusRewardTxs, rTx) - } - return consensusRewardTxs + consensusRewardTxs = append(consensusRewardTxs, rTx) + } + return consensusRewardTxs } // VerifyCreatedUTxs creates all fee txs from added values, than verifies if in block the values are the same func (rtxh *rewardsHandler) VerifyCreatedUTxs() error { - calculatedFeeTxs := rtxh.CreateAllUTxs() - - rtxh.mut.Lock() - defer rtxh.mut.Unlock() - - totalFeesFromBlock := big.NewInt(0) - for _, value := range rtxh.rewardTxsFromBlock { - totalFeesFromBlock = totalFeesFromBlock.Add(totalFeesFromBlock, value.Value) - } - - totalCalculatedFees := big.NewInt(0) - for _, value := range calculatedFeeTxs { - totalCalculatedFees = totalCalculatedFees.Add(totalCalculatedFees, value.GetValue()) - - txFromBlock, ok := 
rtxh.rewardTxsFromBlock[string(value.GetRecvAddress())] - if !ok { - return process.ErrTxsFeesNotFound - } - if txFromBlock.Value.Cmp(value.GetValue()) != 0 { - return process.ErrTxsFeesDoNotMatch - } - } - - if totalCalculatedFees.Cmp(totalFeesFromBlock) != 0 { - return process.ErrTotalTxsFeesDoNotMatch - } - - return nil + calculatedFeeTxs := rtxh.CreateAllUTxs() + + rtxh.mut.Lock() + defer rtxh.mut.Unlock() + + totalFeesFromBlock := big.NewInt(0) + for _, value := range rtxh.rewardTxsFromBlock { + totalFeesFromBlock = totalFeesFromBlock.Add(totalFeesFromBlock, value.Value) + } + + totalCalculatedFees := big.NewInt(0) + for _, value := range calculatedFeeTxs { + totalCalculatedFees = totalCalculatedFees.Add(totalCalculatedFees, value.GetValue()) + + txFromBlock, ok := rtxh.rewardTxsFromBlock[string(value.GetRecvAddress())] + if !ok { + return process.ErrTxsFeesNotFound + } + if txFromBlock.Value.Cmp(value.GetValue()) != 0 { + return process.ErrTxsFeesDoNotMatch + } + } + + if totalCalculatedFees.Cmp(totalFeesFromBlock) != 0 { + return process.ErrTotalTxsFeesDoNotMatch + } + + return nil } // CreateMarshalizedData creates the marshalized data for broadcasting purposes func (rtxh *rewardsHandler) CreateMarshalizedData(txHashes [][]byte) ([][]byte, error) { - // TODO: implement me + // TODO: implement me - return make([][]byte, 0), nil + return make([][]byte, 0), nil } diff --git a/process/block/preprocess/smartContractResults_test.go b/process/block/preprocess/smartContractResults_test.go index 107dcf5d065..5b80880c94c 100644 --- a/process/block/preprocess/smartContractResults_test.go +++ b/process/block/preprocess/smartContractResults_test.go @@ -300,7 +300,7 @@ func TestScrsPreprocessor_RequestBlockTransactionFromMiniBlockFromNetwork(t *tes func TestScrsPreprocessor_ReceivedTransactionShouldEraseRequested(t *testing.T) { t.Parallel() - dataPool := mock.NewPoolsHolderFake() + dataPool := mock.NewPoolsHolderMock() shardedDataStub := &mock.ShardedDataStub{ ShardDataStoreCalled: func(cacheId string) (c storage.Cacher) { @@ -352,7 +352,7 @@ func TestScrsPreprocessor_GetAllTxsFromMiniBlockShouldWork(t *testing.T) { hasher := mock.HasherMock{} marshalizer := &mock.MarshalizerMock{} - dataPool := mock.NewPoolsHolderFake() + dataPool := mock.NewPoolsHolderMock() senderShardId := uint32(0) destinationShardId := uint32(1) @@ -749,7 +749,7 @@ func TestScrsPreprocessor_RestoreTxBlockIntoPools(t *testing.T) { return par, nil } - dataPool := mock.NewPoolsHolderFake() + dataPool := mock.NewPoolsHolderMock() shardedDataStub := &mock.ShardedDataStub{ AddDataCalled: func(key []byte, data interface{}, cacheId string) { diff --git a/process/block/preprocess/transactions_test.go b/process/block/preprocess/transactions_test.go index 62cc1a56b91..c76cbf34190 100644 --- a/process/block/preprocess/transactions_test.go +++ b/process/block/preprocess/transactions_test.go @@ -376,7 +376,7 @@ func TestTransactionPreprocessor_RequestBlockTransactionFromMiniBlockFromNetwork func TestTransactionPreprocessor_ReceivedTransactionShouldEraseRequested(t *testing.T) { t.Parallel() - dataPool := mock.NewPoolsHolderFake() + dataPool := mock.NewPoolsHolderMock() shardedDataStub := &mock.ShardedDataStub{ ShardDataStoreCalled: func(cacheId string) (c storage.Cacher) { @@ -435,7 +435,7 @@ func TestTransactionPreprocessor_GetAllTxsFromMiniBlockShouldWork(t *testing.T) hasher := mock.HasherMock{} marshalizer := &mock.MarshalizerMock{} - dataPool := mock.NewPoolsHolderFake() + dataPool := mock.NewPoolsHolderMock() senderShardId := 
uint32(0) destinationShardId := uint32(1) diff --git a/process/block/shardblock.go b/process/block/shardblock.go index c48ed008874..fe11af9fc23 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -205,6 +205,9 @@ func (sp *shardProcessor) ProcessBlock( headerHandler.GetRound(), sp.shardCoordinator.SelfId(), ) + if err != nil { + return err + } sp.SetConsensusRewardAddresses(consensusAddresses) sp.txCoordinator.CreateBlockStarted() @@ -275,11 +278,6 @@ func (sp *shardProcessor) ProcessBlock( return nil } -// SetConsensusRewardAddresses - sets the reward addresses for the current consensus group -func (sp *shardProcessor) SetConsensusRewardAddresses(consensusRewardAddresses []string) { - sp.specialAddressHandler.SetConsensusRewardAddresses(consensusRewardAddresses) -} - // checkMetaHeadersValidity - checks if listed metaheaders are valid as construction func (sp *shardProcessor) checkMetaHeadersValidityAndFinality(header *block.Header) error { metablockCache := sp.dataPool.MetaBlocks() diff --git a/process/block/shardblock_test.go b/process/block/shardblock_test.go index 611287cf46d..fb768dc80f4 100644 --- a/process/block/shardblock_test.go +++ b/process/block/shardblock_test.go @@ -1,4831 +1,4885 @@ package block_test import ( - "bytes" - "errors" - "fmt" - "reflect" - "sync" - "sync/atomic" - "testing" - "time" - - "github.com/ElrondNetwork/elrond-go/core" - "github.com/ElrondNetwork/elrond-go/core/indexer" - "github.com/ElrondNetwork/elrond-go/data" - "github.com/ElrondNetwork/elrond-go/data/block" - "github.com/ElrondNetwork/elrond-go/data/blockchain" - "github.com/ElrondNetwork/elrond-go/data/smartContractResult" - "github.com/ElrondNetwork/elrond-go/data/transaction" - "github.com/ElrondNetwork/elrond-go/dataRetriever" - "github.com/ElrondNetwork/elrond-go/hashing" - "github.com/ElrondNetwork/elrond-go/integrationTests" - "github.com/ElrondNetwork/elrond-go/marshal" - "github.com/ElrondNetwork/elrond-go/process" - blproc "github.com/ElrondNetwork/elrond-go/process/block" - "github.com/ElrondNetwork/elrond-go/process/coordinator" - "github.com/ElrondNetwork/elrond-go/process/factory/shard" - "github.com/ElrondNetwork/elrond-go/process/mock" - "github.com/ElrondNetwork/elrond-go/sharding" - "github.com/ElrondNetwork/elrond-go/storage" - "github.com/stretchr/testify/assert" + "bytes" + "errors" + "fmt" + "reflect" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/core/indexer" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/blockchain" + "github.com/ElrondNetwork/elrond-go/data/smartContractResult" + "github.com/ElrondNetwork/elrond-go/data/transaction" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/integrationTests" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/process" + blproc "github.com/ElrondNetwork/elrond-go/process/block" + "github.com/ElrondNetwork/elrond-go/process/coordinator" + "github.com/ElrondNetwork/elrond-go/process/factory/shard" + "github.com/ElrondNetwork/elrond-go/process/mock" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/storage" + "github.com/stretchr/testify/assert" ) //------- NewShardProcessor func initAccountsMock() *mock.AccountsStub { - rootHashCalled := func() ([]byte, error) { - return 
[]byte("rootHash"), nil - } - return &mock.AccountsStub{ - RootHashCalled: rootHashCalled, - } + rootHashCalled := func() ([]byte, error) { + return []byte("rootHash"), nil + } + return &mock.AccountsStub{ + RootHashCalled: rootHashCalled, + } } -func initBasicTestData() (*mock.PoolsHolderFake, *blockchain.BlockChain, []byte, block.Body, [][]byte, *mock.HasherMock, *mock.MarshalizerMock, error, []byte) { - tdp := mock.NewPoolsHolderFake() - txHash := []byte("tx_hash1") - tdp.Transactions().AddData(txHash, &transaction.Transaction{}, process.ShardCacherIdentifier(1, 0)) - blkc := &blockchain.BlockChain{ - CurrentBlockHeader: &block.Header{ - Round: 1, - Nonce: 1, - }, - } - rootHash := []byte("rootHash") - body := make(block.Body, 0) - txHashes := make([][]byte, 0) - txHashes = append(txHashes, txHash) - miniblock := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 1, - TxHashes: txHashes, - } - body = append(body, &miniblock) - hasher := &mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} - mbbytes, _ := marshalizer.Marshal(miniblock) - mbHash := hasher.Compute(string(mbbytes)) - return tdp, blkc, rootHash, body, txHashes, hasher, marshalizer, nil, mbHash +func initBasicTestData() (*mock.PoolsHolderMock, *blockchain.BlockChain, []byte, block.Body, [][]byte, *mock.HasherMock, *mock.MarshalizerMock, error, []byte) { + tdp := mock.NewPoolsHolderMock() + txHash := []byte("tx_hash1") + randSeed := []byte("rand seed") + tdp.Transactions().AddData(txHash, &transaction.Transaction{}, process.ShardCacherIdentifier(1, 0)) + blkc := &blockchain.BlockChain{ + CurrentBlockHeader: &block.Header{ + Round: 1, + Nonce: 1, + RandSeed: randSeed, + }, + } + rootHash := []byte("rootHash") + body := make(block.Body, 0) + txHashes := make([][]byte, 0) + txHashes = append(txHashes, txHash) + miniblock := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 1, + TxHashes: txHashes, + } + body = append(body, &miniblock) + hasher := &mock.HasherMock{} + marshalizer := &mock.MarshalizerMock{} + mbbytes, _ := marshalizer.Marshal(miniblock) + mbHash := hasher.Compute(string(mbbytes)) + return tdp, blkc, rootHash, body, txHashes, hasher, marshalizer, nil, mbHash } -func initBlockHeader(prevHash []byte, rootHash []byte, mbHdrs []block.MiniBlockHeader) block.Header { - hdr := block.Header{ - Nonce: 2, - Round: 2, - PrevHash: prevHash, - Signature: []byte("signature"), - PubKeysBitmap: []byte("00110"), - ShardId: 0, - RootHash: rootHash, - MiniBlockHeaders: mbHdrs, - } - return hdr +func initBlockHeader(prevHash []byte, prevRandSeed []byte, rootHash []byte, mbHdrs []block.MiniBlockHeader) block.Header { + hdr := block.Header{ + Nonce: 2, + Round: 2, + PrevHash: prevHash, + PrevRandSeed: prevRandSeed, + Signature: []byte("signature"), + PubKeysBitmap: []byte("00110"), + ShardId: 0, + RootHash: rootHash, + MiniBlockHeaders: mbHdrs, + } + return hdr } type methodFlags struct { - revertToSnapshotCalled bool - rootHashCalled bool + revertToSnapshotCalled bool + rootHashCalled bool } func defaultShardProcessor() (process.BlockProcessor, *methodFlags, error) { - // set accounts not dirty - flags := &methodFlags{} - journalLen := func() int { return 0 } - revertToSnapshot := func(snapshot int) error { - flags.revertToSnapshotCalled = true - return nil - } - rootHashCalled := func() ([]byte, error) { - flags.rootHashCalled = true - return []byte("rootHash"), nil - } - - accStub := initAccountsMock() - accStub.JournalLenCalled = journalLen - accStub.RevertToSnapshotCalled = revertToSnapshot - accStub.RootHashCalled 
= rootHashCalled - - tdp := initDataPool([]byte("tx_hash1")) - sp, err := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - accStub, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{ - ProbableHighestNonceCalled: func() uint64 { - return 0 - }, - GetHighestFinalBlockNonceCalled: func() uint64 { - return 0 - }, - }, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - return sp, flags, err + // set accounts not dirty + flags := &methodFlags{} + journalLen := func() int { return 0 } + revertToSnapshot := func(snapshot int) error { + flags.revertToSnapshotCalled = true + return nil + } + rootHashCalled := func() ([]byte, error) { + flags.rootHashCalled = true + return []byte("rootHash"), nil + } + + accStub := initAccountsMock() + accStub.JournalLenCalled = journalLen + accStub.RevertToSnapshotCalled = revertToSnapshot + accStub.RootHashCalled = rootHashCalled + + tdp := initDataPool([]byte("tx_hash1")) + sp, err := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + accStub, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{ + ProbableHighestNonceCalled: func() uint64 { + return 0 + }, + GetHighestFinalBlockNonceCalled: func() uint64 { + return 0 + }, + }, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + return sp, flags, err } //------- NewBlockProcessor func TestNewBlockProcessor_NilDataPoolShouldErr(t *testing.T) { - t.Parallel() - sp, err := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - nil, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - assert.Equal(t, process.ErrNilDataPoolHolder, err) - assert.Nil(t, sp) + t.Parallel() + sp, err := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + nil, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + assert.Equal(t, process.ErrNilDataPoolHolder, err) + assert.Nil(t, sp) } func TestNewShardProcessor_NilStoreShouldErr(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - sp, err := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - nil, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - 
mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - assert.Equal(t, process.ErrNilStorage, err) - assert.Nil(t, sp) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + sp, err := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + nil, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + assert.Equal(t, process.ErrNilStorage, err) + assert.Nil(t, sp) } func TestNewShardProcessor_NilHasherShouldErr(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - sp, err := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - nil, - &mock.MarshalizerMock{}, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - assert.Equal(t, process.ErrNilHasher, err) - assert.Nil(t, sp) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + sp, err := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + nil, + &mock.MarshalizerMock{}, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + assert.Equal(t, process.ErrNilHasher, err) + assert.Nil(t, sp) } func TestNewShardProcessor_NilMarshalizerShouldWork(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - sp, err := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - nil, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - assert.Equal(t, process.ErrNilMarshalizer, err) - assert.Nil(t, sp) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + sp, err := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + nil, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + 
) + assert.Equal(t, process.ErrNilMarshalizer, err) + assert.Nil(t, sp) } func TestNewShardProcessor_NilAccountsAdapterShouldErr(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - sp, err := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - nil, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - assert.Equal(t, process.ErrNilAccountsAdapter, err) - assert.Nil(t, sp) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + sp, err := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + nil, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + assert.Equal(t, process.ErrNilAccountsAdapter, err) + assert.Nil(t, sp) } func TestNewShardProcessor_NilShardCoordinatorShouldErr(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - sp, err := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - initAccountsMock(), - nil, - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - assert.Equal(t, process.ErrNilShardCoordinator, err) - assert.Nil(t, sp) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + sp, err := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + initAccountsMock(), + nil, + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + assert.Equal(t, process.ErrNilShardCoordinator, err) + assert.Nil(t, sp) } func TestNewShardProcessor_NilForkDetectorShouldErr(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - sp, err := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - nil, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - assert.Equal(t, process.ErrNilForkDetector, err) - assert.Nil(t, sp) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + sp, err := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, 
+ &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + nil, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + assert.Equal(t, process.ErrNilForkDetector, err) + assert.Nil(t, sp) } func TestNewShardProcessor_NilBlocksTrackerShouldErr(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - sp, err := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - nil, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - assert.Equal(t, process.ErrNilBlocksTracker, err) - assert.Nil(t, sp) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + sp, err := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + nil, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + assert.Equal(t, process.ErrNilBlocksTracker, err) + assert.Nil(t, sp) } func TestNewShardProcessor_NilRequestTransactionHandlerShouldErr(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - sp, err := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - nil, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - assert.Equal(t, process.ErrNilRequestHandler, err) - assert.Nil(t, sp) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + sp, err := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + nil, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + assert.Equal(t, process.ErrNilRequestHandler, err) + assert.Nil(t, sp) } func TestNewShardProcessor_NilTransactionPoolShouldErr(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - tdp.TransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { - return nil - } - sp, err := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - initAccountsMock(), - 
mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - assert.Equal(t, process.ErrNilTransactionPool, err) - assert.Nil(t, sp) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + tdp.TransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { + return nil + } + sp, err := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + assert.Equal(t, process.ErrNilTransactionPool, err) + assert.Nil(t, sp) } func TestNewShardProcessor_NilTxCoordinator(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - sp, err := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - nil, - &mock.Uint64ByteSliceConverterMock{}, - ) - assert.Equal(t, process.ErrNilTransactionCoordinator, err) - assert.Nil(t, sp) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + sp, err := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + nil, + &mock.Uint64ByteSliceConverterMock{}, + ) + assert.Equal(t, process.ErrNilTransactionCoordinator, err) + assert.Nil(t, sp) } func TestNewShardProcessor_NilUint64Converter(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - sp, err := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - nil, - ) - assert.Equal(t, process.ErrNilUint64Converter, err) - assert.Nil(t, sp) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + sp, err := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, 
+ createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + nil, + ) + assert.Equal(t, process.ErrNilUint64Converter, err) + assert.Nil(t, sp) } func TestNewShardProcessor_OkValsShouldWork(t *testing.T) { - t.Parallel() + t.Parallel() - sp, _, err := defaultShardProcessor() - assert.Nil(t, err) - assert.NotNil(t, sp) + sp, _, err := defaultShardProcessor() + assert.Nil(t, err) + assert.NotNil(t, sp) } //------- ProcessBlock func TestShardProcessor_ProcessBlockWithNilBlockchainShouldErr(t *testing.T) { - t.Parallel() + t.Parallel() - sp, _, _ := defaultShardProcessor() - blk := make(block.Body, 0) - err := sp.ProcessBlock(nil, &block.Header{}, blk, haveTime) - assert.Equal(t, process.ErrNilBlockChain, err) + sp, _, _ := defaultShardProcessor() + blk := make(block.Body, 0) + err := sp.ProcessBlock(nil, &block.Header{}, blk, haveTime) + assert.Equal(t, process.ErrNilBlockChain, err) } func TestShardProcessor_ProcessBlockWithNilHeaderShouldErr(t *testing.T) { - t.Parallel() + t.Parallel() - sp, _, _ := defaultShardProcessor() - body := make(block.Body, 0) - err := sp.ProcessBlock(&blockchain.BlockChain{}, nil, body, haveTime) - assert.Equal(t, process.ErrNilBlockHeader, err) + sp, _, _ := defaultShardProcessor() + body := make(block.Body, 0) + err := sp.ProcessBlock(&blockchain.BlockChain{}, nil, body, haveTime) + assert.Equal(t, process.ErrNilBlockHeader, err) } func TestShardProcessor_ProcessBlockWithNilBlockBodyShouldErr(t *testing.T) { - t.Parallel() + t.Parallel() - sp, _, _ := defaultShardProcessor() - err := sp.ProcessBlock(&blockchain.BlockChain{}, &block.Header{}, nil, haveTime) - assert.Equal(t, process.ErrNilBlockBody, err) + sp, _, _ := defaultShardProcessor() + err := sp.ProcessBlock(&blockchain.BlockChain{}, &block.Header{}, nil, haveTime) + assert.Equal(t, process.ErrNilBlockBody, err) } func TestShardProcessor_ProcessBlockWithNilHaveTimeFuncShouldErr(t *testing.T) { - t.Parallel() + t.Parallel() - sp, _, _ := defaultShardProcessor() - blk := make(block.Body, 0) - err := sp.ProcessBlock(&blockchain.BlockChain{}, &block.Header{}, blk, nil) - assert.Equal(t, process.ErrNilHaveTimeHandler, err) + sp, _, _ := defaultShardProcessor() + blk := make(block.Body, 0) + err := sp.ProcessBlock(&blockchain.BlockChain{}, &block.Header{}, blk, nil) + assert.Equal(t, process.ErrNilHaveTimeHandler, err) } func TestShardProcessor_ProcessWithDirtyAccountShouldErr(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - // set accounts dirty - journalLen := func() int { return 3 } - revToSnapshot := func(snapshot int) error { return nil } - blkc := &blockchain.BlockChain{} - hdr := block.Header{ - Nonce: 1, - PubKeysBitmap: []byte("0100101"), - PrevHash: []byte(""), - Signature: []byte("signature"), - RootHash: []byte("roothash"), - } - body := make(block.Body, 0) - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{ - JournalLenCalled: journalLen, - RevertToSnapshotCalled: revToSnapshot, - }, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - // should return err - err := sp.ProcessBlock(blkc, &hdr, 
body, haveTime) - assert.NotNil(t, err) - assert.Equal(t, err, process.ErrAccountStateDirty) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + // set accounts dirty + journalLen := func() int { return 3 } + revToSnapshot := func(snapshot int) error { return nil } + blkc := &blockchain.BlockChain{} + hdr := block.Header{ + Nonce: 1, + PubKeysBitmap: []byte("0100101"), + PrevHash: []byte(""), + PrevRandSeed: []byte("rand seed"), + Signature: []byte("signature"), + RootHash: []byte("roothash"), + } + body := make(block.Body, 0) + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.AccountsStub{ + JournalLenCalled: journalLen, + RevertToSnapshotCalled: revToSnapshot, + }, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + // should return err + err := sp.ProcessBlock(blkc, &hdr, body, haveTime) + assert.NotNil(t, err) + assert.Equal(t, process.ErrAccountStateDirty, err) } func TestShardProcessor_ProcessBlockHeaderBodyMismatchShouldErr(t *testing.T) { - t.Parallel() - - txHash := []byte("tx_hash1") - blkc := &blockchain.BlockChain{} - hdr := block.Header{ - Nonce: 1, - PrevHash: []byte(""), - Signature: []byte("signature"), - PubKeysBitmap: []byte("00110"), - ShardId: 0, - RootHash: []byte("rootHash"), - } - body := make(block.Body, 0) - txHashes := make([][]byte, 0) - txHashes = append(txHashes, txHash) - miniblock := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 0, - TxHashes: txHashes, - } - body = append(body, &miniblock) - - sp, _, _ := defaultShardProcessor() - - // should return err - err := sp.ProcessBlock(blkc, &hdr, body, haveTime) - assert.Equal(t, process.ErrHeaderBodyMismatch, err) + t.Parallel() + + txHash := []byte("tx_hash1") + blkc := &blockchain.BlockChain{} + hdr := block.Header{ + Nonce: 1, + PrevHash: []byte(""), + PrevRandSeed: []byte("rand seed"), + Signature: []byte("signature"), + PubKeysBitmap: []byte("00110"), + ShardId: 0, + RootHash: []byte("rootHash"), + } + body := make(block.Body, 0) + txHashes := make([][]byte, 0) + txHashes = append(txHashes, txHash) + miniblock := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 0, + TxHashes: txHashes, + } + body = append(body, &miniblock) + + sp, _, _ := defaultShardProcessor() + + // should return err + err := sp.ProcessBlock(blkc, &hdr, body, haveTime) + assert.Equal(t, process.ErrHeaderBodyMismatch, err) } func TestShardProcessor_ProcessBlockWithInvalidTransactionShouldErr(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - txHash := []byte("tx_hash1") - blkc := &blockchain.BlockChain{} - - body := make(block.Body, 0) - txHashes := make([][]byte, 0) - txHashes = append(txHashes, txHash) - miniblock := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 0, - TxHashes: txHashes, - } - body = append(body, &miniblock) - - hasher := &mock.HasherStub{} - marshalizer := &mock.MarshalizerMock{} - - mbbytes, _ := marshalizer.Marshal(miniblock) - mbHash := hasher.Compute(string(mbbytes)) - mbHdr := block.MiniBlockHeader{ - SenderShardID: 0, - ReceiverShardID: 0, - TxCount: uint32(len(txHashes)), - Hash: mbHash} - mbHdrs := make([]block.MiniBlockHeader, 0) - mbHdrs = append(mbHdrs, mbHdr) 
- - hdr := block.Header{ - Nonce: 1, - PrevHash: []byte(""), - Signature: []byte("signature"), - PubKeysBitmap: []byte("00110"), - ShardId: 0, - RootHash: []byte("rootHash"), - MiniBlockHeaders: mbHdrs, - } - // set accounts not dirty - journalLen := func() int { return 0 } - revertToSnapshot := func(snapshot int) error { return nil } - rootHashCalled := func() ([]byte, error) { - return []byte("rootHash"), nil - } - - accounts := &mock.AccountsStub{ - JournalLenCalled: journalLen, - RevertToSnapshotCalled: revertToSnapshot, - RootHashCalled: rootHashCalled, - } - factory, _ := shard.NewPreProcessorsContainerFactory( - mock.NewMultiShardsCoordinatorMock(5), - initStore(), - marshalizer, - hasher, - tdp, - &mock.AddressConverterMock{}, - accounts, - &mock.RequestHandlerMock{}, - &mock.TxProcessorMock{ - ProcessTransactionCalled: func(transaction *transaction.Transaction, round uint64) error { - return process.ErrHigherNonceInTransaction - }, - }, - &mock.SCProcessorMock{}, - &mock.SmartContractResultsProcessorMock{}, - &mock.RewardTxProcessorMock{}, - ) - container, _ := factory.Create() - - tc, err := coordinator.NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(3), - accounts, - tdp, - &mock.RequestHandlerMock{}, - container, - &mock.InterimProcessorContainerMock{}, - ) - - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - hasher, - marshalizer, - &mock.AccountsStub{ - JournalLenCalled: journalLen, - RevertToSnapshotCalled: revertToSnapshot, - RootHashCalled: rootHashCalled, - }, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - tc, - &mock.Uint64ByteSliceConverterMock{}, - ) - - // should return err - err = sp.ProcessBlock(blkc, &hdr, body, haveTime) - assert.Equal(t, process.ErrHigherNonceInTransaction, err) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + txHash := []byte("tx_hash1") + blkc := &blockchain.BlockChain{} + + body := make(block.Body, 0) + txHashes := make([][]byte, 0) + txHashes = append(txHashes, txHash) + miniblock := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 0, + TxHashes: txHashes, + } + body = append(body, &miniblock) + + hasher := &mock.HasherStub{} + marshalizer := &mock.MarshalizerMock{} + + mbbytes, _ := marshalizer.Marshal(miniblock) + mbHash := hasher.Compute(string(mbbytes)) + mbHdr := block.MiniBlockHeader{ + SenderShardID: 0, + ReceiverShardID: 0, + TxCount: uint32(len(txHashes)), + Hash: mbHash} + mbHdrs := make([]block.MiniBlockHeader, 0) + mbHdrs = append(mbHdrs, mbHdr) + + hdr := block.Header{ + Nonce: 1, + PrevHash: []byte(""), + PrevRandSeed: []byte("rand seed"), + Signature: []byte("signature"), + PubKeysBitmap: []byte("00110"), + ShardId: 0, + RootHash: []byte("rootHash"), + MiniBlockHeaders: mbHdrs, + } + // set accounts not dirty + journalLen := func() int { return 0 } + revertToSnapshot := func(snapshot int) error { return nil } + rootHashCalled := func() ([]byte, error) { + return []byte("rootHash"), nil + } + + accounts := &mock.AccountsStub{ + JournalLenCalled: journalLen, + RevertToSnapshotCalled: revertToSnapshot, + RootHashCalled: rootHashCalled, + } + factory, _ := shard.NewPreProcessorsContainerFactory( + mock.NewMultiShardsCoordinatorMock(5), + initStore(), + marshalizer, + hasher, + tdp, + &mock.AddressConverterMock{}, + accounts, + 
&mock.RequestHandlerMock{}, + &mock.TxProcessorMock{ + ProcessTransactionCalled: func(transaction *transaction.Transaction, round uint64) error { + return process.ErrHigherNonceInTransaction + }, + }, + &mock.SCProcessorMock{}, + &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + ) + container, _ := factory.Create() + + tc, err := coordinator.NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(3), + accounts, + tdp, + &mock.RequestHandlerMock{}, + container, + &mock.InterimProcessorContainerMock{}, + ) + + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + hasher, + marshalizer, + &mock.AccountsStub{ + JournalLenCalled: journalLen, + RevertToSnapshotCalled: revertToSnapshot, + RootHashCalled: rootHashCalled, + }, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + tc, + &mock.Uint64ByteSliceConverterMock{}, + ) + + // should return err + err = sp.ProcessBlock(blkc, &hdr, body, haveTime) + assert.Equal(t, process.ErrHigherNonceInTransaction, err) } func TestShardProcessor_ProcessWithHeaderNotFirstShouldErr(t *testing.T) { - t.Parallel() - - sp, _, _ := defaultShardProcessor() - hdr := &block.Header{ - Nonce: 0, - Round: 1, - PubKeysBitmap: []byte("0100101"), - PrevHash: []byte(""), - Signature: []byte("signature"), - RootHash: []byte("root hash"), - } - body := make(block.Body, 0) - blkc := &blockchain.BlockChain{} - err := sp.ProcessBlock(blkc, hdr, body, haveTime) - assert.Equal(t, process.ErrWrongNonceInBlock, err) + t.Parallel() + + sp, _, _ := defaultShardProcessor() + hdr := &block.Header{ + Nonce: 0, + Round: 1, + PubKeysBitmap: []byte("0100101"), + PrevHash: []byte(""), + PrevRandSeed: []byte("rand seed"), + Signature: []byte("signature"), + RootHash: []byte("root hash"), + } + body := make(block.Body, 0) + blkc := &blockchain.BlockChain{} + err := sp.ProcessBlock(blkc, hdr, body, haveTime) + assert.Equal(t, process.ErrWrongNonceInBlock, err) } func TestShardProcessor_ProcessWithHeaderNotCorrectNonceShouldErr(t *testing.T) { - t.Parallel() - - sp, _, _ := defaultShardProcessor() - hdr := &block.Header{ - Nonce: 0, - Round: 1, - PubKeysBitmap: []byte("0100101"), - PrevHash: []byte(""), - Signature: []byte("signature"), - RootHash: []byte("root hash"), - } - body := make(block.Body, 0) - blkc := &blockchain.BlockChain{} - err := sp.ProcessBlock(blkc, hdr, body, haveTime) - assert.Equal(t, process.ErrWrongNonceInBlock, err) + t.Parallel() + + sp, _, _ := defaultShardProcessor() + hdr := &block.Header{ + Nonce: 0, + Round: 1, + PubKeysBitmap: []byte("0100101"), + PrevHash: []byte(""), + PrevRandSeed: []byte("rand seed"), + Signature: []byte("signature"), + RootHash: []byte("root hash"), + } + body := make(block.Body, 0) + blkc := &blockchain.BlockChain{} + err := sp.ProcessBlock(blkc, hdr, body, haveTime) + assert.Equal(t, process.ErrWrongNonceInBlock, err) } func TestShardProcessor_ProcessWithHeaderNotCorrectPrevHashShouldErr(t *testing.T) { - t.Parallel() - - sp, _, _ := defaultShardProcessor() - hdr := &block.Header{ - Nonce: 1, - Round: 1, - PubKeysBitmap: []byte("0100101"), - PrevHash: []byte("zzz"), - Signature: []byte("signature"), - RootHash: []byte("root hash"), - } - body := make(block.Body, 0) - blkc := &blockchain.BlockChain{ - CurrentBlockHeader: &block.Header{ - Nonce: 0, - }, 
- } - err := sp.ProcessBlock(blkc, hdr, body, haveTime) - assert.Equal(t, process.ErrBlockHashDoesNotMatch, err) + t.Parallel() + + randSeed := []byte("rand seed") + sp, _, _ := defaultShardProcessor() + hdr := &block.Header{ + Nonce: 1, + Round: 1, + PubKeysBitmap: []byte("0100101"), + PrevHash: []byte("zzz"), + PrevRandSeed: randSeed, + Signature: []byte("signature"), + RootHash: []byte("root hash"), + } + body := make(block.Body, 0) + blkc := &blockchain.BlockChain{ + CurrentBlockHeader: &block.Header{ + Nonce: 0, + RandSeed: randSeed, + }, + } + err := sp.ProcessBlock(blkc, hdr, body, haveTime) + assert.Equal(t, process.ErrBlockHashDoesNotMatch, err) } func TestShardProcessor_ProcessBlockWithErrOnProcessBlockTransactionsCallShouldRevertState(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - txHash := []byte("tx_hash1") - blkc := &blockchain.BlockChain{ - CurrentBlockHeader: &block.Header{ - Nonce: 0, - }, - } - body := make(block.Body, 0) - txHashes := make([][]byte, 0) - txHashes = append(txHashes, txHash) - miniblock := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 0, - TxHashes: txHashes, - } - body = append(body, &miniblock) - - hasher := &mock.HasherStub{} - marshalizer := &mock.MarshalizerMock{} - - mbbytes, _ := marshalizer.Marshal(miniblock) - mbHash := hasher.Compute(string(mbbytes)) - mbHdr := block.MiniBlockHeader{ - SenderShardID: 0, - ReceiverShardID: 0, - TxCount: uint32(len(txHashes)), - Hash: mbHash} - mbHdrs := make([]block.MiniBlockHeader, 0) - mbHdrs = append(mbHdrs, mbHdr) - - hdr := block.Header{ - Round: 1, - Nonce: 1, - PrevHash: []byte(""), - Signature: []byte("signature"), - PubKeysBitmap: []byte("00110"), - ShardId: 0, - RootHash: []byte("rootHash"), - MiniBlockHeaders: mbHdrs, - } - - // set accounts not dirty - journalLen := func() int { return 0 } - wasCalled := false - revertToSnapshot := func(snapshot int) error { - wasCalled = true - return nil - } - rootHashCalled := func() ([]byte, error) { - return []byte("rootHash"), nil - } - - err := errors.New("process block transaction error") - txProcess := func(transaction *transaction.Transaction, round uint64) error { - return err - } - - shardCoordinator := mock.NewMultiShardsCoordinatorMock(3) - tpm := &mock.TxProcessorMock{ProcessTransactionCalled: txProcess} - store := &mock.ChainStorerMock{} - accounts := &mock.AccountsStub{ - JournalLenCalled: journalLen, - RevertToSnapshotCalled: revertToSnapshot, - RootHashCalled: rootHashCalled, - } - factory, _ := shard.NewPreProcessorsContainerFactory( - mock.NewMultiShardsCoordinatorMock(3), - store, - marshalizer, - hasher, - tdp, - &mock.AddressConverterMock{}, - accounts, - &mock.RequestHandlerMock{}, - tpm, - &mock.SCProcessorMock{}, - &mock.SmartContractResultsProcessorMock{}, - &mock.RewardTxProcessorMock{}, - ) - container, _ := factory.Create() - - tc, _ := coordinator.NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(3), - accounts, - tdp, - &mock.RequestHandlerMock{}, - container, - &mock.InterimProcessorContainerMock{}, - ) - - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - store, - hasher, - &mock.MarshalizerMock{}, - accounts, - shardCoordinator, - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{ - ProbableHighestNonceCalled: func() uint64 { - return 0 - }, - GetHighestFinalBlockNonceCalled: func() uint64 { - return 0 - }, - }, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - 
&mock.RequestHandlerMock{}, - tc, - &mock.Uint64ByteSliceConverterMock{}, - ) - - // should return err - err2 := sp.ProcessBlock(blkc, &hdr, body, haveTime) - assert.Equal(t, err, err2) - assert.True(t, wasCalled) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + txHash := []byte("tx_hash1") + randSeed := []byte("rand seed") + blkc := &blockchain.BlockChain{ + CurrentBlockHeader: &block.Header{ + Nonce: 0, + RandSeed: randSeed, + }, + } + body := make(block.Body, 0) + txHashes := make([][]byte, 0) + txHashes = append(txHashes, txHash) + miniblock := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 0, + TxHashes: txHashes, + } + body = append(body, &miniblock) + + hasher := &mock.HasherStub{} + marshalizer := &mock.MarshalizerMock{} + + mbbytes, _ := marshalizer.Marshal(miniblock) + mbHash := hasher.Compute(string(mbbytes)) + mbHdr := block.MiniBlockHeader{ + SenderShardID: 0, + ReceiverShardID: 0, + TxCount: uint32(len(txHashes)), + Hash: mbHash} + mbHdrs := make([]block.MiniBlockHeader, 0) + mbHdrs = append(mbHdrs, mbHdr) + + hdr := block.Header{ + Round: 1, + Nonce: 1, + PrevHash: []byte(""), + PrevRandSeed: randSeed, + Signature: []byte("signature"), + PubKeysBitmap: []byte("00110"), + ShardId: 0, + RootHash: []byte("rootHash"), + MiniBlockHeaders: mbHdrs, + } + + // set accounts not dirty + journalLen := func() int { return 0 } + wasCalled := false + revertToSnapshot := func(snapshot int) error { + wasCalled = true + return nil + } + rootHashCalled := func() ([]byte, error) { + return []byte("rootHash"), nil + } + + err := errors.New("process block transaction error") + txProcess := func(transaction *transaction.Transaction, round uint64) error { + return err + } + + shardCoordinator := mock.NewMultiShardsCoordinatorMock(3) + tpm := &mock.TxProcessorMock{ProcessTransactionCalled: txProcess} + store := &mock.ChainStorerMock{} + accounts := &mock.AccountsStub{ + JournalLenCalled: journalLen, + RevertToSnapshotCalled: revertToSnapshot, + RootHashCalled: rootHashCalled, + } + factory, _ := shard.NewPreProcessorsContainerFactory( + mock.NewMultiShardsCoordinatorMock(3), + store, + marshalizer, + hasher, + tdp, + &mock.AddressConverterMock{}, + accounts, + &mock.RequestHandlerMock{}, + tpm, + &mock.SCProcessorMock{}, + &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + ) + container, _ := factory.Create() + + tc, _ := coordinator.NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(3), + accounts, + tdp, + &mock.RequestHandlerMock{}, + container, + &mock.InterimProcessorContainerMock{}, + ) + + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + store, + hasher, + &mock.MarshalizerMock{}, + accounts, + shardCoordinator, + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{ + ProbableHighestNonceCalled: func() uint64 { + return 0 + }, + GetHighestFinalBlockNonceCalled: func() uint64 { + return 0 + }, + }, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + tc, + &mock.Uint64ByteSliceConverterMock{}, + ) + + // should return err + err2 := sp.ProcessBlock(blkc, &hdr, body, haveTime) + assert.Equal(t, err, err2) + assert.True(t, wasCalled) } func TestShardProcessor_ProcessBlockWithErrOnVerifyStateRootCallShouldRevertState(t *testing.T) { - t.Parallel() - - txHash := []byte("tx_hash1") - blkc := &blockchain.BlockChain{ - CurrentBlockHeader: &block.Header{ - Nonce: 0, - }, - } - body := make(block.Body, 0) - 
txHashes := make([][]byte, 0) - txHashes = append(txHashes, txHash) - miniblock := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 0, - TxHashes: txHashes, - } - body = append(body, &miniblock) - - hasher := &mock.HasherStub{} - marshalizer := &mock.MarshalizerMock{} - - mbbytes, _ := marshalizer.Marshal(miniblock) - mbHash := hasher.Compute(string(mbbytes)) - mbHdr := block.MiniBlockHeader{ - SenderShardID: 0, - ReceiverShardID: 0, - TxCount: uint32(len(txHashes)), - Hash: mbHash} - mbHdrs := make([]block.MiniBlockHeader, 0) - mbHdrs = append(mbHdrs, mbHdr) - - hdr := block.Header{ - Round: 1, - Nonce: 1, - PrevHash: []byte(""), - Signature: []byte("signature"), - PubKeysBitmap: []byte("00110"), - ShardId: 0, - RootHash: []byte("rootHashX"), - MiniBlockHeaders: mbHdrs, - } - - sp, flags, _ := defaultShardProcessor() - // should return err - err := sp.ProcessBlock(blkc, &hdr, body, haveTime) - assert.Equal(t, process.ErrRootStateMissmatch, err) - assert.True(t, flags.revertToSnapshotCalled) + t.Parallel() + + randSeed := []byte("rand seed") + txHash := []byte("tx_hash1") + blkc := &blockchain.BlockChain{ + CurrentBlockHeader: &block.Header{ + Nonce: 0, + RandSeed: randSeed, + }, + } + body := make(block.Body, 0) + txHashes := make([][]byte, 0) + txHashes = append(txHashes, txHash) + miniblock := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 0, + TxHashes: txHashes, + } + body = append(body, &miniblock) + + hasher := &mock.HasherStub{} + marshalizer := &mock.MarshalizerMock{} + + mbbytes, _ := marshalizer.Marshal(miniblock) + mbHash := hasher.Compute(string(mbbytes)) + mbHdr := block.MiniBlockHeader{ + SenderShardID: 0, + ReceiverShardID: 0, + TxCount: uint32(len(txHashes)), + Hash: mbHash} + mbHdrs := make([]block.MiniBlockHeader, 0) + mbHdrs = append(mbHdrs, mbHdr) + + hdr := block.Header{ + Round: 1, + Nonce: 1, + PrevHash: []byte(""), + PrevRandSeed: randSeed, + Signature: []byte("signature"), + PubKeysBitmap: []byte("00110"), + ShardId: 0, + RootHash: []byte("rootHashX"), + MiniBlockHeaders: mbHdrs, + } + + sp, flags, _ := defaultShardProcessor() + // should return err + err := sp.ProcessBlock(blkc, &hdr, body, haveTime) + assert.Equal(t, process.ErrRootStateMissmatch, err) + assert.True(t, flags.revertToSnapshotCalled) } func TestShardProcessor_ProcessBlockOnlyIntraShardShouldPass(t *testing.T) { - t.Parallel() - - txHash := []byte("tx_hash1") - blkc := &blockchain.BlockChain{ - CurrentBlockHeader: &block.Header{ - Nonce: 0, - }, - } - rootHash := []byte("rootHash") - body := make(block.Body, 0) - txHashes := make([][]byte, 0) - txHashes = append(txHashes, txHash) - miniblock := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 0, - TxHashes: txHashes, - } - body = append(body, &miniblock) - - hasher := &mock.HasherStub{} - marshalizer := &mock.MarshalizerMock{} - - mbbytes, _ := marshalizer.Marshal(miniblock) - mbHash := hasher.Compute(string(mbbytes)) - mbHdr := block.MiniBlockHeader{ - SenderShardID: 0, - ReceiverShardID: 0, - TxCount: uint32(len(txHashes)), - Hash: mbHash} - mbHdrs := make([]block.MiniBlockHeader, 0) - mbHdrs = append(mbHdrs, mbHdr) - - hdr := block.Header{ - Round: 1, - Nonce: 1, - PrevHash: []byte(""), - Signature: []byte("signature"), - PubKeysBitmap: []byte("00110"), - ShardId: 0, - RootHash: rootHash, - MiniBlockHeaders: mbHdrs, - } - - sp, flags, _ := defaultShardProcessor() - - // should return err - err := sp.ProcessBlock(blkc, &hdr, body, haveTime) - assert.Nil(t, err) - assert.False(t, flags.revertToSnapshotCalled) + t.Parallel() 
+ + randSeed := []byte("rand seed") + txHash := []byte("tx_hash1") + blkc := &blockchain.BlockChain{ + CurrentBlockHeader: &block.Header{ + Nonce: 0, + RandSeed: randSeed, + }, + } + rootHash := []byte("rootHash") + body := make(block.Body, 0) + txHashes := make([][]byte, 0) + txHashes = append(txHashes, txHash) + miniblock := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 0, + TxHashes: txHashes, + } + body = append(body, &miniblock) + + hasher := &mock.HasherStub{} + marshalizer := &mock.MarshalizerMock{} + + mbbytes, _ := marshalizer.Marshal(miniblock) + mbHash := hasher.Compute(string(mbbytes)) + mbHdr := block.MiniBlockHeader{ + SenderShardID: 0, + ReceiverShardID: 0, + TxCount: uint32(len(txHashes)), + Hash: mbHash} + mbHdrs := make([]block.MiniBlockHeader, 0) + mbHdrs = append(mbHdrs, mbHdr) + + hdr := block.Header{ + Round: 1, + Nonce: 1, + PrevHash: []byte(""), + PrevRandSeed: randSeed, + Signature: []byte("signature"), + PubKeysBitmap: []byte("00110"), + ShardId: 0, + RootHash: rootHash, + MiniBlockHeaders: mbHdrs, + } + + sp, flags, _ := defaultShardProcessor() + + // should return err + err := sp.ProcessBlock(blkc, &hdr, body, haveTime) + assert.Nil(t, err) + assert.False(t, flags.revertToSnapshotCalled) } func TestShardProcessor_ProcessBlockCrossShardWithoutMetaShouldFail(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - txHash := []byte("tx_hash1") - blkc := &blockchain.BlockChain{ - CurrentBlockHeader: &block.Header{ - Nonce: 0, - }, - } - rootHash := []byte("rootHash") - body := make(block.Body, 0) - txHashes := make([][]byte, 0) - txHashes = append(txHashes, txHash) - miniblock := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 1, - TxHashes: txHashes, - } - body = append(body, &miniblock) - - shardCoordinator := mock.NewMultiShardsCoordinatorMock(3) - tx := &transaction.Transaction{} - tdp.Transactions().AddData(txHash, tx, shardCoordinator.CommunicationIdentifier(0)) - - hasher := &mock.HasherStub{} - marshalizer := &mock.MarshalizerMock{} - - mbbytes, _ := marshalizer.Marshal(miniblock) - mbHash := hasher.Compute(string(mbbytes)) - mbHdr := block.MiniBlockHeader{ - ReceiverShardID: 0, - SenderShardID: 1, - TxCount: uint32(len(txHashes)), - Hash: mbHash} - mbHdrs := make([]block.MiniBlockHeader, 0) - mbHdrs = append(mbHdrs, mbHdr) - - hdr := block.Header{ - Round: 1, - Nonce: 1, - PrevHash: []byte(""), - Signature: []byte("signature"), - PubKeysBitmap: []byte("00110"), - ShardId: 0, - RootHash: rootHash, - MiniBlockHeaders: mbHdrs, - } - // set accounts not dirty - journalLen := func() int { return 0 } - wasCalled := false - revertToSnapshot := func(snapshot int) error { - wasCalled = true - return nil - } - rootHashCalled := func() ([]byte, error) { - return rootHash, nil - } - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{ - JournalLenCalled: journalLen, - RevertToSnapshotCalled: revertToSnapshot, - RootHashCalled: rootHashCalled, - }, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - // should return err - err := sp.ProcessBlock(blkc, &hdr, body, haveTime) - assert.Equal(t, 
process.ErrCrossShardMBWithoutConfirmationFromMeta, err) - assert.False(t, wasCalled) + t.Parallel() + + randSeed := []byte("rand seed") + tdp := initDataPool([]byte("tx_hash1")) + txHash := []byte("tx_hash1") + blkc := &blockchain.BlockChain{ + CurrentBlockHeader: &block.Header{ + Nonce: 0, + RandSeed: randSeed, + }, + } + rootHash := []byte("rootHash") + body := make(block.Body, 0) + txHashes := make([][]byte, 0) + txHashes = append(txHashes, txHash) + miniblock := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 1, + TxHashes: txHashes, + } + body = append(body, &miniblock) + + shardCoordinator := mock.NewMultiShardsCoordinatorMock(3) + tx := &transaction.Transaction{} + tdp.Transactions().AddData(txHash, tx, shardCoordinator.CommunicationIdentifier(0)) + + hasher := &mock.HasherStub{} + marshalizer := &mock.MarshalizerMock{} + + mbbytes, _ := marshalizer.Marshal(miniblock) + mbHash := hasher.Compute(string(mbbytes)) + mbHdr := block.MiniBlockHeader{ + ReceiverShardID: 0, + SenderShardID: 1, + TxCount: uint32(len(txHashes)), + Hash: mbHash} + mbHdrs := make([]block.MiniBlockHeader, 0) + mbHdrs = append(mbHdrs, mbHdr) + + hdr := block.Header{ + Round: 1, + Nonce: 1, + PrevHash: []byte(""), + PrevRandSeed: randSeed, + Signature: []byte("signature"), + PubKeysBitmap: []byte("00110"), + ShardId: 0, + RootHash: rootHash, + MiniBlockHeaders: mbHdrs, + } + // set accounts not dirty + journalLen := func() int { return 0 } + wasCalled := false + revertToSnapshot := func(snapshot int) error { + wasCalled = true + return nil + } + rootHashCalled := func() ([]byte, error) { + return rootHash, nil + } + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.AccountsStub{ + JournalLenCalled: journalLen, + RevertToSnapshotCalled: revertToSnapshot, + RootHashCalled: rootHashCalled, + }, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + // should return err + err := sp.ProcessBlock(blkc, &hdr, body, haveTime) + assert.Equal(t, process.ErrCrossShardMBWithoutConfirmationFromMeta, err) + assert.False(t, wasCalled) } func TestShardProcessor_ProcessBlockCrossShardWithMetaShouldPass(t *testing.T) { - t.Parallel() - - tdp, blkc, rootHash, body, txHashes, hasher, marshalizer, _, mbHash := initBasicTestData() - mbHdr := block.MiniBlockHeader{ - ReceiverShardID: 0, - SenderShardID: 1, - TxCount: uint32(len(txHashes)), - Hash: mbHash} - mbHdrs := make([]block.MiniBlockHeader, 0) - mbHdrs = append(mbHdrs, mbHdr) - - lastHdr := blkc.GetCurrentBlockHeader() - prevHash, _ := core.CalculateHash(marshalizer, hasher, lastHdr) - hdr := initBlockHeader(prevHash, rootHash, mbHdrs) - - shardMiniBlock := block.ShardMiniBlockHeader{ - ReceiverShardId: mbHdr.ReceiverShardID, - SenderShardId: mbHdr.SenderShardID, - TxCount: mbHdr.TxCount, - Hash: mbHdr.Hash, - } - shardMiniblockHdrs := make([]block.ShardMiniBlockHeader, 0) - shardMiniblockHdrs = append(shardMiniblockHdrs, shardMiniBlock) - shardHeader := block.ShardData{ - ShardMiniBlockHeaders: shardMiniblockHdrs, - } - shardHdrs := make([]block.ShardData, 0) - shardHdrs = append(shardHdrs, shardHeader) - - meta := block.MetaBlock{ - Nonce: 1, - ShardInfo: shardHdrs, - } - 
metaBytes, _ := marshalizer.Marshal(meta) - metaHash := hasher.Compute(string(metaBytes)) - - tdp.MetaBlocks().Put(metaHash, meta) - - meta = block.MetaBlock{ - Nonce: 2, - ShardInfo: make([]block.ShardData, 0), - } - metaBytes, _ = marshalizer.Marshal(meta) - metaHash = hasher.Compute(string(metaBytes)) - - tdp.MetaBlocks().Put(metaHash, meta) - - // set accounts not dirty - journalLen := func() int { return 0 } - wasCalled := false - revertToSnapshot := func(snapshot int) error { - wasCalled = true - return nil - } - rootHashCalled := func() ([]byte, error) { - return rootHash, nil - } - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - hasher, - marshalizer, - &mock.AccountsStub{ - JournalLenCalled: journalLen, - RevertToSnapshotCalled: revertToSnapshot, - RootHashCalled: rootHashCalled, - }, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - // should return err - err := sp.ProcessBlock(blkc, &hdr, body, haveTime) - assert.Equal(t, process.ErrCrossShardMBWithoutConfirmationFromMeta, err) - assert.False(t, wasCalled) + t.Parallel() + + tdp, blkc, rootHash, body, txHashes, hasher, marshalizer, _, mbHash := initBasicTestData() + mbHdr := block.MiniBlockHeader{ + ReceiverShardID: 0, + SenderShardID: 1, + TxCount: uint32(len(txHashes)), + Hash: mbHash} + mbHdrs := make([]block.MiniBlockHeader, 0) + mbHdrs = append(mbHdrs, mbHdr) + + randSeed := []byte("rand seed") + lastHdr := blkc.GetCurrentBlockHeader() + prevHash, _ := core.CalculateHash(marshalizer, hasher, lastHdr) + hdr := initBlockHeader(prevHash, randSeed, rootHash, mbHdrs) + + shardMiniBlock := block.ShardMiniBlockHeader{ + ReceiverShardId: mbHdr.ReceiverShardID, + SenderShardId: mbHdr.SenderShardID, + TxCount: mbHdr.TxCount, + Hash: mbHdr.Hash, + } + shardMiniblockHdrs := make([]block.ShardMiniBlockHeader, 0) + shardMiniblockHdrs = append(shardMiniblockHdrs, shardMiniBlock) + shardHeader := block.ShardData{ + ShardMiniBlockHeaders: shardMiniblockHdrs, + } + shardHdrs := make([]block.ShardData, 0) + shardHdrs = append(shardHdrs, shardHeader) + + meta := block.MetaBlock{ + Nonce: 1, + ShardInfo: shardHdrs, + RandSeed: randSeed, + } + metaBytes, _ := marshalizer.Marshal(meta) + metaHash := hasher.Compute(string(metaBytes)) + + tdp.MetaBlocks().Put(metaHash, meta) + + meta = block.MetaBlock{ + Nonce: 2, + ShardInfo: make([]block.ShardData, 0), + PrevRandSeed: randSeed, + } + metaBytes, _ = marshalizer.Marshal(meta) + metaHash = hasher.Compute(string(metaBytes)) + + tdp.MetaBlocks().Put(metaHash, meta) + + // set accounts not dirty + journalLen := func() int { return 0 } + wasCalled := false + revertToSnapshot := func(snapshot int) error { + wasCalled = true + return nil + } + rootHashCalled := func() ([]byte, error) { + return rootHash, nil + } + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + hasher, + marshalizer, + &mock.AccountsStub{ + JournalLenCalled: journalLen, + RevertToSnapshotCalled: revertToSnapshot, + RootHashCalled: rootHashCalled, + }, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + 
createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + // should return err + err := sp.ProcessBlock(blkc, &hdr, body, haveTime) + assert.Equal(t, process.ErrCrossShardMBWithoutConfirmationFromMeta, err) + assert.False(t, wasCalled) } func TestShardProcessor_ProcessBlockHaveTimeLessThanZeroShouldErr(t *testing.T) { - t.Parallel() - txHash := []byte("tx_hash1") - tdp := initDataPool(txHash) - - blkc := &blockchain.BlockChain{ - CurrentBlockHeader: &block.Header{ - Nonce: 1, - }, - } - rootHash := []byte("rootHash") - body := make(block.Body, 0) - txHashes := make([][]byte, 0) - txHashes = append(txHashes, txHash) - miniblock := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 0, - TxHashes: txHashes, - } - body = append(body, &miniblock) - - hasher := &mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} - - mbbytes, _ := marshalizer.Marshal(miniblock) - mbHash := hasher.Compute(string(mbbytes)) - mbHdr := block.MiniBlockHeader{ - SenderShardID: 0, - ReceiverShardID: 0, - TxCount: uint32(len(txHashes)), - Hash: mbHash} - mbHdrs := make([]block.MiniBlockHeader, 0) - mbHdrs = append(mbHdrs, mbHdr) - - currHdr := blkc.GetCurrentBlockHeader() - preHash, _ := core.CalculateHash(marshalizer, hasher, currHdr) - hdr := block.Header{ - Round: 2, - Nonce: 2, - PrevHash: preHash, - Signature: []byte("signature"), - PubKeysBitmap: []byte("00110"), - ShardId: 0, - RootHash: rootHash, - MiniBlockHeaders: mbHdrs, - } - genesisBlocks := createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)) - sp, _ := blproc.NewShardProcessorEmptyWith3shards(tdp, genesisBlocks) - haveTimeLessThanZero := func() time.Duration { - return -1 * time.Millisecond - } - - // should return err - err := sp.ProcessBlock(blkc, &hdr, body, haveTimeLessThanZero) - assert.Equal(t, process.ErrTimeIsOut, err) + t.Parallel() + txHash := []byte("tx_hash1") + tdp := initDataPool(txHash) + + randSeed := []byte("rand seed") + blkc := &blockchain.BlockChain{ + CurrentBlockHeader: &block.Header{ + Nonce: 1, + RandSeed: randSeed, + }, + } + rootHash := []byte("rootHash") + body := make(block.Body, 0) + txHashes := make([][]byte, 0) + txHashes = append(txHashes, txHash) + miniblock := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 0, + TxHashes: txHashes, + } + body = append(body, &miniblock) + + hasher := &mock.HasherMock{} + marshalizer := &mock.MarshalizerMock{} + + mbbytes, _ := marshalizer.Marshal(miniblock) + mbHash := hasher.Compute(string(mbbytes)) + mbHdr := block.MiniBlockHeader{ + SenderShardID: 0, + ReceiverShardID: 0, + TxCount: uint32(len(txHashes)), + Hash: mbHash} + mbHdrs := make([]block.MiniBlockHeader, 0) + mbHdrs = append(mbHdrs, mbHdr) + + currHdr := blkc.GetCurrentBlockHeader() + preHash, _ := core.CalculateHash(marshalizer, hasher, currHdr) + hdr := block.Header{ + Round: 2, + Nonce: 2, + PrevHash: preHash, + PrevRandSeed: randSeed, + Signature: []byte("signature"), + PubKeysBitmap: []byte("00110"), + ShardId: 0, + RootHash: rootHash, + MiniBlockHeaders: mbHdrs, + } + genesisBlocks := createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)) + sp, _ := blproc.NewShardProcessorEmptyWith3shards(tdp, genesisBlocks) + haveTimeLessThanZero := func() time.Duration { + return -1 * time.Millisecond + } + + // should return err + err := sp.ProcessBlock(blkc, &hdr, body, haveTimeLessThanZero) + assert.Equal(t, process.ErrTimeIsOut, err) } func 
TestShardProcessor_ProcessBlockWithMissingMetaHdrShouldErr(t *testing.T) { - t.Parallel() - - tdp, blkc, rootHash, body, txHashes, hasher, marshalizer, _, mbHash := initBasicTestData() - mbHdr := block.MiniBlockHeader{ - ReceiverShardID: 0, - SenderShardID: 1, - TxCount: uint32(len(txHashes)), - Hash: mbHash} - mbHdrs := make([]block.MiniBlockHeader, 0) - mbHdrs = append(mbHdrs, mbHdr) - - lastHdr := blkc.GetCurrentBlockHeader() - prevHash, _ := core.CalculateHash(marshalizer, hasher, lastHdr) - hdr := initBlockHeader(prevHash, rootHash, mbHdrs) - - shardMiniBlock := block.ShardMiniBlockHeader{ - ReceiverShardId: mbHdr.ReceiverShardID, - SenderShardId: mbHdr.SenderShardID, - TxCount: mbHdr.TxCount, - Hash: mbHdr.Hash, - } - shardMiniblockHdrs := make([]block.ShardMiniBlockHeader, 0) - shardMiniblockHdrs = append(shardMiniblockHdrs, shardMiniBlock) - shardHeader := block.ShardData{ - ShardMiniBlockHeaders: shardMiniblockHdrs, - } - shardHdrs := make([]block.ShardData, 0) - shardHdrs = append(shardHdrs, shardHeader) - - meta := block.MetaBlock{ - Nonce: 1, - ShardInfo: shardHdrs, - } - metaBytes, _ := marshalizer.Marshal(meta) - metaHash := hasher.Compute(string(metaBytes)) - - hdr.MetaBlockHashes = append(hdr.MetaBlockHashes, metaHash) - tdp.MetaBlocks().Put(metaHash, meta) - - meta = block.MetaBlock{ - Nonce: 2, - ShardInfo: make([]block.ShardData, 0), - } - metaBytes, _ = marshalizer.Marshal(meta) - metaHash = hasher.Compute(string(metaBytes)) - - hdr.MetaBlockHashes = append(hdr.MetaBlockHashes, metaHash) - tdp.MetaBlocks().Put(metaHash, meta) - - // set accounts not dirty - journalLen := func() int { return 0 } - revertToSnapshot := func(snapshot int) error { - return nil - } - rootHashCalled := func() ([]byte, error) { - return rootHash, nil - } - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - hasher, - marshalizer, - &mock.AccountsStub{ - JournalLenCalled: journalLen, - RevertToSnapshotCalled: revertToSnapshot, - RootHashCalled: rootHashCalled, - }, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - // should return err - err := sp.ProcessBlock(blkc, &hdr, body, haveTime) - assert.Equal(t, process.ErrTimeIsOut, err) + t.Parallel() + + tdp, blkc, rootHash, body, txHashes, hasher, marshalizer, _, mbHash := initBasicTestData() + mbHdr := block.MiniBlockHeader{ + ReceiverShardID: 0, + SenderShardID: 1, + TxCount: uint32(len(txHashes)), + Hash: mbHash} + mbHdrs := make([]block.MiniBlockHeader, 0) + mbHdrs = append(mbHdrs, mbHdr) + + randSeed := []byte("rand seed") + lastHdr := blkc.GetCurrentBlockHeader() + prevHash, _ := core.CalculateHash(marshalizer, hasher, lastHdr) + hdr := initBlockHeader(prevHash, randSeed, rootHash, mbHdrs) + + shardMiniBlock := block.ShardMiniBlockHeader{ + ReceiverShardId: mbHdr.ReceiverShardID, + SenderShardId: mbHdr.SenderShardID, + TxCount: mbHdr.TxCount, + Hash: mbHdr.Hash, + } + shardMiniblockHdrs := make([]block.ShardMiniBlockHeader, 0) + shardMiniblockHdrs = append(shardMiniblockHdrs, shardMiniBlock) + shardHeader := block.ShardData{ + ShardMiniBlockHeaders: shardMiniblockHdrs, + } + shardHdrs := make([]block.ShardData, 0) + shardHdrs = append(shardHdrs, shardHeader) + + meta := block.MetaBlock{ + Nonce: 1, + 
ShardInfo: shardHdrs, + RandSeed: randSeed, + } + metaBytes, _ := marshalizer.Marshal(meta) + metaHash := hasher.Compute(string(metaBytes)) + + hdr.MetaBlockHashes = append(hdr.MetaBlockHashes, metaHash) + tdp.MetaBlocks().Put(metaHash, meta) + + meta = block.MetaBlock{ + Nonce: 2, + ShardInfo: make([]block.ShardData, 0), + PrevRandSeed: randSeed, + } + metaBytes, _ = marshalizer.Marshal(meta) + metaHash = hasher.Compute(string(metaBytes)) + + hdr.MetaBlockHashes = append(hdr.MetaBlockHashes, metaHash) + tdp.MetaBlocks().Put(metaHash, meta) + + // set accounts not dirty + journalLen := func() int { return 0 } + revertToSnapshot := func(snapshot int) error { + return nil + } + rootHashCalled := func() ([]byte, error) { + return rootHash, nil + } + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + hasher, + marshalizer, + &mock.AccountsStub{ + JournalLenCalled: journalLen, + RevertToSnapshotCalled: revertToSnapshot, + RootHashCalled: rootHashCalled, + }, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + // should return err + err := sp.ProcessBlock(blkc, &hdr, body, haveTime) + assert.Equal(t, process.ErrTimeIsOut, err) } func TestShardProcessor_ProcessBlockWithWrongMiniBlockHeaderShouldErr(t *testing.T) { - t.Parallel() - - txHash := []byte("tx_hash1") - tdp := initDataPool(txHash) - blkc := &blockchain.BlockChain{ - CurrentBlockHeader: &block.Header{ - Nonce: 1, - }, - } - rootHash := []byte("rootHash") - body := make(block.Body, 0) - txHashes := make([][]byte, 0) - txHashes = append(txHashes, txHash) - miniblock := block.MiniBlock{ - ReceiverShardID: 1, - SenderShardID: 0, - TxHashes: txHashes, - } - body = append(body, &miniblock) - - hasher := &mock.HasherStub{} - marshalizer := &mock.MarshalizerMock{} - - mbbytes, _ := marshalizer.Marshal(miniblock) - mbHash := hasher.Compute(string(mbbytes)) - mbHdr := block.MiniBlockHeader{ - SenderShardID: 1, - ReceiverShardID: 0, - TxCount: uint32(len(txHashes)), - Hash: mbHash, - } - mbHdrs := make([]block.MiniBlockHeader, 0) - mbHdrs = append(mbHdrs, mbHdr) - - lastHdr := blkc.GetCurrentBlockHeader() - prevHash, _ := core.CalculateHash(marshalizer, hasher, lastHdr) - hdr := initBlockHeader(prevHash, rootHash, mbHdrs) - - rootHashCalled := func() ([]byte, error) { - return rootHash, nil - } - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{ - RootHashCalled: rootHashCalled, - }, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - // should return err - err := sp.ProcessBlock(blkc, &hdr, body, haveTime) - assert.Equal(t, process.ErrHeaderBodyMismatch, err) + t.Parallel() + + txHash := []byte("tx_hash1") + tdp := initDataPool(txHash) + randSeed := []byte("rand seed") + blkc := &blockchain.BlockChain{ + CurrentBlockHeader: &block.Header{ + Nonce: 1, + RandSeed: randSeed, + }, + } + rootHash := 
[]byte("rootHash") + body := make(block.Body, 0) + txHashes := make([][]byte, 0) + txHashes = append(txHashes, txHash) + miniblock := block.MiniBlock{ + ReceiverShardID: 1, + SenderShardID: 0, + TxHashes: txHashes, + } + body = append(body, &miniblock) + + hasher := &mock.HasherStub{} + marshalizer := &mock.MarshalizerMock{} + + mbbytes, _ := marshalizer.Marshal(miniblock) + mbHash := hasher.Compute(string(mbbytes)) + mbHdr := block.MiniBlockHeader{ + SenderShardID: 1, + ReceiverShardID: 0, + TxCount: uint32(len(txHashes)), + Hash: mbHash, + } + mbHdrs := make([]block.MiniBlockHeader, 0) + mbHdrs = append(mbHdrs, mbHdr) + + lastHdr := blkc.GetCurrentBlockHeader() + prevHash, _ := core.CalculateHash(marshalizer, hasher, lastHdr) + hdr := initBlockHeader(prevHash, randSeed, rootHash, mbHdrs) + + rootHashCalled := func() ([]byte, error) { + return rootHash, nil + } + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.AccountsStub{ + RootHashCalled: rootHashCalled, + }, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + // should return err + err := sp.ProcessBlock(blkc, &hdr, body, haveTime) + assert.Equal(t, process.ErrHeaderBodyMismatch, err) } //------- checkAndRequestIfMetaHeadersMissing func TestShardProcessor_CheckAndRequestIfMetaHeadersMissingShouldErr(t *testing.T) { - t.Parallel() - - hdrNoncesRequestCalled := int32(0) - tdp, blkc, rootHash, body, txHashes, hasher, marshalizer, _, mbHash := initBasicTestData() - mbHdr := block.MiniBlockHeader{ - ReceiverShardID: 0, - SenderShardID: 1, - TxCount: uint32(len(txHashes)), - Hash: mbHash} - mbHdrs := make([]block.MiniBlockHeader, 0) - mbHdrs = append(mbHdrs, mbHdr) - - lastHdr := blkc.GetCurrentBlockHeader() - prevHash, _ := core.CalculateHash(marshalizer, hasher, lastHdr) - hdr := initBlockHeader(prevHash, rootHash, mbHdrs) - - shardMiniBlock := block.ShardMiniBlockHeader{ - ReceiverShardId: mbHdr.ReceiverShardID, - SenderShardId: mbHdr.SenderShardID, - TxCount: mbHdr.TxCount, - Hash: mbHdr.Hash, - } - shardMiniblockHdrs := make([]block.ShardMiniBlockHeader, 0) - shardMiniblockHdrs = append(shardMiniblockHdrs, shardMiniBlock) - shardHeader := block.ShardData{ - ShardMiniBlockHeaders: shardMiniblockHdrs, - } - shardHdrs := make([]block.ShardData, 0) - shardHdrs = append(shardHdrs, shardHeader) - - meta := &block.MetaBlock{ - Nonce: 1, - ShardInfo: shardHdrs, - Round: 1, - } - metaBytes, _ := marshalizer.Marshal(meta) - metaHash := hasher.Compute(string(metaBytes)) - - hdr.MetaBlockHashes = append(hdr.MetaBlockHashes, metaHash) - tdp.MetaBlocks().Put(metaHash, meta) - - meta = &block.MetaBlock{ - Nonce: 2, - ShardInfo: make([]block.ShardData, 0), - Round: 2, - } - metaBytes, _ = marshalizer.Marshal(meta) - metaHash = hasher.Compute(string(metaBytes)) - - tdp.MetaBlocks().Put(metaHash, meta) - - // set accounts not dirty - journalLen := func() int { return 0 } - revertToSnapshot := func(snapshot int) error { - return nil - } - rootHashCalled := func() ([]byte, error) { - return rootHash, nil - } - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - hasher, - marshalizer, - &mock.AccountsStub{ - 
JournalLenCalled: journalLen, - RevertToSnapshotCalled: revertToSnapshot, - RootHashCalled: rootHashCalled, - }, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{ - RequestHeaderHandlerByNonceCalled: func(destShardID uint32, nonce uint64) { - atomic.AddInt32(&hdrNoncesRequestCalled, 1) - }, - }, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - err := sp.ProcessBlock(blkc, &hdr, body, haveTime) - - sp.CheckAndRequestIfMetaHeadersMissing(2) - time.Sleep(100 * time.Millisecond) - assert.Equal(t, int32(1), atomic.LoadInt32(&hdrNoncesRequestCalled)) - assert.Equal(t, err, process.ErrTimeIsOut) + t.Parallel() + + hdrNoncesRequestCalled := int32(0) + tdp, blkc, rootHash, body, txHashes, hasher, marshalizer, _, mbHash := initBasicTestData() + mbHdr := block.MiniBlockHeader{ + ReceiverShardID: 0, + SenderShardID: 1, + TxCount: uint32(len(txHashes)), + Hash: mbHash, + } + mbHdrs := make([]block.MiniBlockHeader, 0) + mbHdrs = append(mbHdrs, mbHdr) + + lastHdr := blkc.GetCurrentBlockHeader() + prevHash, _ := core.CalculateHash(marshalizer, hasher, lastHdr) + randSeed := []byte("rand seed") + + hdr := initBlockHeader(prevHash, randSeed, rootHash, mbHdrs) + + shardMiniBlock := block.ShardMiniBlockHeader{ + ReceiverShardId: mbHdr.ReceiverShardID, + SenderShardId: mbHdr.SenderShardID, + TxCount: mbHdr.TxCount, + Hash: mbHdr.Hash, + } + shardMiniblockHdrs := make([]block.ShardMiniBlockHeader, 0) + shardMiniblockHdrs = append(shardMiniblockHdrs, shardMiniBlock) + shardHeader := block.ShardData{ + ShardMiniBlockHeaders: shardMiniblockHdrs, + } + shardHdrs := make([]block.ShardData, 0) + shardHdrs = append(shardHdrs, shardHeader) + + meta := &block.MetaBlock{ + Nonce: 1, + ShardInfo: shardHdrs, + Round: 1, + RandSeed: randSeed, + } + metaBytes, _ := marshalizer.Marshal(meta) + metaHash := hasher.Compute(string(metaBytes)) + + hdr.MetaBlockHashes = append(hdr.MetaBlockHashes, metaHash) + tdp.MetaBlocks().Put(metaHash, meta) + + meta = &block.MetaBlock{ + Nonce: 2, + ShardInfo: make([]block.ShardData, 0), + Round: 2, + PrevRandSeed: randSeed, + } + metaBytes, _ = marshalizer.Marshal(meta) + metaHash = hasher.Compute(string(metaBytes)) + + tdp.MetaBlocks().Put(metaHash, meta) + + // set accounts not dirty + journalLen := func() int { return 0 } + revertToSnapshot := func(snapshot int) error { + return nil + } + rootHashCalled := func() ([]byte, error) { + return rootHash, nil + } + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + hasher, + marshalizer, + &mock.AccountsStub{ + JournalLenCalled: journalLen, + RevertToSnapshotCalled: revertToSnapshot, + RootHashCalled: rootHashCalled, + }, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{ + RequestHeaderHandlerByNonceCalled: func(destShardID uint32, nonce uint64) { + atomic.AddInt32(&hdrNoncesRequestCalled, 1) + }, + }, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + err := sp.ProcessBlock(blkc, &hdr, body, haveTime) + + sp.CheckAndRequestIfMetaHeadersMissing(2) + time.Sleep(100 * time.Millisecond) + assert.Equal(t, int32(1), 
atomic.LoadInt32(&hdrNoncesRequestCalled)) + assert.Equal(t, err, process.ErrTimeIsOut) } //-------- isMetaHeaderFinal func TestShardProcessor_IsMetaHeaderFinalShouldPass(t *testing.T) { - t.Parallel() - - tdp := mock.NewPoolsHolderFake() - txHash := []byte("tx_hash1") - tdp.Transactions().AddData(txHash, &transaction.Transaction{}, process.ShardCacherIdentifier(1, 0)) - blkc := &blockchain.BlockChain{ - CurrentBlockHeader: &block.Header{ - Nonce: 1, - }, - } - rootHash := []byte("rootHash") - body := make(block.Body, 0) - txHashes := make([][]byte, 0) - txHashes = append(txHashes, txHash) - miniblock := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 1, - TxHashes: txHashes, - } - body = append(body, &miniblock) - - hasher := &mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} - - mbbytes, _ := marshalizer.Marshal(miniblock) - mbHash := hasher.Compute(string(mbbytes)) - mbHdr := block.MiniBlockHeader{ - ReceiverShardID: 0, - SenderShardID: 1, - TxCount: uint32(len(txHashes)), - Hash: mbHash, - } - mbHdrs := make([]block.MiniBlockHeader, 0) - mbHdrs = append(mbHdrs, mbHdr) - - lastHdr := blkc.GetCurrentBlockHeader() - prevHash, _ := core.CalculateHash(marshalizer, hasher, lastHdr) - hdr := initBlockHeader(prevHash, rootHash, mbHdrs) - - shardMiniBlock := block.ShardMiniBlockHeader{ - ReceiverShardId: mbHdr.ReceiverShardID, - SenderShardId: mbHdr.SenderShardID, - TxCount: mbHdr.TxCount, - Hash: mbHdr.Hash, - } - shardMiniblockHdrs := make([]block.ShardMiniBlockHeader, 0) - shardMiniblockHdrs = append(shardMiniblockHdrs, shardMiniBlock) - shardHeader := block.ShardData{ - ShardMiniBlockHeaders: shardMiniblockHdrs, - } - shardHdrs := make([]block.ShardData, 0) - shardHdrs = append(shardHdrs, shardHeader) - - meta := &block.MetaBlock{ - Nonce: 1, - ShardInfo: shardHdrs, - Round: 1, - } - metaBytes, _ := marshalizer.Marshal(meta) - metaHash := hasher.Compute(string(metaBytes)) - - hdr.MetaBlockHashes = append(hdr.MetaBlockHashes, metaHash) - tdp.MetaBlocks().Put(metaHash, meta) - - meta = &block.MetaBlock{ - Nonce: 2, - ShardInfo: make([]block.ShardData, 0), - Round: 2, - PrevHash: metaHash, - } - metaBytes, _ = marshalizer.Marshal(meta) - metaHash = hasher.Compute(string(metaBytes)) - tdp.MetaBlocks().Put(metaHash, meta) - - genesisBlocks := createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)) - sp, _ := blproc.NewShardProcessorEmptyWith3shards(tdp, genesisBlocks) - - err := sp.ProcessBlock(blkc, &hdr, body, haveTime) - assert.Equal(t, process.ErrTimeIsOut, err) - res := sp.IsMetaHeaderFinal(&hdr, nil, 0) - assert.False(t, res) - res = sp.IsMetaHeaderFinal(nil, nil, 0) - assert.False(t, res) - - meta = &block.MetaBlock{ - Nonce: 1, - ShardInfo: shardHdrs, - Round: 1, - } - ordered, _ := sp.GetOrderedMetaBlocks(3) - res = sp.IsMetaHeaderFinal(meta, ordered, 0) - assert.True(t, res) + t.Parallel() + + tdp := mock.NewPoolsHolderMock() + txHash := []byte("tx_hash1") + randSeed := []byte("rand seed") + tdp.Transactions().AddData(txHash, &transaction.Transaction{}, process.ShardCacherIdentifier(1, 0)) + blkc := &blockchain.BlockChain{ + CurrentBlockHeader: &block.Header{ + Nonce: 1, + RandSeed: randSeed, + }, + } + rootHash := []byte("rootHash") + body := make(block.Body, 0) + txHashes := make([][]byte, 0) + txHashes = append(txHashes, txHash) + miniblock := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 1, + TxHashes: txHashes, + } + body = append(body, &miniblock) + + hasher := &mock.HasherMock{} + marshalizer := &mock.MarshalizerMock{} + + mbbytes, _ := 
marshalizer.Marshal(miniblock) + mbHash := hasher.Compute(string(mbbytes)) + mbHdr := block.MiniBlockHeader{ + ReceiverShardID: 0, + SenderShardID: 1, + TxCount: uint32(len(txHashes)), + Hash: mbHash, + } + mbHdrs := make([]block.MiniBlockHeader, 0) + mbHdrs = append(mbHdrs, mbHdr) + + lastHdr := blkc.GetCurrentBlockHeader() + prevHash, _ := core.CalculateHash(marshalizer, hasher, lastHdr) + hdr := initBlockHeader(prevHash, randSeed, rootHash, mbHdrs) + + shardMiniBlock := block.ShardMiniBlockHeader{ + ReceiverShardId: mbHdr.ReceiverShardID, + SenderShardId: mbHdr.SenderShardID, + TxCount: mbHdr.TxCount, + Hash: mbHdr.Hash, + } + shardMiniblockHdrs := make([]block.ShardMiniBlockHeader, 0) + shardMiniblockHdrs = append(shardMiniblockHdrs, shardMiniBlock) + shardHeader := block.ShardData{ + ShardMiniBlockHeaders: shardMiniblockHdrs, + } + shardHdrs := make([]block.ShardData, 0) + shardHdrs = append(shardHdrs, shardHeader) + + meta := &block.MetaBlock{ + Nonce: 1, + ShardInfo: shardHdrs, + Round: 1, + RandSeed: randSeed, + } + metaBytes, _ := marshalizer.Marshal(meta) + metaHash := hasher.Compute(string(metaBytes)) + + hdr.MetaBlockHashes = append(hdr.MetaBlockHashes, metaHash) + tdp.MetaBlocks().Put(metaHash, meta) + + meta = &block.MetaBlock{ + Nonce: 2, + ShardInfo: make([]block.ShardData, 0), + Round: 2, + PrevHash: metaHash, + PrevRandSeed: randSeed, + } + metaBytes, _ = marshalizer.Marshal(meta) + metaHash = hasher.Compute(string(metaBytes)) + tdp.MetaBlocks().Put(metaHash, meta) + + genesisBlocks := createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)) + sp, _ := blproc.NewShardProcessorEmptyWith3shards(tdp, genesisBlocks) + + err := sp.ProcessBlock(blkc, &hdr, body, haveTime) + assert.Equal(t, process.ErrTimeIsOut, err) + res := sp.IsMetaHeaderFinal(&hdr, nil, 0) + assert.False(t, res) + res = sp.IsMetaHeaderFinal(nil, nil, 0) + assert.False(t, res) + + meta = &block.MetaBlock{ + Nonce: 1, + ShardInfo: shardHdrs, + Round: 1, + RandSeed: randSeed, + } + ordered, _ := sp.GetOrderedMetaBlocks(3) + res = sp.IsMetaHeaderFinal(meta, ordered, 0) + assert.True(t, res) } //-------- requestFinalMissingHeaders func TestShardProcessor_RequestFinalMissingHeaders(t *testing.T) { - t.Parallel() - - tdp := mock.NewPoolsHolderFake() - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{}, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - sp.SetCurrHighestMetaHdrNonce(1) - res := sp.RequestFinalMissingHeaders() - assert.Equal(t, res > 0, true) + t.Parallel() + + tdp := mock.NewPoolsHolderMock() + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.AccountsStub{}, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + sp.SetCurrHighestMetaHdrNonce(1) + res := sp.RequestFinalMissingHeaders() + 
assert.Equal(t, res > 0, true) } //--------- verifyIncludedMetaBlocksFinality func TestShardProcessor_CheckMetaHeadersValidityAndFinalityShouldPass(t *testing.T) { - t.Parallel() - - tdp := mock.NewPoolsHolderFake() - txHash := []byte("tx_hash1") - tdp.Transactions().AddData(txHash, &transaction.Transaction{}, process.ShardCacherIdentifier(1, 0)) - rootHash := []byte("rootHash") - body := make(block.Body, 0) - txHashes := make([][]byte, 0) - txHashes = append(txHashes, txHash) - miniblock := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 1, - TxHashes: txHashes, - } - body = append(body, &miniblock) - - hasher := &mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} - - mbbytes, _ := marshalizer.Marshal(miniblock) - mbHash := hasher.Compute(string(mbbytes)) - mbHdr := block.MiniBlockHeader{ - ReceiverShardID: 0, - SenderShardID: 1, - TxCount: uint32(len(txHashes)), - Hash: mbHash} - mbHdrs := make([]block.MiniBlockHeader, 0) - mbHdrs = append(mbHdrs, mbHdr) - - genesisBlocks := createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)) - lastHdr := genesisBlocks[0] - prevHash, _ := core.CalculateHash(marshalizer, hasher, lastHdr) - hdr := initBlockHeader(prevHash, rootHash, mbHdrs) - - shardMiniBlock := block.ShardMiniBlockHeader{ - ReceiverShardId: mbHdr.ReceiverShardID, - SenderShardId: mbHdr.SenderShardID, - TxCount: mbHdr.TxCount, - Hash: mbHdr.Hash, - } - shardMiniblockHdrs := make([]block.ShardMiniBlockHeader, 0) - shardMiniblockHdrs = append(shardMiniblockHdrs, shardMiniBlock) - shardHeader := block.ShardData{ - ShardMiniBlockHeaders: shardMiniblockHdrs, - } - shardHdrs := make([]block.ShardData, 0) - shardHdrs = append(shardHdrs, shardHeader) - - prevMeta := genesisBlocks[sharding.MetachainShardId] - prevHash, _ = core.CalculateHash(marshalizer, hasher, prevMeta) - meta := &block.MetaBlock{ - Nonce: 1, - ShardInfo: shardHdrs, - Round: 1, - PrevHash: prevHash, - PrevRandSeed: prevMeta.GetRandSeed(), - } - metaBytes, _ := marshalizer.Marshal(meta) - metaHash := hasher.Compute(string(metaBytes)) - hdr.MetaBlockHashes = append(hdr.MetaBlockHashes, metaHash) - - tdp.MetaBlocks().Put(metaHash, meta) - - prevHash, _ = core.CalculateHash(marshalizer, hasher, meta) - meta = &block.MetaBlock{ - Nonce: 2, - ShardInfo: make([]block.ShardData, 0), - Round: 2, - PrevHash: prevHash, - } - metaBytes, _ = marshalizer.Marshal(meta) - metaHash = hasher.Compute(string(metaBytes)) - - tdp.MetaBlocks().Put(metaHash, meta) - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - hasher, - marshalizer, - &mock.AccountsStub{}, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - genesisBlocks, - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - hdr.Round = 4 - - err := sp.CheckMetaHeadersValidityAndFinality(&hdr) - assert.Nil(t, err) + t.Parallel() + + tdp := mock.NewPoolsHolderMock() + txHash := []byte("tx_hash1") + tdp.Transactions().AddData(txHash, &transaction.Transaction{}, process.ShardCacherIdentifier(1, 0)) + rootHash := []byte("rootHash") + body := make(block.Body, 0) + txHashes := make([][]byte, 0) + txHashes = append(txHashes, txHash) + miniblock := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 1, + TxHashes: txHashes, + } + body = append(body, &miniblock) + + hasher := &mock.HasherMock{} + marshalizer := &mock.MarshalizerMock{} + + mbbytes, _ := 
marshalizer.Marshal(miniblock) + mbHash := hasher.Compute(string(mbbytes)) + mbHdr := block.MiniBlockHeader{ + ReceiverShardID: 0, + SenderShardID: 1, + TxCount: uint32(len(txHashes)), + Hash: mbHash} + mbHdrs := make([]block.MiniBlockHeader, 0) + mbHdrs = append(mbHdrs, mbHdr) + + genesisBlocks := createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)) + lastHdr := genesisBlocks[0] + prevHash, _ := core.CalculateHash(marshalizer, hasher, lastHdr) + randSeed := []byte("rand seed") + hdr := initBlockHeader(prevHash, randSeed, rootHash, mbHdrs) + + shardMiniBlock := block.ShardMiniBlockHeader{ + ReceiverShardId: mbHdr.ReceiverShardID, + SenderShardId: mbHdr.SenderShardID, + TxCount: mbHdr.TxCount, + Hash: mbHdr.Hash, + } + shardMiniblockHdrs := make([]block.ShardMiniBlockHeader, 0) + shardMiniblockHdrs = append(shardMiniblockHdrs, shardMiniBlock) + shardHeader := block.ShardData{ + ShardMiniBlockHeaders: shardMiniblockHdrs, + } + shardHdrs := make([]block.ShardData, 0) + shardHdrs = append(shardHdrs, shardHeader) + + prevMeta := genesisBlocks[sharding.MetachainShardId] + prevHash, _ = core.CalculateHash(marshalizer, hasher, prevMeta) + meta := &block.MetaBlock{ + Nonce: 1, + ShardInfo: shardHdrs, + Round: 1, + PrevHash: prevHash, + PrevRandSeed: prevMeta.GetRandSeed(), + } + metaBytes, _ := marshalizer.Marshal(meta) + metaHash := hasher.Compute(string(metaBytes)) + hdr.MetaBlockHashes = append(hdr.MetaBlockHashes, metaHash) + + tdp.MetaBlocks().Put(metaHash, meta) + + prevHash, _ = core.CalculateHash(marshalizer, hasher, meta) + meta = &block.MetaBlock{ + Nonce: 2, + ShardInfo: make([]block.ShardData, 0), + Round: 2, + PrevHash: prevHash, + } + metaBytes, _ = marshalizer.Marshal(meta) + metaHash = hasher.Compute(string(metaBytes)) + + tdp.MetaBlocks().Put(metaHash, meta) + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + hasher, + marshalizer, + &mock.AccountsStub{}, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + genesisBlocks, + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + hdr.Round = 4 + + err := sp.CheckMetaHeadersValidityAndFinality(&hdr) + assert.Nil(t, err) } func TestShardProcessor_CheckMetaHeadersValidityAndFinalityShouldErr(t *testing.T) { - t.Parallel() - - mbHdrs := make([]block.MiniBlockHeader, 0) - rootHash := []byte("rootHash") - txHash := []byte("txhash1") - txHashes := make([][]byte, 0) - txHashes = append(txHashes, txHash) - - tdp := mock.NewPoolsHolderFake() - genesisBlocks := createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)) - sp, _ := blproc.NewShardProcessorEmptyWith3shards(tdp, genesisBlocks) - - lastHdr := genesisBlocks[0] - prevHash, _ := core.CalculateHash(&mock.MarshalizerMock{}, &mock.HasherMock{}, lastHdr) - hdr := initBlockHeader(prevHash, rootHash, mbHdrs) - - hdr.MetaBlockHashes = append(hdr.MetaBlockHashes, []byte("meta")) - hdr.Round = 0 - err := sp.CheckMetaHeadersValidityAndFinality(&hdr) - assert.Equal(t, err, process.ErrNilMetaBlockHeader) + t.Parallel() + + mbHdrs := make([]block.MiniBlockHeader, 0) + rootHash := []byte("rootHash") + txHash := []byte("txhash1") + txHashes := make([][]byte, 0) + txHashes = append(txHashes, txHash) + + tdp := mock.NewPoolsHolderMock() + genesisBlocks := createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)) + sp, _ := blproc.NewShardProcessorEmptyWith3shards(tdp, 
genesisBlocks) + + lastHdr := genesisBlocks[0] + prevHash, _ := core.CalculateHash(&mock.MarshalizerMock{}, &mock.HasherMock{}, lastHdr) + + randSeed := []byte("rand seed") + hdr := initBlockHeader(prevHash, randSeed, rootHash, mbHdrs) + + hdr.MetaBlockHashes = append(hdr.MetaBlockHashes, []byte("meta")) + hdr.Round = 0 + err := sp.CheckMetaHeadersValidityAndFinality(&hdr) + assert.Equal(t, err, process.ErrNilMetaBlockHeader) } //------- CommitBlock func TestShardProcessor_CommitBlockNilBlockchainShouldErr(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - accounts := &mock.AccountsStub{} - accounts.RevertToSnapshotCalled = func(snapshot int) error { - return nil - } - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - accounts, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - blk := make(block.Body, 0) - - err := sp.CommitBlock(nil, &block.Header{}, blk) - assert.Equal(t, process.ErrNilBlockChain, err) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + accounts := &mock.AccountsStub{} + accounts.RevertToSnapshotCalled = func(snapshot int) error { + return nil + } + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + accounts, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + blk := make(block.Body, 0) + + err := sp.CommitBlock(nil, &block.Header{}, blk) + assert.Equal(t, process.ErrNilBlockChain, err) } func TestShardProcessor_CommitBlockMarshalizerFailForHeaderShouldErr(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - rootHash := []byte("root hash to be tested") - accounts := &mock.AccountsStub{ - RootHashCalled: func() ([]byte, error) { - return rootHash, nil - }, - RevertToSnapshotCalled: func(snapshot int) error { - return nil - }, - } - errMarshalizer := errors.New("failure") - hdr := &block.Header{ - Nonce: 1, - Round: 1, - PubKeysBitmap: []byte("0100101"), - Signature: []byte("signature"), - RootHash: rootHash, - } - body := make(block.Body, 0) - marshalizer := &mock.MarshalizerStub{ - MarshalCalled: func(obj interface{}) (i []byte, e error) { - if reflect.DeepEqual(obj, hdr) { - return nil, errMarshalizer - } - - return []byte("obj"), nil - }, - } - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - marshalizer, - accounts, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - blkc := createTestBlockchain() - - err := sp.CommitBlock(blkc, hdr, body) - assert.Equal(t, errMarshalizer, err) + 
t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + rootHash := []byte("root hash to be tested") + accounts := &mock.AccountsStub{ + RootHashCalled: func() ([]byte, error) { + return rootHash, nil + }, + RevertToSnapshotCalled: func(snapshot int) error { + return nil + }, + } + errMarshalizer := errors.New("failure") + hdr := &block.Header{ + Nonce: 1, + Round: 1, + PubKeysBitmap: []byte("0100101"), + Signature: []byte("signature"), + RootHash: rootHash, + } + body := make(block.Body, 0) + marshalizer := &mock.MarshalizerStub{ + MarshalCalled: func(obj interface{}) (i []byte, e error) { + if reflect.DeepEqual(obj, hdr) { + return nil, errMarshalizer + } + + return []byte("obj"), nil + }, + } + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + marshalizer, + accounts, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + blkc := createTestBlockchain() + + err := sp.CommitBlock(blkc, hdr, body) + assert.Equal(t, errMarshalizer, err) } func TestShardProcessor_CommitBlockStorageFailsForHeaderShouldErr(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - errPersister := errors.New("failure") - wasCalled := false - rootHash := []byte("root hash to be tested") - accounts := &mock.AccountsStub{ - CommitCalled: func() ([]byte, error) { - return nil, nil - }, - RootHashCalled: func() ([]byte, error) { - return rootHash, nil - }, - RevertToSnapshotCalled: func(snapshot int) error { - return nil - }, - } - hdr := &block.Header{ - Nonce: 1, - Round: 1, - PubKeysBitmap: []byte("0100101"), - Signature: []byte("signature"), - RootHash: rootHash, - } - body := make(block.Body, 0) - hdrUnit := &mock.StorerStub{ - PutCalled: func(key, data []byte) error { - wasCalled = true - return errPersister - }, - HasCalled: func(key []byte) error { - return nil - }, - } - store := initStore() - store.AddStorer(dataRetriever.BlockHeaderUnit, hdrUnit) - - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - store, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - accounts, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{ - AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeader data.HeaderHandler, finalHeaderHash []byte) error { - return nil - }, - }, - &mock.BlocksTrackerMock{ - AddBlockCalled: func(headerHandler data.HeaderHandler) { - }, - UnnotarisedBlocksCalled: func() []data.HeaderHandler { - return make([]data.HeaderHandler, 0) - }, - }, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - blkc, _ := blockchain.NewBlockChain( - generateTestCache(), - ) - - _ = blkc.SetAppStatusHandler(&mock.AppStatusHandlerStub{ - SetUInt64ValueHandler: func(key string, value uint64) {}, - }) - - err := sp.CommitBlock(blkc, hdr, body) - assert.True(t, wasCalled) - assert.Nil(t, err) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + errPersister := errors.New("failure") + wasCalled := false + rootHash := []byte("root hash to be tested") + accounts := 
&mock.AccountsStub{ + CommitCalled: func() ([]byte, error) { + return nil, nil + }, + RootHashCalled: func() ([]byte, error) { + return rootHash, nil + }, + RevertToSnapshotCalled: func(snapshot int) error { + return nil + }, + } + hdr := &block.Header{ + Nonce: 1, + Round: 1, + PubKeysBitmap: []byte("0100101"), + Signature: []byte("signature"), + RootHash: rootHash, + } + body := make(block.Body, 0) + hdrUnit := &mock.StorerStub{ + PutCalled: func(key, data []byte) error { + wasCalled = true + return errPersister + }, + HasCalled: func(key []byte) error { + return nil + }, + } + store := initStore() + store.AddStorer(dataRetriever.BlockHeaderUnit, hdrUnit) + + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + store, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + accounts, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{ + AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeader data.HeaderHandler, finalHeaderHash []byte) error { + return nil + }, + }, + &mock.BlocksTrackerMock{ + AddBlockCalled: func(headerHandler data.HeaderHandler) { + }, + UnnotarisedBlocksCalled: func() []data.HeaderHandler { + return make([]data.HeaderHandler, 0) + }, + }, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + blkc, _ := blockchain.NewBlockChain( + generateTestCache(), + ) + + _ = blkc.SetAppStatusHandler(&mock.AppStatusHandlerStub{ + SetUInt64ValueHandler: func(key string, value uint64) {}, + }) + + err := sp.CommitBlock(blkc, hdr, body) + assert.True(t, wasCalled) + assert.Nil(t, err) } func TestShardProcessor_CommitBlockStorageFailsForBodyShouldWork(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - wasCalled := false - errPersister := errors.New("failure") - rootHash := []byte("root hash to be tested") - accounts := &mock.AccountsStub{ - RootHashCalled: func() ([]byte, error) { - return rootHash, nil - }, - CommitCalled: func() (i []byte, e error) { - return nil, nil - }, - RevertToSnapshotCalled: func(snapshot int) error { - return nil - }, - } - hdr := &block.Header{ - Nonce: 1, - Round: 1, - PubKeysBitmap: []byte("0100101"), - Signature: []byte("signature"), - RootHash: rootHash, - } - mb := block.MiniBlock{} - body := make(block.Body, 0) - body = append(body, &mb) - - miniBlockUnit := &mock.StorerStub{ - PutCalled: func(key, data []byte) error { - wasCalled = true - return errPersister - }, - } - store := initStore() - store.AddStorer(dataRetriever.MiniBlockUnit, miniBlockUnit) - - sp, err := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - store, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - accounts, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{ - AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeader data.HeaderHandler, finalHeaderHash []byte) error { - return nil - }, - }, - &mock.BlocksTrackerMock{ - AddBlockCalled: func(headerHandler data.HeaderHandler) { - }, - UnnotarisedBlocksCalled: func() []data.HeaderHandler { - return make([]data.HeaderHandler, 0) - }, - }, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - 
&mock.Uint64ByteSliceConverterMock{}, - ) - - assert.Nil(t, err) - - blkc, _ := blockchain.NewBlockChain( - generateTestCache(), - ) - - _ = blkc.SetAppStatusHandler(&mock.AppStatusHandlerStub{ - SetUInt64ValueHandler: func(key string, value uint64) {}, - }) - - err = sp.CommitBlock(blkc, hdr, body) - - assert.Nil(t, err) - assert.True(t, wasCalled) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + wasCalled := false + errPersister := errors.New("failure") + rootHash := []byte("root hash to be tested") + accounts := &mock.AccountsStub{ + RootHashCalled: func() ([]byte, error) { + return rootHash, nil + }, + CommitCalled: func() (i []byte, e error) { + return nil, nil + }, + RevertToSnapshotCalled: func(snapshot int) error { + return nil + }, + } + hdr := &block.Header{ + Nonce: 1, + Round: 1, + PubKeysBitmap: []byte("0100101"), + Signature: []byte("signature"), + RootHash: rootHash, + } + mb := block.MiniBlock{} + body := make(block.Body, 0) + body = append(body, &mb) + + miniBlockUnit := &mock.StorerStub{ + PutCalled: func(key, data []byte) error { + wasCalled = true + return errPersister + }, + } + store := initStore() + store.AddStorer(dataRetriever.MiniBlockUnit, miniBlockUnit) + + sp, err := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + store, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + accounts, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{ + AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeader data.HeaderHandler, finalHeaderHash []byte) error { + return nil + }, + }, + &mock.BlocksTrackerMock{ + AddBlockCalled: func(headerHandler data.HeaderHandler) { + }, + UnnotarisedBlocksCalled: func() []data.HeaderHandler { + return make([]data.HeaderHandler, 0) + }, + }, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + assert.Nil(t, err) + + blkc, _ := blockchain.NewBlockChain( + generateTestCache(), + ) + + _ = blkc.SetAppStatusHandler(&mock.AppStatusHandlerStub{ + SetUInt64ValueHandler: func(key string, value uint64) {}, + }) + + err = sp.CommitBlock(blkc, hdr, body) + + assert.Nil(t, err) + assert.True(t, wasCalled) } func TestShardProcessor_CommitBlockNilNoncesDataPoolShouldErr(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - rootHash := []byte("root hash to be tested") - accounts := &mock.AccountsStub{ - RootHashCalled: func() ([]byte, error) { - return rootHash, nil - }, - RevertToSnapshotCalled: func(snapshot int) error { - return nil - }, - } - hdr := &block.Header{ - Nonce: 1, - Round: 1, - PubKeysBitmap: []byte("0100101"), - Signature: []byte("signature"), - RootHash: rootHash, - } - body := make(block.Body, 0) - store := initStore() - - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - store, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - accounts, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - tdp.HeadersNoncesCalled = func() dataRetriever.Uint64SyncMapCacher { - return nil - } - blkc := createTestBlockchain() - err := 
sp.CommitBlock(blkc, hdr, body) - - assert.Equal(t, process.ErrNilDataPoolHolder, err) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + rootHash := []byte("root hash to be tested") + accounts := &mock.AccountsStub{ + RootHashCalled: func() ([]byte, error) { + return rootHash, nil + }, + RevertToSnapshotCalled: func(snapshot int) error { + return nil + }, + } + hdr := &block.Header{ + Nonce: 1, + Round: 1, + PubKeysBitmap: []byte("0100101"), + Signature: []byte("signature"), + RootHash: rootHash, + } + body := make(block.Body, 0) + store := initStore() + + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + store, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + accounts, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + tdp.HeadersNoncesCalled = func() dataRetriever.Uint64SyncMapCacher { + return nil + } + blkc := createTestBlockchain() + err := sp.CommitBlock(blkc, hdr, body) + + assert.Equal(t, process.ErrNilDataPoolHolder, err) } func TestShardProcessor_CommitBlockNoTxInPoolShouldErr(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - - txCache := &mock.CacherStub{ - PeekCalled: func(key []byte) (value interface{}, ok bool) { - return nil, false - }, - LenCalled: func() int { - return 0 - }, - } - tdp.TransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{ - ShardDataStoreCalled: func(id string) (c storage.Cacher) { - return txCache - }, - RemoveSetOfDataFromPoolCalled: func(keys [][]byte, id string) { - }, - SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { - if reflect.DeepEqual(key, []byte("tx1_hash")) { - return &transaction.Transaction{Nonce: 10}, true - } - return nil, false - }, - RegisterHandlerCalled: func(i func(key []byte)) { - - }, - } - } - - txHash := []byte("txHash") - rootHash := []byte("root hash") - hdrHash := []byte("header hash") - hdr := &block.Header{ - Nonce: 1, - Round: 1, - PubKeysBitmap: []byte("0100101"), - Signature: []byte("signature"), - RootHash: rootHash, - } - mb := block.MiniBlock{ - TxHashes: [][]byte{txHash}, - } - body := block.Body{&mb} - accounts := &mock.AccountsStub{ - CommitCalled: func() (i []byte, e error) { - return rootHash, nil - }, - RootHashCalled: func() ([]byte, error) { - return rootHash, nil - }, - RevertToSnapshotCalled: func(snapshot int) error { - return nil - }, - } - fd := &mock.ForkDetectorMock{ - AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeader data.HeaderHandler, finalHeaderHash []byte) error { - return nil - }, - } - hasher := &mock.HasherStub{} - hasher.ComputeCalled = func(s string) []byte { - return hdrHash - } - store := initStore() - - factory, _ := shard.NewPreProcessorsContainerFactory( - mock.NewMultiShardsCoordinatorMock(3), - initStore(), - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - tdp, - &mock.AddressConverterMock{}, - initAccountsMock(), - &mock.RequestHandlerMock{}, - &mock.TxProcessorMock{}, - &mock.SCProcessorMock{}, - &mock.SmartContractResultsProcessorMock{}, - &mock.RewardTxProcessorMock{}, - ) - container, _ := factory.Create() - - tc, err := coordinator.NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(3), - 
initAccountsMock(), - tdp, - &mock.RequestHandlerMock{}, - container, - &mock.InterimProcessorContainerMock{}, - ) - - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - store, - hasher, - &mock.MarshalizerMock{}, - accounts, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - fd, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - tc, - &mock.Uint64ByteSliceConverterMock{}, - ) - - blkc := createTestBlockchain() - - err = sp.CommitBlock(blkc, hdr, body) - assert.Equal(t, process.ErrMissingTransaction, err) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + + txCache := &mock.CacherStub{ + PeekCalled: func(key []byte) (value interface{}, ok bool) { + return nil, false + }, + LenCalled: func() int { + return 0 + }, + } + tdp.TransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{ + ShardDataStoreCalled: func(id string) (c storage.Cacher) { + return txCache + }, + RemoveSetOfDataFromPoolCalled: func(keys [][]byte, id string) { + }, + SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { + if reflect.DeepEqual(key, []byte("tx1_hash")) { + return &transaction.Transaction{Nonce: 10}, true + } + return nil, false + }, + RegisterHandlerCalled: func(i func(key []byte)) { + + }, + } + } + + txHash := []byte("txHash") + rootHash := []byte("root hash") + hdrHash := []byte("header hash") + hdr := &block.Header{ + Nonce: 1, + Round: 1, + PubKeysBitmap: []byte("0100101"), + Signature: []byte("signature"), + RootHash: rootHash, + } + mb := block.MiniBlock{ + TxHashes: [][]byte{txHash}, + } + body := block.Body{&mb} + accounts := &mock.AccountsStub{ + CommitCalled: func() (i []byte, e error) { + return rootHash, nil + }, + RootHashCalled: func() ([]byte, error) { + return rootHash, nil + }, + RevertToSnapshotCalled: func(snapshot int) error { + return nil + }, + } + fd := &mock.ForkDetectorMock{ + AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeader data.HeaderHandler, finalHeaderHash []byte) error { + return nil + }, + } + hasher := &mock.HasherStub{} + hasher.ComputeCalled = func(s string) []byte { + return hdrHash + } + store := initStore() + + factory, _ := shard.NewPreProcessorsContainerFactory( + mock.NewMultiShardsCoordinatorMock(3), + initStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + tdp, + &mock.AddressConverterMock{}, + initAccountsMock(), + &mock.RequestHandlerMock{}, + &mock.TxProcessorMock{}, + &mock.SCProcessorMock{}, + &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + ) + container, _ := factory.Create() + + tc, err := coordinator.NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(3), + initAccountsMock(), + tdp, + &mock.RequestHandlerMock{}, + container, + &mock.InterimProcessorContainerMock{}, + ) + + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + store, + hasher, + &mock.MarshalizerMock{}, + accounts, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + fd, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + tc, + &mock.Uint64ByteSliceConverterMock{}, + ) + + blkc := createTestBlockchain() + + err = sp.CommitBlock(blkc, hdr, body) + assert.Equal(t, process.ErrMissingTransaction, err) } func 
TestShardProcessor_CommitBlockOkValsShouldWork(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - txHash := []byte("tx_hash1") - - rootHash := []byte("root hash") - hdrHash := []byte("header hash") - - prevHdr := &block.Header{ - Nonce: 0, - Round: 0, - PubKeysBitmap: rootHash, - PrevHash: hdrHash, - Signature: rootHash, - RootHash: rootHash, - } - - hdr := &block.Header{ - Nonce: 1, - Round: 1, - PubKeysBitmap: rootHash, - PrevHash: hdrHash, - Signature: rootHash, - RootHash: rootHash, - } - mb := block.MiniBlock{ - TxHashes: [][]byte{txHash}, - } - body := block.Body{&mb} - - mbHdr := block.MiniBlockHeader{ - TxCount: uint32(len(mb.TxHashes)), - Hash: hdrHash, - } - mbHdrs := make([]block.MiniBlockHeader, 0) - mbHdrs = append(mbHdrs, mbHdr) - hdr.MiniBlockHeaders = mbHdrs - - accounts := &mock.AccountsStub{ - CommitCalled: func() (i []byte, e error) { - return rootHash, nil - }, - RootHashCalled: func() ([]byte, error) { - return rootHash, nil - }, - } - forkDetectorAddCalled := false - fd := &mock.ForkDetectorMock{ - AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeader data.HeaderHandler, finalHeaderHash []byte) error { - if header == hdr { - forkDetectorAddCalled = true - return nil - } - - return errors.New("should have not got here") - }, - } - hasher := &mock.HasherStub{} - hasher.ComputeCalled = func(s string) []byte { - return hdrHash - } - store := initStore() - - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - store, - hasher, - &mock.MarshalizerMock{}, - accounts, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - fd, - &mock.BlocksTrackerMock{ - AddBlockCalled: func(headerHandler data.HeaderHandler) { - }, - UnnotarisedBlocksCalled: func() []data.HeaderHandler { - return make([]data.HeaderHandler, 0) - }, - }, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - blkc := createTestBlockchain() - blkc.GetCurrentBlockHeaderCalled = func() data.HeaderHandler { - return prevHdr - } - blkc.GetCurrentBlockHeaderHashCalled = func() []byte { - return hdrHash - } - err := sp.ProcessBlock(blkc, hdr, body, haveTime) - assert.Nil(t, err) - err = sp.CommitBlock(blkc, hdr, body) - assert.Nil(t, err) - assert.True(t, forkDetectorAddCalled) - assert.Equal(t, hdrHash, blkc.GetCurrentBlockHeaderHash()) - //this should sleep as there is an async call to display current header and block in CommitBlock - time.Sleep(time.Second) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + txHash := []byte("tx_hash1") + + rootHash := []byte("root hash") + hdrHash := []byte("header hash") + randSeed := []byte("rand seed") + + prevHdr := &block.Header{ + Nonce: 0, + Round: 0, + PubKeysBitmap: rootHash, + PrevHash: hdrHash, + Signature: rootHash, + RootHash: rootHash, + RandSeed: randSeed, + } + + hdr := &block.Header{ + Nonce: 1, + Round: 1, + PubKeysBitmap: rootHash, + PrevHash: hdrHash, + Signature: rootHash, + RootHash: rootHash, + PrevRandSeed: randSeed, + } + mb := block.MiniBlock{ + TxHashes: [][]byte{txHash}, + } + body := block.Body{&mb} + + mbHdr := block.MiniBlockHeader{ + TxCount: uint32(len(mb.TxHashes)), + Hash: hdrHash, + } + mbHdrs := make([]block.MiniBlockHeader, 0) + mbHdrs = append(mbHdrs, mbHdr) + hdr.MiniBlockHeaders = mbHdrs + + accounts := &mock.AccountsStub{ + CommitCalled: func() (i []byte, e 
error) { + return rootHash, nil + }, + RootHashCalled: func() ([]byte, error) { + return rootHash, nil + }, + } + forkDetectorAddCalled := false + fd := &mock.ForkDetectorMock{ + AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeader data.HeaderHandler, finalHeaderHash []byte) error { + if header == hdr { + forkDetectorAddCalled = true + return nil + } + + return errors.New("should have not got here") + }, + } + hasher := &mock.HasherStub{} + hasher.ComputeCalled = func(s string) []byte { + return hdrHash + } + store := initStore() + + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + store, + hasher, + &mock.MarshalizerMock{}, + accounts, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + fd, + &mock.BlocksTrackerMock{ + AddBlockCalled: func(headerHandler data.HeaderHandler) { + }, + UnnotarisedBlocksCalled: func() []data.HeaderHandler { + return make([]data.HeaderHandler, 0) + }, + }, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + blkc := createTestBlockchain() + blkc.GetCurrentBlockHeaderCalled = func() data.HeaderHandler { + return prevHdr + } + blkc.GetCurrentBlockHeaderHashCalled = func() []byte { + return hdrHash + } + err := sp.ProcessBlock(blkc, hdr, body, haveTime) + assert.Nil(t, err) + err = sp.CommitBlock(blkc, hdr, body) + assert.Nil(t, err) + assert.True(t, forkDetectorAddCalled) + assert.Equal(t, hdrHash, blkc.GetCurrentBlockHeaderHash()) + //this should sleep as there is an async call to display current header and block in CommitBlock + time.Sleep(time.Second) } func TestShardProcessor_CommitBlockCallsIndexerMethods(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - txHash := []byte("tx_hash1") - - rootHash := []byte("root hash") - hdrHash := []byte("header hash") - - prevHdr := &block.Header{ - Nonce: 0, - Round: 0, - PubKeysBitmap: rootHash, - PrevHash: hdrHash, - Signature: rootHash, - RootHash: rootHash, - } - - hdr := &block.Header{ - Nonce: 1, - Round: 1, - PubKeysBitmap: rootHash, - PrevHash: hdrHash, - Signature: rootHash, - RootHash: rootHash, - } - mb := block.MiniBlock{ - TxHashes: [][]byte{txHash}, - } - body := block.Body{&mb} - - mbHdr := block.MiniBlockHeader{ - TxCount: uint32(len(mb.TxHashes)), - Hash: hdrHash, - } - mbHdrs := make([]block.MiniBlockHeader, 0) - mbHdrs = append(mbHdrs, mbHdr) - hdr.MiniBlockHeaders = mbHdrs - - accounts := &mock.AccountsStub{ - CommitCalled: func() (i []byte, e error) { - return rootHash, nil - }, - RootHashCalled: func() ([]byte, error) { - return rootHash, nil - }, - } - fd := &mock.ForkDetectorMock{ - AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeader data.HeaderHandler, finalHeaderHash []byte) error { - return nil - }, - } - hasher := &mock.HasherStub{} - hasher.ComputeCalled = func(s string) []byte { - return hdrHash - } - store := initStore() - - var saveBlockCalled map[string]data.TransactionHandler - saveBlockCalledMutex := sync.Mutex{} - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{ - IndexerCalled: func() indexer.Indexer { - return &mock.IndexerMock{ - SaveBlockCalled: func(body data.BodyHandler, header data.HeaderHandler, txPool map[string]data.TransactionHandler) { - saveBlockCalledMutex.Lock() - saveBlockCalled = txPool - 
saveBlockCalledMutex.Unlock() - }, - } - }, - }, - tdp, - store, - hasher, - &mock.MarshalizerMock{}, - accounts, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - fd, - &mock.BlocksTrackerMock{ - AddBlockCalled: func(headerHandler data.HeaderHandler) { - }, - UnnotarisedBlocksCalled: func() []data.HeaderHandler { - return make([]data.HeaderHandler, 0) - }, - }, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{ - GetAllCurrentUsedTxsCalled: func(blockType block.Type) map[string]data.TransactionHandler { - switch blockType { - case block.TxBlock: - return map[string]data.TransactionHandler{ - "tx_1": &transaction.Transaction{Nonce: 1}, - "tx_2": &transaction.Transaction{Nonce: 2}, - } - case block.SmartContractResultBlock: - return map[string]data.TransactionHandler{ - "utx_1": &smartContractResult.SmartContractResult{Nonce: 1}, - "utx_2": &smartContractResult.SmartContractResult{Nonce: 2}, - } - default: - return nil - } - }, - }, - &mock.Uint64ByteSliceConverterMock{}, - ) - - blkc := createTestBlockchain() - blkc.GetCurrentBlockHeaderCalled = func() data.HeaderHandler { - return prevHdr - } - blkc.GetCurrentBlockHeaderHashCalled = func() []byte { - return hdrHash - } - err := sp.ProcessBlock(blkc, hdr, body, haveTime) - assert.Nil(t, err) - err = sp.CommitBlock(blkc, hdr, body) - assert.Nil(t, err) - - // Wait for the index block go routine to start - time.Sleep(time.Second * 2) - - saveBlockCalledMutex.Lock() - wasCalled := saveBlockCalled - saveBlockCalledMutex.Unlock() - - assert.Equal(t, 4, len(wasCalled)) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + txHash := []byte("tx_hash1") + + rootHash := []byte("root hash") + hdrHash := []byte("header hash") + randSeed := []byte("rand seed") + + prevHdr := &block.Header{ + Nonce: 0, + Round: 0, + PubKeysBitmap: rootHash, + PrevHash: hdrHash, + Signature: rootHash, + RootHash: rootHash, + RandSeed: randSeed, + } + + hdr := &block.Header{ + Nonce: 1, + Round: 1, + PubKeysBitmap: rootHash, + PrevHash: hdrHash, + Signature: rootHash, + RootHash: rootHash, + PrevRandSeed: randSeed, + } + mb := block.MiniBlock{ + TxHashes: [][]byte{txHash}, + } + body := block.Body{&mb} + + mbHdr := block.MiniBlockHeader{ + TxCount: uint32(len(mb.TxHashes)), + Hash: hdrHash, + } + mbHdrs := make([]block.MiniBlockHeader, 0) + mbHdrs = append(mbHdrs, mbHdr) + hdr.MiniBlockHeaders = mbHdrs + + accounts := &mock.AccountsStub{ + CommitCalled: func() (i []byte, e error) { + return rootHash, nil + }, + RootHashCalled: func() ([]byte, error) { + return rootHash, nil + }, + } + fd := &mock.ForkDetectorMock{ + AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeader data.HeaderHandler, finalHeaderHash []byte) error { + return nil + }, + } + hasher := &mock.HasherStub{} + hasher.ComputeCalled = func(s string) []byte { + return hdrHash + } + store := initStore() + + var saveBlockCalled map[string]data.TransactionHandler + saveBlockCalledMutex := sync.Mutex{} + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{ + IndexerCalled: func() indexer.Indexer { + return &mock.IndexerMock{ + SaveBlockCalled: func(body data.BodyHandler, header data.HeaderHandler, txPool map[string]data.TransactionHandler) { + saveBlockCalledMutex.Lock() + saveBlockCalled = txPool + saveBlockCalledMutex.Unlock() + }, + } + }, + }, + tdp, + store, + hasher, + &mock.MarshalizerMock{}, + 
accounts, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + fd, + &mock.BlocksTrackerMock{ + AddBlockCalled: func(headerHandler data.HeaderHandler) { + }, + UnnotarisedBlocksCalled: func() []data.HeaderHandler { + return make([]data.HeaderHandler, 0) + }, + }, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{ + GetAllCurrentUsedTxsCalled: func(blockType block.Type) map[string]data.TransactionHandler { + switch blockType { + case block.TxBlock: + return map[string]data.TransactionHandler{ + "tx_1": &transaction.Transaction{Nonce: 1}, + "tx_2": &transaction.Transaction{Nonce: 2}, + } + case block.SmartContractResultBlock: + return map[string]data.TransactionHandler{ + "utx_1": &smartContractResult.SmartContractResult{Nonce: 1}, + "utx_2": &smartContractResult.SmartContractResult{Nonce: 2}, + } + default: + return nil + } + }, + }, + &mock.Uint64ByteSliceConverterMock{}, + ) + + blkc := createTestBlockchain() + blkc.GetCurrentBlockHeaderCalled = func() data.HeaderHandler { + return prevHdr + } + blkc.GetCurrentBlockHeaderHashCalled = func() []byte { + return hdrHash + } + err := sp.ProcessBlock(blkc, hdr, body, haveTime) + assert.Nil(t, err) + err = sp.CommitBlock(blkc, hdr, body) + assert.Nil(t, err) + + // Wait for the index block go routine to start + time.Sleep(time.Second * 2) + + saveBlockCalledMutex.Lock() + wasCalled := saveBlockCalled + saveBlockCalledMutex.Unlock() + + assert.Equal(t, 4, len(wasCalled)) } func TestShardProcessor_CreateTxBlockBodyWithDirtyAccStateShouldErr(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - journalLen := func() int { return 3 } - revToSnapshot := func(snapshot int) error { return nil } - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{ - JournalLenCalled: journalLen, - RevertToSnapshotCalled: revToSnapshot, - }, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - bl, err := sp.CreateBlockBody(0, func() bool { return true }) - // nil block - assert.Nil(t, bl) - // error - assert.Equal(t, process.ErrAccountStateDirty, err) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + journalLen := func() int { return 3 } + revToSnapshot := func(snapshot int) error { return nil } + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.AccountsStub{ + JournalLenCalled: journalLen, + RevertToSnapshotCalled: revToSnapshot, + }, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + bl, err := sp.CreateBlockBody(0, func() bool { return true }) + // nil block + assert.Nil(t, bl) + // error + assert.Equal(t, process.ErrAccountStateDirty, err) } func 
TestShardProcessor_CreateTxBlockBodyWithNoTimeShouldEmptyBlock(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - journalLen := func() int { return 0 } - rootHashfunc := func() ([]byte, error) { - return []byte("roothash"), nil - } - revToSnapshot := func(snapshot int) error { return nil } - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{ - JournalLenCalled: journalLen, - RootHashCalled: rootHashfunc, - RevertToSnapshotCalled: revToSnapshot, - }, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - haveTime := func() bool { - return false - } - bl, err := sp.CreateBlockBody(0, haveTime) - // no error - assert.Equal(t, process.ErrTimeIsOut, err) - // no miniblocks - assert.Nil(t, bl) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + journalLen := func() int { return 0 } + rootHashfunc := func() ([]byte, error) { + return []byte("roothash"), nil + } + revToSnapshot := func(snapshot int) error { return nil } + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.AccountsStub{ + JournalLenCalled: journalLen, + RootHashCalled: rootHashfunc, + RevertToSnapshotCalled: revToSnapshot, + }, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + haveTime := func() bool { + return false + } + bl, err := sp.CreateBlockBody(0, haveTime) + // no error + assert.Equal(t, process.ErrTimeIsOut, err) + // no miniblocks + assert.Nil(t, bl) } func TestShardProcessor_CreateTxBlockBodyOK(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - journalLen := func() int { return 0 } - rootHashfunc := func() ([]byte, error) { - return []byte("roothash"), nil - } - haveTime := func() bool { - return true - } - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{ - JournalLenCalled: journalLen, - RootHashCalled: rootHashfunc, - }, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - blk, err := sp.CreateBlockBody(0, haveTime) - assert.NotNil(t, blk) - assert.Nil(t, err) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + journalLen := func() int { return 0 } + rootHashfunc := func() ([]byte, error) { + return []byte("roothash"), nil + } + haveTime := func() bool { + return true + } + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.AccountsStub{ + 
JournalLenCalled: journalLen, + RootHashCalled: rootHashfunc, + }, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + blk, err := sp.CreateBlockBody(0, haveTime) + assert.NotNil(t, blk) + assert.Nil(t, err) } //------- ComputeNewNoncePrevHash func TestNode_ComputeNewNoncePrevHashShouldWork(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - marshalizer := &mock.MarshalizerStub{} - hasher := &mock.HasherStub{} - be, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - initStore(), - hasher, - marshalizer, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - hdr, txBlock := createTestHdrTxBlockBody() - marshalizer.MarshalCalled = func(obj interface{}) (bytes []byte, e error) { - if hdr == obj { - return []byte("hdrHeaderMarshalized"), nil - } - if reflect.DeepEqual(txBlock, obj) { - return []byte("txBlockBodyMarshalized"), nil - } - return nil, nil - } - hasher.ComputeCalled = func(s string) []byte { - if s == "hdrHeaderMarshalized" { - return []byte("hdr hash") - } - if s == "txBlockBodyMarshalized" { - return []byte("tx block body hash") - } - return nil - } - _, err := be.ComputeHeaderHash(hdr) - assert.Nil(t, err) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + marshalizer := &mock.MarshalizerStub{} + hasher := &mock.HasherStub{} + be, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + initStore(), + hasher, + marshalizer, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + hdr, txBlock := createTestHdrTxBlockBody() + marshalizer.MarshalCalled = func(obj interface{}) (bytes []byte, e error) { + if hdr == obj { + return []byte("hdrHeaderMarshalized"), nil + } + if reflect.DeepEqual(txBlock, obj) { + return []byte("txBlockBodyMarshalized"), nil + } + return nil, nil + } + hasher.ComputeCalled = func(s string) []byte { + if s == "hdrHeaderMarshalized" { + return []byte("hdr hash") + } + if s == "txBlockBodyMarshalized" { + return []byte("tx block body hash") + } + return nil + } + _, err := be.ComputeHeaderHash(hdr) + assert.Nil(t, err) } func createTestHdrTxBlockBody() (*block.Header, block.Body) { - hasher := mock.HasherMock{} - hdr := &block.Header{ - Nonce: 1, - ShardId: 2, - Epoch: 3, - Round: 4, - TimeStamp: uint64(11223344), - PrevHash: hasher.Compute("prev hash"), - PubKeysBitmap: []byte{255, 0, 128}, - Signature: hasher.Compute("signature"), - RootHash: hasher.Compute("root hash"), - } - txBlock := block.Body{ - { - ReceiverShardID: 0, - SenderShardID: 0, - TxHashes: [][]byte{ - hasher.Compute("txHash_0_1"), - hasher.Compute("txHash_0_2"), - }, - }, - { - ReceiverShardID: 1, - SenderShardID: 0, - 
TxHashes: [][]byte{ - hasher.Compute("txHash_1_1"), - hasher.Compute("txHash_1_2"), - }, - }, - { - ReceiverShardID: 2, - SenderShardID: 0, - TxHashes: [][]byte{ - hasher.Compute("txHash_2_1"), - }, - }, - { - ReceiverShardID: 3, - SenderShardID: 0, - TxHashes: make([][]byte, 0), - }, - } - return hdr, txBlock + hasher := mock.HasherMock{} + hdr := &block.Header{ + Nonce: 1, + ShardId: 2, + Epoch: 3, + Round: 4, + TimeStamp: uint64(11223344), + PrevHash: hasher.Compute("prev hash"), + PubKeysBitmap: []byte{255, 0, 128}, + Signature: hasher.Compute("signature"), + RootHash: hasher.Compute("root hash"), + } + txBlock := block.Body{ + { + ReceiverShardID: 0, + SenderShardID: 0, + TxHashes: [][]byte{ + hasher.Compute("txHash_0_1"), + hasher.Compute("txHash_0_2"), + }, + }, + { + ReceiverShardID: 1, + SenderShardID: 0, + TxHashes: [][]byte{ + hasher.Compute("txHash_1_1"), + hasher.Compute("txHash_1_2"), + }, + }, + { + ReceiverShardID: 2, + SenderShardID: 0, + TxHashes: [][]byte{ + hasher.Compute("txHash_2_1"), + }, + }, + { + ReceiverShardID: 3, + SenderShardID: 0, + TxHashes: make([][]byte, 0), + }, + } + return hdr, txBlock } //------- ComputeNewNoncePrevHash func TestShardProcessor_DisplayLogInfo(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - hasher := mock.HasherMock{} - hdr, txBlock := createTestHdrTxBlockBody() - shardCoordinator := mock.NewMultiShardsCoordinatorMock(3) - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - initStore(), - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - initAccountsMock(), - shardCoordinator, - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(shardCoordinator), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - assert.NotNil(t, sp) - hdr.PrevHash = hasher.Compute("prev hash") - sp.DisplayLogInfo(hdr, txBlock, []byte("tx_hash1"), shardCoordinator.NumberOfShards(), shardCoordinator.SelfId(), tdp) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + hasher := mock.HasherMock{} + hdr, txBlock := createTestHdrTxBlockBody() + shardCoordinator := mock.NewMultiShardsCoordinatorMock(3) + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + initStore(), + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + initAccountsMock(), + shardCoordinator, + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(shardCoordinator), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + assert.NotNil(t, sp) + hdr.PrevHash = hasher.Compute("prev hash") + sp.DisplayLogInfo(hdr, txBlock, []byte("tx_hash1"), shardCoordinator.NumberOfShards(), shardCoordinator.SelfId(), tdp) } func TestBlockProcessor_CreateBlockHeaderShouldNotReturnNil(t *testing.T) { - t.Parallel() - bp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - initDataPool([]byte("tx_hash1")), - initStore(), - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - 
mbHeaders, err := bp.CreateBlockHeader(nil, 0, func() bool { - return true - }) - assert.Nil(t, err) - assert.NotNil(t, mbHeaders) - assert.Equal(t, 0, len(mbHeaders.(*block.Header).MiniBlockHeaders)) + t.Parallel() + bp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + initDataPool([]byte("tx_hash1")), + initStore(), + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + mbHeaders, err := bp.CreateBlockHeader(nil, 0, func() bool { + return true + }) + assert.Nil(t, err) + assert.NotNil(t, mbHeaders) + assert.Equal(t, 0, len(mbHeaders.(*block.Header).MiniBlockHeaders)) } func TestShardProcessor_CreateBlockHeaderShouldErrWhenMarshalizerErrors(t *testing.T) { - t.Parallel() - bp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - initDataPool([]byte("tx_hash1")), - initStore(), - &mock.HasherStub{}, - &mock.MarshalizerMock{Fail: true}, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - body := block.Body{ - { - ReceiverShardID: 1, - SenderShardID: 0, - TxHashes: make([][]byte, 0), - }, - { - ReceiverShardID: 2, - SenderShardID: 0, - TxHashes: make([][]byte, 0), - }, - { - ReceiverShardID: 3, - SenderShardID: 0, - TxHashes: make([][]byte, 0), - }, - } - mbHeaders, err := bp.CreateBlockHeader(body, 0, func() bool { - return true - }) - assert.NotNil(t, err) - assert.Nil(t, mbHeaders) + t.Parallel() + bp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + initDataPool([]byte("tx_hash1")), + initStore(), + &mock.HasherStub{}, + &mock.MarshalizerMock{Fail: true}, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + body := block.Body{ + { + ReceiverShardID: 1, + SenderShardID: 0, + TxHashes: make([][]byte, 0), + }, + { + ReceiverShardID: 2, + SenderShardID: 0, + TxHashes: make([][]byte, 0), + }, + { + ReceiverShardID: 3, + SenderShardID: 0, + TxHashes: make([][]byte, 0), + }, + } + mbHeaders, err := bp.CreateBlockHeader(body, 0, func() bool { + return true + }) + assert.NotNil(t, err) + assert.Nil(t, mbHeaders) } func TestShardProcessor_CreateBlockHeaderReturnsOK(t *testing.T) { - t.Parallel() - bp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - initDataPool([]byte("tx_hash1")), - initStore(), - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - 
&mock.Uint64ByteSliceConverterMock{}, - ) - body := block.Body{ - { - ReceiverShardID: 1, - SenderShardID: 0, - TxHashes: make([][]byte, 0), - }, - { - ReceiverShardID: 2, - SenderShardID: 0, - TxHashes: make([][]byte, 0), - }, - { - ReceiverShardID: 3, - SenderShardID: 0, - TxHashes: make([][]byte, 0), - }, - } - mbHeaders, err := bp.CreateBlockHeader(body, 0, func() bool { - return true - }) - assert.Nil(t, err) - assert.Equal(t, len(body), len(mbHeaders.(*block.Header).MiniBlockHeaders)) + t.Parallel() + bp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + initDataPool([]byte("tx_hash1")), + initStore(), + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + body := block.Body{ + { + ReceiverShardID: 1, + SenderShardID: 0, + TxHashes: make([][]byte, 0), + }, + { + ReceiverShardID: 2, + SenderShardID: 0, + TxHashes: make([][]byte, 0), + }, + { + ReceiverShardID: 3, + SenderShardID: 0, + TxHashes: make([][]byte, 0), + }, + } + mbHeaders, err := bp.CreateBlockHeader(body, 0, func() bool { + return true + }) + assert.Nil(t, err) + assert.Equal(t, len(body), len(mbHeaders.(*block.Header).MiniBlockHeaders)) } func TestShardProcessor_CommitBlockShouldRevertAccountStateWhenErr(t *testing.T) { - t.Parallel() - // set accounts dirty - journalEntries := 3 - revToSnapshot := func(snapshot int) error { - journalEntries = 0 - return nil - } - bp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - initDataPool([]byte("tx_hash1")), - initStore(), - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{ - RevertToSnapshotCalled: revToSnapshot, - }, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - err := bp.CommitBlock(nil, nil, nil) - assert.NotNil(t, err) - assert.Equal(t, 0, journalEntries) + t.Parallel() + // set accounts dirty + journalEntries := 3 + revToSnapshot := func(snapshot int) error { + journalEntries = 0 + return nil + } + bp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + initDataPool([]byte("tx_hash1")), + initStore(), + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.AccountsStub{ + RevertToSnapshotCalled: revToSnapshot, + }, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + err := bp.CommitBlock(nil, nil, nil) + assert.NotNil(t, err) + assert.Equal(t, 0, journalEntries) } func TestShardProcessor_MarshalizedDataToBroadcastShouldWork(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - txHash0 := []byte("txHash0") - mb0 := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 0, - TxHashes: [][]byte{txHash0}, - } - txHash1 := []byte("txHash1") - mb1 := 
block.MiniBlock{ - ReceiverShardID: 1, - SenderShardID: 0, - TxHashes: [][]byte{txHash1}, - } - body := make(block.Body, 0) - body = append(body, &mb0) - body = append(body, &mb1) - body = append(body, &mb0) - body = append(body, &mb1) - marshalizer := &mock.MarshalizerMock{ - Fail: false, - } - - factory, _ := shard.NewPreProcessorsContainerFactory( - mock.NewMultiShardsCoordinatorMock(3), - initStore(), - marshalizer, - &mock.HasherMock{}, - tdp, - &mock.AddressConverterMock{}, - initAccountsMock(), - &mock.RequestHandlerMock{}, - &mock.TxProcessorMock{}, - &mock.SCProcessorMock{}, - &mock.SmartContractResultsProcessorMock{}, - &mock.RewardTxProcessorMock{}, - ) - container, _ := factory.Create() - - tc, err := coordinator.NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(3), - initAccountsMock(), - tdp, - &mock.RequestHandlerMock{}, - container, - &mock.InterimProcessorContainerMock{}, - ) - - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - initStore(), - &mock.HasherStub{}, - marshalizer, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - tc, - &mock.Uint64ByteSliceConverterMock{}, - ) - msh, mstx, err := sp.MarshalizedDataToBroadcast(&block.Header{}, body) - assert.Nil(t, err) - assert.NotNil(t, msh) - assert.NotNil(t, mstx) - _, found := msh[0] - assert.False(t, found) - - expectedBody := make(block.Body, 0) - err = marshalizer.Unmarshal(&expectedBody, msh[1]) - assert.Nil(t, err) - assert.Equal(t, len(expectedBody), 2) - assert.Equal(t, &mb1, expectedBody[0]) - assert.Equal(t, &mb1, expectedBody[1]) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + txHash0 := []byte("txHash0") + mb0 := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 0, + TxHashes: [][]byte{txHash0}, + } + txHash1 := []byte("txHash1") + mb1 := block.MiniBlock{ + ReceiverShardID: 1, + SenderShardID: 0, + TxHashes: [][]byte{txHash1}, + } + body := make(block.Body, 0) + body = append(body, &mb0) + body = append(body, &mb1) + body = append(body, &mb0) + body = append(body, &mb1) + marshalizer := &mock.MarshalizerMock{ + Fail: false, + } + + factory, _ := shard.NewPreProcessorsContainerFactory( + mock.NewMultiShardsCoordinatorMock(3), + initStore(), + marshalizer, + &mock.HasherMock{}, + tdp, + &mock.AddressConverterMock{}, + initAccountsMock(), + &mock.RequestHandlerMock{}, + &mock.TxProcessorMock{}, + &mock.SCProcessorMock{}, + &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + ) + container, _ := factory.Create() + + tc, err := coordinator.NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(3), + initAccountsMock(), + tdp, + &mock.RequestHandlerMock{}, + container, + &mock.InterimProcessorContainerMock{}, + ) + + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + initStore(), + &mock.HasherStub{}, + marshalizer, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + tc, + &mock.Uint64ByteSliceConverterMock{}, + ) + msh, mstx, err := sp.MarshalizedDataToBroadcast(&block.Header{}, body) + assert.Nil(t, err) + assert.NotNil(t, msh) + 
assert.NotNil(t, mstx) + _, found := msh[0] + assert.False(t, found) + + expectedBody := make(block.Body, 0) + err = marshalizer.Unmarshal(&expectedBody, msh[1]) + assert.Nil(t, err) + assert.Equal(t, len(expectedBody), 2) + assert.Equal(t, &mb1, expectedBody[0]) + assert.Equal(t, &mb1, expectedBody[1]) } func TestShardProcessor_MarshalizedDataWrongType(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - marshalizer := &mock.MarshalizerMock{ - Fail: false, - } - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - initStore(), - &mock.HasherStub{}, - marshalizer, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - wr := wrongBody{} - msh, mstx, err := sp.MarshalizedDataToBroadcast(&block.Header{}, wr) - assert.Equal(t, process.ErrWrongTypeAssertion, err) - assert.Nil(t, msh) - assert.Nil(t, mstx) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + marshalizer := &mock.MarshalizerMock{ + Fail: false, + } + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + initStore(), + &mock.HasherStub{}, + marshalizer, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + wr := wrongBody{} + msh, mstx, err := sp.MarshalizedDataToBroadcast(&block.Header{}, wr) + assert.Equal(t, process.ErrWrongTypeAssertion, err) + assert.Nil(t, msh) + assert.Nil(t, mstx) } func TestShardProcessor_MarshalizedDataNilInput(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - marshalizer := &mock.MarshalizerMock{ - Fail: false, - } - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - initStore(), - &mock.HasherStub{}, - marshalizer, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - msh, mstx, err := sp.MarshalizedDataToBroadcast(nil, nil) - assert.Equal(t, process.ErrNilMiniBlocks, err) - assert.Nil(t, msh) - assert.Nil(t, mstx) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + marshalizer := &mock.MarshalizerMock{ + Fail: false, + } + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + initStore(), + &mock.HasherStub{}, + marshalizer, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + msh, mstx, err := sp.MarshalizedDataToBroadcast(nil, nil) + assert.Equal(t, process.ErrNilMiniBlocks, err) + assert.Nil(t, msh) + assert.Nil(t, mstx) 
} func TestShardProcessor_MarshalizedDataMarshalWithoutSuccess(t *testing.T) { - t.Parallel() - wasCalled := false - tdp := initDataPool([]byte("tx_hash1")) - txHash0 := []byte("txHash0") - mb0 := block.MiniBlock{ - ReceiverShardID: 1, - SenderShardID: 0, - TxHashes: [][]byte{txHash0}, - } - body := make(block.Body, 0) - body = append(body, &mb0) - marshalizer := &mock.MarshalizerStub{ - MarshalCalled: func(obj interface{}) ([]byte, error) { - wasCalled = true - return nil, process.ErrMarshalWithoutSuccess - }, - } - - factory, _ := shard.NewPreProcessorsContainerFactory( - mock.NewMultiShardsCoordinatorMock(3), - initStore(), - marshalizer, - &mock.HasherMock{}, - tdp, - &mock.AddressConverterMock{}, - initAccountsMock(), - &mock.RequestHandlerMock{}, - &mock.TxProcessorMock{}, - &mock.SCProcessorMock{}, - &mock.SmartContractResultsProcessorMock{}, - &mock.RewardTxProcessorMock{}, - ) - container, _ := factory.Create() - - tc, err := coordinator.NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(3), - initAccountsMock(), - tdp, - &mock.RequestHandlerMock{}, - container, - &mock.InterimProcessorContainerMock{}, - ) - - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - initStore(), - &mock.HasherStub{}, - marshalizer, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - tc, - &mock.Uint64ByteSliceConverterMock{}, - ) - - msh, mstx, err := sp.MarshalizedDataToBroadcast(&block.Header{}, body) - assert.Nil(t, err) - assert.True(t, wasCalled) - assert.Equal(t, 0, len(msh)) - assert.Equal(t, 0, len(mstx)) + t.Parallel() + wasCalled := false + tdp := initDataPool([]byte("tx_hash1")) + txHash0 := []byte("txHash0") + mb0 := block.MiniBlock{ + ReceiverShardID: 1, + SenderShardID: 0, + TxHashes: [][]byte{txHash0}, + } + body := make(block.Body, 0) + body = append(body, &mb0) + marshalizer := &mock.MarshalizerStub{ + MarshalCalled: func(obj interface{}) ([]byte, error) { + wasCalled = true + return nil, process.ErrMarshalWithoutSuccess + }, + } + + factory, _ := shard.NewPreProcessorsContainerFactory( + mock.NewMultiShardsCoordinatorMock(3), + initStore(), + marshalizer, + &mock.HasherMock{}, + tdp, + &mock.AddressConverterMock{}, + initAccountsMock(), + &mock.RequestHandlerMock{}, + &mock.TxProcessorMock{}, + &mock.SCProcessorMock{}, + &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + ) + container, _ := factory.Create() + + tc, err := coordinator.NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(3), + initAccountsMock(), + tdp, + &mock.RequestHandlerMock{}, + container, + &mock.InterimProcessorContainerMock{}, + ) + + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + initStore(), + &mock.HasherStub{}, + marshalizer, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + tc, + &mock.Uint64ByteSliceConverterMock{}, + ) + + msh, mstx, err := sp.MarshalizedDataToBroadcast(&block.Header{}, body) + assert.Nil(t, err) + assert.True(t, wasCalled) + assert.Equal(t, 0, len(msh)) + assert.Equal(t, 0, len(mstx)) } //------- receivedMetaBlock func 
TestShardProcessor_ReceivedMetaBlockShouldRequestMissingMiniBlocks(t *testing.T) { - t.Parallel() - - hasher := mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} - dataPool := mock.NewPoolsHolderFake() - - //we will have a metablock that will return 3 miniblock hashes - //1 miniblock hash will be in cache - //2 will be requested on network - - miniBlockHash1 := []byte("miniblock hash 1 found in cache") - miniBlockHash2 := []byte("miniblock hash 2") - miniBlockHash3 := []byte("miniblock hash 3") - - metaBlock := mock.HeaderHandlerStub{ - GetMiniBlockHeadersWithDstCalled: func(destId uint32) map[string]uint32 { - return map[string]uint32{ - string(miniBlockHash1): 0, - string(miniBlockHash2): 0, - string(miniBlockHash3): 0, - } - }, - } - - //put this metaBlock inside datapool - metaBlockHash := []byte("metablock hash") - dataPool.MetaBlocks().Put(metaBlockHash, &metaBlock) - //put the existing miniblock inside datapool - dataPool.MiniBlocks().Put(miniBlockHash1, &block.MiniBlock{}) - - miniBlockHash1Requested := int32(0) - miniBlockHash2Requested := int32(0) - miniBlockHash3Requested := int32(0) - - requestHandler := &mock.RequestHandlerMock{RequestMiniBlockHandlerCalled: func(destShardID uint32, miniblockHash []byte) { - if bytes.Equal(miniBlockHash1, miniblockHash) { - atomic.AddInt32(&miniBlockHash1Requested, 1) - } - if bytes.Equal(miniBlockHash2, miniblockHash) { - atomic.AddInt32(&miniBlockHash2Requested, 1) - } - if bytes.Equal(miniBlockHash3, miniblockHash) { - atomic.AddInt32(&miniBlockHash3Requested, 1) - } - }} - - tc, _ := coordinator.NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(3), - initAccountsMock(), - dataPool, - requestHandler, - &mock.PreProcessorContainerMock{}, - &mock.InterimProcessorContainerMock{}, - ) - - bp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - dataPool, - initStore(), - hasher, - marshalizer, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - requestHandler, - tc, - &mock.Uint64ByteSliceConverterMock{}, - ) - bp.ReceivedMetaBlock(metaBlockHash) - - //we have to wait to be sure txHash1Requested is not incremented by a late call - time.Sleep(time.Second) - - assert.Equal(t, int32(0), atomic.LoadInt32(&miniBlockHash1Requested)) - assert.Equal(t, int32(1), atomic.LoadInt32(&miniBlockHash2Requested)) - assert.Equal(t, int32(1), atomic.LoadInt32(&miniBlockHash2Requested)) + t.Parallel() + + hasher := mock.HasherMock{} + marshalizer := &mock.MarshalizerMock{} + dataPool := mock.NewPoolsHolderMock() + + //we will have a metablock that will return 3 miniblock hashes + //1 miniblock hash will be in cache + //2 will be requested on network + + miniBlockHash1 := []byte("miniblock hash 1 found in cache") + miniBlockHash2 := []byte("miniblock hash 2") + miniBlockHash3 := []byte("miniblock hash 3") + + metaBlock := mock.HeaderHandlerStub{ + GetMiniBlockHeadersWithDstCalled: func(destId uint32) map[string]uint32 { + return map[string]uint32{ + string(miniBlockHash1): 0, + string(miniBlockHash2): 0, + string(miniBlockHash3): 0, + } + }, + } + + //put this metaBlock inside datapool + metaBlockHash := []byte("metablock hash") + dataPool.MetaBlocks().Put(metaBlockHash, &metaBlock) + //put the existing miniblock inside datapool + dataPool.MiniBlocks().Put(miniBlockHash1, &block.MiniBlock{}) + + miniBlockHash1Requested := int32(0) 
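+	// note added for clarity (not part of the original commit): these counters record how many
+	// times the request handler mock below asks the network for each miniblock hash; only the
+	// two hashes missing from the pool are expected to be requested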
+	miniBlockHash2Requested := int32(0)
+	miniBlockHash3Requested := int32(0)
+
+	requestHandler := &mock.RequestHandlerMock{RequestMiniBlockHandlerCalled: func(destShardID uint32, miniblockHash []byte) {
+		if bytes.Equal(miniBlockHash1, miniblockHash) {
+			atomic.AddInt32(&miniBlockHash1Requested, 1)
+		}
+		if bytes.Equal(miniBlockHash2, miniblockHash) {
+			atomic.AddInt32(&miniBlockHash2Requested, 1)
+		}
+		if bytes.Equal(miniBlockHash3, miniblockHash) {
+			atomic.AddInt32(&miniBlockHash3Requested, 1)
+		}
+	}}
+
+	tc, _ := coordinator.NewTransactionCoordinator(
+		mock.NewMultiShardsCoordinatorMock(3),
+		initAccountsMock(),
+		dataPool,
+		requestHandler,
+		&mock.PreProcessorContainerMock{},
+		&mock.InterimProcessorContainerMock{},
+	)
+
+	bp, _ := blproc.NewShardProcessor(
+		&mock.ServiceContainerMock{},
+		dataPool,
+		initStore(),
+		hasher,
+		marshalizer,
+		initAccountsMock(),
+		mock.NewMultiShardsCoordinatorMock(3),
+		mock.NewNodesCoordinatorMock(),
+		&mock.SpecialAddressHandlerMock{},
+		&mock.ForkDetectorMock{},
+		&mock.BlocksTrackerMock{},
+		createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)),
+		requestHandler,
+		tc,
+		&mock.Uint64ByteSliceConverterMock{},
+	)
+	bp.ReceivedMetaBlock(metaBlockHash)
+
+	//we have to wait to be sure miniBlockHash1Requested is not incremented by a late call
+	time.Sleep(time.Second)
+
+	assert.Equal(t, int32(0), atomic.LoadInt32(&miniBlockHash1Requested))
+	assert.Equal(t, int32(1), atomic.LoadInt32(&miniBlockHash2Requested))
+	assert.Equal(t, int32(1), atomic.LoadInt32(&miniBlockHash3Requested))
 }

 //--------- receivedMetaBlockNoMissingMiniBlocks

 func TestShardProcessor_ReceivedMetaBlockNoMissingMiniBlocksShouldPass(t *testing.T) {
-	t.Parallel()
-
-	hasher := mock.HasherMock{}
-	marshalizer := &mock.MarshalizerMock{}
-	dataPool := mock.NewPoolsHolderFake()
-
-	//we will have a metablock that will return 3 miniblock hashes
-	//1 miniblock hash will be in cache
-	//2 will be requested on network
-
-	miniBlockHash1 := []byte("miniblock hash 1 found in cache")
-
-	metaBlock := mock.HeaderHandlerStub{
-		GetMiniBlockHeadersWithDstCalled: func(destId uint32) map[string]uint32 {
-			return map[string]uint32{
-				string(miniBlockHash1): 0,
-			}
-		},
-	}
-
-	//put this metaBlock inside datapool
-	metaBlockHash := []byte("metablock hash")
-	dataPool.MetaBlocks().Put(metaBlockHash, &metaBlock)
-	//put the existing miniblock inside datapool
-	dataPool.MiniBlocks().Put(miniBlockHash1, &block.MiniBlock{})
-
-	noOfMissingMiniBlocks := int32(0)
-
-	requestHandler := &mock.RequestHandlerMock{RequestMiniBlockHandlerCalled: func(destShardID uint32, miniblockHash []byte) {
-		atomic.AddInt32(&noOfMissingMiniBlocks, 1)
-	}}
-
-	tc, _ := coordinator.NewTransactionCoordinator(
-		mock.NewMultiShardsCoordinatorMock(3),
-		initAccountsMock(),
-		dataPool,
-		requestHandler,
-		&mock.PreProcessorContainerMock{},
-		&mock.InterimProcessorContainerMock{},
-	)
-
-	sp, _ := blproc.NewShardProcessor(
-		&mock.ServiceContainerMock{},
-		dataPool,
-		initStore(),
-		hasher,
-		marshalizer,
-		initAccountsMock(),
-		mock.NewMultiShardsCoordinatorMock(3),
-		mock.NewNodesCoordinatorMock(),
-		&mock.SpecialAddressHandlerMock{},
-		&mock.ForkDetectorMock{},
-		&mock.BlocksTrackerMock{},
-		createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)),
-		requestHandler,
-		tc,
-		&mock.Uint64ByteSliceConverterMock{},
-	)
-	sp.ReceivedMetaBlock(metaBlockHash)
-	assert.Equal(t, int32(0), atomic.LoadInt32(&noOfMissingMiniBlocks))
+	t.Parallel()
+
+	hasher := mock.HasherMock{}
+	marshalizer := &mock.MarshalizerMock{}
+	dataPool := 
mock.NewPoolsHolderMock() + + //we will have a metablock that will return 3 miniblock hashes + //1 miniblock hash will be in cache + //2 will be requested on network + + miniBlockHash1 := []byte("miniblock hash 1 found in cache") + + metaBlock := mock.HeaderHandlerStub{ + GetMiniBlockHeadersWithDstCalled: func(destId uint32) map[string]uint32 { + return map[string]uint32{ + string(miniBlockHash1): 0, + } + }, + } + + //put this metaBlock inside datapool + metaBlockHash := []byte("metablock hash") + dataPool.MetaBlocks().Put(metaBlockHash, &metaBlock) + //put the existing miniblock inside datapool + dataPool.MiniBlocks().Put(miniBlockHash1, &block.MiniBlock{}) + + noOfMissingMiniBlocks := int32(0) + + requestHandler := &mock.RequestHandlerMock{RequestMiniBlockHandlerCalled: func(destShardID uint32, miniblockHash []byte) { + atomic.AddInt32(&noOfMissingMiniBlocks, 1) + }} + + tc, _ := coordinator.NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(3), + initAccountsMock(), + dataPool, + requestHandler, + &mock.PreProcessorContainerMock{}, + &mock.InterimProcessorContainerMock{}, + ) + + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + dataPool, + initStore(), + hasher, + marshalizer, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + requestHandler, + tc, + &mock.Uint64ByteSliceConverterMock{}, + ) + sp.ReceivedMetaBlock(metaBlockHash) + assert.Equal(t, int32(0), atomic.LoadInt32(&noOfMissingMiniBlocks)) } //--------- createAndProcessCrossMiniBlocksDstMe func TestShardProcessor_CreateAndProcessCrossMiniBlocksDstMe(t *testing.T) { - t.Parallel() - - tdp := mock.NewPoolsHolderFake() - txHash := []byte("tx_hash1") - tdp.Transactions().AddData(txHash, &transaction.Transaction{}, process.ShardCacherIdentifier(1, 0)) - body := make(block.Body, 0) - txHashes := make([][]byte, 0) - txHashes = append(txHashes, txHash) - miniblock := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 1, - TxHashes: txHashes, - } - body = append(body, &miniblock) - - hasher := &mock.HasherStub{} - marshalizer := &mock.MarshalizerMock{} - - mbbytes, _ := marshalizer.Marshal(miniblock) - mbHash := hasher.Compute(string(mbbytes)) - mbHdr := block.MiniBlockHeader{ - ReceiverShardID: 0, - SenderShardID: 1, - TxCount: uint32(len(txHashes)), - Hash: mbHash} - mbHdrs := make([]block.MiniBlockHeader, 0) - mbHdrs = append(mbHdrs, mbHdr) - - shardMiniBlock := block.ShardMiniBlockHeader{ - ReceiverShardId: mbHdr.ReceiverShardID, - SenderShardId: mbHdr.SenderShardID, - TxCount: mbHdr.TxCount, - Hash: mbHdr.Hash, - } - shardMiniblockHdrs := make([]block.ShardMiniBlockHeader, 0) - shardMiniblockHdrs = append(shardMiniblockHdrs, shardMiniBlock) - shardHeader := block.ShardData{ - ShardMiniBlockHeaders: shardMiniblockHdrs, - } - shardHdrs := make([]block.ShardData, 0) - shardHdrs = append(shardHdrs, shardHeader) - - meta := &block.MetaBlock{ - Nonce: 1, - ShardInfo: make([]block.ShardData, 0), - Round: 1, - PrevRandSeed: []byte("roothash"), - } - metaBytes, _ := marshalizer.Marshal(meta) - metaHash := hasher.Compute(string(metaBytes)) - - tdp.MetaBlocks().Put(metaHash, meta) - - haveTimeTrue := func() bool { - return true - } - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{}, 
- mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - miniBlockSlice, usedMetaHdrsHashes, noOfTxs, err := sp.CreateAndProcessCrossMiniBlocksDstMe(3, 2, 2, haveTimeTrue) - assert.Equal(t, err == nil, true) - assert.Equal(t, len(miniBlockSlice) == 0, true) - assert.Equal(t, len(usedMetaHdrsHashes) == 0, true) - assert.Equal(t, noOfTxs, uint32(0)) + t.Parallel() + + tdp := mock.NewPoolsHolderMock() + txHash := []byte("tx_hash1") + tdp.Transactions().AddData(txHash, &transaction.Transaction{}, process.ShardCacherIdentifier(1, 0)) + body := make(block.Body, 0) + txHashes := make([][]byte, 0) + txHashes = append(txHashes, txHash) + miniblock := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 1, + TxHashes: txHashes, + } + body = append(body, &miniblock) + + hasher := &mock.HasherStub{} + marshalizer := &mock.MarshalizerMock{} + + mbbytes, _ := marshalizer.Marshal(miniblock) + mbHash := hasher.Compute(string(mbbytes)) + mbHdr := block.MiniBlockHeader{ + ReceiverShardID: 0, + SenderShardID: 1, + TxCount: uint32(len(txHashes)), + Hash: mbHash} + mbHdrs := make([]block.MiniBlockHeader, 0) + mbHdrs = append(mbHdrs, mbHdr) + + shardMiniBlock := block.ShardMiniBlockHeader{ + ReceiverShardId: mbHdr.ReceiverShardID, + SenderShardId: mbHdr.SenderShardID, + TxCount: mbHdr.TxCount, + Hash: mbHdr.Hash, + } + shardMiniblockHdrs := make([]block.ShardMiniBlockHeader, 0) + shardMiniblockHdrs = append(shardMiniblockHdrs, shardMiniBlock) + shardHeader := block.ShardData{ + ShardMiniBlockHeaders: shardMiniblockHdrs, + } + shardHdrs := make([]block.ShardData, 0) + shardHdrs = append(shardHdrs, shardHeader) + + meta := &block.MetaBlock{ + Nonce: 1, + ShardInfo: make([]block.ShardData, 0), + Round: 1, + PrevRandSeed: []byte("roothash"), + } + metaBytes, _ := marshalizer.Marshal(meta) + metaHash := hasher.Compute(string(metaBytes)) + + tdp.MetaBlocks().Put(metaHash, meta) + + haveTimeTrue := func() bool { + return true + } + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.AccountsStub{}, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + miniBlockSlice, usedMetaHdrsHashes, noOfTxs, err := sp.CreateAndProcessCrossMiniBlocksDstMe(3, 2, 2, haveTimeTrue) + assert.Equal(t, err == nil, true) + assert.Equal(t, len(miniBlockSlice) == 0, true) + assert.Equal(t, len(usedMetaHdrsHashes) == 0, true) + assert.Equal(t, noOfTxs, uint32(0)) } func TestShardProcessor_NewShardProcessorWrongTypeOfStartHeaderShouldErrWrongTypeAssertion(t *testing.T) { - t.Parallel() - - tdp := mock.NewPoolsHolderFake() - txHash := []byte(nil) - tdp.Transactions().AddData(txHash, &transaction.Transaction{}, process.ShardCacherIdentifier(1, 0)) - - startHeaders := createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)) - - startHeaders[sharding.MetachainShardId] = &block.Header{} - - sp, err := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - 
&mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{}, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - startHeaders, - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - assert.Nil(t, sp) - assert.Equal(t, process.ErrWrongTypeAssertion, err) + t.Parallel() + + tdp := mock.NewPoolsHolderMock() + txHash := []byte(nil) + tdp.Transactions().AddData(txHash, &transaction.Transaction{}, process.ShardCacherIdentifier(1, 0)) + + startHeaders := createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)) + + startHeaders[sharding.MetachainShardId] = &block.Header{} + + sp, err := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.AccountsStub{}, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + startHeaders, + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + assert.Nil(t, sp) + assert.Equal(t, process.ErrWrongTypeAssertion, err) } func TestShardProcessor_CreateAndProcessCrossMiniBlocksDstMeProcessPartOfMiniBlocksInMetaBlock(t *testing.T) { - t.Parallel() - - haveTimeTrue := func() bool { - return true - } - tdp := mock.NewPoolsHolderFake() - destShardId := uint32(2) - - hasher := &mock.HasherStub{} - marshalizer := &mock.MarshalizerMock{} - miniblocks := make([]*block.MiniBlock, 6) - - txHash := []byte("txhash") - txHashes := make([][]byte, 0) - txHashes = append(txHashes, txHash) - - miniblock1 := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 1, - TxHashes: txHashes, - } - miniblock2 := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 2, - TxHashes: txHashes, - } - - miniBlocks := make([]block.MiniBlock, 0) - miniBlocks = append(miniBlocks, miniblock1, miniblock2) - - destShards := []uint32{1, 3, 4} - for i := 0; i < 6; i++ { - miniblocks[i], _ = createDummyMiniBlock(fmt.Sprintf("tx hash %d", i), marshalizer, hasher, destShardId, destShards[i/2]) - } - - //put 2 metablocks in pool - meta := &block.MetaBlock{ - Nonce: 1, - ShardInfo: createShardData(hasher, marshalizer, miniBlocks), - Round: 1, - PrevRandSeed: []byte("roothash"), - } - - mb1Hash := []byte("meta block 1") - tdp.MetaBlocks().Put( - mb1Hash, - meta, - ) - - meta = &block.MetaBlock{ - Nonce: 2, - ShardInfo: createShardData(hasher, marshalizer, miniBlocks), - Round: 2, - } - - mb2Hash := []byte("meta block 2") - tdp.MetaBlocks().Put( - mb2Hash, - meta, - ) - - meta = &block.MetaBlock{ - Nonce: 3, - ShardInfo: make([]block.ShardData, 0), - Round: 3, - PrevRandSeed: []byte("roothash"), - } - - mb3Hash := []byte("meta block 3") - tdp.MetaBlocks().Put( - mb3Hash, - meta, - ) - - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{}, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - miniBlocksReturned, 
usedMetaHdrsHashes, nrTxAdded, err := sp.CreateAndProcessCrossMiniBlocksDstMe(3, 2, 2, haveTimeTrue) - - assert.Equal(t, 0, len(miniBlocksReturned)) - assert.Equal(t, 0, len(usedMetaHdrsHashes)) - assert.Equal(t, uint32(0), nrTxAdded) - assert.Nil(t, err) + t.Parallel() + + haveTimeTrue := func() bool { + return true + } + tdp := mock.NewPoolsHolderMock() + destShardId := uint32(2) + + hasher := &mock.HasherStub{} + marshalizer := &mock.MarshalizerMock{} + miniblocks := make([]*block.MiniBlock, 6) + + txHash := []byte("txhash") + txHashes := make([][]byte, 0) + txHashes = append(txHashes, txHash) + + miniblock1 := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 1, + TxHashes: txHashes, + } + miniblock2 := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 2, + TxHashes: txHashes, + } + + miniBlocks := make([]block.MiniBlock, 0) + miniBlocks = append(miniBlocks, miniblock1, miniblock2) + + destShards := []uint32{1, 3, 4} + for i := 0; i < 6; i++ { + miniblocks[i], _ = createDummyMiniBlock(fmt.Sprintf("tx hash %d", i), marshalizer, hasher, destShardId, destShards[i/2]) + } + + //put 2 metablocks in pool + meta := &block.MetaBlock{ + Nonce: 1, + ShardInfo: createShardData(hasher, marshalizer, miniBlocks), + Round: 1, + PrevRandSeed: []byte("roothash"), + } + + mb1Hash := []byte("meta block 1") + tdp.MetaBlocks().Put( + mb1Hash, + meta, + ) + + meta = &block.MetaBlock{ + Nonce: 2, + ShardInfo: createShardData(hasher, marshalizer, miniBlocks), + Round: 2, + } + + mb2Hash := []byte("meta block 2") + tdp.MetaBlocks().Put( + mb2Hash, + meta, + ) + + meta = &block.MetaBlock{ + Nonce: 3, + ShardInfo: make([]block.ShardData, 0), + Round: 3, + PrevRandSeed: []byte("roothash"), + } + + mb3Hash := []byte("meta block 3") + tdp.MetaBlocks().Put( + mb3Hash, + meta, + ) + + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.AccountsStub{}, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + miniBlocksReturned, usedMetaHdrsHashes, nrTxAdded, err := sp.CreateAndProcessCrossMiniBlocksDstMe(3, 2, 2, haveTimeTrue) + + assert.Equal(t, 0, len(miniBlocksReturned)) + assert.Equal(t, 0, len(usedMetaHdrsHashes)) + assert.Equal(t, uint32(0), nrTxAdded) + assert.Nil(t, err) } //------- createMiniBlocks func TestShardProcessor_CreateMiniBlocksShouldWorkWithIntraShardTxs(t *testing.T) { - t.Parallel() - - hasher := mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} - dataPool := mock.NewPoolsHolderFake() - - //we will have a 3 txs in pool - - txHash1 := []byte("tx hash 1") - txHash2 := []byte("tx hash 2") - txHash3 := []byte("tx hash 3") - - senderShardId := uint32(0) - receiverShardId := uint32(0) - - tx1Nonce := uint64(45) - tx2Nonce := uint64(46) - tx3Nonce := uint64(47) - - //put the existing tx inside datapool - cacheId := process.ShardCacherIdentifier(senderShardId, receiverShardId) - dataPool.Transactions().AddData(txHash1, &transaction.Transaction{ - Nonce: tx1Nonce, - Data: string(txHash1), - }, cacheId) - dataPool.Transactions().AddData(txHash2, &transaction.Transaction{ - Nonce: tx2Nonce, - Data: string(txHash2), - }, cacheId) - dataPool.Transactions().AddData(txHash3, &transaction.Transaction{ - 
Nonce: tx3Nonce, - Data: string(txHash3), - }, cacheId) - - tx1ExecutionResult := uint64(0) - tx2ExecutionResult := uint64(0) - tx3ExecutionResult := uint64(0) - - txProcessorMock := &mock.TxProcessorMock{ - ProcessTransactionCalled: func(transaction *transaction.Transaction, round uint64) error { - //execution, in this context, means moving the tx nonce to itx corresponding execution result variable - if transaction.Data == string(txHash1) { - tx1ExecutionResult = transaction.Nonce - } - if transaction.Data == string(txHash2) { - tx2ExecutionResult = transaction.Nonce - } - if transaction.Data == string(txHash3) { - tx3ExecutionResult = transaction.Nonce - } - - return nil - }, - } - shardCoordinator := mock.NewMultiShardsCoordinatorMock(3) - accntAdapter := &mock.AccountsStub{ - RevertToSnapshotCalled: func(snapshot int) error { - assert.Fail(t, "revert should have not been called") - return nil - }, - JournalLenCalled: func() int { - return 0 - }, - } - - factory, _ := shard.NewPreProcessorsContainerFactory( - shardCoordinator, - initStore(), - marshalizer, - hasher, - dataPool, - &mock.AddressConverterMock{}, - accntAdapter, - &mock.RequestHandlerMock{}, - txProcessorMock, - &mock.SCProcessorMock{}, - &mock.SmartContractResultsProcessorMock{}, - &mock.RewardTxProcessorMock{}, - ) - container, _ := factory.Create() - - tc, err := coordinator.NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(3), - accntAdapter, - dataPool, - &mock.RequestHandlerMock{}, - container, - &mock.InterimProcessorContainerMock{}, - ) - - bp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - dataPool, - initStore(), - hasher, - marshalizer, - accntAdapter, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - tc, - &mock.Uint64ByteSliceConverterMock{}, - ) - - blockBody, err := bp.CreateMiniBlocks(1, 15000, 0, func() bool { return true }) - - assert.Nil(t, err) - //testing execution - assert.Equal(t, tx1Nonce, tx1ExecutionResult) - assert.Equal(t, tx2Nonce, tx2ExecutionResult) - assert.Equal(t, tx3Nonce, tx3ExecutionResult) - //one miniblock output - assert.Equal(t, 1, len(blockBody)) - //miniblock should have 3 txs - assert.Equal(t, 3, len(blockBody[0].TxHashes)) - //testing all 3 hashes are present in block body - assert.True(t, isInTxHashes(txHash1, blockBody[0].TxHashes)) - assert.True(t, isInTxHashes(txHash2, blockBody[0].TxHashes)) - assert.True(t, isInTxHashes(txHash3, blockBody[0].TxHashes)) + t.Parallel() + + hasher := mock.HasherMock{} + marshalizer := &mock.MarshalizerMock{} + dataPool := mock.NewPoolsHolderMock() + + //we will have a 3 txs in pool + + txHash1 := []byte("tx hash 1") + txHash2 := []byte("tx hash 2") + txHash3 := []byte("tx hash 3") + + senderShardId := uint32(0) + receiverShardId := uint32(0) + + tx1Nonce := uint64(45) + tx2Nonce := uint64(46) + tx3Nonce := uint64(47) + + //put the existing tx inside datapool + cacheId := process.ShardCacherIdentifier(senderShardId, receiverShardId) + dataPool.Transactions().AddData(txHash1, &transaction.Transaction{ + Nonce: tx1Nonce, + Data: string(txHash1), + }, cacheId) + dataPool.Transactions().AddData(txHash2, &transaction.Transaction{ + Nonce: tx2Nonce, + Data: string(txHash2), + }, cacheId) + dataPool.Transactions().AddData(txHash3, &transaction.Transaction{ + Nonce: tx3Nonce, + Data: string(txHash3), + }, 
cacheId) + + tx1ExecutionResult := uint64(0) + tx2ExecutionResult := uint64(0) + tx3ExecutionResult := uint64(0) + + txProcessorMock := &mock.TxProcessorMock{ + ProcessTransactionCalled: func(transaction *transaction.Transaction, round uint64) error { + //execution, in this context, means moving the tx nonce to itx corresponding execution result variable + if transaction.Data == string(txHash1) { + tx1ExecutionResult = transaction.Nonce + } + if transaction.Data == string(txHash2) { + tx2ExecutionResult = transaction.Nonce + } + if transaction.Data == string(txHash3) { + tx3ExecutionResult = transaction.Nonce + } + + return nil + }, + } + shardCoordinator := mock.NewMultiShardsCoordinatorMock(3) + accntAdapter := &mock.AccountsStub{ + RevertToSnapshotCalled: func(snapshot int) error { + assert.Fail(t, "revert should have not been called") + return nil + }, + JournalLenCalled: func() int { + return 0 + }, + } + + factory, _ := shard.NewPreProcessorsContainerFactory( + shardCoordinator, + initStore(), + marshalizer, + hasher, + dataPool, + &mock.AddressConverterMock{}, + accntAdapter, + &mock.RequestHandlerMock{}, + txProcessorMock, + &mock.SCProcessorMock{}, + &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + ) + container, _ := factory.Create() + + tc, err := coordinator.NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(3), + accntAdapter, + dataPool, + &mock.RequestHandlerMock{}, + container, + &mock.InterimProcessorContainerMock{}, + ) + + bp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + dataPool, + initStore(), + hasher, + marshalizer, + accntAdapter, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + tc, + &mock.Uint64ByteSliceConverterMock{}, + ) + + blockBody, err := bp.CreateMiniBlocks(1, 15000, 0, func() bool { return true }) + + assert.Nil(t, err) + //testing execution + assert.Equal(t, tx1Nonce, tx1ExecutionResult) + assert.Equal(t, tx2Nonce, tx2ExecutionResult) + assert.Equal(t, tx3Nonce, tx3ExecutionResult) + //one miniblock output + assert.Equal(t, 1, len(blockBody)) + //miniblock should have 3 txs + assert.Equal(t, 3, len(blockBody[0].TxHashes)) + //testing all 3 hashes are present in block body + assert.True(t, isInTxHashes(txHash1, blockBody[0].TxHashes)) + assert.True(t, isInTxHashes(txHash2, blockBody[0].TxHashes)) + assert.True(t, isInTxHashes(txHash3, blockBody[0].TxHashes)) } func TestShardProcessor_GetProcessedMetaBlockFromPoolShouldWork(t *testing.T) { - t.Parallel() - - //we have 3 metablocks in pool each containing 2 miniblocks. 
- //blockbody will have 2 + 1 miniblocks from 2 out of the 3 metablocks - //The test should remove only one metablock - - destShardId := uint32(2) - - hasher := mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} - dataPool := mock.NewPoolsHolderFake() - - miniblocks := make([]*block.MiniBlock, 6) - miniblockHashes := make([][]byte, 6) - - destShards := []uint32{1, 3, 4} - for i := 0; i < 6; i++ { - mb, hash := createDummyMiniBlock(fmt.Sprintf("tx hash %d", i), marshalizer, hasher, destShardId, destShards[i/2]) - miniblocks[i] = mb - miniblockHashes[i] = hash - } - - //put 3 metablocks in pool - mb1Hash := []byte("meta block 1") - dataPool.MetaBlocks().Put( - mb1Hash, - createDummyMetaBlock(destShardId, destShards[0], miniblockHashes[0], miniblockHashes[1]), - ) - mb2Hash := []byte("meta block 2") - dataPool.MetaBlocks().Put( - mb2Hash, - createDummyMetaBlock(destShardId, destShards[1], miniblockHashes[2], miniblockHashes[3]), - ) - mb3Hash := []byte("meta block 3") - dataPool.MetaBlocks().Put( - mb3Hash, - createDummyMetaBlock(destShardId, destShards[2], miniblockHashes[4], miniblockHashes[5]), - ) - - shardCoordinator := mock.NewMultipleShardsCoordinatorMock() - shardCoordinator.CurrentShard = destShardId - shardCoordinator.SetNoShards(destShardId + 1) - - bp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - dataPool, - &mock.ChainStorerMock{}, - hasher, - marshalizer, - initAccountsMock(), - shardCoordinator, - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{ - GetHighestFinalBlockNonceCalled: func() uint64 { - return 0 - }, - }, - &mock.BlocksTrackerMock{ - RemoveNotarisedBlocksCalled: func(headerHandler data.HeaderHandler) error { - return nil - }, - }, - createGenesisBlocks(shardCoordinator), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - //create block body with first 3 miniblocks from miniblocks var - blockBody := block.Body{miniblocks[0], miniblocks[1], miniblocks[2]} - - hashes := make([][]byte, 0) - hashes = append(hashes, mb1Hash) - hashes = append(hashes, mb2Hash) - hashes = append(hashes, mb3Hash) - blockHeader := &block.Header{MetaBlockHashes: hashes} - - _, err := bp.GetProcessedMetaBlocksFromPool(blockBody, blockHeader) - - assert.Nil(t, err) - //check WasMiniBlockProcessed for remaining metablocks - metaBlock2Recov, _ := dataPool.MetaBlocks().Get(mb2Hash) - assert.True(t, (metaBlock2Recov.(data.HeaderHandler)).GetMiniBlockProcessed(miniblockHashes[2])) - assert.False(t, (metaBlock2Recov.(data.HeaderHandler)).GetMiniBlockProcessed(miniblockHashes[3])) - - metaBlock3Recov, _ := dataPool.MetaBlocks().Get(mb3Hash) - assert.False(t, (metaBlock3Recov.(data.HeaderHandler)).GetMiniBlockProcessed(miniblockHashes[4])) - assert.False(t, (metaBlock3Recov.(data.HeaderHandler)).GetMiniBlockProcessed(miniblockHashes[5])) + t.Parallel() + + //we have 3 metablocks in pool each containing 2 miniblocks. 
+ //blockbody will have 2 + 1 miniblocks from 2 out of the 3 metablocks + //The test should remove only one metablock + + destShardId := uint32(2) + + hasher := mock.HasherMock{} + marshalizer := &mock.MarshalizerMock{} + dataPool := mock.NewPoolsHolderMock() + + miniblocks := make([]*block.MiniBlock, 6) + miniblockHashes := make([][]byte, 6) + + destShards := []uint32{1, 3, 4} + for i := 0; i < 6; i++ { + mb, hash := createDummyMiniBlock(fmt.Sprintf("tx hash %d", i), marshalizer, hasher, destShardId, destShards[i/2]) + miniblocks[i] = mb + miniblockHashes[i] = hash + } + + //put 3 metablocks in pool + mb1Hash := []byte("meta block 1") + dataPool.MetaBlocks().Put( + mb1Hash, + createDummyMetaBlock(destShardId, destShards[0], miniblockHashes[0], miniblockHashes[1]), + ) + mb2Hash := []byte("meta block 2") + dataPool.MetaBlocks().Put( + mb2Hash, + createDummyMetaBlock(destShardId, destShards[1], miniblockHashes[2], miniblockHashes[3]), + ) + mb3Hash := []byte("meta block 3") + dataPool.MetaBlocks().Put( + mb3Hash, + createDummyMetaBlock(destShardId, destShards[2], miniblockHashes[4], miniblockHashes[5]), + ) + + shardCoordinator := mock.NewMultipleShardsCoordinatorMock() + shardCoordinator.CurrentShard = destShardId + shardCoordinator.SetNoShards(destShardId + 1) + + bp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + dataPool, + &mock.ChainStorerMock{}, + hasher, + marshalizer, + initAccountsMock(), + shardCoordinator, + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{ + GetHighestFinalBlockNonceCalled: func() uint64 { + return 0 + }, + }, + &mock.BlocksTrackerMock{ + RemoveNotarisedBlocksCalled: func(headerHandler data.HeaderHandler) error { + return nil + }, + }, + createGenesisBlocks(shardCoordinator), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + //create block body with first 3 miniblocks from miniblocks var + blockBody := block.Body{miniblocks[0], miniblocks[1], miniblocks[2]} + + hashes := make([][]byte, 0) + hashes = append(hashes, mb1Hash) + hashes = append(hashes, mb2Hash) + hashes = append(hashes, mb3Hash) + blockHeader := &block.Header{MetaBlockHashes: hashes} + + _, err := bp.GetProcessedMetaBlocksFromPool(blockBody, blockHeader) + + assert.Nil(t, err) + //check WasMiniBlockProcessed for remaining metablocks + metaBlock2Recov, _ := dataPool.MetaBlocks().Get(mb2Hash) + assert.True(t, (metaBlock2Recov.(data.HeaderHandler)).GetMiniBlockProcessed(miniblockHashes[2])) + assert.False(t, (metaBlock2Recov.(data.HeaderHandler)).GetMiniBlockProcessed(miniblockHashes[3])) + + metaBlock3Recov, _ := dataPool.MetaBlocks().Get(mb3Hash) + assert.False(t, (metaBlock3Recov.(data.HeaderHandler)).GetMiniBlockProcessed(miniblockHashes[4])) + assert.False(t, (metaBlock3Recov.(data.HeaderHandler)).GetMiniBlockProcessed(miniblockHashes[5])) } func TestBlockProcessor_RestoreBlockIntoPoolsShouldErrNilBlockHeader(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - - be, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - initStore(), - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - 
&mock.Uint64ByteSliceConverterMock{}, - ) - err := be.RestoreBlockIntoPools(nil, nil) - assert.NotNil(t, err) - assert.Equal(t, process.ErrNilBlockHeader, err) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + + be, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + initStore(), + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + err := be.RestoreBlockIntoPools(nil, nil) + assert.NotNil(t, err) + assert.Equal(t, process.ErrNilBlockHeader, err) } func TestBlockProcessor_RestoreBlockIntoPoolsShouldErrNilTxBlockBody(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - initStore(), - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - err := sp.RestoreBlockIntoPools(&block.Header{}, nil) - assert.NotNil(t, err) - assert.Equal(t, err, process.ErrNilTxBlockBody) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + initStore(), + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + err := sp.RestoreBlockIntoPools(&block.Header{}, nil) + assert.NotNil(t, err) + assert.Equal(t, err, process.ErrNilTxBlockBody) } func TestShardProcessor_RestoreBlockIntoPoolsShouldWork(t *testing.T) { - t.Parallel() - - txHash := []byte("tx hash 1") - - dataPool := mock.NewPoolsHolderFake() - marshalizerMock := &mock.MarshalizerMock{} - hasherMock := &mock.HasherStub{} - - body := make(block.Body, 0) - tx := transaction.Transaction{Nonce: 1} - buffTx, _ := marshalizerMock.Marshal(tx) - - store := &mock.ChainStorerMock{ - GetAllCalled: func(unitType dataRetriever.UnitType, keys [][]byte) (map[string][]byte, error) { - m := make(map[string][]byte, 0) - m[string(txHash)] = buffTx - return m, nil - }, - GetStorerCalled: func(unitType dataRetriever.UnitType) storage.Storer { - return &mock.StorerStub{ - RemoveCalled: func(key []byte) error { - return nil - }, - } - }, - } - - factory, _ := shard.NewPreProcessorsContainerFactory( - mock.NewMultiShardsCoordinatorMock(3), - store, - marshalizerMock, - hasherMock, - dataPool, - &mock.AddressConverterMock{}, - initAccountsMock(), - &mock.RequestHandlerMock{}, - &mock.TxProcessorMock{}, - &mock.SCProcessorMock{}, - &mock.SmartContractResultsProcessorMock{}, - &mock.RewardTxProcessorMock{}, - ) - container, _ := factory.Create() - - tc, err := coordinator.NewTransactionCoordinator( - 
mock.NewMultiShardsCoordinatorMock(3), - initAccountsMock(), - dataPool, - &mock.RequestHandlerMock{}, - container, - &mock.InterimProcessorContainerMock{}, - ) - - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - dataPool, - store, - hasherMock, - marshalizerMock, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - tc, - &mock.Uint64ByteSliceConverterMock{}, - ) - - txHashes := make([][]byte, 0) - txHashes = append(txHashes, txHash) - miniblock := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 1, - TxHashes: txHashes, - } - body = append(body, &miniblock) - - miniblockHash := []byte("mini block hash 1") - hasherMock.ComputeCalled = func(s string) []byte { - return miniblockHash - } - - metablockHash := []byte("meta block hash 1") - metablockHeader := createDummyMetaBlock(0, 1, miniblockHash) - metablockHeader.SetMiniBlockProcessed(metablockHash, true) - dataPool.MetaBlocks().Put( - metablockHash, - metablockHeader, - ) - - err = sp.RestoreBlockIntoPools(&block.Header{}, body) - - miniblockFromPool, _ := dataPool.MiniBlocks().Get(miniblockHash) - txFromPool, _ := dataPool.Transactions().SearchFirstData(txHash) - metablockFromPool, _ := dataPool.MetaBlocks().Get(metablockHash) - metablock := metablockFromPool.(*block.MetaBlock) - assert.Nil(t, err) - assert.Equal(t, &miniblock, miniblockFromPool) - assert.Equal(t, &tx, txFromPool) - assert.Equal(t, false, metablock.GetMiniBlockProcessed(miniblockHash)) + t.Parallel() + + txHash := []byte("tx hash 1") + + dataPool := mock.NewPoolsHolderMock() + marshalizerMock := &mock.MarshalizerMock{} + hasherMock := &mock.HasherStub{} + + body := make(block.Body, 0) + tx := transaction.Transaction{Nonce: 1} + buffTx, _ := marshalizerMock.Marshal(tx) + + store := &mock.ChainStorerMock{ + GetAllCalled: func(unitType dataRetriever.UnitType, keys [][]byte) (map[string][]byte, error) { + m := make(map[string][]byte, 0) + m[string(txHash)] = buffTx + return m, nil + }, + GetStorerCalled: func(unitType dataRetriever.UnitType) storage.Storer { + return &mock.StorerStub{ + RemoveCalled: func(key []byte) error { + return nil + }, + } + }, + } + + factory, _ := shard.NewPreProcessorsContainerFactory( + mock.NewMultiShardsCoordinatorMock(3), + store, + marshalizerMock, + hasherMock, + dataPool, + &mock.AddressConverterMock{}, + initAccountsMock(), + &mock.RequestHandlerMock{}, + &mock.TxProcessorMock{}, + &mock.SCProcessorMock{}, + &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + ) + container, _ := factory.Create() + + tc, err := coordinator.NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(3), + initAccountsMock(), + dataPool, + &mock.RequestHandlerMock{}, + container, + &mock.InterimProcessorContainerMock{}, + ) + + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + dataPool, + store, + hasherMock, + marshalizerMock, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + tc, + &mock.Uint64ByteSliceConverterMock{}, + ) + + txHashes := make([][]byte, 0) + txHashes = append(txHashes, txHash) + miniblock := block.MiniBlock{ + 
ReceiverShardID: 0, + SenderShardID: 1, + TxHashes: txHashes, + } + body = append(body, &miniblock) + + miniblockHash := []byte("mini block hash 1") + hasherMock.ComputeCalled = func(s string) []byte { + return miniblockHash + } + + metablockHash := []byte("meta block hash 1") + metablockHeader := createDummyMetaBlock(0, 1, miniblockHash) + metablockHeader.SetMiniBlockProcessed(metablockHash, true) + dataPool.MetaBlocks().Put( + metablockHash, + metablockHeader, + ) + + err = sp.RestoreBlockIntoPools(&block.Header{}, body) + + miniblockFromPool, _ := dataPool.MiniBlocks().Get(miniblockHash) + txFromPool, _ := dataPool.Transactions().SearchFirstData(txHash) + metablockFromPool, _ := dataPool.MetaBlocks().Get(metablockHash) + metablock := metablockFromPool.(*block.MetaBlock) + assert.Nil(t, err) + assert.Equal(t, &miniblock, miniblockFromPool) + assert.Equal(t, &tx, txFromPool) + assert.Equal(t, false, metablock.GetMiniBlockProcessed(miniblockHash)) } func TestShardProcessor_DecodeBlockBody(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - marshalizerMock := &mock.MarshalizerMock{} - sp, err := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - marshalizerMock, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - body := make(block.Body, 0) - body = append(body, &block.MiniBlock{ReceiverShardID: 69}) - message, err := marshalizerMock.Marshal(body) - assert.Nil(t, err) - - dcdBlk := sp.DecodeBlockBody(nil) - assert.Nil(t, dcdBlk) - - dcdBlk = sp.DecodeBlockBody(message) - assert.Equal(t, body, dcdBlk) - assert.Equal(t, uint32(69), body[0].ReceiverShardID) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + marshalizerMock := &mock.MarshalizerMock{} + sp, err := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + marshalizerMock, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + body := make(block.Body, 0) + body = append(body, &block.MiniBlock{ReceiverShardID: 69}) + message, err := marshalizerMock.Marshal(body) + assert.Nil(t, err) + + dcdBlk := sp.DecodeBlockBody(nil) + assert.Nil(t, dcdBlk) + + dcdBlk = sp.DecodeBlockBody(message) + assert.Equal(t, body, dcdBlk) + assert.Equal(t, uint32(69), body[0].ReceiverShardID) } func TestShardProcessor_DecodeBlockHeader(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - marshalizerMock := &mock.MarshalizerMock{} - sp, err := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - marshalizerMock, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - 
&mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - hdr := &block.Header{} - hdr.Nonce = 1 - hdr.TimeStamp = uint64(0) - hdr.Signature = []byte("A") - message, err := marshalizerMock.Marshal(hdr) - assert.Nil(t, err) - - message, err = marshalizerMock.Marshal(hdr) - assert.Nil(t, err) - - dcdHdr := sp.DecodeBlockHeader(nil) - assert.Nil(t, dcdHdr) - - dcdHdr = sp.DecodeBlockHeader(message) - assert.Equal(t, hdr, dcdHdr) - assert.Equal(t, []byte("A"), dcdHdr.GetSignature()) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + marshalizerMock := &mock.MarshalizerMock{} + sp, err := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + marshalizerMock, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + hdr := &block.Header{} + hdr.Nonce = 1 + hdr.TimeStamp = uint64(0) + hdr.Signature = []byte("A") + message, err := marshalizerMock.Marshal(hdr) + assert.Nil(t, err) + + message, err = marshalizerMock.Marshal(hdr) + assert.Nil(t, err) + + dcdHdr := sp.DecodeBlockHeader(nil) + assert.Nil(t, dcdHdr) + + dcdHdr = sp.DecodeBlockHeader(message) + assert.Equal(t, hdr, dcdHdr) + assert.Equal(t, []byte("A"), dcdHdr.GetSignature()) } func TestShardProcessor_IsHdrConstructionValid(t *testing.T) { - t.Parallel() - - hasher := mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} - dataPool := initDataPool([]byte("tx_hash1")) - - shardNr := uint32(5) - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - dataPool, - &mock.ChainStorerMock{}, - hasher, - marshalizer, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(shardNr), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(shardNr)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - prevRandSeed := []byte("prevrand") - currRandSeed := []byte("currrand") - notarizedHdrs := sp.NotarizedHdrs() - lastHdr := &block.MetaBlock{Round: 9, - Nonce: 44, - RandSeed: prevRandSeed} - notarizedHdrs[sharding.MetachainShardId] = append(notarizedHdrs[sharding.MetachainShardId], lastHdr) - - //put the existing headers inside datapool - - //header shard 0 - prevHash, _ := sp.ComputeHeaderHash(sp.LastNotarizedHdrForShard(sharding.MetachainShardId).(*block.MetaBlock)) - prevHdr := &block.MetaBlock{ - Round: 10, - Nonce: 45, - PrevRandSeed: prevRandSeed, - RandSeed: currRandSeed, - PrevHash: prevHash, - RootHash: []byte("prevRootHash")} - - prevHash, _ = sp.ComputeHeaderHash(prevHdr) - currHdr := &block.MetaBlock{ - Round: 11, - Nonce: 46, - PrevRandSeed: currRandSeed, - RandSeed: []byte("nextrand"), - PrevHash: prevHash, - RootHash: []byte("currRootHash")} - - err := sp.IsHdrConstructionValid(nil, prevHdr) - assert.Equal(t, err, process.ErrNilBlockHeader) - - err = sp.IsHdrConstructionValid(currHdr, nil) - assert.Equal(t, err, process.ErrNilBlockHeader) - - currHdr.Nonce = 0 - err = sp.IsHdrConstructionValid(currHdr, prevHdr) - assert.Equal(t, err, process.ErrWrongNonceInBlock) - - currHdr.Nonce = 0 - prevHdr.Nonce = 0 - err = 
sp.IsHdrConstructionValid(currHdr, prevHdr) - assert.Equal(t, err, process.ErrRootStateMissmatch) - - currHdr.Nonce = 0 - prevHdr.Nonce = 0 - prevHdr.RootHash = nil - err = sp.IsHdrConstructionValid(currHdr, prevHdr) - assert.Nil(t, err) - - currHdr.Nonce = 46 - prevHdr.Nonce = 45 - prevHdr.Round = currHdr.Round + 1 - err = sp.IsHdrConstructionValid(currHdr, prevHdr) - assert.Equal(t, err, process.ErrLowerRoundInOtherChainBlock) - - prevHdr.Round = currHdr.Round - 1 - currHdr.Nonce = prevHdr.Nonce + 2 - err = sp.IsHdrConstructionValid(currHdr, prevHdr) - assert.Equal(t, err, process.ErrWrongNonceInBlock) - - currHdr.Nonce = prevHdr.Nonce + 1 - prevHdr.RandSeed = []byte("randomwrong") - err = sp.IsHdrConstructionValid(currHdr, prevHdr) - assert.Equal(t, err, process.ErrRandSeedMismatch) - - prevHdr.RandSeed = currRandSeed - currHdr.PrevHash = []byte("wronghash") - err = sp.IsHdrConstructionValid(currHdr, prevHdr) - assert.Equal(t, err, process.ErrHashDoesNotMatchInOtherChainBlock) - - currHdr.PrevHash = prevHash - prevHdr.RootHash = []byte("prevRootHash") - err = sp.IsHdrConstructionValid(currHdr, prevHdr) - assert.Nil(t, err) + t.Parallel() + + hasher := mock.HasherMock{} + marshalizer := &mock.MarshalizerMock{} + dataPool := initDataPool([]byte("tx_hash1")) + + shardNr := uint32(5) + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + dataPool, + &mock.ChainStorerMock{}, + hasher, + marshalizer, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(shardNr), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(shardNr)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + prevRandSeed := []byte("prevrand") + currRandSeed := []byte("currrand") + notarizedHdrs := sp.NotarizedHdrs() + lastHdr := &block.MetaBlock{Round: 9, + Nonce: 44, + RandSeed: prevRandSeed} + notarizedHdrs[sharding.MetachainShardId] = append(notarizedHdrs[sharding.MetachainShardId], lastHdr) + + //put the existing headers inside datapool + + //header shard 0 + prevHash, _ := sp.ComputeHeaderHash(sp.LastNotarizedHdrForShard(sharding.MetachainShardId).(*block.MetaBlock)) + prevHdr := &block.MetaBlock{ + Round: 10, + Nonce: 45, + PrevRandSeed: prevRandSeed, + RandSeed: currRandSeed, + PrevHash: prevHash, + RootHash: []byte("prevRootHash")} + + prevHash, _ = sp.ComputeHeaderHash(prevHdr) + currHdr := &block.MetaBlock{ + Round: 11, + Nonce: 46, + PrevRandSeed: currRandSeed, + RandSeed: []byte("nextrand"), + PrevHash: prevHash, + RootHash: []byte("currRootHash")} + + err := sp.IsHdrConstructionValid(nil, prevHdr) + assert.Equal(t, err, process.ErrNilBlockHeader) + + err = sp.IsHdrConstructionValid(currHdr, nil) + assert.Equal(t, err, process.ErrNilBlockHeader) + + currHdr.Nonce = 0 + err = sp.IsHdrConstructionValid(currHdr, prevHdr) + assert.Equal(t, err, process.ErrWrongNonceInBlock) + + currHdr.Nonce = 0 + prevHdr.Nonce = 0 + err = sp.IsHdrConstructionValid(currHdr, prevHdr) + assert.Equal(t, err, process.ErrRootStateMissmatch) + + currHdr.Nonce = 0 + prevHdr.Nonce = 0 + prevHdr.RootHash = nil + err = sp.IsHdrConstructionValid(currHdr, prevHdr) + assert.Nil(t, err) + + currHdr.Nonce = 46 + prevHdr.Nonce = 45 + prevHdr.Round = currHdr.Round + 1 + err = sp.IsHdrConstructionValid(currHdr, prevHdr) + assert.Equal(t, err, process.ErrLowerRoundInOtherChainBlock) + + prevHdr.Round = currHdr.Round - 1 + currHdr.Nonce = 
prevHdr.Nonce + 2 + err = sp.IsHdrConstructionValid(currHdr, prevHdr) + assert.Equal(t, err, process.ErrWrongNonceInBlock) + + currHdr.Nonce = prevHdr.Nonce + 1 + prevHdr.RandSeed = []byte("randomwrong") + err = sp.IsHdrConstructionValid(currHdr, prevHdr) + assert.Equal(t, err, process.ErrRandSeedMismatch) + + prevHdr.RandSeed = currRandSeed + currHdr.PrevHash = []byte("wronghash") + err = sp.IsHdrConstructionValid(currHdr, prevHdr) + assert.Equal(t, err, process.ErrHashDoesNotMatchInOtherChainBlock) + + currHdr.PrevHash = prevHash + prevHdr.RootHash = []byte("prevRootHash") + err = sp.IsHdrConstructionValid(currHdr, prevHdr) + assert.Nil(t, err) } func TestShardProcessor_RemoveAndSaveLastNotarizedMetaHdrNoDstMB(t *testing.T) { - t.Parallel() - - hasher := mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} - dataPool := mock.NewPoolsHolderFake() - forkDetector := &mock.ForkDetectorMock{} - highNonce := uint64(500) - forkDetector.GetHighestFinalBlockNonceCalled = func() uint64 { - return highNonce - } - - putCalledNr := 0 - store := &mock.ChainStorerMock{ - PutCalled: func(unitType dataRetriever.UnitType, key []byte, value []byte) error { - putCalledNr++ - return nil - }, - } - - shardNr := uint32(5) - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - dataPool, - store, - hasher, - marshalizer, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(shardNr), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - forkDetector, - &mock.BlocksTrackerMock{ - RemoveNotarisedBlocksCalled: func(headerHandler data.HeaderHandler) error { - return nil - }, - UnnotarisedBlocksCalled: func() []data.HeaderHandler { - return make([]data.HeaderHandler, 0) - }, - }, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(shardNr)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - prevRandSeed := []byte("prevrand") - currRandSeed := []byte("currrand") - notarizedHdrs := sp.NotarizedHdrs() - firstNonce := uint64(44) - - lastHdr := &block.MetaBlock{Round: 9, - Nonce: firstNonce, - RandSeed: prevRandSeed} - notarizedHdrs[sharding.MetachainShardId] = append(notarizedHdrs[sharding.MetachainShardId], lastHdr) - - //put the existing headers inside datapool - - //header shard 0 - prevHash, _ := sp.ComputeHeaderHash(sp.LastNotarizedHdrForShard(sharding.MetachainShardId).(*block.MetaBlock)) - prevHdr := &block.MetaBlock{ - Round: 10, - Nonce: 45, - PrevRandSeed: prevRandSeed, - RandSeed: currRandSeed, - PrevHash: prevHash, - RootHash: []byte("prevRootHash")} - - prevHash, _ = sp.ComputeHeaderHash(prevHdr) - currHdr := &block.MetaBlock{ - Round: 11, - Nonce: 46, - PrevRandSeed: currRandSeed, - RandSeed: []byte("nextrand"), - PrevHash: prevHash, - RootHash: []byte("currRootHash")} - currHash, _ := sp.ComputeHeaderHash(currHdr) - prevHash, _ = sp.ComputeHeaderHash(prevHdr) - - shardHdr := &block.Header{Round: 15} - shardBlock := block.Body{} - - blockHeader := &block.Header{} - - // test header not in pool and defer called - processedMetaHdrs, err := sp.GetProcessedMetaBlocksFromPool(shardBlock, blockHeader) - assert.Nil(t, err) - - err = sp.SaveLastNotarizedHeader(sharding.MetachainShardId, processedMetaHdrs) - assert.Nil(t, err) - - err = sp.RemoveProcessedMetablocksFromPool(processedMetaHdrs) - assert.Nil(t, err) - assert.Equal(t, 0, putCalledNr) - - notarizedHdrs = sp.NotarizedHdrs() - assert.Equal(t, firstNonce, sp.LastNotarizedHdrForShard(sharding.MetachainShardId).GetNonce()) - assert.Equal(t, 0, 
len(processedMetaHdrs)) - - // wrong header type in pool and defer called - dataPool.MetaBlocks().Put(currHash, shardHdr) - - hashes := make([][]byte, 0) - hashes = append(hashes, currHash) - blockHeader = &block.Header{MetaBlockHashes: hashes} - - processedMetaHdrs, err = sp.GetProcessedMetaBlocksFromPool(shardBlock, blockHeader) - assert.Equal(t, nil, err) - - err = sp.SaveLastNotarizedHeader(sharding.MetachainShardId, processedMetaHdrs) - assert.Nil(t, err) - - err = sp.RemoveProcessedMetablocksFromPool(processedMetaHdrs) - assert.Nil(t, err) - assert.Equal(t, 0, putCalledNr) - - notarizedHdrs = sp.NotarizedHdrs() - assert.Equal(t, firstNonce, sp.LastNotarizedHdrForShard(sharding.MetachainShardId).GetNonce()) - - // put headers in pool - dataPool.MetaBlocks().Put(currHash, currHdr) - dataPool.MetaBlocks().Put(prevHash, prevHdr) - - hashes = make([][]byte, 0) - hashes = append(hashes, currHash) - hashes = append(hashes, prevHash) - blockHeader = &block.Header{MetaBlockHashes: hashes} - - processedMetaHdrs, err = sp.GetProcessedMetaBlocksFromPool(shardBlock, blockHeader) - assert.Nil(t, err) - - err = sp.SaveLastNotarizedHeader(sharding.MetachainShardId, processedMetaHdrs) - assert.Nil(t, err) - - err = sp.RemoveProcessedMetablocksFromPool(processedMetaHdrs) - assert.Nil(t, err) - assert.Equal(t, 4, putCalledNr) - - assert.Equal(t, currHdr, sp.LastNotarizedHdrForShard(sharding.MetachainShardId)) + t.Parallel() + + hasher := mock.HasherMock{} + marshalizer := &mock.MarshalizerMock{} + dataPool := mock.NewPoolsHolderMock() + forkDetector := &mock.ForkDetectorMock{} + highNonce := uint64(500) + forkDetector.GetHighestFinalBlockNonceCalled = func() uint64 { + return highNonce + } + + putCalledNr := 0 + store := &mock.ChainStorerMock{ + PutCalled: func(unitType dataRetriever.UnitType, key []byte, value []byte) error { + putCalledNr++ + return nil + }, + } + + shardNr := uint32(5) + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + dataPool, + store, + hasher, + marshalizer, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(shardNr), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + forkDetector, + &mock.BlocksTrackerMock{ + RemoveNotarisedBlocksCalled: func(headerHandler data.HeaderHandler) error { + return nil + }, + UnnotarisedBlocksCalled: func() []data.HeaderHandler { + return make([]data.HeaderHandler, 0) + }, + }, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(shardNr)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + prevRandSeed := []byte("prevrand") + currRandSeed := []byte("currrand") + notarizedHdrs := sp.NotarizedHdrs() + firstNonce := uint64(44) + + lastHdr := &block.MetaBlock{Round: 9, + Nonce: firstNonce, + RandSeed: prevRandSeed} + notarizedHdrs[sharding.MetachainShardId] = append(notarizedHdrs[sharding.MetachainShardId], lastHdr) + + //put the existing headers inside datapool + + //header shard 0 + prevHash, _ := sp.ComputeHeaderHash(sp.LastNotarizedHdrForShard(sharding.MetachainShardId).(*block.MetaBlock)) + prevHdr := &block.MetaBlock{ + Round: 10, + Nonce: 45, + PrevRandSeed: prevRandSeed, + RandSeed: currRandSeed, + PrevHash: prevHash, + RootHash: []byte("prevRootHash")} + + prevHash, _ = sp.ComputeHeaderHash(prevHdr) + currHdr := &block.MetaBlock{ + Round: 11, + Nonce: 46, + PrevRandSeed: currRandSeed, + RandSeed: []byte("nextrand"), + PrevHash: prevHash, + RootHash: []byte("currRootHash")} + currHash, _ := sp.ComputeHeaderHash(currHdr) + 
prevHash, _ = sp.ComputeHeaderHash(prevHdr) + + shardHdr := &block.Header{Round: 15} + shardBlock := block.Body{} + + blockHeader := &block.Header{} + + // test header not in pool and defer called + processedMetaHdrs, err := sp.GetProcessedMetaBlocksFromPool(shardBlock, blockHeader) + assert.Nil(t, err) + + err = sp.SaveLastNotarizedHeader(sharding.MetachainShardId, processedMetaHdrs) + assert.Nil(t, err) + + err = sp.RemoveProcessedMetablocksFromPool(processedMetaHdrs) + assert.Nil(t, err) + assert.Equal(t, 0, putCalledNr) + + notarizedHdrs = sp.NotarizedHdrs() + assert.Equal(t, firstNonce, sp.LastNotarizedHdrForShard(sharding.MetachainShardId).GetNonce()) + assert.Equal(t, 0, len(processedMetaHdrs)) + + // wrong header type in pool and defer called + dataPool.MetaBlocks().Put(currHash, shardHdr) + + hashes := make([][]byte, 0) + hashes = append(hashes, currHash) + blockHeader = &block.Header{MetaBlockHashes: hashes} + + processedMetaHdrs, err = sp.GetProcessedMetaBlocksFromPool(shardBlock, blockHeader) + assert.Equal(t, nil, err) + + err = sp.SaveLastNotarizedHeader(sharding.MetachainShardId, processedMetaHdrs) + assert.Nil(t, err) + + err = sp.RemoveProcessedMetablocksFromPool(processedMetaHdrs) + assert.Nil(t, err) + assert.Equal(t, 0, putCalledNr) + + notarizedHdrs = sp.NotarizedHdrs() + assert.Equal(t, firstNonce, sp.LastNotarizedHdrForShard(sharding.MetachainShardId).GetNonce()) + + // put headers in pool + dataPool.MetaBlocks().Put(currHash, currHdr) + dataPool.MetaBlocks().Put(prevHash, prevHdr) + + hashes = make([][]byte, 0) + hashes = append(hashes, currHash) + hashes = append(hashes, prevHash) + blockHeader = &block.Header{MetaBlockHashes: hashes} + + processedMetaHdrs, err = sp.GetProcessedMetaBlocksFromPool(shardBlock, blockHeader) + assert.Nil(t, err) + + err = sp.SaveLastNotarizedHeader(sharding.MetachainShardId, processedMetaHdrs) + assert.Nil(t, err) + + err = sp.RemoveProcessedMetablocksFromPool(processedMetaHdrs) + assert.Nil(t, err) + assert.Equal(t, 4, putCalledNr) + + assert.Equal(t, currHdr, sp.LastNotarizedHdrForShard(sharding.MetachainShardId)) } func createShardData(hasher hashing.Hasher, marshalizer marshal.Marshalizer, miniBlocks []block.MiniBlock) []block.ShardData { - shardData := make([]block.ShardData, len(miniBlocks)) - for i := 0; i < len(miniBlocks); i++ { - marshaled, _ := marshalizer.Marshal(miniBlocks[i]) - hashed := hasher.Compute(string(marshaled)) - - shardMBHeader := block.ShardMiniBlockHeader{ - ReceiverShardId: miniBlocks[i].ReceiverShardID, - SenderShardId: miniBlocks[i].SenderShardID, - TxCount: uint32(len(miniBlocks[i].TxHashes)), - Hash: hashed, - } - shardMBHeaders := make([]block.ShardMiniBlockHeader, 0) - shardMBHeaders = append(shardMBHeaders, shardMBHeader) - - shardData[0].ShardId = miniBlocks[i].SenderShardID - shardData[0].TxCount = 10 - shardData[0].HeaderHash = []byte("headerHash") - shardData[0].ShardMiniBlockHeaders = shardMBHeaders - } - - return shardData + shardData := make([]block.ShardData, len(miniBlocks)) + for i := 0; i < len(miniBlocks); i++ { + marshaled, _ := marshalizer.Marshal(miniBlocks[i]) + hashed := hasher.Compute(string(marshaled)) + + shardMBHeader := block.ShardMiniBlockHeader{ + ReceiverShardId: miniBlocks[i].ReceiverShardID, + SenderShardId: miniBlocks[i].SenderShardID, + TxCount: uint32(len(miniBlocks[i].TxHashes)), + Hash: hashed, + } + shardMBHeaders := make([]block.ShardMiniBlockHeader, 0) + shardMBHeaders = append(shardMBHeaders, shardMBHeader) + + shardData[0].ShardId = miniBlocks[i].SenderShardID + 
shardData[0].TxCount = 10 + shardData[0].HeaderHash = []byte("headerHash") + shardData[0].ShardMiniBlockHeaders = shardMBHeaders + } + + return shardData } func TestShardProcessor_RemoveAndSaveLastNotarizedMetaHdrNotAllMBFinished(t *testing.T) { - t.Parallel() - - hasher := mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} - dataPool := mock.NewPoolsHolderFake() - forkDetector := &mock.ForkDetectorMock{} - highNonce := uint64(500) - forkDetector.GetHighestFinalBlockNonceCalled = func() uint64 { - return highNonce - } - - putCalledNr := 0 - store := &mock.ChainStorerMock{ - PutCalled: func(unitType dataRetriever.UnitType, key []byte, value []byte) error { - putCalledNr++ - return nil - }, - } - - shardNr := uint32(5) - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - dataPool, - store, - hasher, - marshalizer, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(shardNr), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - forkDetector, - &mock.BlocksTrackerMock{ - RemoveNotarisedBlocksCalled: func(headerHandler data.HeaderHandler) error { - return nil - }, - UnnotarisedBlocksCalled: func() []data.HeaderHandler { - return make([]data.HeaderHandler, 0) - }, - }, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(shardNr)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - prevRandSeed := []byte("prevrand") - currRandSeed := []byte("currrand") - notarizedHdrs := sp.NotarizedHdrs() - firstNonce := uint64(44) - - lastHdr := &block.MetaBlock{Round: 9, - Nonce: firstNonce, - RandSeed: prevRandSeed} - notarizedHdrs[sharding.MetachainShardId] = append(notarizedHdrs[sharding.MetachainShardId], lastHdr) - - shardBlock := make(block.Body, 0) - txHash := []byte("txhash") - txHashes := make([][]byte, 0) - txHashes = append(txHashes, txHash) - miniblock1 := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 1, - TxHashes: txHashes, - } - miniblock2 := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 2, - TxHashes: txHashes, - } - miniblock3 := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 3, - TxHashes: txHashes, - } - miniblock4 := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 4, - TxHashes: txHashes, - } - shardBlock = append(shardBlock, &miniblock1, &miniblock2, &miniblock3) - - miniBlocks := make([]block.MiniBlock, 0) - miniBlocks = append(miniBlocks, miniblock1, miniblock2) - //header shard 0 - prevHash, _ := sp.ComputeHeaderHash(sp.LastNotarizedHdrForShard(sharding.MetachainShardId).(*block.MetaBlock)) - prevHdr := &block.MetaBlock{ - Round: 10, - Nonce: 45, - PrevRandSeed: prevRandSeed, - RandSeed: currRandSeed, - PrevHash: prevHash, - RootHash: []byte("prevRootHash"), - ShardInfo: createShardData(hasher, marshalizer, miniBlocks)} - - miniBlocks = make([]block.MiniBlock, 0) - miniBlocks = append(miniBlocks, miniblock3, miniblock4) - prevHash, _ = sp.ComputeHeaderHash(prevHdr) - currHdr := &block.MetaBlock{ - Round: 11, - Nonce: 46, - PrevRandSeed: currRandSeed, - RandSeed: []byte("nextrand"), - PrevHash: prevHash, - RootHash: []byte("currRootHash"), - ShardInfo: createShardData(hasher, marshalizer, miniBlocks)} - currHash, _ := sp.ComputeHeaderHash(currHdr) - prevHash, _ = sp.ComputeHeaderHash(prevHdr) - - // put headers in pool - dataPool.MetaBlocks().Put(currHash, currHdr) - dataPool.MetaBlocks().Put(prevHash, prevHdr) - - hashes := make([][]byte, 0) - hashes = append(hashes, currHash) - hashes = append(hashes, prevHash) - blockHeader := 
&block.Header{MetaBlockHashes: hashes} - - processedMetaHdrs, err := sp.GetProcessedMetaBlocksFromPool(shardBlock, blockHeader) - assert.Nil(t, err) - - err = sp.SaveLastNotarizedHeader(sharding.MetachainShardId, processedMetaHdrs) - assert.Nil(t, err) - - err = sp.RemoveProcessedMetablocksFromPool(processedMetaHdrs) - assert.Nil(t, err) - assert.Equal(t, 2, putCalledNr) - - assert.Equal(t, prevHdr, sp.LastNotarizedHdrForShard(sharding.MetachainShardId)) + t.Parallel() + + hasher := mock.HasherMock{} + marshalizer := &mock.MarshalizerMock{} + dataPool := mock.NewPoolsHolderMock() + forkDetector := &mock.ForkDetectorMock{} + highNonce := uint64(500) + forkDetector.GetHighestFinalBlockNonceCalled = func() uint64 { + return highNonce + } + + putCalledNr := 0 + store := &mock.ChainStorerMock{ + PutCalled: func(unitType dataRetriever.UnitType, key []byte, value []byte) error { + putCalledNr++ + return nil + }, + } + + shardNr := uint32(5) + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + dataPool, + store, + hasher, + marshalizer, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(shardNr), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + forkDetector, + &mock.BlocksTrackerMock{ + RemoveNotarisedBlocksCalled: func(headerHandler data.HeaderHandler) error { + return nil + }, + UnnotarisedBlocksCalled: func() []data.HeaderHandler { + return make([]data.HeaderHandler, 0) + }, + }, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(shardNr)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + prevRandSeed := []byte("prevrand") + currRandSeed := []byte("currrand") + notarizedHdrs := sp.NotarizedHdrs() + firstNonce := uint64(44) + + lastHdr := &block.MetaBlock{Round: 9, + Nonce: firstNonce, + RandSeed: prevRandSeed} + notarizedHdrs[sharding.MetachainShardId] = append(notarizedHdrs[sharding.MetachainShardId], lastHdr) + + shardBlock := make(block.Body, 0) + txHash := []byte("txhash") + txHashes := make([][]byte, 0) + txHashes = append(txHashes, txHash) + miniblock1 := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 1, + TxHashes: txHashes, + } + miniblock2 := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 2, + TxHashes: txHashes, + } + miniblock3 := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 3, + TxHashes: txHashes, + } + miniblock4 := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 4, + TxHashes: txHashes, + } + shardBlock = append(shardBlock, &miniblock1, &miniblock2, &miniblock3) + + miniBlocks := make([]block.MiniBlock, 0) + miniBlocks = append(miniBlocks, miniblock1, miniblock2) + //header shard 0 + prevHash, _ := sp.ComputeHeaderHash(sp.LastNotarizedHdrForShard(sharding.MetachainShardId).(*block.MetaBlock)) + prevHdr := &block.MetaBlock{ + Round: 10, + Nonce: 45, + PrevRandSeed: prevRandSeed, + RandSeed: currRandSeed, + PrevHash: prevHash, + RootHash: []byte("prevRootHash"), + ShardInfo: createShardData(hasher, marshalizer, miniBlocks)} + + miniBlocks = make([]block.MiniBlock, 0) + miniBlocks = append(miniBlocks, miniblock3, miniblock4) + prevHash, _ = sp.ComputeHeaderHash(prevHdr) + currHdr := &block.MetaBlock{ + Round: 11, + Nonce: 46, + PrevRandSeed: currRandSeed, + RandSeed: []byte("nextrand"), + PrevHash: prevHash, + RootHash: []byte("currRootHash"), + ShardInfo: createShardData(hasher, marshalizer, miniBlocks)} + currHash, _ := sp.ComputeHeaderHash(currHdr) + prevHash, _ = sp.ComputeHeaderHash(prevHdr) + + // put headers in pool + 
dataPool.MetaBlocks().Put(currHash, currHdr) + dataPool.MetaBlocks().Put(prevHash, prevHdr) + + hashes := make([][]byte, 0) + hashes = append(hashes, currHash) + hashes = append(hashes, prevHash) + blockHeader := &block.Header{MetaBlockHashes: hashes} + + processedMetaHdrs, err := sp.GetProcessedMetaBlocksFromPool(shardBlock, blockHeader) + assert.Nil(t, err) + + err = sp.SaveLastNotarizedHeader(sharding.MetachainShardId, processedMetaHdrs) + assert.Nil(t, err) + + err = sp.RemoveProcessedMetablocksFromPool(processedMetaHdrs) + assert.Nil(t, err) + assert.Equal(t, 2, putCalledNr) + + assert.Equal(t, prevHdr, sp.LastNotarizedHdrForShard(sharding.MetachainShardId)) } func TestShardProcessor_RemoveAndSaveLastNotarizedMetaHdrAllMBFinished(t *testing.T) { - t.Parallel() - - hasher := mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} - dataPool := mock.NewPoolsHolderFake() - forkDetector := &mock.ForkDetectorMock{} - highNonce := uint64(500) - forkDetector.GetHighestFinalBlockNonceCalled = func() uint64 { - return highNonce - } - putCalledNr := 0 - store := &mock.ChainStorerMock{ - PutCalled: func(unitType dataRetriever.UnitType, key []byte, value []byte) error { - putCalledNr++ - return nil - }, - } - - shardNr := uint32(5) - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - dataPool, - store, - hasher, - marshalizer, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(shardNr), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - forkDetector, - &mock.BlocksTrackerMock{ - RemoveNotarisedBlocksCalled: func(headerHandler data.HeaderHandler) error { - return nil - }, - UnnotarisedBlocksCalled: func() []data.HeaderHandler { - return make([]data.HeaderHandler, 0) - }, - }, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(shardNr)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - prevRandSeed := []byte("prevrand") - currRandSeed := []byte("currrand") - notarizedHdrs := sp.NotarizedHdrs() - firstNonce := uint64(44) - - lastHdr := &block.MetaBlock{Round: 9, - Nonce: firstNonce, - RandSeed: prevRandSeed} - notarizedHdrs[sharding.MetachainShardId] = append(notarizedHdrs[sharding.MetachainShardId], lastHdr) - - shardBlock := make(block.Body, 0) - txHash := []byte("txhash") - txHashes := make([][]byte, 0) - txHashes = append(txHashes, txHash) - miniblock1 := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 1, - TxHashes: txHashes, - } - miniblock2 := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 2, - TxHashes: txHashes, - } - miniblock3 := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 3, - TxHashes: txHashes, - } - miniblock4 := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 4, - TxHashes: txHashes, - } - shardBlock = append(shardBlock, &miniblock1, &miniblock2, &miniblock3, &miniblock4) - - miniBlocks := make([]block.MiniBlock, 0) - miniBlocks = append(miniBlocks, miniblock1, miniblock2) - //header shard 0 - prevHash, _ := sp.ComputeHeaderHash(sp.LastNotarizedHdrForShard(sharding.MetachainShardId).(*block.MetaBlock)) - prevHdr := &block.MetaBlock{ - Round: 10, - Nonce: 45, - PrevRandSeed: prevRandSeed, - RandSeed: currRandSeed, - PrevHash: prevHash, - RootHash: []byte("prevRootHash"), - ShardInfo: createShardData(hasher, marshalizer, miniBlocks)} - - miniBlocks = make([]block.MiniBlock, 0) - miniBlocks = append(miniBlocks, miniblock3, miniblock4) - prevHash, _ = sp.ComputeHeaderHash(prevHdr) - currHdr := &block.MetaBlock{ - Round: 11, - 
Nonce: 46, - PrevRandSeed: currRandSeed, - RandSeed: []byte("nextrand"), - PrevHash: prevHash, - RootHash: []byte("currRootHash"), - ShardInfo: createShardData(hasher, marshalizer, miniBlocks)} - currHash, _ := sp.ComputeHeaderHash(currHdr) - prevHash, _ = sp.ComputeHeaderHash(prevHdr) - - // put headers in pool - dataPool.MetaBlocks().Put(currHash, currHdr) - dataPool.MetaBlocks().Put(prevHash, prevHdr) - dataPool.MetaBlocks().Put([]byte("shouldNotRemove"), &block.MetaBlock{ - Round: 12, - PrevRandSeed: []byte("nextrand"), - PrevHash: currHash, - Nonce: 47}) - - hashes := make([][]byte, 0) - hashes = append(hashes, currHash) - hashes = append(hashes, prevHash) - blockHeader := &block.Header{MetaBlockHashes: hashes} - - processedMetaHdrs, err := sp.GetProcessedMetaBlocksFromPool(shardBlock, blockHeader) - assert.Nil(t, err) - assert.Equal(t, 2, len(processedMetaHdrs)) - - err = sp.SaveLastNotarizedHeader(sharding.MetachainShardId, processedMetaHdrs) - assert.Nil(t, err) - - err = sp.RemoveProcessedMetablocksFromPool(processedMetaHdrs) - assert.Nil(t, err) - assert.Equal(t, 4, putCalledNr) - - assert.Equal(t, currHdr, sp.LastNotarizedHdrForShard(sharding.MetachainShardId)) + t.Parallel() + + hasher := mock.HasherMock{} + marshalizer := &mock.MarshalizerMock{} + dataPool := mock.NewPoolsHolderMock() + forkDetector := &mock.ForkDetectorMock{} + highNonce := uint64(500) + forkDetector.GetHighestFinalBlockNonceCalled = func() uint64 { + return highNonce + } + putCalledNr := 0 + store := &mock.ChainStorerMock{ + PutCalled: func(unitType dataRetriever.UnitType, key []byte, value []byte) error { + putCalledNr++ + return nil + }, + } + + shardNr := uint32(5) + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + dataPool, + store, + hasher, + marshalizer, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(shardNr), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + forkDetector, + &mock.BlocksTrackerMock{ + RemoveNotarisedBlocksCalled: func(headerHandler data.HeaderHandler) error { + return nil + }, + UnnotarisedBlocksCalled: func() []data.HeaderHandler { + return make([]data.HeaderHandler, 0) + }, + }, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(shardNr)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + prevRandSeed := []byte("prevrand") + currRandSeed := []byte("currrand") + notarizedHdrs := sp.NotarizedHdrs() + firstNonce := uint64(44) + + lastHdr := &block.MetaBlock{Round: 9, + Nonce: firstNonce, + RandSeed: prevRandSeed} + notarizedHdrs[sharding.MetachainShardId] = append(notarizedHdrs[sharding.MetachainShardId], lastHdr) + + shardBlock := make(block.Body, 0) + txHash := []byte("txhash") + txHashes := make([][]byte, 0) + txHashes = append(txHashes, txHash) + miniblock1 := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 1, + TxHashes: txHashes, + } + miniblock2 := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 2, + TxHashes: txHashes, + } + miniblock3 := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 3, + TxHashes: txHashes, + } + miniblock4 := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 4, + TxHashes: txHashes, + } + shardBlock = append(shardBlock, &miniblock1, &miniblock2, &miniblock3, &miniblock4) + + miniBlocks := make([]block.MiniBlock, 0) + miniBlocks = append(miniBlocks, miniblock1, miniblock2) + //header shard 0 + prevHash, _ := sp.ComputeHeaderHash(sp.LastNotarizedHdrForShard(sharding.MetachainShardId).(*block.MetaBlock)) + 
prevHdr := &block.MetaBlock{ + Round: 10, + Nonce: 45, + PrevRandSeed: prevRandSeed, + RandSeed: currRandSeed, + PrevHash: prevHash, + RootHash: []byte("prevRootHash"), + ShardInfo: createShardData(hasher, marshalizer, miniBlocks)} + + miniBlocks = make([]block.MiniBlock, 0) + miniBlocks = append(miniBlocks, miniblock3, miniblock4) + prevHash, _ = sp.ComputeHeaderHash(prevHdr) + currHdr := &block.MetaBlock{ + Round: 11, + Nonce: 46, + PrevRandSeed: currRandSeed, + RandSeed: []byte("nextrand"), + PrevHash: prevHash, + RootHash: []byte("currRootHash"), + ShardInfo: createShardData(hasher, marshalizer, miniBlocks)} + currHash, _ := sp.ComputeHeaderHash(currHdr) + prevHash, _ = sp.ComputeHeaderHash(prevHdr) + + // put headers in pool + dataPool.MetaBlocks().Put(currHash, currHdr) + dataPool.MetaBlocks().Put(prevHash, prevHdr) + dataPool.MetaBlocks().Put([]byte("shouldNotRemove"), &block.MetaBlock{ + Round: 12, + PrevRandSeed: []byte("nextrand"), + PrevHash: currHash, + Nonce: 47}) + + hashes := make([][]byte, 0) + hashes = append(hashes, currHash) + hashes = append(hashes, prevHash) + blockHeader := &block.Header{MetaBlockHashes: hashes} + + processedMetaHdrs, err := sp.GetProcessedMetaBlocksFromPool(shardBlock, blockHeader) + assert.Nil(t, err) + assert.Equal(t, 2, len(processedMetaHdrs)) + + err = sp.SaveLastNotarizedHeader(sharding.MetachainShardId, processedMetaHdrs) + assert.Nil(t, err) + + err = sp.RemoveProcessedMetablocksFromPool(processedMetaHdrs) + assert.Nil(t, err) + assert.Equal(t, 4, putCalledNr) + + assert.Equal(t, currHdr, sp.LastNotarizedHdrForShard(sharding.MetachainShardId)) } func createOneHeaderOneBody() (*block.Header, block.Body) { - txHash := []byte("tx_hash1") - rootHash := []byte("rootHash") - body := make(block.Body, 0) - txHashes := make([][]byte, 0) - txHashes = append(txHashes, txHash) - miniblock := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 1, - TxHashes: txHashes, - } - body = append(body, &miniblock) - - hasher := &mock.HasherStub{} - marshalizer := &mock.MarshalizerMock{} - - mbbytes, _ := marshalizer.Marshal(miniblock) - mbHash := hasher.Compute(string(mbbytes)) - mbHdr := block.MiniBlockHeader{ - ReceiverShardID: 0, - SenderShardID: 1, - TxCount: uint32(len(txHashes)), - Hash: mbHash} - mbHdrs := make([]block.MiniBlockHeader, 0) - mbHdrs = append(mbHdrs, mbHdr) - - hdr := &block.Header{ - Nonce: 1, - PrevHash: []byte(""), - Signature: []byte("signature"), - PubKeysBitmap: []byte("00110"), - ShardId: 0, - RootHash: rootHash, - MiniBlockHeaders: mbHdrs, - } - - return hdr, body + txHash := []byte("tx_hash1") + rootHash := []byte("rootHash") + body := make(block.Body, 0) + txHashes := make([][]byte, 0) + txHashes = append(txHashes, txHash) + miniblock := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 1, + TxHashes: txHashes, + } + body = append(body, &miniblock) + + hasher := &mock.HasherStub{} + marshalizer := &mock.MarshalizerMock{} + + mbbytes, _ := marshalizer.Marshal(miniblock) + mbHash := hasher.Compute(string(mbbytes)) + mbHdr := block.MiniBlockHeader{ + ReceiverShardID: 0, + SenderShardID: 1, + TxCount: uint32(len(txHashes)), + Hash: mbHash} + mbHdrs := make([]block.MiniBlockHeader, 0) + mbHdrs = append(mbHdrs, mbHdr) + + hdr := &block.Header{ + Nonce: 1, + PrevHash: []byte(""), + Signature: []byte("signature"), + PubKeysBitmap: []byte("00110"), + ShardId: 0, + RootHash: rootHash, + MiniBlockHeaders: mbHdrs, + } + + return hdr, body } func TestShardProcessor_CheckHeaderBodyCorrelationReceiverMissmatch(t *testing.T) { - 
t.Parallel() - - hdr, body := createOneHeaderOneBody() - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - initDataPool([]byte("tx_hash1")), - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{}, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - hdr.MiniBlockHeaders[0].ReceiverShardID = body[0].ReceiverShardID + 1 - err := sp.CheckHeaderBodyCorrelation(hdr, body) - assert.Equal(t, process.ErrHeaderBodyMismatch, err) + t.Parallel() + + hdr, body := createOneHeaderOneBody() + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + initDataPool([]byte("tx_hash1")), + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.AccountsStub{}, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + hdr.MiniBlockHeaders[0].ReceiverShardID = body[0].ReceiverShardID + 1 + err := sp.CheckHeaderBodyCorrelation(hdr, body) + assert.Equal(t, process.ErrHeaderBodyMismatch, err) } func TestShardProcessor_CheckHeaderBodyCorrelationSenderMissmatch(t *testing.T) { - t.Parallel() - - hdr, body := createOneHeaderOneBody() - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - initDataPool([]byte("tx_hash1")), - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{}, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - hdr.MiniBlockHeaders[0].SenderShardID = body[0].SenderShardID + 1 - err := sp.CheckHeaderBodyCorrelation(hdr, body) - assert.Equal(t, process.ErrHeaderBodyMismatch, err) + t.Parallel() + + hdr, body := createOneHeaderOneBody() + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + initDataPool([]byte("tx_hash1")), + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.AccountsStub{}, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + hdr.MiniBlockHeaders[0].SenderShardID = body[0].SenderShardID + 1 + err := sp.CheckHeaderBodyCorrelation(hdr, body) + assert.Equal(t, process.ErrHeaderBodyMismatch, err) } func TestShardProcessor_CheckHeaderBodyCorrelationTxCountMissmatch(t *testing.T) { - t.Parallel() - - hdr, body := createOneHeaderOneBody() - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - initDataPool([]byte("tx_hash1")), - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{}, - 
mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - hdr.MiniBlockHeaders[0].TxCount = uint32(len(body[0].TxHashes) + 1) - err := sp.CheckHeaderBodyCorrelation(hdr, body) - assert.Equal(t, process.ErrHeaderBodyMismatch, err) + t.Parallel() + + hdr, body := createOneHeaderOneBody() + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + initDataPool([]byte("tx_hash1")), + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.AccountsStub{}, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + hdr.MiniBlockHeaders[0].TxCount = uint32(len(body[0].TxHashes) + 1) + err := sp.CheckHeaderBodyCorrelation(hdr, body) + assert.Equal(t, process.ErrHeaderBodyMismatch, err) } func TestShardProcessor_CheckHeaderBodyCorrelationHashMissmatch(t *testing.T) { - t.Parallel() - - hdr, body := createOneHeaderOneBody() - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - initDataPool([]byte("tx_hash1")), - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{}, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - hdr.MiniBlockHeaders[0].Hash = []byte("wrongHash") - err := sp.CheckHeaderBodyCorrelation(hdr, body) - assert.Equal(t, process.ErrHeaderBodyMismatch, err) + t.Parallel() + + hdr, body := createOneHeaderOneBody() + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + initDataPool([]byte("tx_hash1")), + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.AccountsStub{}, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + hdr.MiniBlockHeaders[0].Hash = []byte("wrongHash") + err := sp.CheckHeaderBodyCorrelation(hdr, body) + assert.Equal(t, process.ErrHeaderBodyMismatch, err) } func TestShardProcessor_CheckHeaderBodyCorrelationShouldPass(t *testing.T) { - t.Parallel() - - hdr, body := createOneHeaderOneBody() - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - initDataPool([]byte("tx_hash1")), - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{}, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - 
&mock.Uint64ByteSliceConverterMock{}, - ) - - err := sp.CheckHeaderBodyCorrelation(hdr, body) - assert.Nil(t, err) + t.Parallel() + + hdr, body := createOneHeaderOneBody() + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + initDataPool([]byte("tx_hash1")), + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.AccountsStub{}, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + err := sp.CheckHeaderBodyCorrelation(hdr, body) + assert.Nil(t, err) } func TestShardProcessor_restoreMetaBlockIntoPoolShouldPass(t *testing.T) { - t.Parallel() - - marshalizer := &mock.MarshalizerMock{} - - poolFake := mock.NewPoolsHolderFake() - - metaBlock := block.MetaBlock{ - Nonce: 1, - ShardInfo: make([]block.ShardData, 0), - } - - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - poolFake, - &mock.ChainStorerMock{ - GetCalled: func(unitType dataRetriever.UnitType, key []byte) ([]byte, error) { - return marshalizer.Marshal(&metaBlock) - }, - GetStorerCalled: func(unitType dataRetriever.UnitType) storage.Storer { - return &mock.StorerStub{ - RemoveCalled: func(key []byte) error { - return nil - }, - } - }, - }, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{}, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - miniblockHashes := make(map[int][][]byte, 0) - - meta := block.MetaBlock{ - Nonce: 1, - ShardInfo: make([]block.ShardData, 0), - } - hasher := &mock.HasherStub{} - - metaBytes, _ := marshalizer.Marshal(meta) - hasher.ComputeCalled = func(s string) []byte { - return []byte("cool") - } - metaHash := hasher.Compute(string(metaBytes)) - metablockHashes := make([][]byte, 0) - metablockHashes = append(metablockHashes, metaHash) - - metaBlockRestored, ok := poolFake.MetaBlocks().Get(metaHash) - - assert.Equal(t, nil, metaBlockRestored) - assert.False(t, ok) - - err := sp.RestoreMetaBlockIntoPool(miniblockHashes, metablockHashes) - - metaBlockRestored, _ = poolFake.MetaBlocks().Get(metaHash) - - assert.Equal(t, &metaBlock, metaBlockRestored) - assert.Nil(t, err) + t.Parallel() + + marshalizer := &mock.MarshalizerMock{} + + poolFake := mock.NewPoolsHolderMock() + + metaBlock := block.MetaBlock{ + Nonce: 1, + ShardInfo: make([]block.ShardData, 0), + } + + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + poolFake, + &mock.ChainStorerMock{ + GetCalled: func(unitType dataRetriever.UnitType, key []byte) ([]byte, error) { + return marshalizer.Marshal(&metaBlock) + }, + GetStorerCalled: func(unitType dataRetriever.UnitType) storage.Storer { + return &mock.StorerStub{ + RemoveCalled: func(key []byte) error { + return nil + }, + } + }, + }, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.AccountsStub{}, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + 
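// A minimal, self-contained sketch (not the repository implementation) of the
// property the CheckHeaderBodyCorrelation tests above exercise: every miniblock
// in the body must be referenced by a header entry with the same hash, the same
// sender/receiver shard IDs and a TxCount equal to the number of transaction
// hashes. All type and helper names below are hypothetical stand-ins.
package sketch

import "errors"

var errHeaderBodyMismatch = errors.New("header body mismatch")

type miniBlockHeader struct {
	Hash                           []byte
	SenderShardID, ReceiverShardID uint32
	TxCount                        uint32
}

type miniBlock struct {
	SenderShardID, ReceiverShardID uint32
	TxHashes                       [][]byte
}

// checkHeaderBodyCorrelation pairs body miniblocks with header entries by hash
// (hashMiniBlock stands in for marshalizer+hasher) and compares the rest.
func checkHeaderBodyCorrelation(hdrs []miniBlockHeader, body []miniBlock, hashMiniBlock func(miniBlock) []byte) error {
	byHash := make(map[string]miniBlockHeader, len(hdrs))
	for _, h := range hdrs {
		byHash[string(h.Hash)] = h
	}
	for _, mb := range body {
		h, ok := byHash[string(hashMiniBlock(mb))]
		if !ok {
			return errHeaderBodyMismatch
		}
		if h.SenderShardID != mb.SenderShardID ||
			h.ReceiverShardID != mb.ReceiverShardID ||
			h.TxCount != uint32(len(mb.TxHashes)) {
			return errHeaderBodyMismatch
		}
	}
	return nil
}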
createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + miniblockHashes := make(map[int][][]byte, 0) + + meta := block.MetaBlock{ + Nonce: 1, + ShardInfo: make([]block.ShardData, 0), + } + hasher := &mock.HasherStub{} + + metaBytes, _ := marshalizer.Marshal(meta) + hasher.ComputeCalled = func(s string) []byte { + return []byte("cool") + } + metaHash := hasher.Compute(string(metaBytes)) + metablockHashes := make([][]byte, 0) + metablockHashes = append(metablockHashes, metaHash) + + metaBlockRestored, ok := poolFake.MetaBlocks().Get(metaHash) + + assert.Equal(t, nil, metaBlockRestored) + assert.False(t, ok) + + err := sp.RestoreMetaBlockIntoPool(miniblockHashes, metablockHashes) + + metaBlockRestored, _ = poolFake.MetaBlocks().Get(metaHash) + + assert.Equal(t, &metaBlock, metaBlockRestored) + assert.Nil(t, err) } func TestShardPreprocessor_getAllMiniBlockDstMeFromMetaShouldPass(t *testing.T) { - t.Parallel() - - marshalizer := &mock.MarshalizerMock{} - - txHash := []byte("tx_hash1") - txHashes := make([][]byte, 0) - txHashes = append(txHashes, txHash) - miniblock := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 1, - TxHashes: txHashes, - } - hasher := &mock.HasherStub{} - - mbbytes, _ := marshalizer.Marshal(miniblock) - mbHash := hasher.Compute(string(mbbytes)) - - shardMiniBlock := block.ShardMiniBlockHeader{ - ReceiverShardId: 0, - SenderShardId: 2, - TxCount: uint32(len(txHashes)), - Hash: mbHash, - } - shardMiniblockHdrs := make([]block.ShardMiniBlockHeader, 0) - shardMiniblockHdrs = append(shardMiniblockHdrs, shardMiniBlock) - shardHeader := block.ShardData{ - ShardId: 1, - ShardMiniBlockHeaders: shardMiniblockHdrs, - } - shardHdrs := make([]block.ShardData, 0) - shardHdrs = append(shardHdrs, shardHeader) - - idp := initDataPool([]byte("tx_hash1")) - idp.MetaBlocksCalled = func() storage.Cacher { - return &mock.CacherStub{ - GetCalled: func(key []byte) (value interface{}, ok bool) { - if reflect.DeepEqual(key, []byte("tx1_hash")) { - return &transaction.Transaction{Nonce: 10}, true - } - return nil, false - }, - KeysCalled: func() [][]byte { - return nil - }, - LenCalled: func() int { - return 0 - }, - PeekCalled: func(key []byte) (value interface{}, ok bool) { - return &block.MetaBlock{ - Nonce: 1, - Round: 1, - ShardInfo: shardHdrs, - }, true - }, - PutCalled: func(key []byte, value interface{}) (evicted bool) { - return true - }, - RegisterHandlerCalled: func(i func(key []byte)) {}, - } - } - - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - idp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{}, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - meta := block.MetaBlock{ - Nonce: 0, - ShardInfo: make([]block.ShardData, 0), - } - - metaBytes, _ := marshalizer.Marshal(meta) - hasher.ComputeCalled = func(s string) []byte { - return []byte("cool") - } - metaHash := hasher.Compute(string(metaBytes)) - metablockHashes := make([][]byte, 0) - metablockHashes = append(metablockHashes, metaHash) - - orderedMetaBlocks, err := sp.GetAllMiniBlockDstMeFromMeta(1, metablockHashes) - - assert.Equal(t, 1, 
len(orderedMetaBlocks)) - assert.Equal(t, orderedMetaBlocks[""], metaHash) - assert.Nil(t, err) + t.Parallel() + + marshalizer := &mock.MarshalizerMock{} + + txHash := []byte("tx_hash1") + txHashes := make([][]byte, 0) + txHashes = append(txHashes, txHash) + miniblock := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 1, + TxHashes: txHashes, + } + hasher := &mock.HasherStub{} + + mbbytes, _ := marshalizer.Marshal(miniblock) + mbHash := hasher.Compute(string(mbbytes)) + + shardMiniBlock := block.ShardMiniBlockHeader{ + ReceiverShardId: 0, + SenderShardId: 2, + TxCount: uint32(len(txHashes)), + Hash: mbHash, + } + shardMiniblockHdrs := make([]block.ShardMiniBlockHeader, 0) + shardMiniblockHdrs = append(shardMiniblockHdrs, shardMiniBlock) + shardHeader := block.ShardData{ + ShardId: 1, + ShardMiniBlockHeaders: shardMiniblockHdrs, + } + shardHdrs := make([]block.ShardData, 0) + shardHdrs = append(shardHdrs, shardHeader) + + idp := initDataPool([]byte("tx_hash1")) + idp.MetaBlocksCalled = func() storage.Cacher { + return &mock.CacherStub{ + GetCalled: func(key []byte) (value interface{}, ok bool) { + if reflect.DeepEqual(key, []byte("tx1_hash")) { + return &transaction.Transaction{Nonce: 10}, true + } + return nil, false + }, + KeysCalled: func() [][]byte { + return nil + }, + LenCalled: func() int { + return 0 + }, + PeekCalled: func(key []byte) (value interface{}, ok bool) { + return &block.MetaBlock{ + Nonce: 1, + Round: 1, + ShardInfo: shardHdrs, + }, true + }, + PutCalled: func(key []byte, value interface{}) (evicted bool) { + return true + }, + RegisterHandlerCalled: func(i func(key []byte)) {}, + } + } + + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + idp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.AccountsStub{}, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + meta := block.MetaBlock{ + Nonce: 0, + ShardInfo: make([]block.ShardData, 0), + } + + metaBytes, _ := marshalizer.Marshal(meta) + hasher.ComputeCalled = func(s string) []byte { + return []byte("cool") + } + metaHash := hasher.Compute(string(metaBytes)) + metablockHashes := make([][]byte, 0) + metablockHashes = append(metablockHashes, metaHash) + + orderedMetaBlocks, err := sp.GetAllMiniBlockDstMeFromMeta(1, metablockHashes) + + assert.Equal(t, 1, len(orderedMetaBlocks)) + assert.Equal(t, orderedMetaBlocks[""], metaHash) + assert.Nil(t, err) } func TestShardProcessor_GetHighestHdrForOwnShardFromMetachainNothingToProcess(t *testing.T) { - t.Parallel() - - dataPool := initDataPool([]byte("tx_hash1")) - - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - dataPool, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{}, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - hdr, _, err := sp.GetHighestHdrForOwnShardFromMetachain(0) - - assert.Nil(t, err) - assert.NotNil(t, hdr) - assert.Equal(t, uint64(0), hdr.GetNonce()) 
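// A minimal sketch (not the repository code) of what the
// getAllMiniBlockDstMeFromMeta test above checks: for each referenced
// metablock, collect the hashes of the shard miniblock headers whose receiver
// is the own shard, mapped to the metablock hash they came from. The real
// method also receives a round bound, which this sketch ignores; all names
// below are hypothetical stand-ins.
package sketch

type shardMiniBlockHeader struct {
	Hash                           []byte
	SenderShardId, ReceiverShardId uint32
	TxCount                        uint32
}

type shardData struct {
	ShardId               uint32
	ShardMiniBlockHeaders []shardMiniBlockHeader
}

type metaBlock struct {
	Nonce     uint64
	ShardInfo []shardData
}

// allMiniBlocksDstMe maps miniblock hash -> metablock hash for every miniblock
// destined to selfShardID; lookup stands in for peeking the metablock pool.
func allMiniBlocksDstMe(selfShardID uint32, metaBlockHashes [][]byte, lookup func([]byte) (*metaBlock, bool)) map[string][]byte {
	res := make(map[string][]byte)
	for _, metaHash := range metaBlockHashes {
		meta, ok := lookup(metaHash)
		if !ok {
			continue
		}
		for _, sd := range meta.ShardInfo {
			for _, mbh := range sd.ShardMiniBlockHeaders {
				if mbh.ReceiverShardId == selfShardID {
					res[string(mbh.Hash)] = metaHash
				}
			}
		}
	}
	return res
}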
+ t.Parallel() + + dataPool := initDataPool([]byte("tx_hash1")) + + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + dataPool, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.AccountsStub{}, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + hdr, _, err := sp.GetHighestHdrForOwnShardFromMetachain(0) + + assert.Nil(t, err) + assert.NotNil(t, hdr) + assert.Equal(t, uint64(0), hdr.GetNonce()) } func TestShardProcessor_GetHighestHdrForOwnShardFromMetachaiMetaHdrsWithoutOwnHdr(t *testing.T) { - t.Parallel() - - dataPool := integrationTests.CreateTestShardDataPool(nil) - store := initStore() - hasher := &mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} - genesisBlocks := createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)) - - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - dataPool, - store, - hasher, - marshalizer, - &mock.AccountsStub{}, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - genesisBlocks, - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - shardInfo := make([]block.ShardData, 0) - shardInfo = append(shardInfo, block.ShardData{HeaderHash: []byte("hash"), ShardId: 1}) - _ = dataPool.Headers().Put([]byte("hash"), &block.Header{ShardId: 0, Nonce: 1}) - - prevMetaHdr := genesisBlocks[sharding.MetachainShardId] - prevHash, _ := core.CalculateHash(marshalizer, hasher, prevMetaHdr) - currMetaHdr := &block.MetaBlock{ - Nonce: 1, - Epoch: 0, - Round: 1, - PrevHash: prevHash, - PrevRandSeed: prevMetaHdr.GetRandSeed(), - RandSeed: prevMetaHdr.GetRandSeed(), - ShardInfo: shardInfo, - } - currHash, _ := core.CalculateHash(marshalizer, hasher, currMetaHdr) - _ = dataPool.MetaBlocks().Put(currHash, currMetaHdr) - - prevMetaHdr = currMetaHdr - prevHash, _ = core.CalculateHash(marshalizer, hasher, prevMetaHdr) - currMetaHdr = &block.MetaBlock{ - Nonce: 2, - Epoch: 0, - Round: 2, - PrevHash: prevHash, - PrevRandSeed: prevMetaHdr.GetRandSeed(), - RandSeed: prevMetaHdr.GetRandSeed(), - ShardInfo: shardInfo, - } - currHash, _ = core.CalculateHash(marshalizer, hasher, currMetaHdr) - _ = dataPool.MetaBlocks().Put(currHash, currMetaHdr) - - hdr, _, err := sp.GetHighestHdrForOwnShardFromMetachain(4) - - assert.Nil(t, err) - assert.NotNil(t, hdr) - assert.Equal(t, uint64(0), hdr.GetNonce()) + t.Parallel() + + dataPool := integrationTests.CreateTestShardDataPool(nil) + store := initStore() + hasher := &mock.HasherMock{} + marshalizer := &mock.MarshalizerMock{} + genesisBlocks := createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)) + + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + dataPool, + store, + hasher, + marshalizer, + &mock.AccountsStub{}, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + genesisBlocks, + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + shardInfo := make([]block.ShardData, 0) + shardInfo = append(shardInfo, 
block.ShardData{HeaderHash: []byte("hash"), ShardId: 1}) + _ = dataPool.Headers().Put([]byte("hash"), &block.Header{ShardId: 0, Nonce: 1}) + + prevMetaHdr := genesisBlocks[sharding.MetachainShardId] + prevHash, _ := core.CalculateHash(marshalizer, hasher, prevMetaHdr) + currMetaHdr := &block.MetaBlock{ + Nonce: 1, + Epoch: 0, + Round: 1, + PrevHash: prevHash, + PrevRandSeed: prevMetaHdr.GetRandSeed(), + RandSeed: prevMetaHdr.GetRandSeed(), + ShardInfo: shardInfo, + } + currHash, _ := core.CalculateHash(marshalizer, hasher, currMetaHdr) + _ = dataPool.MetaBlocks().Put(currHash, currMetaHdr) + + prevMetaHdr = currMetaHdr + prevHash, _ = core.CalculateHash(marshalizer, hasher, prevMetaHdr) + currMetaHdr = &block.MetaBlock{ + Nonce: 2, + Epoch: 0, + Round: 2, + PrevHash: prevHash, + PrevRandSeed: prevMetaHdr.GetRandSeed(), + RandSeed: prevMetaHdr.GetRandSeed(), + ShardInfo: shardInfo, + } + currHash, _ = core.CalculateHash(marshalizer, hasher, currMetaHdr) + _ = dataPool.MetaBlocks().Put(currHash, currMetaHdr) + + hdr, _, err := sp.GetHighestHdrForOwnShardFromMetachain(4) + + assert.Nil(t, err) + assert.NotNil(t, hdr) + assert.Equal(t, uint64(0), hdr.GetNonce()) } func TestShardProcessor_GetHighestHdrForOwnShardFromMetachaiMetaHdrsWithOwnHdrButNotStored(t *testing.T) { - t.Parallel() - - dataPool := integrationTests.CreateTestShardDataPool(nil) - store := initStore() - hasher := &mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} - genesisBlocks := createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)) - - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - dataPool, - store, - hasher, - marshalizer, - &mock.AccountsStub{}, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - genesisBlocks, - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - shardInfo := make([]block.ShardData, 0) - shardInfo = append(shardInfo, block.ShardData{HeaderHash: []byte("hash"), ShardId: 0}) - - prevMetaHdr := genesisBlocks[sharding.MetachainShardId] - prevHash, _ := core.CalculateHash(marshalizer, hasher, prevMetaHdr) - currMetaHdr := &block.MetaBlock{ - Nonce: 1, - Epoch: 0, - Round: 1, - PrevHash: prevHash, - PrevRandSeed: prevMetaHdr.GetRandSeed(), - RandSeed: prevMetaHdr.GetRandSeed(), - ShardInfo: shardInfo, - } - currHash, _ := core.CalculateHash(marshalizer, hasher, currMetaHdr) - _ = dataPool.MetaBlocks().Put(currHash, currMetaHdr) - - prevMetaHdr = currMetaHdr - prevHash, _ = core.CalculateHash(marshalizer, hasher, prevMetaHdr) - currMetaHdr = &block.MetaBlock{ - Nonce: 2, - Epoch: 0, - Round: 2, - PrevHash: prevHash, - PrevRandSeed: prevMetaHdr.GetRandSeed(), - RandSeed: prevMetaHdr.GetRandSeed(), - ShardInfo: shardInfo, - } - currHash, _ = core.CalculateHash(marshalizer, hasher, currMetaHdr) - _ = dataPool.MetaBlocks().Put(currHash, currMetaHdr) - - hdr, _, err := sp.GetHighestHdrForOwnShardFromMetachain(4) - - assert.Nil(t, err) - assert.NotNil(t, hdr) - assert.Equal(t, uint64(0), hdr.GetNonce()) + t.Parallel() + + dataPool := integrationTests.CreateTestShardDataPool(nil) + store := initStore() + hasher := &mock.HasherMock{} + marshalizer := &mock.MarshalizerMock{} + genesisBlocks := createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)) + + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + dataPool, + store, + hasher, + marshalizer, + &mock.AccountsStub{}, + 
mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + genesisBlocks, + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + shardInfo := make([]block.ShardData, 0) + shardInfo = append(shardInfo, block.ShardData{HeaderHash: []byte("hash"), ShardId: 0}) + + prevMetaHdr := genesisBlocks[sharding.MetachainShardId] + prevHash, _ := core.CalculateHash(marshalizer, hasher, prevMetaHdr) + currMetaHdr := &block.MetaBlock{ + Nonce: 1, + Epoch: 0, + Round: 1, + PrevHash: prevHash, + PrevRandSeed: prevMetaHdr.GetRandSeed(), + RandSeed: prevMetaHdr.GetRandSeed(), + ShardInfo: shardInfo, + } + currHash, _ := core.CalculateHash(marshalizer, hasher, currMetaHdr) + _ = dataPool.MetaBlocks().Put(currHash, currMetaHdr) + + prevMetaHdr = currMetaHdr + prevHash, _ = core.CalculateHash(marshalizer, hasher, prevMetaHdr) + currMetaHdr = &block.MetaBlock{ + Nonce: 2, + Epoch: 0, + Round: 2, + PrevHash: prevHash, + PrevRandSeed: prevMetaHdr.GetRandSeed(), + RandSeed: prevMetaHdr.GetRandSeed(), + ShardInfo: shardInfo, + } + currHash, _ = core.CalculateHash(marshalizer, hasher, currMetaHdr) + _ = dataPool.MetaBlocks().Put(currHash, currMetaHdr) + + hdr, _, err := sp.GetHighestHdrForOwnShardFromMetachain(4) + + assert.Nil(t, err) + assert.NotNil(t, hdr) + assert.Equal(t, uint64(0), hdr.GetNonce()) } func TestShardProcessor_GetHighestHdrForOwnShardFromMetachaiMetaHdrsWithOwnHdrStored(t *testing.T) { - t.Parallel() - - dataPool := integrationTests.CreateTestShardDataPool(nil) - store := initStore() - hasher := &mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} - genesisBlocks := createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)) - - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - dataPool, - store, - hasher, - marshalizer, - &mock.AccountsStub{}, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - genesisBlocks, - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - ownHdr := &block.Header{ - Nonce: 1, - Round: 1, - } - ownHash, _ := core.CalculateHash(marshalizer, hasher, ownHdr) - _ = dataPool.Headers().Put(ownHash, ownHdr) - - shardInfo := make([]block.ShardData, 0) - shardInfo = append(shardInfo, block.ShardData{HeaderHash: ownHash, ShardId: 0}) - - prevMetaHdr := genesisBlocks[sharding.MetachainShardId] - prevHash, _ := core.CalculateHash(marshalizer, hasher, prevMetaHdr) - currMetaHdr := &block.MetaBlock{ - Nonce: 1, - Epoch: 0, - Round: 1, - PrevHash: prevHash, - PrevRandSeed: prevMetaHdr.GetRandSeed(), - RandSeed: prevMetaHdr.GetRandSeed(), - ShardInfo: shardInfo, - } - currHash, _ := core.CalculateHash(marshalizer, hasher, currMetaHdr) - _ = dataPool.MetaBlocks().Put(currHash, currMetaHdr) - - ownHdr = &block.Header{ - Nonce: 2, - Round: 2, - } - ownHash, _ = core.CalculateHash(marshalizer, hasher, ownHdr) - mrsOwnHdr, _ := marshalizer.Marshal(ownHdr) - _ = store.Put(dataRetriever.BlockHeaderUnit, ownHash, mrsOwnHdr) - - shardInfo = make([]block.ShardData, 0) - shardInfo = append(shardInfo, block.ShardData{HeaderHash: ownHash, ShardId: 0}) - - prevMetaHdr = currMetaHdr - prevHash, _ = core.CalculateHash(marshalizer, hasher, prevMetaHdr) - currMetaHdr = &block.MetaBlock{ - Nonce: 2, - Epoch: 0, - Round: 2, - PrevHash: 
prevHash, - PrevRandSeed: prevMetaHdr.GetRandSeed(), - RandSeed: prevMetaHdr.GetRandSeed(), - ShardInfo: shardInfo, - } - currHash, _ = core.CalculateHash(marshalizer, hasher, currMetaHdr) - _ = dataPool.MetaBlocks().Put(currHash, currMetaHdr) - - prevMetaHdr = currMetaHdr - prevHash, _ = core.CalculateHash(marshalizer, hasher, prevMetaHdr) - currMetaHdr = &block.MetaBlock{ - Nonce: 3, - Epoch: 0, - Round: 3, - PrevHash: prevHash, - PrevRandSeed: prevMetaHdr.GetRandSeed(), - RandSeed: prevMetaHdr.GetRandSeed(), - } - currHash, _ = core.CalculateHash(marshalizer, hasher, currMetaHdr) - _ = dataPool.MetaBlocks().Put(currHash, currMetaHdr) - - hdr, _, err := sp.GetHighestHdrForOwnShardFromMetachain(4) - - assert.Nil(t, err) - assert.NotNil(t, hdr) - assert.Equal(t, ownHdr.GetNonce(), hdr.GetNonce()) + t.Parallel() + + dataPool := integrationTests.CreateTestShardDataPool(nil) + store := initStore() + hasher := &mock.HasherMock{} + marshalizer := &mock.MarshalizerMock{} + genesisBlocks := createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)) + + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + dataPool, + store, + hasher, + marshalizer, + &mock.AccountsStub{}, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + genesisBlocks, + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + ownHdr := &block.Header{ + Nonce: 1, + Round: 1, + } + ownHash, _ := core.CalculateHash(marshalizer, hasher, ownHdr) + _ = dataPool.Headers().Put(ownHash, ownHdr) + + shardInfo := make([]block.ShardData, 0) + shardInfo = append(shardInfo, block.ShardData{HeaderHash: ownHash, ShardId: 0}) + + prevMetaHdr := genesisBlocks[sharding.MetachainShardId] + prevHash, _ := core.CalculateHash(marshalizer, hasher, prevMetaHdr) + currMetaHdr := &block.MetaBlock{ + Nonce: 1, + Epoch: 0, + Round: 1, + PrevHash: prevHash, + PrevRandSeed: prevMetaHdr.GetRandSeed(), + RandSeed: prevMetaHdr.GetRandSeed(), + ShardInfo: shardInfo, + } + currHash, _ := core.CalculateHash(marshalizer, hasher, currMetaHdr) + _ = dataPool.MetaBlocks().Put(currHash, currMetaHdr) + + ownHdr = &block.Header{ + Nonce: 2, + Round: 2, + } + ownHash, _ = core.CalculateHash(marshalizer, hasher, ownHdr) + mrsOwnHdr, _ := marshalizer.Marshal(ownHdr) + _ = store.Put(dataRetriever.BlockHeaderUnit, ownHash, mrsOwnHdr) + + shardInfo = make([]block.ShardData, 0) + shardInfo = append(shardInfo, block.ShardData{HeaderHash: ownHash, ShardId: 0}) + + prevMetaHdr = currMetaHdr + prevHash, _ = core.CalculateHash(marshalizer, hasher, prevMetaHdr) + currMetaHdr = &block.MetaBlock{ + Nonce: 2, + Epoch: 0, + Round: 2, + PrevHash: prevHash, + PrevRandSeed: prevMetaHdr.GetRandSeed(), + RandSeed: prevMetaHdr.GetRandSeed(), + ShardInfo: shardInfo, + } + currHash, _ = core.CalculateHash(marshalizer, hasher, currMetaHdr) + _ = dataPool.MetaBlocks().Put(currHash, currMetaHdr) + + prevMetaHdr = currMetaHdr + prevHash, _ = core.CalculateHash(marshalizer, hasher, prevMetaHdr) + currMetaHdr = &block.MetaBlock{ + Nonce: 3, + Epoch: 0, + Round: 3, + PrevHash: prevHash, + PrevRandSeed: prevMetaHdr.GetRandSeed(), + RandSeed: prevMetaHdr.GetRandSeed(), + } + currHash, _ = core.CalculateHash(marshalizer, hasher, currMetaHdr) + _ = dataPool.MetaBlocks().Put(currHash, currMetaHdr) + + hdr, _, err := sp.GetHighestHdrForOwnShardFromMetachain(4) + + assert.Nil(t, err) + assert.NotNil(t, hdr) + 
assert.Equal(t, ownHdr.GetNonce(), hdr.GetNonce()) } diff --git a/process/coordinator/process.go b/process/coordinator/process.go index 9c1f82a9944..83ab99a8db6 100644 --- a/process/coordinator/process.go +++ b/process/coordinator/process.go @@ -489,26 +489,32 @@ func (tc *transactionCoordinator) CreateMbsAndProcessTransactionsFromMe( miniBlocks = append(miniBlocks, interMBs...) } - tc.addRewardsMiniBlocks(&miniBlocks) - + rewardsMBs := tc.createRewardsMiniBlocks() + if len(interMBs) > 0 { + miniBlocks = append(miniBlocks, rewardsMBs...) + } + return miniBlocks } -func (tc *transactionCoordinator) addRewardsMiniBlocks(miniBlocks *block.MiniBlockSlice) { +func (tc *transactionCoordinator) createRewardsMiniBlocks() block.MiniBlockSlice { // add rewards transactions to separate miniBlocks - interimProc := tc.getInterimProcessor(block.RewardsBlockType) + interimProc := tc.getInterimProcessor(block.RewardsBlock) if interimProc == nil { - return + return nil } + miniBlocks := make(block.MiniBlockSlice, 0) rewardsMbs := interimProc.CreateAllInterMiniBlocks() for key, mb := range rewardsMbs { - mb.ReceiverShardID = key - mb.SenderShardID = tc.shardCoordinator.SelfId() - mb.Type = block.RewardsBlockType + mb.ReceiverShardID = key + mb.SenderShardID = tc.shardCoordinator.SelfId() + mb.Type = block.RewardsBlock - *miniBlocks = append(*miniBlocks, mb) + miniBlocks = append(miniBlocks, mb) } + + return miniBlocks } func (tc *transactionCoordinator) processAddedInterimTransactions() block.MiniBlockSlice { @@ -516,7 +522,7 @@ func (tc *transactionCoordinator) processAddedInterimTransactions() block.MiniBl // processing has to be done in order, as the order of different type of transactions over the same account is strict for _, blockType := range tc.keysInterimProcs { - if blockType == block.RewardsBlockType { + if blockType == block.RewardsBlock { // this has to be processed last continue } @@ -585,7 +591,7 @@ func createBroadcastTopic(shardC sharding.Coordinator, destShId uint32, mbType b baseTopic = factory.PeerChBodyTopic case block.SmartContractResultBlock: baseTopic = factory.UnsignedTransactionTopic - case block.RewardsBlockType: + case block.RewardsBlock: baseTopic = factory.RewardsTransactionTopic default: return "", process.ErrUnknownBlockType @@ -734,7 +740,7 @@ func (tc *transactionCoordinator) VerifyCreatedBlockTransactions(body block.Body wg.Add(len(tc.interimProcessors)) for key, interimProc := range tc.interimProcessors { - if key == block.RewardsBlockType { + if key == block.RewardsBlock { // this has to be processed last wg.Done() continue @@ -757,7 +763,7 @@ func (tc *transactionCoordinator) VerifyCreatedBlockTransactions(body block.Body return errFound } - interimProc := tc.getInterimProcessor(block.RewardsBlockType) + interimProc := tc.getInterimProcessor(block.RewardsBlock) if interimProc == nil { return nil } diff --git a/process/coordinator/process_test.go b/process/coordinator/process_test.go index 0d5dd2160da..a634c8ad371 100644 --- a/process/coordinator/process_test.go +++ b/process/coordinator/process_test.go @@ -197,7 +197,7 @@ func TestNewTransactionCoordinator_NilShardCoordinator(t *testing.T) { tc, err := NewTransactionCoordinator( nil, &mock.AccountsStub{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.RequestHandlerMock{}, &mock.PreProcessorContainerMock{}, &mock.InterimProcessorContainerMock{}, @@ -213,7 +213,7 @@ func TestNewTransactionCoordinator_NilAccountsStub(t *testing.T) { tc, err := NewTransactionCoordinator( 
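// A minimal sketch of the rewards-miniblock assembly added to the transaction
// coordinator above, assuming CreateAllInterMiniBlocks returns the rewards
// miniblocks keyed by receiver shard; the sketch appends them whenever any
// were produced. Type names and the rewards type constant below are
// hypothetical stand-ins for the block package types.
package sketch

type rewardsMiniBlock struct {
	SenderShardID, ReceiverShardID uint32
	Type                           uint8
}

const rewardsBlockType uint8 = 255 // stand-in value, not block.RewardsBlock

// appendRewardsMiniBlocks tags each rewards miniblock with its destination
// shard and the local shard as sender, then appends it to the block body.
func appendRewardsMiniBlocks(body []*rewardsMiniBlock, rewardsByShard map[uint32]*rewardsMiniBlock, selfShardID uint32) []*rewardsMiniBlock {
	if len(rewardsByShard) == 0 {
		return body
	}
	for receiverShard, mb := range rewardsByShard {
		mb.ReceiverShardID = receiverShard
		mb.SenderShardID = selfShardID
		mb.Type = rewardsBlockType
		body = append(body, mb)
	}
	return body
}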
mock.NewMultiShardsCoordinatorMock(5), nil, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.RequestHandlerMock{}, &mock.PreProcessorContainerMock{}, &mock.InterimProcessorContainerMock{}, @@ -245,7 +245,7 @@ func TestNewTransactionCoordinator_NilRequestHandler(t *testing.T) { tc, err := NewTransactionCoordinator( mock.NewMultiShardsCoordinatorMock(5), &mock.AccountsStub{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), nil, &mock.PreProcessorContainerMock{}, &mock.InterimProcessorContainerMock{}, @@ -261,7 +261,7 @@ func TestNewTransactionCoordinator_NilHasher(t *testing.T) { tc, err := NewTransactionCoordinator( mock.NewMultiShardsCoordinatorMock(5), &mock.AccountsStub{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.RequestHandlerMock{}, nil, &mock.InterimProcessorContainerMock{}, @@ -277,7 +277,7 @@ func TestNewTransactionCoordinator_NilMarshalizer(t *testing.T) { tc, err := NewTransactionCoordinator( mock.NewMultiShardsCoordinatorMock(5), &mock.AccountsStub{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.RequestHandlerMock{}, &mock.PreProcessorContainerMock{}, nil, @@ -293,7 +293,7 @@ func TestNewTransactionCoordinator_OK(t *testing.T) { tc, err := NewTransactionCoordinator( mock.NewMultiShardsCoordinatorMock(5), &mock.AccountsStub{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.RequestHandlerMock{}, &mock.PreProcessorContainerMock{}, &mock.InterimProcessorContainerMock{}, @@ -309,7 +309,7 @@ func TestTransactionCoordinator_SeparateBodyNil(t *testing.T) { tc, err := NewTransactionCoordinator( mock.NewMultiShardsCoordinatorMock(5), &mock.AccountsStub{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.RequestHandlerMock{}, &mock.PreProcessorContainerMock{}, &mock.InterimProcessorContainerMock{}, @@ -327,7 +327,7 @@ func TestTransactionCoordinator_SeparateBody(t *testing.T) { tc, err := NewTransactionCoordinator( mock.NewMultiShardsCoordinatorMock(5), &mock.AccountsStub{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.RequestHandlerMock{}, &mock.PreProcessorContainerMock{}, &mock.InterimProcessorContainerMock{}, @@ -418,7 +418,7 @@ func TestTransactionCoordinator_CreateBlockStarted(t *testing.T) { tc, err := NewTransactionCoordinator( mock.NewMultiShardsCoordinatorMock(5), &mock.AccountsStub{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.RequestHandlerMock{}, createPreProcessorContainer(), &mock.InterimProcessorContainerMock{}, @@ -442,7 +442,7 @@ func TestTransactionCoordinator_CreateMarshalizedDataNilBody(t *testing.T) { tc, err := NewTransactionCoordinator( mock.NewMultiShardsCoordinatorMock(5), &mock.AccountsStub{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.RequestHandlerMock{}, createPreProcessorContainer(), &mock.InterimProcessorContainerMock{}, @@ -481,7 +481,7 @@ func TestTransactionCoordinator_CreateMarshalizedData(t *testing.T) { tc, err := NewTransactionCoordinator( mock.NewMultiShardsCoordinatorMock(5), &mock.AccountsStub{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.RequestHandlerMock{}, createPreProcessorContainer(), &mock.InterimProcessorContainerMock{}, @@ -502,7 +502,7 @@ func TestTransactionCoordinator_CreateMarshalizedDataWithTxsAndScr(t *testing.T) tc, err := NewTransactionCoordinator( mock.NewMultiShardsCoordinatorMock(5), &mock.AccountsStub{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.RequestHandlerMock{}, createPreProcessorContainer(), interimContainer, @@ -554,7 
+554,7 @@ func TestTransactionCoordinator_CreateMbsAndProcessCrossShardTransactionsDstMeNi tc, err := NewTransactionCoordinator( mock.NewMultiShardsCoordinatorMock(5), &mock.AccountsStub{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.RequestHandlerMock{}, createPreProcessorContainer(), &mock.InterimProcessorContainerMock{}, @@ -602,7 +602,7 @@ func TestTransactionCoordinator_CreateMbsAndProcessCrossShardTransactionsDstMeNo tc, err := NewTransactionCoordinator( mock.NewMultiShardsCoordinatorMock(5), &mock.AccountsStub{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.RequestHandlerMock{}, createPreProcessorContainer(), &mock.InterimProcessorContainerMock{}, @@ -628,7 +628,7 @@ func TestTransactionCoordinator_CreateMbsAndProcessCrossShardTransactionsNothing tc, err := NewTransactionCoordinator( mock.NewMultiShardsCoordinatorMock(5), &mock.AccountsStub{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.RequestHandlerMock{}, createPreProcessorContainer(), &mock.InterimProcessorContainerMock{}, @@ -771,7 +771,7 @@ func TestTransactionCoordinator_CreateMbsAndProcessTransactionsFromMeNothingToPr tc, err := NewTransactionCoordinator( mock.NewMultiShardsCoordinatorMock(5), &mock.AccountsStub{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.RequestHandlerMock{}, container, &mock.InterimProcessorContainerMock{}, @@ -1071,7 +1071,7 @@ func TestTransactionCoordinator_receivedMiniBlockRequestTxs(t *testing.T) { hasher := mock.HasherMock{} marshalizer := &mock.MarshalizerMock{} - dataPool := mock.NewPoolsHolderFake() + dataPool := mock.NewPoolsHolderMock() //we will have a miniblock that will have 3 tx hashes //1 tx hash will be in cache @@ -1438,7 +1438,7 @@ func TestShardProcessor_ProcessMiniBlockCompleteWithOkTxsShouldExecuteThemAndNot hasher := mock.HasherMock{} marshalizer := &mock.MarshalizerMock{} - dataPool := mock.NewPoolsHolderFake() + dataPool := mock.NewPoolsHolderMock() //we will have a miniblock that will have 3 tx hashes //all txs will be in datapool and none of them will return err when processed @@ -1548,7 +1548,7 @@ func TestShardProcessor_ProcessMiniBlockCompleteWithErrorWhileProcessShouldCallR hasher := mock.HasherMock{} marshalizer := &mock.MarshalizerMock{} - dataPool := mock.NewPoolsHolderFake() + dataPool := mock.NewPoolsHolderMock() //we will have a miniblock that will have 3 tx hashes //all txs will be in datapool and none of them will return err when processed diff --git a/process/factory/shard/intermediateProcessorsContainerFactory.go b/process/factory/shard/intermediateProcessorsContainerFactory.go index 8a47d0d3a51..5813ea86eda 100644 --- a/process/factory/shard/intermediateProcessorsContainerFactory.go +++ b/process/factory/shard/intermediateProcessorsContainerFactory.go @@ -79,7 +79,7 @@ func (ppcm *intermediateProcessorsContainerFactory) Create() (process.Intermedia return nil, err } - err = container.Add(block.RewardsBlockType, interproc) + err = container.Add(block.RewardsBlock, interproc) if err != nil { return nil, err } diff --git a/process/factory/shard/preProcessorsContainerFactory.go b/process/factory/shard/preProcessorsContainerFactory.go index 30de7b88534..46709fa5310 100644 --- a/process/factory/shard/preProcessorsContainerFactory.go +++ b/process/factory/shard/preProcessorsContainerFactory.go @@ -125,7 +125,7 @@ func (ppcm *preProcessorsContainerFactory) Create() (process.PreProcessorsContai return nil, err } - err = container.Add(block.RewardsBlockType, preproc) + err = 
container.Add(block.RewardsBlock, preproc) if err != nil { return nil, err } diff --git a/process/factory/shard/preProcessorsContainerFactory_test.go b/process/factory/shard/preProcessorsContainerFactory_test.go index e6936f0d4d8..be2df4e8b94 100644 --- a/process/factory/shard/preProcessorsContainerFactory_test.go +++ b/process/factory/shard/preProcessorsContainerFactory_test.go @@ -16,7 +16,7 @@ func TestNewPreProcessorsContainerFactory_NilShardCoordinator(t *testing.T) { &mock.ChainStorerMock{}, &mock.MarshalizerMock{}, &mock.HasherMock{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.AddressConverterMock{}, &mock.AccountsStub{}, &mock.RequestHandlerMock{}, @@ -38,7 +38,7 @@ func TestNewPreProcessorsContainerFactory_NilStore(t *testing.T) { nil, &mock.MarshalizerMock{}, &mock.HasherMock{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.AddressConverterMock{}, &mock.AccountsStub{}, &mock.RequestHandlerMock{}, @@ -60,7 +60,7 @@ func TestNewPreProcessorsContainerFactory_NilMarshalizer(t *testing.T) { &mock.ChainStorerMock{}, nil, &mock.HasherMock{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.AddressConverterMock{}, &mock.AccountsStub{}, &mock.RequestHandlerMock{}, @@ -82,7 +82,7 @@ func TestNewPreProcessorsContainerFactory_NilHasher(t *testing.T) { &mock.ChainStorerMock{}, &mock.MarshalizerMock{}, nil, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.AddressConverterMock{}, &mock.AccountsStub{}, &mock.RequestHandlerMock{}, @@ -126,7 +126,7 @@ func TestNewPreProcessorsContainerFactory_NilAddrConv(t *testing.T) { &mock.ChainStorerMock{}, &mock.MarshalizerMock{}, &mock.HasherMock{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), nil, &mock.AccountsStub{}, &mock.RequestHandlerMock{}, @@ -148,7 +148,7 @@ func TestNewPreProcessorsContainerFactory_NilAccounts(t *testing.T) { &mock.ChainStorerMock{}, &mock.MarshalizerMock{}, &mock.HasherMock{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.AddressConverterMock{}, nil, &mock.RequestHandlerMock{}, @@ -170,7 +170,7 @@ func TestNewPreProcessorsContainerFactory_NilTxProcessor(t *testing.T) { &mock.ChainStorerMock{}, &mock.MarshalizerMock{}, &mock.HasherMock{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.AddressConverterMock{}, &mock.AccountsStub{}, &mock.RequestHandlerMock{}, @@ -192,7 +192,7 @@ func TestNewPreProcessorsContainerFactory_NilSCProcessor(t *testing.T) { &mock.ChainStorerMock{}, &mock.MarshalizerMock{}, &mock.HasherMock{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.AddressConverterMock{}, &mock.AccountsStub{}, &mock.RequestHandlerMock{}, @@ -214,7 +214,7 @@ func TestNewPreProcessorsContainerFactory_NilSCR(t *testing.T) { &mock.ChainStorerMock{}, &mock.MarshalizerMock{}, &mock.HasherMock{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.AddressConverterMock{}, &mock.AccountsStub{}, &mock.RequestHandlerMock{}, @@ -236,7 +236,7 @@ func TestNewPreProcessorsContainerFactory_NilRewardTxProcessor(t *testing.T) { &mock.ChainStorerMock{}, &mock.MarshalizerMock{}, &mock.HasherMock{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.AddressConverterMock{}, &mock.AccountsStub{}, &mock.RequestHandlerMock{}, @@ -258,7 +258,7 @@ func TestNewPreProcessorsContainerFactory_NilRequestHandler(t *testing.T) { &mock.ChainStorerMock{}, &mock.MarshalizerMock{}, &mock.HasherMock{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.AddressConverterMock{}, &mock.AccountsStub{}, nil, 
@@ -280,7 +280,7 @@ func TestNewPreProcessorsContainerFactory(t *testing.T) { &mock.ChainStorerMock{}, &mock.MarshalizerMock{}, &mock.HasherMock{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.AddressConverterMock{}, &mock.AccountsStub{}, &mock.RequestHandlerMock{}, diff --git a/process/interface.go b/process/interface.go index a8a67829cd1..8a9d3c4eff1 100644 --- a/process/interface.go +++ b/process/interface.go @@ -1,12 +1,12 @@ package process import ( - "github.com/ElrondNetwork/elrond-go/data/rewardTx" "math/big" "time" "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" "github.com/ElrondNetwork/elrond-go/data/smartContractResult" "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/data/transaction" @@ -14,7 +14,7 @@ import ( "github.com/ElrondNetwork/elrond-go/process/smartContract/hooks" "github.com/ElrondNetwork/elrond-go/sharding" "github.com/ElrondNetwork/elrond-go/storage" - "github.com/ElrondNetwork/elrond-vm-common" + vmcommon "github.com/ElrondNetwork/elrond-vm-common" ) // TransactionProcessor is the main interface for transaction execution engine diff --git a/process/mock/nodesCoordinatorMock.go b/process/mock/nodesCoordinatorMock.go index 3ce79896273..9714a47ffc1 100644 --- a/process/mock/nodesCoordinatorMock.go +++ b/process/mock/nodesCoordinatorMock.go @@ -1,160 +1,178 @@ package mock import ( - "bytes" + "bytes" + "fmt" + "math/big" - "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/sharding" ) // NodesCoordinator defines the behaviour of a struct able to do validator group selection type NodesCoordinatorMock struct { - Validators map[uint32][]sharding.Validator - ShardConsensusSize uint32 - MetaConsensusSize uint32 - ShardId uint32 - NbShards uint32 - GetSelectedPublicKeysCalled func(selection []byte, shardId uint32) (publicKeys []string, err error) - GetValidatorsPublicKeysCalled func(randomness []byte, round uint64, shardId uint32) ([]string, error) - GetValidatorsRewardsAddressesCalled func(randomness []byte, round uint64, shardId uint32) ([]string, error) - LoadNodesPerShardsCalled func(nodes map[uint32][]sharding.Validator) error - ComputeValidatorsGroupCalled func(randomness []byte, round uint64, shardId uint32) (validatorsGroup []sharding.Validator, err error) - GetValidatorWithPublicKeyCalled func(publicKey []byte) (validator sharding.Validator, shardId uint32, err error) + Validators map[uint32][]sharding.Validator + ShardConsensusSize uint32 + MetaConsensusSize uint32 + ShardId uint32 + NbShards uint32 + GetSelectedPublicKeysCalled func(selection []byte, shardId uint32) (publicKeys []string, err error) + GetValidatorsPublicKeysCalled func(randomness []byte, round uint64, shardId uint32) ([]string, error) + GetValidatorsRewardsAddressesCalled func(randomness []byte, round uint64, shardId uint32) ([]string, error) + LoadNodesPerShardsCalled func(nodes map[uint32][]sharding.Validator) error + ComputeValidatorsGroupCalled func(randomness []byte, round uint64, shardId uint32) (validatorsGroup []sharding.Validator, err error) + GetValidatorWithPublicKeyCalled func(publicKey []byte) (validator sharding.Validator, shardId uint32, err error) } func NewNodesCoordinatorMock() *NodesCoordinatorMock { - return &NodesCoordinatorMock{ - ShardConsensusSize: 1, - MetaConsensusSize: 1, - ShardId: 0, - NbShards: 1, - Validators: make(map[uint32][]sharding.Validator), - } + nbShards := uint32(1) + 
nodesPerShard := 2 + validatorsMap := make(map[uint32][]sharding.Validator) + + for sh := uint32(0); sh < nbShards; sh++ { + validatorsList := make([]sharding.Validator, nodesPerShard) + for v := 0; v < nodesPerShard; v++ { + validatorsList[v], _ = sharding.NewValidator( + big.NewInt(10), + 1, + []byte(fmt.Sprintf("pubKey%d%d", sh, v)), + []byte(fmt.Sprintf("address%d%d", sh, v)), + ) + } + validatorsMap[sh] = validatorsList + } + + return &NodesCoordinatorMock{ + ShardConsensusSize: 1, + MetaConsensusSize: 1, + ShardId: 0, + NbShards: nbShards, + Validators: validatorsMap, + } } func (ncm *NodesCoordinatorMock) GetSelectedPublicKeys(selection []byte, shardId uint32) (publicKeys []string, err error) { - if ncm.GetSelectedPublicKeysCalled != nil { - return ncm.GetSelectedPublicKeysCalled(selection, shardId) - } + if ncm.GetSelectedPublicKeysCalled != nil { + return ncm.GetSelectedPublicKeysCalled(selection, shardId) + } - if len(ncm.Validators) == 0 { - return nil, sharding.ErrNilInputNodesMap - } + if len(ncm.Validators) == 0 { + return nil, sharding.ErrNilInputNodesMap + } - pubKeys := make([]string, 0) + pubKeys := make([]string, 0) - for _, v := range ncm.Validators[shardId] { - pubKeys = append(pubKeys, string(v.PubKey())) - } + for _, v := range ncm.Validators[shardId] { + pubKeys = append(pubKeys, string(v.PubKey())) + } - return pubKeys, nil + return pubKeys, nil } func (ncm *NodesCoordinatorMock) GetValidatorsPublicKeys( - randomness []byte, - round uint64, - shardId uint32, + randomness []byte, + round uint64, + shardId uint32, ) ([]string, error) { - if ncm.GetValidatorsPublicKeysCalled != nil { - return ncm.GetValidatorsPublicKeysCalled(randomness, round, shardId) - } + if ncm.GetValidatorsPublicKeysCalled != nil { + return ncm.GetValidatorsPublicKeysCalled(randomness, round, shardId) + } - validators, err := ncm.ComputeValidatorsGroup(randomness, round, shardId) - if err != nil { - return nil, err - } + validators, err := ncm.ComputeValidatorsGroup(randomness, round, shardId) + if err != nil { + return nil, err + } - valGrStr := make([]string, 0) + valGrStr := make([]string, 0) - for _, v := range validators { - valGrStr = append(valGrStr, string(v.PubKey())) - } + for _, v := range validators { + valGrStr = append(valGrStr, string(v.PubKey())) + } - return valGrStr, nil + return valGrStr, nil } func (ncm *NodesCoordinatorMock) GetValidatorsRewardsAddresses( - randomness []byte, - round uint64, - shardId uint32, + randomness []byte, + round uint64, + shardId uint32, ) ([]string, error) { - if ncm.GetValidatorsPublicKeysCalled != nil { - return ncm.GetValidatorsRewardsAddressesCalled(randomness, round, shardId) - } + if ncm.GetValidatorsPublicKeysCalled != nil { + return ncm.GetValidatorsRewardsAddressesCalled(randomness, round, shardId) + } - validators, err := ncm.ComputeValidatorsGroup(randomness, round, shardId) - if err != nil { - return nil, err - } + validators, err := ncm.ComputeValidatorsGroup(randomness, round, shardId) + if err != nil { + return nil, err + } - addresses := make([]string, 0) + addresses := make([]string, 0) + for _, v := range validators { + addresses = append(addresses, string(v.Address())) + } - for _, v := range validators { - addresses = append(addresses, string(v.Address())) - } - - return addresses, nil + return addresses, nil } func (ncm *NodesCoordinatorMock) SetNodesPerShards(nodes map[uint32][]sharding.Validator) error { - if ncm.LoadNodesPerShardsCalled != nil { - return ncm.LoadNodesPerShardsCalled(nodes) - } + if 
ncm.LoadNodesPerShardsCalled != nil { + return ncm.LoadNodesPerShardsCalled(nodes) + } - if nodes == nil { - return sharding.ErrNilInputNodesMap - } + if nodes == nil { + return sharding.ErrNilInputNodesMap + } - ncm.Validators = nodes + ncm.Validators = nodes - return nil + return nil } func (ncm *NodesCoordinatorMock) ComputeValidatorsGroup( - randomess []byte, - round uint64, - shardId uint32, + randomess []byte, + round uint64, + shardId uint32, ) ([]sharding.Validator, error) { - var consensusSize uint32 + var consensusSize uint32 - if ncm.ComputeValidatorsGroupCalled != nil { - return ncm.ComputeValidatorsGroupCalled(randomess, round, shardId) - } + if ncm.ComputeValidatorsGroupCalled != nil { + return ncm.ComputeValidatorsGroupCalled(randomess, round, shardId) + } - if ncm.ShardId == sharding.MetachainShardId { - consensusSize = ncm.MetaConsensusSize - } else { - consensusSize = ncm.ShardConsensusSize - } + if ncm.ShardId == sharding.MetachainShardId { + consensusSize = ncm.MetaConsensusSize + } else { + consensusSize = ncm.ShardConsensusSize + } - if randomess == nil { - return nil, sharding.ErrNilRandomness - } + if randomess == nil { + return nil, sharding.ErrNilRandomness + } - validatorsGroup := make([]sharding.Validator, 0) + validatorsGroup := make([]sharding.Validator, 0) - for i := uint32(0); i < consensusSize; i++ { - validatorsGroup = append(validatorsGroup, ncm.Validators[shardId][i]) - } + for i := uint32(0); i < consensusSize; i++ { + validatorsGroup = append(validatorsGroup, ncm.Validators[shardId][i]) + } - return validatorsGroup, nil + return validatorsGroup, nil } func (ncm *NodesCoordinatorMock) GetValidatorWithPublicKey(publicKey []byte) (sharding.Validator, uint32, error) { - if ncm.GetValidatorWithPublicKeyCalled != nil { - return ncm.GetValidatorWithPublicKeyCalled(publicKey) - } - - if publicKey == nil { - return nil, 0, sharding.ErrNilPubKey - } - - for shardId, shardEligible := range ncm.Validators { - for i := 0; i < len(shardEligible); i++ { - if bytes.Equal(publicKey, shardEligible[i].PubKey()) { - return shardEligible[i], shardId, nil - } - } - } - - return nil, 0, sharding.ErrValidatorNotFound + if ncm.GetValidatorWithPublicKeyCalled != nil { + return ncm.GetValidatorWithPublicKeyCalled(publicKey) + } + + if publicKey == nil { + return nil, 0, sharding.ErrNilPubKey + } + + for shardId, shardEligible := range ncm.Validators { + for i := 0; i < len(shardEligible); i++ { + if bytes.Equal(publicKey, shardEligible[i].PubKey()) { + return shardEligible[i], shardId, nil + } + } + } + + return nil, 0, sharding.ErrValidatorNotFound } diff --git a/process/mock/poolsHolderFake.go b/process/mock/poolsHolderMock.go similarity index 66% rename from process/mock/poolsHolderFake.go rename to process/mock/poolsHolderMock.go index d94491dbf7d..d54072a4643 100644 --- a/process/mock/poolsHolderFake.go +++ b/process/mock/poolsHolderMock.go @@ -9,7 +9,7 @@ import ( "github.com/ElrondNetwork/elrond-go/storage/storageUnit" ) -type PoolsHolderFake struct { +type PoolsHolderMock struct { transactions dataRetriever.ShardedDataCacherNotifier unsignedTransactions dataRetriever.ShardedDataCacherNotifier rewardTransactions dataRetriever.ShardedDataCacherNotifier @@ -21,8 +21,8 @@ type PoolsHolderFake struct { metaHdrNonces dataRetriever.Uint64SyncMapCacher } -func NewPoolsHolderFake() *PoolsHolderFake { - phf := &PoolsHolderFake{} +func NewPoolsHolderMock() *PoolsHolderMock { + phf := &PoolsHolderMock{} phf.transactions, _ = shardedData.NewShardedData(storageUnit.CacheConfig{Size: 
10000, Type: storageUnit.LRUCache}) phf.unsignedTransactions, _ = shardedData.NewShardedData(storageUnit.CacheConfig{Size: 10000, Type: storageUnit.LRUCache}) phf.rewardTransactions, _ = shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache}) @@ -43,46 +43,46 @@ func NewPoolsHolderFake() *PoolsHolderFake { return phf } -func (phf *PoolsHolderFake) Transactions() dataRetriever.ShardedDataCacherNotifier { - return phf.transactions +func (phm *PoolsHolderMock) Transactions() dataRetriever.ShardedDataCacherNotifier { + return phm.transactions } -func (phf *PoolsHolderFake) UnsignedTransactions() dataRetriever.ShardedDataCacherNotifier { - return phf.unsignedTransactions +func (phm *PoolsHolderMock) UnsignedTransactions() dataRetriever.ShardedDataCacherNotifier { + return phm.unsignedTransactions } -func (phf *PoolsHolderFake) RewardTransactions() dataRetriever.ShardedDataCacherNotifier { - return phf.rewardTransactions +func (phm *PoolsHolderMock) RewardTransactions() dataRetriever.ShardedDataCacherNotifier { + return phm.rewardTransactions } -func (phf *PoolsHolderFake) Headers() storage.Cacher { - return phf.headers +func (phm *PoolsHolderMock) Headers() storage.Cacher { + return phm.headers } -func (phf *PoolsHolderFake) HeadersNonces() dataRetriever.Uint64SyncMapCacher { - return phf.hdrNonces +func (phm *PoolsHolderMock) HeadersNonces() dataRetriever.Uint64SyncMapCacher { + return phm.hdrNonces } -func (phf *PoolsHolderFake) MiniBlocks() storage.Cacher { - return phf.miniBlocks +func (phm *PoolsHolderMock) MiniBlocks() storage.Cacher { + return phm.miniBlocks } -func (phf *PoolsHolderFake) PeerChangesBlocks() storage.Cacher { - return phf.peerChangesBlocks +func (phm *PoolsHolderMock) PeerChangesBlocks() storage.Cacher { + return phm.peerChangesBlocks } -func (phf *PoolsHolderFake) MetaBlocks() storage.Cacher { - return phf.metaBlocks +func (phm *PoolsHolderMock) MetaBlocks() storage.Cacher { + return phm.metaBlocks } -func (phf *PoolsHolderFake) MetaHeadersNonces() dataRetriever.Uint64SyncMapCacher { - return phf.metaHdrNonces +func (phm *PoolsHolderMock) MetaHeadersNonces() dataRetriever.Uint64SyncMapCacher { + return phm.metaHdrNonces } -func (phf *PoolsHolderFake) SetTransactions(transactions dataRetriever.ShardedDataCacherNotifier) { - phf.transactions = transactions +func (phm *PoolsHolderMock) SetTransactions(transactions dataRetriever.ShardedDataCacherNotifier) { + phm.transactions = transactions } -func (phf *PoolsHolderFake) SetUnsignedTransactions(scrs dataRetriever.ShardedDataCacherNotifier) { - phf.unsignedTransactions = scrs +func (phm *PoolsHolderMock) SetUnsignedTransactions(scrs dataRetriever.ShardedDataCacherNotifier) { + phm.unsignedTransactions = scrs } diff --git a/process/rewardTransaction/process.go b/process/rewardTransaction/process.go index a8773f3aa89..e6b172730be 100644 --- a/process/rewardTransaction/process.go +++ b/process/rewardTransaction/process.go @@ -15,6 +15,8 @@ type rewardTxProcessor struct { shardCoordinator sharding.Coordinator } +// NewRewardTxProcessor creates a rewardTxProcessor instance +// TODO: add unit tests func NewRewardTxProcessor( accountsDB state.AccountsAdapter, adrConv state.AddressConverter, diff --git a/process/transaction/export_test.go b/process/transaction/export_test.go index 78028ecaa98..1ae97c9e1aa 100644 --- a/process/transaction/export_test.go +++ b/process/transaction/export_test.go @@ -1,47 +1,42 @@ package transaction import ( - 
"github.com/ElrondNetwork/elrond-go/process/block/preprocess" - "math/big" - "sync" - - "github.com/ElrondNetwork/elrond-go/data/state" - "github.com/ElrondNetwork/elrond-go/data/transaction" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/data/transaction" + "math/big" ) type TxProcessor *txProcessor -var mutex sync.Mutex - func (txProc *txProcessor) GetAddresses(tx *transaction.Transaction) (adrSrc, adrDst state.AddressContainer, err error) { - return txProc.getAddresses(tx) + return txProc.getAddresses(tx) } func (txProc *txProcessor) GetAccounts(adrSrc, adrDst state.AddressContainer, ) (acntSrc, acntDst *state.Account, err error) { - return txProc.getAccounts(adrSrc, adrDst) + return txProc.getAccounts(adrSrc, adrDst) } func (txProc *txProcessor) CheckTxValues(tx *transaction.Transaction, acntSnd state.AccountHandler) error { - return txProc.checkTxValues(tx, acntSnd) + return txProc.checkTxValues(tx, acntSnd) } func (txProc *txProcessor) MoveBalances(acntSrc, acntDst *state.Account, value *big.Int) error { - return txProc.moveBalances(acntSrc, acntDst, value) + return txProc.moveBalances(acntSrc, acntDst, value) } func (txProc *txProcessor) IncreaseNonce(acntSrc *state.Account) error { - return txProc.increaseNonce(acntSrc) + return txProc.increaseNonce(acntSrc) } func (txProc *txProcessor) SetMinTxFee(minTxFee uint64) { - mutex.Lock() - preprocess.MinTxFee = minTxFee - mutex.Unlock() + mutTxFee.Lock() + minTxFee = minTxFee + mutTxFee.Unlock() } func (txProc *txProcessor) SetMinGasPrice(minGasPrice uint64) { - mutex.Lock() - preprocess.MinGasPrice = minGasPrice - mutex.Unlock() + mutTxFee.Lock() + minGasPrice = minGasPrice + mutTxFee.Unlock() } diff --git a/process/transaction/process.go b/process/transaction/process.go index 0a1d6abaae3..c1fc6488a2d 100644 --- a/process/transaction/process.go +++ b/process/transaction/process.go @@ -1,406 +1,416 @@ package transaction import ( - "bytes" - "github.com/ElrondNetwork/elrond-go/data/rewardTx" - "github.com/ElrondNetwork/elrond-go/process/block/preprocess" - "math/big" - - "github.com/ElrondNetwork/elrond-go/core/logger" - "github.com/ElrondNetwork/elrond-go/data" - "github.com/ElrondNetwork/elrond-go/data/state" - "github.com/ElrondNetwork/elrond-go/data/transaction" - "github.com/ElrondNetwork/elrond-go/hashing" - "github.com/ElrondNetwork/elrond-go/marshal" - "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/sharding" + "bytes" + "math/big" + "sync" + + "github.com/ElrondNetwork/elrond-go/core/logger" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/data/transaction" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/sharding" ) var log = logger.DefaultLogger() +// minGasPrice is the minimal gas price to be paid for any transaction +// TODO: Set minGasPrice and minTxFee to some positive value (TBD) +var minGasPrice = uint64(0) + +// minTxFee is the minimal fee to be paid for any transaction +var minTxFee = uint64(0) +var mutTxFee sync.RWMutex + // txProcessor implements TransactionProcessor interface and can modify account states according to a transaction type txProcessor struct { - accounts state.AccountsAdapter - adrConv state.AddressConverter - hasher hashing.Hasher - scProcessor 
process.SmartContractProcessor - marshalizer marshal.Marshalizer - rewardTxHandler process.UnsignedTxHandler - shardCoordinator sharding.Coordinator - txTypeHandler process.TxTypeHandler + accounts state.AccountsAdapter + adrConv state.AddressConverter + hasher hashing.Hasher + scProcessor process.SmartContractProcessor + marshalizer marshal.Marshalizer + rewardTxHandler process.UnsignedTxHandler + shardCoordinator sharding.Coordinator + txTypeHandler process.TxTypeHandler } // NewTxProcessor creates a new txProcessor engine func NewTxProcessor( - accounts state.AccountsAdapter, - hasher hashing.Hasher, - addressConv state.AddressConverter, - marshalizer marshal.Marshalizer, - shardCoordinator sharding.Coordinator, - scProcessor process.SmartContractProcessor, - rewardTxHandler process.UnsignedTxHandler, - txTypeHandler process.TxTypeHandler, + accounts state.AccountsAdapter, + hasher hashing.Hasher, + addressConv state.AddressConverter, + marshalizer marshal.Marshalizer, + shardCoordinator sharding.Coordinator, + scProcessor process.SmartContractProcessor, + rewardTxHandler process.UnsignedTxHandler, + txTypeHandler process.TxTypeHandler, ) (*txProcessor, error) { - if accounts == nil { - return nil, process.ErrNilAccountsAdapter - } - if hasher == nil { - return nil, process.ErrNilHasher - } - if addressConv == nil { - return nil, process.ErrNilAddressConverter - } - if marshalizer == nil { - return nil, process.ErrNilMarshalizer - } - if shardCoordinator == nil { - return nil, process.ErrNilShardCoordinator - } - if scProcessor == nil { - return nil, process.ErrNilSmartContractProcessor - } - if rewardTxHandler == nil { - return nil, process.ErrNilUnsignedTxHandler - } - if txTypeHandler == nil { - return nil, process.ErrNilTxTypeHandler - } - - return &txProcessor{ - accounts: accounts, - hasher: hasher, - adrConv: addressConv, - marshalizer: marshalizer, - shardCoordinator: shardCoordinator, - scProcessor: scProcessor, - rewardTxHandler: rewardTxHandler, - txTypeHandler: txTypeHandler, - }, nil + if accounts == nil { + return nil, process.ErrNilAccountsAdapter + } + if hasher == nil { + return nil, process.ErrNilHasher + } + if addressConv == nil { + return nil, process.ErrNilAddressConverter + } + if marshalizer == nil { + return nil, process.ErrNilMarshalizer + } + if shardCoordinator == nil { + return nil, process.ErrNilShardCoordinator + } + if scProcessor == nil { + return nil, process.ErrNilSmartContractProcessor + } + if rewardTxHandler == nil { + return nil, process.ErrNilUnsignedTxHandler + } + if txTypeHandler == nil { + return nil, process.ErrNilTxTypeHandler + } + + return &txProcessor{ + accounts: accounts, + hasher: hasher, + adrConv: addressConv, + marshalizer: marshalizer, + shardCoordinator: shardCoordinator, + scProcessor: scProcessor, + rewardTxHandler: rewardTxHandler, + txTypeHandler: txTypeHandler, + }, nil } // ProcessTransaction modifies the account states in respect with the transaction data func (txProc *txProcessor) ProcessTransaction(tx *transaction.Transaction, roundIndex uint64) error { - if tx == nil || tx.IsInterfaceNil() { - return process.ErrNilTransaction - } - - adrSrc, adrDst, err := txProc.getAddresses(tx) - if err != nil { - return err - } - - acntSnd, err := txProc.getAccountFromAddress(adrSrc) - if err != nil { - return err - } - - err = txProc.checkTxValues(tx, acntSnd) - if err != nil { - return err - } - - txType, err := txProc.txTypeHandler.ComputeTransactionType(tx) - if err != nil { - return err - } - - switch txType { - case 
process.MoveBalance: - return txProc.processMoveBalance(tx, adrSrc, adrDst) - case process.SCDeployment: - return txProc.processSCDeployment(tx, adrSrc, roundIndex) - case process.SCInvoking: - return txProc.processSCInvoking(tx, adrSrc, adrDst, roundIndex) - case process.RewardTx: - return txProc.processRewardTx(tx, adrSrc) - } - - return process.ErrWrongTransaction + if tx == nil || tx.IsInterfaceNil() { + return process.ErrNilTransaction + } + + adrSrc, adrDst, err := txProc.getAddresses(tx) + if err != nil { + return err + } + + acntSnd, err := txProc.getAccountFromAddress(adrSrc) + if err != nil { + return err + } + + err = txProc.checkTxValues(tx, acntSnd) + if err != nil { + return err + } + + txType, err := txProc.txTypeHandler.ComputeTransactionType(tx) + if err != nil { + return err + } + + switch txType { + case process.MoveBalance: + return txProc.processMoveBalance(tx, adrSrc, adrDst) + case process.SCDeployment: + return txProc.processSCDeployment(tx, adrSrc, roundIndex) + case process.SCInvoking: + return txProc.processSCInvoking(tx, adrSrc, adrDst, roundIndex) + case process.RewardTx: + return txProc.processRewardTx(tx, adrSrc) + } + + return process.ErrWrongTransaction } func (txProc *txProcessor) processTxFee(tx *transaction.Transaction, acntSnd *state.Account) (*big.Int, error) { - if acntSnd == nil { - return nil, nil - } - - cost := big.NewInt(0) - cost = cost.Mul(big.NewInt(0).SetUint64(tx.GasPrice), big.NewInt(0).SetUint64(tx.GasLimit)) - - txDataLen := int64(len(tx.Data)) - minFee := big.NewInt(0) - minFee = minFee.Mul(big.NewInt(txDataLen), big.NewInt(0).SetUint64(preprocess.MinGasPrice)) - minFee = minFee.Add(minFee, big.NewInt(0).SetUint64(preprocess.MinTxFee)) - - if minFee.Cmp(cost) > 0 { - return nil, process.ErrNotEnoughFeeInTransactions - } - - if acntSnd.Balance.Cmp(cost) < 0 { - return nil, process.ErrInsufficientFunds - } - - operation := big.NewInt(0) - err := acntSnd.SetBalanceWithJournal(operation.Sub(acntSnd.Balance, cost)) - if err != nil { - return nil, err - } - - return cost, nil + if acntSnd == nil { + return nil, nil + } + + cost := big.NewInt(0) + cost = cost.Mul(big.NewInt(0).SetUint64(tx.GasPrice), big.NewInt(0).SetUint64(tx.GasLimit)) + + txDataLen := int64(len(tx.Data)) + mutTxFee.RLock() + minFee := big.NewInt(0) + minFee = minFee.Mul(big.NewInt(txDataLen), big.NewInt(0).SetUint64(minGasPrice)) + minFee = minFee.Add(minFee, big.NewInt(0).SetUint64(minTxFee)) + mutTxFee.RUnlock() + + if minFee.Cmp(cost) > 0 { + return nil, process.ErrNotEnoughFeeInTransactions + } + + if acntSnd.Balance.Cmp(cost) < 0 { + return nil, process.ErrInsufficientFunds + } + + operation := big.NewInt(0) + err := acntSnd.SetBalanceWithJournal(operation.Sub(acntSnd.Balance, cost)) + if err != nil { + return nil, err + } + + return cost, nil } func (txProc *txProcessor) processRewardTx( - tx data.TransactionHandler, - adrSrc state.AddressContainer, + tx data.TransactionHandler, + adrSrc state.AddressContainer, ) error { - rTx, ok := tx.(*rewardTx.RewardTx) - if !ok { - return process.ErrWrongTypeAssertion - } - - acntSrc, _, err := txProc.getAccounts(adrSrc, adrSrc) - if err != nil { - return err - } - - // is sender address in node shard - if acntSrc != nil { - op := big.NewInt(0) - err := acntSrc.SetBalanceWithJournal(op.Add(acntSrc.Balance, rTx.Value)) - if err != nil { - return err - } - } - - if rTx.ShardId == txProc.shardCoordinator.SelfId() { - txProc.rewardTxHandler.AddRewardTxFromBlock(rTx) - } - - return nil + rTx, ok := tx.(*rewardTx.RewardTx) + if !ok { + 
return process.ErrWrongTypeAssertion + } + + acntSrc, _, err := txProc.getAccounts(adrSrc, adrSrc) + if err != nil { + return err + } + + // is sender address in node shard + if acntSrc != nil { + op := big.NewInt(0) + err := acntSrc.SetBalanceWithJournal(op.Add(acntSrc.Balance, rTx.Value)) + if err != nil { + return err + } + } + + if rTx.ShardId == txProc.shardCoordinator.SelfId() { + txProc.rewardTxHandler.AddRewardTxFromBlock(rTx) + } + + return nil } func (txProc *txProcessor) processMoveBalance( - tx *transaction.Transaction, - adrSrc, adrDst state.AddressContainer, + tx *transaction.Transaction, + adrSrc, adrDst state.AddressContainer, ) error { - // getAccounts returns acntSrc not nil if the adrSrc is in the node shard, the same, acntDst will be not nil - // if adrDst is in the node shard. If an error occurs it will be signaled in err variable. - acntSrc, acntDst, err := txProc.getAccounts(adrSrc, adrDst) - if err != nil { - return err - } + // getAccounts returns acntSrc not nil if the adrSrc is in the node shard, the same, acntDst will be not nil + // if adrDst is in the node shard. If an error occurs it will be signaled in err variable. + acntSrc, acntDst, err := txProc.getAccounts(adrSrc, adrDst) + if err != nil { + return err + } - txFee, err := txProc.processTxFee(tx, acntSrc) - if err != nil { - return err - } + txFee, err := txProc.processTxFee(tx, acntSrc) + if err != nil { + return err + } - value := tx.Value + value := tx.Value - err = txProc.moveBalances(acntSrc, acntDst, value) - if err != nil { - return err - } + err = txProc.moveBalances(acntSrc, acntDst, value) + if err != nil { + return err + } - // is sender address in node shard - if acntSrc != nil { - err = txProc.increaseNonce(acntSrc) - if err != nil { - return err - } - } + // is sender address in node shard + if acntSrc != nil { + err = txProc.increaseNonce(acntSrc) + if err != nil { + return err + } + } - txProc.rewardTxHandler.ProcessTransactionFee(txFee) + txProc.rewardTxHandler.ProcessTransactionFee(txFee) - return nil + return nil } func (txProc *txProcessor) processSCDeployment( - tx *transaction.Transaction, - adrSrc state.AddressContainer, - roundIndex uint64, + tx *transaction.Transaction, + adrSrc state.AddressContainer, + roundIndex uint64, ) error { - // getAccounts returns acntSrc not nil if the adrSrc is in the node shard, the same, acntDst will be not nil - // if adrDst is in the node shard. If an error occurs it will be signaled in err variable. - acntSrc, err := txProc.getAccountFromAddress(adrSrc) - if err != nil { - return err - } - - err = txProc.scProcessor.DeploySmartContract(tx, acntSrc, roundIndex) - return err + // getAccounts returns acntSrc not nil if the adrSrc is in the node shard, the same, acntDst will be not nil + // if adrDst is in the node shard. If an error occurs it will be signaled in err variable. + acntSrc, err := txProc.getAccountFromAddress(adrSrc) + if err != nil { + return err + } + + err = txProc.scProcessor.DeploySmartContract(tx, acntSrc, roundIndex) + return err } func (txProc *txProcessor) processSCInvoking( - tx *transaction.Transaction, - adrSrc, adrDst state.AddressContainer, - roundIndex uint64, + tx *transaction.Transaction, + adrSrc, adrDst state.AddressContainer, + roundIndex uint64, ) error { - // getAccounts returns acntSrc not nil if the adrSrc is in the node shard, the same, acntDst will be not nil - // if adrDst is in the node shard. If an error occurs it will be signaled in err variable. 
- acntSrc, acntDst, err := txProc.getAccounts(adrSrc, adrDst) - if err != nil { - return err - } - - err = txProc.scProcessor.ExecuteSmartContractTransaction(tx, acntSrc, acntDst, roundIndex) - return err + // getAccounts returns acntSrc not nil if the adrSrc is in the node shard, the same, acntDst will be not nil + // if adrDst is in the node shard. If an error occurs it will be signaled in err variable. + acntSrc, acntDst, err := txProc.getAccounts(adrSrc, adrDst) + if err != nil { + return err + } + + err = txProc.scProcessor.ExecuteSmartContractTransaction(tx, acntSrc, acntDst, roundIndex) + return err } func (txProc *txProcessor) getAddresses( - tx *transaction.Transaction, + tx *transaction.Transaction, ) (state.AddressContainer, state.AddressContainer, error) { - //for now we assume that the address = public key - adrSrc, err := txProc.adrConv.CreateAddressFromPublicKeyBytes(tx.SndAddr) - if err != nil { - return nil, nil, err - } - - adrDst, err := txProc.adrConv.CreateAddressFromPublicKeyBytes(tx.RcvAddr) - if err != nil { - return nil, nil, err - } - - return adrSrc, adrDst, nil + //for now we assume that the address = public key + adrSrc, err := txProc.adrConv.CreateAddressFromPublicKeyBytes(tx.SndAddr) + if err != nil { + return nil, nil, err + } + + adrDst, err := txProc.adrConv.CreateAddressFromPublicKeyBytes(tx.RcvAddr) + if err != nil { + return nil, nil, err + } + + return adrSrc, adrDst, nil } func (txProc *txProcessor) getAccounts( - adrSrc, adrDst state.AddressContainer, + adrSrc, adrDst state.AddressContainer, ) (*state.Account, *state.Account, error) { - var acntSrc, acntDst *state.Account + var acntSrc, acntDst *state.Account - shardForCurrentNode := txProc.shardCoordinator.SelfId() - shardForSrc := txProc.shardCoordinator.ComputeId(adrSrc) - shardForDst := txProc.shardCoordinator.ComputeId(adrDst) + shardForCurrentNode := txProc.shardCoordinator.SelfId() + shardForSrc := txProc.shardCoordinator.ComputeId(adrSrc) + shardForDst := txProc.shardCoordinator.ComputeId(adrDst) - srcInShard := shardForSrc == shardForCurrentNode - dstInShard := shardForDst == shardForCurrentNode + srcInShard := shardForSrc == shardForCurrentNode + dstInShard := shardForDst == shardForCurrentNode - if srcInShard && adrSrc == nil || - dstInShard && adrDst == nil { - return nil, nil, process.ErrNilAddressContainer - } + if srcInShard && adrSrc == nil || + dstInShard && adrDst == nil { + return nil, nil, process.ErrNilAddressContainer + } - if bytes.Equal(adrSrc.Bytes(), adrDst.Bytes()) { - acntWrp, err := txProc.accounts.GetAccountWithJournal(adrSrc) - if err != nil { - return nil, nil, err - } + if bytes.Equal(adrSrc.Bytes(), adrDst.Bytes()) { + acntWrp, err := txProc.accounts.GetAccountWithJournal(adrSrc) + if err != nil { + return nil, nil, err + } - account, ok := acntWrp.(*state.Account) - if !ok { - return nil, nil, process.ErrWrongTypeAssertion - } + account, ok := acntWrp.(*state.Account) + if !ok { + return nil, nil, process.ErrWrongTypeAssertion + } - return account, account, nil - } + return account, account, nil + } - if srcInShard { - acntSrcWrp, err := txProc.accounts.GetAccountWithJournal(adrSrc) - if err != nil { - return nil, nil, err - } + if srcInShard { + acntSrcWrp, err := txProc.accounts.GetAccountWithJournal(adrSrc) + if err != nil { + return nil, nil, err + } - account, ok := acntSrcWrp.(*state.Account) - if !ok { - return nil, nil, process.ErrWrongTypeAssertion - } + account, ok := acntSrcWrp.(*state.Account) + if !ok { + return nil, nil, process.ErrWrongTypeAssertion + 
} - acntSrc = account - } + acntSrc = account + } - if dstInShard { - acntDstWrp, err := txProc.accounts.GetAccountWithJournal(adrDst) - if err != nil { - return nil, nil, err - } + if dstInShard { + acntDstWrp, err := txProc.accounts.GetAccountWithJournal(adrDst) + if err != nil { + return nil, nil, err + } - account, ok := acntDstWrp.(*state.Account) - if !ok { - return nil, nil, process.ErrWrongTypeAssertion - } + account, ok := acntDstWrp.(*state.Account) + if !ok { + return nil, nil, process.ErrWrongTypeAssertion + } - acntDst = account - } + acntDst = account + } - return acntSrc, acntDst, nil + return acntSrc, acntDst, nil } func (txProc *txProcessor) getAccountFromAddress(adrSrc state.AddressContainer) (state.AccountHandler, error) { - shardForCurrentNode := txProc.shardCoordinator.SelfId() - shardForSrc := txProc.shardCoordinator.ComputeId(adrSrc) - if shardForCurrentNode != shardForSrc { - return nil, nil - } - - acnt, err := txProc.accounts.GetAccountWithJournal(adrSrc) - if err != nil { - return nil, err - } - - return acnt, nil + shardForCurrentNode := txProc.shardCoordinator.SelfId() + shardForSrc := txProc.shardCoordinator.ComputeId(adrSrc) + if shardForCurrentNode != shardForSrc { + return nil, nil + } + + acnt, err := txProc.accounts.GetAccountWithJournal(adrSrc) + if err != nil { + return nil, err + } + + return acnt, nil } func (txProc *txProcessor) checkTxValues(tx *transaction.Transaction, acntSnd state.AccountHandler) error { - if acntSnd == nil || acntSnd.IsInterfaceNil() { - // transaction was already done at sender shard - return nil - } - - if acntSnd.GetNonce() < tx.Nonce { - return process.ErrHigherNonceInTransaction - } - if acntSnd.GetNonce() > tx.Nonce { - return process.ErrLowerNonceInTransaction - } - - cost := big.NewInt(0) - cost = cost.Mul(big.NewInt(0).SetUint64(tx.GasPrice), big.NewInt(0).SetUint64(tx.GasLimit)) - cost = cost.Add(cost, tx.Value) - - if cost.Cmp(big.NewInt(0)) == 0 { - return nil - } - - stAcc, ok := acntSnd.(*state.Account) - if !ok { - return process.ErrWrongTypeAssertion - } - - if stAcc.Balance.Cmp(cost) < 0 { - return process.ErrInsufficientFunds - } - - return nil + if acntSnd == nil || acntSnd.IsInterfaceNil() { + // transaction was already done at sender shard + return nil + } + + if acntSnd.GetNonce() < tx.Nonce { + return process.ErrHigherNonceInTransaction + } + if acntSnd.GetNonce() > tx.Nonce { + return process.ErrLowerNonceInTransaction + } + + cost := big.NewInt(0) + cost = cost.Mul(big.NewInt(0).SetUint64(tx.GasPrice), big.NewInt(0).SetUint64(tx.GasLimit)) + cost = cost.Add(cost, tx.Value) + + if cost.Cmp(big.NewInt(0)) == 0 { + return nil + } + + stAcc, ok := acntSnd.(*state.Account) + if !ok { + return process.ErrWrongTypeAssertion + } + + if stAcc.Balance.Cmp(cost) < 0 { + return process.ErrInsufficientFunds + } + + return nil } func (txProc *txProcessor) moveBalances(acntSrc, acntDst *state.Account, - value *big.Int, + value *big.Int, ) error { - operation1 := big.NewInt(0) - operation2 := big.NewInt(0) - - // is sender address in node shard - if acntSrc != nil { - err := acntSrc.SetBalanceWithJournal(operation1.Sub(acntSrc.Balance, value)) - if err != nil { - return err - } - } - - // is receiver address in node shard - if acntDst != nil { - err := acntDst.SetBalanceWithJournal(operation2.Add(acntDst.Balance, value)) - if err != nil { - return err - } - } - - return nil + operation1 := big.NewInt(0) + operation2 := big.NewInt(0) + + // is sender address in node shard + if acntSrc != nil { + err := 
acntSrc.SetBalanceWithJournal(operation1.Sub(acntSrc.Balance, value)) + if err != nil { + return err + } + } + + // is receiver address in node shard + if acntDst != nil { + err := acntDst.SetBalanceWithJournal(operation2.Add(acntDst.Balance, value)) + if err != nil { + return err + } + } + + return nil } func (txProc *txProcessor) increaseNonce(acntSrc *state.Account) error { - return acntSrc.SetNonceWithJournal(acntSrc.Nonce + 1) + return acntSrc.SetNonceWithJournal(acntSrc.Nonce + 1) } From d0177b15b9ee4a2cf4b41492b7d5cdfaa2d64fb6 Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Mon, 9 Sep 2019 14:30:55 +0300 Subject: [PATCH 087/234] process: fixes after merge & review findings --- .../requestHandlers/requestHandler.go | 2 +- .../mock/unsignedTxHandlerMock.go | 8 + process/block/baseProcess.go | 2 +- process/block/metablock_test.go | 2 + .../block/preprocess/rewardTxPreProcessor.go | 712 +++++----- process/block/preprocess/rewardsHandler.go | 58 +- process/coordinator/process.go | 1234 ++++++++--------- process/interface.go | 393 +++--- process/mock/poolsHolderMock.go | 2 +- process/mock/rewardTxProcessorMock.go | 19 +- process/mock/unsignedTxHandlerMock.go | 8 + process/rewardTransaction/process.go | 152 +- process/transaction/export_test.go | 29 +- 13 files changed, 1349 insertions(+), 1272 deletions(-) diff --git a/dataRetriever/requestHandlers/requestHandler.go b/dataRetriever/requestHandlers/requestHandler.go index 229bd3d493a..c18a2081f3a 100644 --- a/dataRetriever/requestHandlers/requestHandler.go +++ b/dataRetriever/requestHandlers/requestHandler.go @@ -197,7 +197,7 @@ func (rrh *resolverRequestHandler) RequestHeaderByNonce(destShardID uint32, nonc } // IsInterfaceNil returns true if there is no value under the interface -func (rrh *ResolverRequestHandler) IsInterfaceNil() bool { +func (rrh *resolverRequestHandler) IsInterfaceNil() bool { if rrh == nil { return true } diff --git a/integrationTests/mock/unsignedTxHandlerMock.go b/integrationTests/mock/unsignedTxHandlerMock.go index 7097c4a31e8..bd588555a10 100644 --- a/integrationTests/mock/unsignedTxHandlerMock.go +++ b/integrationTests/mock/unsignedTxHandlerMock.go @@ -51,3 +51,11 @@ func (ut *UnsignedTxHandlerMock) VerifyCreatedUTxs() error { } return ut.VerifyCreatedUTxsCalled() } + +// IsInterfaceNil returns true if there is no value under the interface +func (ut *UnsignedTxHandlerMock) IsInterfaceNil() bool { + if ut == nil { + return true + } + return false +} \ No newline at end of file diff --git a/process/block/baseProcess.go b/process/block/baseProcess.go index d9e71b592bf..db4b32ec92d 100644 --- a/process/block/baseProcess.go +++ b/process/block/baseProcess.go @@ -522,7 +522,7 @@ func checkProcessorNilParameters( if nodesCoordinator == nil || nodesCoordinator.IsInterfaceNil() { return process.ErrNilNodesCoordinator } - if specialAddressHandler == nil || specialAddressHandler.IsInterfaceNil { + if specialAddressHandler == nil || specialAddressHandler.IsInterfaceNil() { return process.ErrNilSpecialAddressHandler } if uint64Converter == nil || uint64Converter.IsInterfaceNil() { diff --git a/process/block/metablock_test.go b/process/block/metablock_test.go index f8b7b27686d..cde408dd849 100644 --- a/process/block/metablock_test.go +++ b/process/block/metablock_test.go @@ -2536,6 +2536,8 @@ func TestMetaProcessor_UpdateShardsHeadersNonce_ShouldWork(t *testing.T) { mdp, &mock.ForkDetectorMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.HasherStub{}, 
marshalizerMock, &mock.ChainStorerMock{}, diff --git a/process/block/preprocess/rewardTxPreProcessor.go b/process/block/preprocess/rewardTxPreProcessor.go index 1d0f7907b57..0fd14766675 100644 --- a/process/block/preprocess/rewardTxPreProcessor.go +++ b/process/block/preprocess/rewardTxPreProcessor.go @@ -1,454 +1,460 @@ package preprocess import ( - "fmt" - "time" - - "github.com/ElrondNetwork/elrond-go/data" - "github.com/ElrondNetwork/elrond-go/data/block" - "github.com/ElrondNetwork/elrond-go/data/rewardTx" - "github.com/ElrondNetwork/elrond-go/data/state" - "github.com/ElrondNetwork/elrond-go/dataRetriever" - "github.com/ElrondNetwork/elrond-go/hashing" - "github.com/ElrondNetwork/elrond-go/marshal" - "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/sharding" - "github.com/ElrondNetwork/elrond-go/storage" + "fmt" + "time" + + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/storage" ) type rewardTxPreprocessor struct { - *basePreProcess - chReceivedAllRewardTxs chan bool - onRequestRewardTx func(shardID uint32, txHashes [][]byte) - rewardTxsForBlock txsForBlock - rewardTxPool dataRetriever.ShardedDataCacherNotifier - storage dataRetriever.StorageService - rewardsProcessor process.RewardTransactionProcessor - accounts state.AccountsAdapter + *basePreProcess + chReceivedAllRewardTxs chan bool + onRequestRewardTx func(shardID uint32, txHashes [][]byte) + rewardTxsForBlock txsForBlock + rewardTxPool dataRetriever.ShardedDataCacherNotifier + storage dataRetriever.StorageService + rewardsProcessor process.RewardTransactionProcessor + accounts state.AccountsAdapter } // NewRewardTxPreprocessor creates a new reward transaction preprocessor object func NewRewardTxPreprocessor( - rewardTxDataPool dataRetriever.ShardedDataCacherNotifier, - store dataRetriever.StorageService, - hasher hashing.Hasher, - marshalizer marshal.Marshalizer, - rewardProcessor process.RewardTransactionProcessor, - shardCoordinator sharding.Coordinator, - accounts state.AccountsAdapter, - onRequestRewardTransaction func(shardID uint32, txHashes [][]byte), + rewardTxDataPool dataRetriever.ShardedDataCacherNotifier, + store dataRetriever.StorageService, + hasher hashing.Hasher, + marshalizer marshal.Marshalizer, + rewardProcessor process.RewardTransactionProcessor, + shardCoordinator sharding.Coordinator, + accounts state.AccountsAdapter, + onRequestRewardTransaction func(shardID uint32, txHashes [][]byte), ) (*rewardTxPreprocessor, error) { - if hasher == nil { - return nil, process.ErrNilHasher - } - if marshalizer == nil { - return nil, process.ErrNilMarshalizer - } - if rewardTxDataPool == nil { - return nil, process.ErrNilRewardTxDataPool - } - if store == nil { - return nil, process.ErrNilStorage - } - if rewardProcessor == nil { - return nil, process.ErrNilTxProcessor - } - if shardCoordinator == nil { - return nil, process.ErrNilShardCoordinator - } - if accounts == nil { - return nil, process.ErrNilAccountsAdapter - } - if onRequestRewardTransaction == nil { - return nil, process.ErrNilRequestHandler - } - - bpp := &basePreProcess{ - hasher: hasher, - marshalizer: 
marshalizer, - shardCoordinator: shardCoordinator, - } - - rtp := &rewardTxPreprocessor{ - basePreProcess: bpp, - storage: store, - rewardTxPool: rewardTxDataPool, - onRequestRewardTx: onRequestRewardTransaction, - rewardsProcessor: rewardProcessor, - accounts: accounts, - } - - rtp.chReceivedAllRewardTxs = make(chan bool) - rtp.rewardTxPool.RegisterHandler(rtp.receivedRewardTransaction) - rtp.rewardTxsForBlock.txHashAndInfo = make(map[string]*txInfo) - - return rtp, nil + if hasher == nil || hasher.IsInterfaceNil() { + return nil, process.ErrNilHasher + } + if marshalizer == nil || marshalizer.IsInterfaceNil() { + return nil, process.ErrNilMarshalizer + } + if rewardTxDataPool == nil || rewardTxDataPool.IsInterfaceNil() { + return nil, process.ErrNilRewardTxDataPool + } + if store == nil || store.IsInterfaceNil() { + return nil, process.ErrNilStorage + } + if rewardProcessor == nil || rewardProcessor.IsInterfaceNil() { + return nil, process.ErrNilTxProcessor + } + if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { + return nil, process.ErrNilShardCoordinator + } + if accounts == nil || accounts.IsInterfaceNil() { + return nil, process.ErrNilAccountsAdapter + } + if onRequestRewardTransaction == nil { + return nil, process.ErrNilRequestHandler + } + + bpp := &basePreProcess{ + hasher: hasher, + marshalizer: marshalizer, + shardCoordinator: shardCoordinator, + } + + rtp := &rewardTxPreprocessor{ + basePreProcess: bpp, + storage: store, + rewardTxPool: rewardTxDataPool, + onRequestRewardTx: onRequestRewardTransaction, + rewardsProcessor: rewardProcessor, + accounts: accounts, + } + + rtp.chReceivedAllRewardTxs = make(chan bool) + rtp.rewardTxPool.RegisterHandler(rtp.receivedRewardTransaction) + rtp.rewardTxsForBlock.txHashAndInfo = make(map[string]*txInfo) + + return rtp, nil } // waitForRewardTxHashes waits for a call whether all the requested smartContractResults appeared func (rtp *rewardTxPreprocessor) waitForRewardTxHashes(waitTime time.Duration) error { - select { - case <-rtp.chReceivedAllRewardTxs: - return nil - case <-time.After(waitTime): - return process.ErrTimeIsOut - } + select { + case <-rtp.chReceivedAllRewardTxs: + return nil + case <-time.After(waitTime): + return process.ErrTimeIsOut + } } // IsDataPrepared returns non error if all the requested reward transactions arrived and were saved into the pool func (rtp *rewardTxPreprocessor) IsDataPrepared(requestedRewardTxs int, haveTime func() time.Duration) error { - if requestedRewardTxs > 0 { - log.Info(fmt.Sprintf("requested %d missing reward Txs\n", requestedRewardTxs)) - err := rtp.waitForRewardTxHashes(haveTime()) - rtp.rewardTxsForBlock.mutTxsForBlock.RLock() - missingRewardTxs := rtp.rewardTxsForBlock.missingTxs - rtp.rewardTxsForBlock.mutTxsForBlock.RUnlock() - log.Info(fmt.Sprintf("received %d missing reward Txs\n", requestedRewardTxs-missingRewardTxs)) - if err != nil { - return err - } - } - return nil + if requestedRewardTxs > 0 { + log.Info(fmt.Sprintf("requested %d missing reward Txs\n", requestedRewardTxs)) + err := rtp.waitForRewardTxHashes(haveTime()) + rtp.rewardTxsForBlock.mutTxsForBlock.RLock() + missingRewardTxs := rtp.rewardTxsForBlock.missingTxs + rtp.rewardTxsForBlock.mutTxsForBlock.RUnlock() + log.Info(fmt.Sprintf("received %d missing reward Txs\n", requestedRewardTxs-missingRewardTxs)) + if err != nil { + return err + } + } + return nil } // RemoveTxBlockFromPools removes reward transactions and miniblocks from associated pools func (rtp *rewardTxPreprocessor) 
RemoveTxBlockFromPools(body block.Body, miniBlockPool storage.Cacher) error { - if body == nil { - return process.ErrNilTxBlockBody - } + if body == nil { + return process.ErrNilTxBlockBody + } - err := rtp.removeDataFromPools(body, miniBlockPool, rtp.rewardTxPool, block.RewardsBlock) - - return err + return rtp.removeDataFromPools(body, miniBlockPool, rtp.rewardTxPool, block.RewardsBlock) } // RestoreTxBlockIntoPools restores the reward transactions and miniblocks to associated pools func (rtp *rewardTxPreprocessor) RestoreTxBlockIntoPools( - body block.Body, - miniBlockPool storage.Cacher, + body block.Body, + miniBlockPool storage.Cacher, ) (int, map[int][]byte, error) { - if miniBlockPool == nil { - return 0, nil, process.ErrNilMiniBlockPool - } - - miniBlockHashes := make(map[int][]byte) - - rewardTxsRestored := 0 - for i := 0; i < len(body); i++ { - miniBlock := body[i] - if miniBlock.Type != block.RewardsBlock { - continue - } - - strCache := process.ShardCacherIdentifier(miniBlock.SenderShardID, miniBlock.ReceiverShardID) - rewardTxBuff, err := rtp.storage.GetAll(dataRetriever.RewardTransactionUnit, miniBlock.TxHashes) - if err != nil { - return rewardTxsRestored, miniBlockHashes, err - } - - for txHash, txBuff := range rewardTxBuff { - tx := rewardTx.RewardTx{} - err = rtp.marshalizer.Unmarshal(&tx, txBuff) - if err != nil { - return rewardTxsRestored, miniBlockHashes, err - } - - rtp.rewardTxPool.AddData([]byte(txHash), &tx, strCache) - } - - restoredHash, err := rtp.restoreMiniBlock(miniBlock, miniBlockPool) - if err != nil { - return rewardTxsRestored, miniBlockHashes, err - } - - miniBlockHashes[i] = restoredHash - rewardTxsRestored += len(miniBlock.TxHashes) - } - - return rewardTxsRestored, miniBlockHashes, nil + if miniBlockPool == nil { + return 0, nil, process.ErrNilMiniBlockPool + } + + miniBlockHashes := make(map[int][]byte) + + rewardTxsRestored := 0 + for i := 0; i < len(body); i++ { + miniBlock := body[i] + if miniBlock.Type != block.RewardsBlock { + continue + } + + strCache := process.ShardCacherIdentifier(miniBlock.SenderShardID, miniBlock.ReceiverShardID) + rewardTxBuff, err := rtp.storage.GetAll(dataRetriever.RewardTransactionUnit, miniBlock.TxHashes) + if err != nil { + return rewardTxsRestored, miniBlockHashes, err + } + + for txHash, txBuff := range rewardTxBuff { + tx := rewardTx.RewardTx{} + err = rtp.marshalizer.Unmarshal(&tx, txBuff) + if err != nil { + return rewardTxsRestored, miniBlockHashes, err + } + + rtp.rewardTxPool.AddData([]byte(txHash), &tx, strCache) + } + + restoredHash, err := rtp.restoreMiniBlock(miniBlock, miniBlockPool) + if err != nil { + return rewardTxsRestored, miniBlockHashes, err + } + + miniBlockHashes[i] = restoredHash + rewardTxsRestored += len(miniBlock.TxHashes) + } + + return rewardTxsRestored, miniBlockHashes, nil } // ProcessBlockTransactions processes all the reward transactions from the block.Body, updates the state func (rtp *rewardTxPreprocessor) ProcessBlockTransactions(body block.Body, round uint64, haveTime func() time.Duration) error { - // basic validation already done in interceptors - for i := 0; i < len(body); i++ { - miniBlock := body[i] - if miniBlock.Type != block.RewardsBlock { - continue - } - if miniBlock.ReceiverShardID != rtp.shardCoordinator.SelfId() { - continue - } - - for j := 0; j < len(miniBlock.TxHashes); j++ { - if haveTime() < 0 { - return process.ErrTimeIsOut - } - - txHash := miniBlock.TxHashes[j] - rtp.rewardTxsForBlock.mutTxsForBlock.RLock() - txInfo := 
rtp.rewardTxsForBlock.txHashAndInfo[string(txHash)] - rtp.rewardTxsForBlock.mutTxsForBlock.RUnlock() - if txInfo == nil || txInfo.tx == nil { - return process.ErrMissingTransaction - } - - rTx, ok := txInfo.tx.(*rewardTx.RewardTx) - if !ok { - return process.ErrWrongTypeAssertion - } - - err := rtp.processRewardTransaction( - txHash, - rTx, - round, - miniBlock.SenderShardID, - miniBlock.ReceiverShardID, - ) - if err != nil { - return err - } - } - } - return nil + // basic validation already done in interceptors + for i := 0; i < len(body); i++ { + miniBlock := body[i] + if miniBlock.Type != block.RewardsBlock { + continue + } + if miniBlock.ReceiverShardID != rtp.shardCoordinator.SelfId() { + continue + } + + for j := 0; j < len(miniBlock.TxHashes); j++ { + if haveTime() < 0 { + return process.ErrTimeIsOut + } + + txHash := miniBlock.TxHashes[j] + rtp.rewardTxsForBlock.mutTxsForBlock.RLock() + txInfo := rtp.rewardTxsForBlock.txHashAndInfo[string(txHash)] + rtp.rewardTxsForBlock.mutTxsForBlock.RUnlock() + if txInfo == nil || txInfo.tx == nil { + return process.ErrMissingTransaction + } + + rTx, ok := txInfo.tx.(*rewardTx.RewardTx) + if !ok { + return process.ErrWrongTypeAssertion + } + + err := rtp.processRewardTransaction( + txHash, + rTx, + round, + miniBlock.SenderShardID, + miniBlock.ReceiverShardID, + ) + if err != nil { + return err + } + } + } + return nil } // SaveTxBlockToStorage saves the reward transactions from body into storage func (rtp *rewardTxPreprocessor) SaveTxBlockToStorage(body block.Body) error { - for i := 0; i < len(body); i++ { - miniBlock := (body)[i] - if miniBlock.Type != block.RewardsBlock || miniBlock.ReceiverShardID != rtp.shardCoordinator.SelfId() { - continue - } - - err := rtp.saveTxsToStorage( - miniBlock.TxHashes, - &rtp.rewardTxsForBlock, - rtp.storage, - dataRetriever.RewardTransactionUnit, - ) - if err != nil { - return err - } - } - - return nil + for i := 0; i < len(body); i++ { + miniBlock := (body)[i] + if miniBlock.Type != block.RewardsBlock || miniBlock.ReceiverShardID != rtp.shardCoordinator.SelfId() { + continue + } + + err := rtp.saveTxsToStorage( + miniBlock.TxHashes, + &rtp.rewardTxsForBlock, + rtp.storage, + dataRetriever.RewardTransactionUnit, + ) + if err != nil { + return err + } + } + + return nil } // receivedRewardTransaction is a callback function called when a new reward transaction // is added in the reward transactions pool func (rtp *rewardTxPreprocessor) receivedRewardTransaction(txHash []byte) { - receivedAllMissing := rtp.baseReceivedTransaction(txHash, &rtp.rewardTxsForBlock, rtp.rewardTxPool) + receivedAllMissing := rtp.baseReceivedTransaction(txHash, &rtp.rewardTxsForBlock, rtp.rewardTxPool) - if receivedAllMissing { - rtp.chReceivedAllRewardTxs <- true - } + if receivedAllMissing { + rtp.chReceivedAllRewardTxs <- true + } } // CreateBlockStarted cleans the local cache map for processed/created reward transactions at this round func (rtp *rewardTxPreprocessor) CreateBlockStarted() { - rtp.rewardTxsForBlock.mutTxsForBlock.Lock() - rtp.rewardTxsForBlock.txHashAndInfo = make(map[string]*txInfo) - rtp.rewardTxsForBlock.mutTxsForBlock.Unlock() + rtp.rewardTxsForBlock.mutTxsForBlock.Lock() + rtp.rewardTxsForBlock.txHashAndInfo = make(map[string]*txInfo) + rtp.rewardTxsForBlock.mutTxsForBlock.Unlock() } // RequestBlockTransactions request for reward transactions if missing from a block.Body func (rtp *rewardTxPreprocessor) RequestBlockTransactions(body block.Body) int { - requestedRewardTxs := 0 - missingRewardTxsForShards := 
rtp.computeMissingAndExistingRewardTxsForShards(body) - - rtp.rewardTxsForBlock.mutTxsForBlock.Lock() - for senderShardID, rewardTxHashesInfo := range missingRewardTxsForShards { - txShardInfo := &txShardInfo{senderShardID: senderShardID, receiverShardID: rewardTxHashesInfo.receiverShardID} - for _, txHash := range rewardTxHashesInfo.txHashes { - rtp.rewardTxsForBlock.txHashAndInfo[string(txHash)] = &txInfo{tx: nil, txShardInfo: txShardInfo} - } - } - rtp.rewardTxsForBlock.mutTxsForBlock.Unlock() - - for senderShardID, scrHashesInfo := range missingRewardTxsForShards { - requestedRewardTxs += len(scrHashesInfo.txHashes) - rtp.onRequestRewardTx(senderShardID, scrHashesInfo.txHashes) - } - - return requestedRewardTxs + requestedRewardTxs := 0 + missingRewardTxsForShards := rtp.computeMissingAndExistingRewardTxsForShards(body) + + rtp.rewardTxsForBlock.mutTxsForBlock.Lock() + for senderShardID, rewardTxHashesInfo := range missingRewardTxsForShards { + txShardInfo := &txShardInfo{senderShardID: senderShardID, receiverShardID: rewardTxHashesInfo.receiverShardID} + for _, txHash := range rewardTxHashesInfo.txHashes { + rtp.rewardTxsForBlock.txHashAndInfo[string(txHash)] = &txInfo{tx: nil, txShardInfo: txShardInfo} + } + } + rtp.rewardTxsForBlock.mutTxsForBlock.Unlock() + + for senderShardID, scrHashesInfo := range missingRewardTxsForShards { + requestedRewardTxs += len(scrHashesInfo.txHashes) + rtp.onRequestRewardTx(senderShardID, scrHashesInfo.txHashes) + } + + return requestedRewardTxs } // computeMissingAndExistingRewardTxsForShards calculates what reward transactions are available and what are missing // from block.Body func (rtp *rewardTxPreprocessor) computeMissingAndExistingRewardTxsForShards(body block.Body) map[uint32]*txsHashesInfo { - onlyRewardTxsFromOthersBody := block.Body{} - for _, mb := range body { - if mb.Type != block.RewardsBlock { - continue - } - if mb.SenderShardID == rtp.shardCoordinator.SelfId() { - continue - } - - onlyRewardTxsFromOthersBody = append(onlyRewardTxsFromOthersBody, mb) - } - - missingTxsForShard := rtp.computeExistingAndMissing( - onlyRewardTxsFromOthersBody, - &rtp.rewardTxsForBlock, - rtp.chReceivedAllRewardTxs, - block.RewardsBlock, - rtp.rewardTxPool, - ) - - return missingTxsForShard + rewardTxsFromOthersBody := block.Body{} + for _, mb := range body { + if mb.Type != block.RewardsBlock { + continue + } + if mb.SenderShardID == rtp.shardCoordinator.SelfId() { + continue + } + + rewardTxsFromOthersBody = append(rewardTxsFromOthersBody, mb) + } + + missingTxsForShard := rtp.computeExistingAndMissing( + rewardTxsFromOthersBody, + &rtp.rewardTxsForBlock, + rtp.chReceivedAllRewardTxs, + block.RewardsBlock, + rtp.rewardTxPool, + ) + + return missingTxsForShard } // processRewardTransaction processes a reward transaction, if the transactions has an error it removes it from pool func (rtp *rewardTxPreprocessor) processRewardTransaction( - rewardTxHash []byte, - rewardTx *rewardTx.RewardTx, - round uint64, - sndShardId uint32, - dstShardId uint32, + rewardTxHash []byte, + rewardTx *rewardTx.RewardTx, + round uint64, + sndShardId uint32, + dstShardId uint32, ) error { - err := rtp.rewardsProcessor.ProcessRewardTransaction(rewardTx) - if err != nil { - return err - } + err := rtp.rewardsProcessor.ProcessRewardTransaction(rewardTx) + if err != nil { + return err + } - txShardInfo := &txShardInfo{senderShardID: sndShardId, receiverShardID: dstShardId} - rtp.rewardTxsForBlock.mutTxsForBlock.Lock() - rtp.rewardTxsForBlock.txHashAndInfo[string(rewardTxHash)] = 
&txInfo{tx: rewardTx, txShardInfo: txShardInfo} - rtp.rewardTxsForBlock.mutTxsForBlock.Unlock() + txShardInfo := &txShardInfo{senderShardID: sndShardId, receiverShardID: dstShardId} + rtp.rewardTxsForBlock.mutTxsForBlock.Lock() + rtp.rewardTxsForBlock.txHashAndInfo[string(rewardTxHash)] = &txInfo{tx: rewardTx, txShardInfo: txShardInfo} + rtp.rewardTxsForBlock.mutTxsForBlock.Unlock() - return nil + return nil } // RequestTransactionsForMiniBlock requests missing reward transactions for a certain miniblock func (rtp *rewardTxPreprocessor) RequestTransactionsForMiniBlock(mb block.MiniBlock) int { - missingRewardTxsForMiniBlock := rtp.computeMissingRewardTxsForMiniBlock(mb) - rtp.onRequestRewardTx(mb.SenderShardID, missingRewardTxsForMiniBlock) + missingRewardTxsForMiniBlock := rtp.computeMissingRewardTxsForMiniBlock(mb) + rtp.onRequestRewardTx(mb.SenderShardID, missingRewardTxsForMiniBlock) - return len(missingRewardTxsForMiniBlock) + return len(missingRewardTxsForMiniBlock) } // computeMissingRewardTxsForMiniBlock computes missing reward transactions for a certain miniblock func (rtp *rewardTxPreprocessor) computeMissingRewardTxsForMiniBlock(mb block.MiniBlock) [][]byte { - missingRewardTxs := make([][]byte, 0) - if mb.Type != block.RewardsBlock { - return missingRewardTxs - } - - for _, txHash := range mb.TxHashes { - tx, _ := process.GetTransactionHandlerFromPool( - mb.SenderShardID, - mb.ReceiverShardID, - txHash, - rtp.rewardTxPool, - ) - - if tx == nil { - missingRewardTxs = append(missingRewardTxs, txHash) - } - } - - return missingRewardTxs + missingRewardTxs := make([][]byte, 0) + if mb.Type != block.RewardsBlock { + return missingRewardTxs + } + + for _, txHash := range mb.TxHashes { + tx, _ := process.GetTransactionHandlerFromPool( + mb.SenderShardID, + mb.ReceiverShardID, + txHash, + rtp.rewardTxPool, + ) + + if tx == nil { + missingRewardTxs = append(missingRewardTxs, txHash) + } + } + + return missingRewardTxs } // getAllRewardTxsFromMiniBlock gets all the reward transactions from a miniblock into a new structure func (rtp *rewardTxPreprocessor) getAllRewardTxsFromMiniBlock( - mb *block.MiniBlock, - haveTime func() bool, + mb *block.MiniBlock, + haveTime func() bool, ) ([]*rewardTx.RewardTx, [][]byte, error) { - strCache := process.ShardCacherIdentifier(mb.SenderShardID, mb.ReceiverShardID) - txCache := rtp.rewardTxPool.ShardDataStore(strCache) - if txCache == nil { - return nil, nil, process.ErrNilRewardTxDataPool - } - - // verify if all reward transactions exists - rewardTxs := make([]*rewardTx.RewardTx, 0) - txHashes := make([][]byte, 0) - for _, txHash := range mb.TxHashes { - if !haveTime() { - return nil, nil, process.ErrTimeIsOut - } - - tmp, _ := txCache.Peek(txHash) - if tmp == nil { - return nil, nil, process.ErrNilRewardTransaction - } - - tx, ok := tmp.(*rewardTx.RewardTx) - if !ok { - return nil, nil, process.ErrWrongTypeAssertion - } - - txHashes = append(txHashes, txHash) - rewardTxs = append(rewardTxs, tx) - } - - return rewardTxs, txHashes, nil + strCache := process.ShardCacherIdentifier(mb.SenderShardID, mb.ReceiverShardID) + txCache := rtp.rewardTxPool.ShardDataStore(strCache) + if txCache == nil { + return nil, nil, process.ErrNilRewardTxDataPool + } + + // verify if all reward transactions exists + rewardTxs := make([]*rewardTx.RewardTx, 0) + txHashes := make([][]byte, 0) + for _, txHash := range mb.TxHashes { + if !haveTime() { + return nil, nil, process.ErrTimeIsOut + } + + tmp, ok := txCache.Peek(txHash) + if !ok { + return nil, nil, 
process.ErrNilRewardTransaction + } + + tx, ok := tmp.(*rewardTx.RewardTx) + if !ok { + return nil, nil, process.ErrWrongTypeAssertion + } + + txHashes = append(txHashes, txHash) + rewardTxs = append(rewardTxs, tx) + } + + return rewardTxs, txHashes, nil } // CreateAndProcessMiniBlock creates the miniblock from storage and processes the smartContractResults added into the miniblock func (rtp *rewardTxPreprocessor) CreateAndProcessMiniBlock(sndShardId, dstShardId uint32, spaceRemained int, haveTime func() bool, round uint64) (*block.MiniBlock, error) { - return nil, nil + return nil, nil } // ProcessMiniBlock processes all the reward transactions from a miniblock and saves the processed reward transactions // in local cache func (rtp *rewardTxPreprocessor) ProcessMiniBlock(miniBlock *block.MiniBlock, haveTime func() bool, round uint64) error { - if miniBlock.Type != block.RewardsBlock { - return process.ErrWrongTypeInMiniBlock - } - - miniBlockRewardTxs, miniBlockTxHashes, err := rtp.getAllRewardTxsFromMiniBlock(miniBlock, haveTime) - if err != nil { - return err - } - - for index := range miniBlockRewardTxs { - if !haveTime() { - return process.ErrTimeIsOut - } - - err = rtp.rewardsProcessor.ProcessRewardTransaction(miniBlockRewardTxs[index]) - if err != nil { - return err - } - } - - txShardInfo := &txShardInfo{senderShardID: miniBlock.SenderShardID, receiverShardID: miniBlock.ReceiverShardID} - - rtp.rewardTxsForBlock.mutTxsForBlock.Lock() - for index, txHash := range miniBlockTxHashes { - rtp.rewardTxsForBlock.txHashAndInfo[string(txHash)] = &txInfo{tx: miniBlockRewardTxs[index], txShardInfo: txShardInfo} - } - rtp.rewardTxsForBlock.mutTxsForBlock.Unlock() - - return nil + if miniBlock.Type != block.RewardsBlock { + return process.ErrWrongTypeInMiniBlock + } + + miniBlockRewardTxs, miniBlockTxHashes, err := rtp.getAllRewardTxsFromMiniBlock(miniBlock, haveTime) + if err != nil { + return err + } + + for index := range miniBlockRewardTxs { + if !haveTime() { + return process.ErrTimeIsOut + } + + err = rtp.rewardsProcessor.ProcessRewardTransaction(miniBlockRewardTxs[index]) + if err != nil { + return err + } + } + + txShardInfo := &txShardInfo{senderShardID: miniBlock.SenderShardID, receiverShardID: miniBlock.ReceiverShardID} + + rtp.rewardTxsForBlock.mutTxsForBlock.Lock() + for index, txHash := range miniBlockTxHashes { + rtp.rewardTxsForBlock.txHashAndInfo[string(txHash)] = &txInfo{tx: miniBlockRewardTxs[index], txShardInfo: txShardInfo} + } + rtp.rewardTxsForBlock.mutTxsForBlock.Unlock() + + return nil } // CreateMarshalizedData marshalizes reward transaction hashes and and saves them into a new structure func (rtp *rewardTxPreprocessor) CreateMarshalizedData(txHashes [][]byte) ([][]byte, error) { - marshaledRewardTxs, err := rtp.createMarshalizedData(txHashes, &rtp.rewardTxsForBlock) - if err != nil { - return nil, err - } + marshaledRewardTxs, err := rtp.createMarshalizedData(txHashes, &rtp.rewardTxsForBlock) + if err != nil { + return nil, err + } - return marshaledRewardTxs, nil + return marshaledRewardTxs, nil } // GetAllCurrentUsedTxs returns all the reward transactions used at current creation / processing func (rtp *rewardTxPreprocessor) GetAllCurrentUsedTxs() map[string]data.TransactionHandler { - rewardTxPool := make(map[string]data.TransactionHandler) + rewardTxPool := make(map[string]data.TransactionHandler) + + rtp.rewardTxsForBlock.mutTxsForBlock.RLock() + for txHash, txInfo := range rtp.rewardTxsForBlock.txHashAndInfo { + rewardTxPool[txHash] = txInfo.tx + } + 
rtp.rewardTxsForBlock.mutTxsForBlock.RUnlock() - rtp.rewardTxsForBlock.mutTxsForBlock.RLock() - for txHash, txInfo := range rtp.rewardTxsForBlock.txHashAndInfo { - rewardTxPool[txHash] = txInfo.tx - } - rtp.rewardTxsForBlock.mutTxsForBlock.RUnlock() + return rewardTxPool +} - return rewardTxPool +// IsInterfaceNil returns true if there is no value under the interface +func (rtp *rewardTxPreprocessor) IsInterfaceNil() bool { + if rtp == nil { + return true + } + return false } diff --git a/process/block/preprocess/rewardsHandler.go b/process/block/preprocess/rewardsHandler.go index d10d274111c..0e70271ceca 100644 --- a/process/block/preprocess/rewardsHandler.go +++ b/process/block/preprocess/rewardsHandler.go @@ -1,6 +1,7 @@ package preprocess import ( + "github.com/ElrondNetwork/elrond-go/sharding" "math/big" "sync" @@ -21,11 +22,12 @@ const burnPercentage = 0.5 // 1 = 100%, 0 = 0% var rewardValue = big.NewInt(1000) type rewardsHandler struct { - address process.SpecialAddressHandler - hasher hashing.Hasher - marshalizer marshal.Marshalizer - mut sync.Mutex - accumulatedFees *big.Int + address process.SpecialAddressHandler + shardCoordinator sharding.Coordinator + hasher hashing.Hasher + marshalizer marshal.Marshalizer + mut sync.Mutex + accumulatedFees *big.Int rewardTxsFromBlock map[string]*rewardTx.RewardTx } @@ -33,6 +35,7 @@ type rewardsHandler struct { // NewRewardTxHandler constructor for the reward transaction handler func NewRewardTxHandler( address process.SpecialAddressHandler, + shardCoordinator sharding.Coordinator, hasher hashing.Hasher, marshalizer marshal.Marshalizer, ) (*rewardsHandler, error) { @@ -47,9 +50,10 @@ func NewRewardTxHandler( } rtxh := &rewardsHandler{ - address: address, - hasher: hasher, - marshalizer: marshalizer, + address: address, + shardCoordinator: shardCoordinator, + hasher: hasher, + marshalizer: marshalizer, } rtxh.accumulatedFees = big.NewInt(0) rtxh.rewardTxsFromBlock = make(map[string]*rewardTx.RewardTx) @@ -91,6 +95,8 @@ func (rtxh *rewardsHandler) CreateAllInterMiniBlocks() map[uint32]*block.MiniBlo if mb, ok = miniBlocks[dstShId]; !ok { mb = &block.MiniBlock{ ReceiverShardID: dstShId, + SenderShardID: rtxh.shardCoordinator.SelfId(), + Type: block.RewardsBlock, } } @@ -188,7 +194,7 @@ func (rtxh *rewardsHandler) createCommunityTx() *rewardTx.RewardTx { // CreateAllUTxs creates all the needed reward transactions // According to economic paper, out of the block fees 50% are burned, 40% go to the leader and 10% go -// to Elrond community fund. Fixed rewards for every validator are +// to Elrond community fund. Fixed rewards for every validator in consensus are generated by the system. 
func (rtxh *rewardsHandler) CreateAllUTxs() []data.TransactionHandler { rewardTxs := make([]data.TransactionHandler, 0) @@ -233,6 +239,7 @@ func (rtxh *rewardsHandler) createRewardTxsForConsensusGroup() []data.Transactio consensusRewardTxs = append(consensusRewardTxs, rTx) } + return consensusRewardTxs } @@ -274,3 +281,36 @@ func (rtxh *rewardsHandler) CreateMarshalizedData(txHashes [][]byte) ([][]byte, return make([][]byte, 0), nil } + +// GetAllCurrentFinishedTxs returns the cached finalized transactions for current round +func (rtxh *rewardsHandler) GetAllCurrentFinishedTxs() map[string]data.TransactionHandler { + rtxh.mut.Lock() + + rewardTxPool := make(map[string]data.TransactionHandler) + for txHash, txInfo := range rtxh.rewardTxsFromBlock { + + senderShard := txInfo.ShardId + receiverShard, err := rtxh.address.ShardIdForAddress(txInfo.RcvAddr) + if err != nil { + continue + } + if receiverShard != rtxh.shardCoordinator.SelfId() { + continue + } + if senderShard != rtxh.shardCoordinator.SelfId() { + continue + } + rewardTxPool[txHash] = txInfo + } + rtxh.mut.Unlock() + + return rewardTxPool +} + +// IsInterfaceNil returns true if there is no value under the interface +func (rtxh *rewardsHandler) IsInterfaceNil() bool { + if rtxh == nil { + return true + } + return false +} diff --git a/process/coordinator/process.go b/process/coordinator/process.go index 482beebaff0..232b4712d94 100644 --- a/process/coordinator/process.go +++ b/process/coordinator/process.go @@ -1,790 +1,786 @@ package coordinator import ( - "sort" - "sync" - "time" - - "github.com/ElrondNetwork/elrond-go/core/logger" - "github.com/ElrondNetwork/elrond-go/data" - "github.com/ElrondNetwork/elrond-go/data/block" - "github.com/ElrondNetwork/elrond-go/data/state" - "github.com/ElrondNetwork/elrond-go/dataRetriever" - "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/process/factory" - "github.com/ElrondNetwork/elrond-go/sharding" - "github.com/ElrondNetwork/elrond-go/storage" + "sort" + "sync" + "time" + + "github.com/ElrondNetwork/elrond-go/core/logger" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/factory" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/storage" ) type transactionCoordinator struct { - shardCoordinator sharding.Coordinator - accounts state.AccountsAdapter - miniBlockPool storage.Cacher + shardCoordinator sharding.Coordinator + accounts state.AccountsAdapter + miniBlockPool storage.Cacher - mutPreProcessor sync.RWMutex - txPreProcessors map[block.Type]process.PreProcessor - keysTxPreProcs []block.Type + mutPreProcessor sync.RWMutex + txPreProcessors map[block.Type]process.PreProcessor + keysTxPreProcs []block.Type - mutInterimProcessors sync.RWMutex - interimProcessors map[block.Type]process.IntermediateTransactionHandler - keysInterimProcs []block.Type + mutInterimProcessors sync.RWMutex + interimProcessors map[block.Type]process.IntermediateTransactionHandler + keysInterimProcs []block.Type - mutRequestedTxs sync.RWMutex - requestedTxs map[block.Type]int + mutRequestedTxs sync.RWMutex + requestedTxs map[block.Type]int - onRequestMiniBlock func(shardId uint32, mbHash []byte) + onRequestMiniBlock func(shardId uint32, mbHash []byte) } var log = logger.DefaultLogger() // NewTransactionCoordinator 
creates a transaction coordinator to run and coordinate preprocessors and processors func NewTransactionCoordinator( - shardCoordinator sharding.Coordinator, - accounts state.AccountsAdapter, - dataPool dataRetriever.PoolsHolder, - requestHandler process.RequestHandler, - preProcessors process.PreProcessorsContainer, - interProcessors process.IntermediateProcessorContainer, + shardCoordinator sharding.Coordinator, + accounts state.AccountsAdapter, + dataPool dataRetriever.PoolsHolder, + requestHandler process.RequestHandler, + preProcessors process.PreProcessorsContainer, + interProcessors process.IntermediateProcessorContainer, ) (*transactionCoordinator, error) { - if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { - return nil, process.ErrNilShardCoordinator - } - if accounts == nil || accounts.IsInterfaceNil() { - return nil, process.ErrNilAccountsAdapter - } - if dataPool == nil || dataPool.IsInterfaceNil() { - return nil, process.ErrNilDataPoolHolder - } - if requestHandler == nil || requestHandler.IsInterfaceNil() { - return nil, process.ErrNilRequestHandler - } - if interProcessors == nil || interProcessors.IsInterfaceNil() { - return nil, process.ErrNilIntermediateProcessorContainer - } - if preProcessors == nil || preProcessors.IsInterfaceNil() { - return nil, process.ErrNilPreProcessorsContainer - } - - tc := &transactionCoordinator{ - shardCoordinator: shardCoordinator, - accounts: accounts, - } - - tc.miniBlockPool = dataPool.MiniBlocks() - if tc.miniBlockPool == nil || tc.miniBlockPool.IsInterfaceNil() { - return nil, process.ErrNilMiniBlockPool - } - tc.miniBlockPool.RegisterHandler(tc.receivedMiniBlock) - - tc.onRequestMiniBlock = requestHandler.RequestMiniBlock - tc.requestedTxs = make(map[block.Type]int) - tc.txPreProcessors = make(map[block.Type]process.PreProcessor) - tc.interimProcessors = make(map[block.Type]process.IntermediateTransactionHandler) - - tc.keysTxPreProcs = preProcessors.Keys() - sort.Slice(tc.keysTxPreProcs, func(i, j int) bool { - return tc.keysTxPreProcs[i] < tc.keysTxPreProcs[j] - }) - for _, value := range tc.keysTxPreProcs { - preProc, err := preProcessors.Get(value) - if err != nil { - return nil, err - } - tc.txPreProcessors[value] = preProc - } - - tc.keysInterimProcs = interProcessors.Keys() - sort.Slice(tc.keysInterimProcs, func(i, j int) bool { - return tc.keysInterimProcs[i] < tc.keysInterimProcs[j] - }) - for _, value := range tc.keysInterimProcs { - interProc, err := interProcessors.Get(value) - if err != nil { - return nil, err - } - tc.interimProcessors[value] = interProc - } - - return tc, nil + if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { + return nil, process.ErrNilShardCoordinator + } + if accounts == nil || accounts.IsInterfaceNil() { + return nil, process.ErrNilAccountsAdapter + } + if dataPool == nil || dataPool.IsInterfaceNil() { + return nil, process.ErrNilDataPoolHolder + } + if requestHandler == nil || requestHandler.IsInterfaceNil() { + return nil, process.ErrNilRequestHandler + } + if interProcessors == nil || interProcessors.IsInterfaceNil() { + return nil, process.ErrNilIntermediateProcessorContainer + } + if preProcessors == nil || preProcessors.IsInterfaceNil() { + return nil, process.ErrNilPreProcessorsContainer + } + + tc := &transactionCoordinator{ + shardCoordinator: shardCoordinator, + accounts: accounts, + } + + tc.miniBlockPool = dataPool.MiniBlocks() + if tc.miniBlockPool == nil || tc.miniBlockPool.IsInterfaceNil() { + return nil, process.ErrNilMiniBlockPool + } + 
tc.miniBlockPool.RegisterHandler(tc.receivedMiniBlock) + + tc.onRequestMiniBlock = requestHandler.RequestMiniBlock + tc.requestedTxs = make(map[block.Type]int) + tc.txPreProcessors = make(map[block.Type]process.PreProcessor) + tc.interimProcessors = make(map[block.Type]process.IntermediateTransactionHandler) + + tc.keysTxPreProcs = preProcessors.Keys() + sort.Slice(tc.keysTxPreProcs, func(i, j int) bool { + return tc.keysTxPreProcs[i] < tc.keysTxPreProcs[j] + }) + for _, value := range tc.keysTxPreProcs { + preProc, err := preProcessors.Get(value) + if err != nil { + return nil, err + } + tc.txPreProcessors[value] = preProc + } + + tc.keysInterimProcs = interProcessors.Keys() + sort.Slice(tc.keysInterimProcs, func(i, j int) bool { + return tc.keysInterimProcs[i] < tc.keysInterimProcs[j] + }) + for _, value := range tc.keysInterimProcs { + interProc, err := interProcessors.Get(value) + if err != nil { + return nil, err + } + tc.interimProcessors[value] = interProc + } + + return tc, nil } // separateBodyByType creates a map of bodies according to type func (tc *transactionCoordinator) separateBodyByType(body block.Body) map[block.Type]block.Body { - separatedBodies := make(map[block.Type]block.Body) + separatedBodies := make(map[block.Type]block.Body) - for i := 0; i < len(body); i++ { - mb := body[i] + for i := 0; i < len(body); i++ { + mb := body[i] - if separatedBodies[mb.Type] == nil { - separatedBodies[mb.Type] = block.Body{} - } + if separatedBodies[mb.Type] == nil { + separatedBodies[mb.Type] = block.Body{} + } - separatedBodies[mb.Type] = append(separatedBodies[mb.Type], mb) - } + separatedBodies[mb.Type] = append(separatedBodies[mb.Type], mb) + } - return separatedBodies + return separatedBodies } // initRequestedTxs init the requested txs number func (tc *transactionCoordinator) initRequestedTxs() { - tc.mutRequestedTxs.Lock() - tc.requestedTxs = make(map[block.Type]int) - tc.mutRequestedTxs.Unlock() + tc.mutRequestedTxs.Lock() + tc.requestedTxs = make(map[block.Type]int) + tc.mutRequestedTxs.Unlock() } // RequestBlockTransactions verifies missing transaction and requests them func (tc *transactionCoordinator) RequestBlockTransactions(body block.Body) { - separatedBodies := tc.separateBodyByType(body) + separatedBodies := tc.separateBodyByType(body) - tc.initRequestedTxs() + tc.initRequestedTxs() - wg := sync.WaitGroup{} - wg.Add(len(separatedBodies)) + wg := sync.WaitGroup{} + wg.Add(len(separatedBodies)) - for key, value := range separatedBodies { - go func(blockType block.Type, blockBody block.Body) { - preproc := tc.getPreProcessor(blockType) - if preproc == nil { - wg.Done() - return - } - requestedTxs := preproc.RequestBlockTransactions(blockBody) + for key, value := range separatedBodies { + go func(blockType block.Type, blockBody block.Body) { + preproc := tc.getPreProcessor(blockType) + if preproc == nil { + wg.Done() + return + } + requestedTxs := preproc.RequestBlockTransactions(blockBody) - tc.mutRequestedTxs.Lock() - tc.requestedTxs[blockType] = requestedTxs - tc.mutRequestedTxs.Unlock() + tc.mutRequestedTxs.Lock() + tc.requestedTxs[blockType] = requestedTxs + tc.mutRequestedTxs.Unlock() - wg.Done() - }(key, value) - } + wg.Done() + }(key, value) + } - wg.Wait() + wg.Wait() } // IsDataPreparedForProcessing verifies if all the needed data is prepared func (tc *transactionCoordinator) IsDataPreparedForProcessing(haveTime func() time.Duration) error { - var errFound error - errMutex := sync.Mutex{} + var errFound error + errMutex := sync.Mutex{} - wg := sync.WaitGroup{} 
+ wg := sync.WaitGroup{} - tc.mutRequestedTxs.RLock() - wg.Add(len(tc.requestedTxs)) + tc.mutRequestedTxs.RLock() + wg.Add(len(tc.requestedTxs)) - for key, value := range tc.requestedTxs { - go func(blockType block.Type, requestedTxs int) { - preproc := tc.getPreProcessor(blockType) - if preproc == nil { - wg.Done() + for key, value := range tc.requestedTxs { + go func(blockType block.Type, requestedTxs int) { + preproc := tc.getPreProcessor(blockType) + if preproc == nil { + wg.Done() - return - } + return + } - err := preproc.IsDataPrepared(requestedTxs, haveTime) - if err != nil { - log.Debug(err.Error()) + err := preproc.IsDataPrepared(requestedTxs, haveTime) + if err != nil { + log.Debug(err.Error()) - errMutex.Lock() - errFound = err - errMutex.Unlock() - } - wg.Done() - }(key, value) - } + errMutex.Lock() + errFound = err + errMutex.Unlock() + } + wg.Done() + }(key, value) + } - tc.mutRequestedTxs.RUnlock() - wg.Wait() + tc.mutRequestedTxs.RUnlock() + wg.Wait() - return errFound + return errFound } // SaveBlockDataToStorage saves the data from block body into storage units func (tc *transactionCoordinator) SaveBlockDataToStorage(body block.Body) error { - separatedBodies := tc.separateBodyByType(body) + separatedBodies := tc.separateBodyByType(body) - var errFound error - errMutex := sync.Mutex{} + var errFound error + errMutex := sync.Mutex{} - wg := sync.WaitGroup{} - // Length of body types + another go routine for the intermediate transactions - wg.Add(len(separatedBodies)) + wg := sync.WaitGroup{} + // Length of body types + another go routine for the intermediate transactions + wg.Add(len(separatedBodies)) - for key, value := range separatedBodies { - go func(blockType block.Type, blockBody block.Body) { - preproc := tc.getPreProcessor(blockType) - if preproc == nil { - wg.Done() - return - } + for key, value := range separatedBodies { + go func(blockType block.Type, blockBody block.Body) { + preproc := tc.getPreProcessor(blockType) + if preproc == nil { + wg.Done() + return + } - err := preproc.SaveTxBlockToStorage(blockBody) - if err != nil { - log.Debug(err.Error()) + err := preproc.SaveTxBlockToStorage(blockBody) + if err != nil { + log.Debug(err.Error()) - errMutex.Lock() - errFound = err - errMutex.Unlock() - } + errMutex.Lock() + errFound = err + errMutex.Unlock() + } - wg.Done() - }(key, value) - } + wg.Done() + }(key, value) + } - wg.Wait() + wg.Wait() - intermediatePreproc := tc.getInterimProcessor(block.SmartContractResultBlock) - if intermediatePreproc == nil { - return errFound - } + intermediatePreproc := tc.getInterimProcessor(block.SmartContractResultBlock) + if intermediatePreproc == nil { + return errFound + } - err := intermediatePreproc.SaveCurrentIntermediateTxToStorage() - if err != nil { - log.Debug(err.Error()) + err := intermediatePreproc.SaveCurrentIntermediateTxToStorage() + if err != nil { + log.Debug(err.Error()) - errMutex.Lock() - errFound = err - errMutex.Unlock() - } + errMutex.Lock() + errFound = err + errMutex.Unlock() + } - return errFound + return errFound } // RestoreBlockDataFromStorage restores block data from storage to pool func (tc *transactionCoordinator) RestoreBlockDataFromStorage(body block.Body) (int, map[int][][]byte, error) { - separatedBodies := tc.separateBodyByType(body) + separatedBodies := tc.separateBodyByType(body) - var errFound error - localMutex := sync.Mutex{} - totalRestoredTx := 0 - restoredMbHashes := make(map[int][][]byte) + var errFound error + localMutex := sync.Mutex{} + totalRestoredTx := 0 + restoredMbHashes 
:= make(map[int][][]byte) - wg := sync.WaitGroup{} - wg.Add(len(separatedBodies)) + wg := sync.WaitGroup{} + wg.Add(len(separatedBodies)) - for key, value := range separatedBodies { - go func(blockType block.Type, blockBody block.Body) { - restoredMbs := make(map[int][]byte) + for key, value := range separatedBodies { + go func(blockType block.Type, blockBody block.Body) { + restoredMbs := make(map[int][]byte) - preproc := tc.getPreProcessor(blockType) - if preproc == nil { - wg.Done() - return - } + preproc := tc.getPreProcessor(blockType) + if preproc == nil { + wg.Done() + return + } - restoredTxs, restoredMbs, err := preproc.RestoreTxBlockIntoPools(blockBody, tc.miniBlockPool) - if err != nil { - log.Debug(err.Error()) + restoredTxs, restoredMbs, err := preproc.RestoreTxBlockIntoPools(blockBody, tc.miniBlockPool) + if err != nil { + log.Debug(err.Error()) - localMutex.Lock() - errFound = err - localMutex.Unlock() - } + localMutex.Lock() + errFound = err + localMutex.Unlock() + } - localMutex.Lock() - totalRestoredTx += restoredTxs + localMutex.Lock() + totalRestoredTx += restoredTxs - for shId, mbHash := range restoredMbs { - restoredMbHashes[shId] = append(restoredMbHashes[shId], mbHash) - } + for shId, mbHash := range restoredMbs { + restoredMbHashes[shId] = append(restoredMbHashes[shId], mbHash) + } - localMutex.Unlock() + localMutex.Unlock() - wg.Done() - }(key, value) - } + wg.Done() + }(key, value) + } - wg.Wait() + wg.Wait() - return totalRestoredTx, restoredMbHashes, errFound + return totalRestoredTx, restoredMbHashes, errFound } // RemoveBlockDataFromPool deletes block data from pools func (tc *transactionCoordinator) RemoveBlockDataFromPool(body block.Body) error { - separatedBodies := tc.separateBodyByType(body) + separatedBodies := tc.separateBodyByType(body) - var errFound error - errMutex := sync.Mutex{} + var errFound error + errMutex := sync.Mutex{} - wg := sync.WaitGroup{} - wg.Add(len(separatedBodies)) + wg := sync.WaitGroup{} + wg.Add(len(separatedBodies)) - for key, value := range separatedBodies { - go func(blockType block.Type, blockBody block.Body) { - preproc := tc.getPreProcessor(blockType) - if preproc == nil || preproc.IsInterfaceNil() { - wg.Done() - return - } + for key, value := range separatedBodies { + go func(blockType block.Type, blockBody block.Body) { + preproc := tc.getPreProcessor(blockType) + if preproc == nil || preproc.IsInterfaceNil() { + wg.Done() + return + } - err := preproc.RemoveTxBlockFromPools(blockBody, tc.miniBlockPool) - if err != nil { - log.Debug(err.Error()) + err := preproc.RemoveTxBlockFromPools(blockBody, tc.miniBlockPool) + if err != nil { + log.Debug(err.Error()) - errMutex.Lock() - errFound = err - errMutex.Unlock() - } - wg.Done() - }(key, value) - } + errMutex.Lock() + errFound = err + errMutex.Unlock() + } + wg.Done() + }(key, value) + } - wg.Wait() + wg.Wait() - return errFound + return errFound } // ProcessBlockTransaction processes transactions and updates state tries func (tc *transactionCoordinator) ProcessBlockTransaction( - body block.Body, - round uint64, - haveTime func() time.Duration, + body block.Body, + round uint64, + haveTime func() time.Duration, ) error { - separatedBodies := tc.separateBodyByType(body) - - // processing has to be done in order, as the order of different type of transactions over the same account is strict - for _, blockType := range tc.keysTxPreProcs { - if separatedBodies[blockType] == nil { - continue - } - - preproc := tc.getPreProcessor(blockType) - if preproc == nil || 
preproc.IsInterfaceNil() { - return process.ErrMissingPreProcessor - } - - err := preproc.ProcessBlockTransactions(separatedBodies[blockType], round, haveTime) - if err != nil { - return err - } - } - - return nil + separatedBodies := tc.separateBodyByType(body) + + // processing has to be done in order, as the order of different type of transactions over the same account is strict + for _, blockType := range tc.keysTxPreProcs { + if separatedBodies[blockType] == nil { + continue + } + + preproc := tc.getPreProcessor(blockType) + if preproc == nil || preproc.IsInterfaceNil() { + return process.ErrMissingPreProcessor + } + + err := preproc.ProcessBlockTransactions(separatedBodies[blockType], round, haveTime) + if err != nil { + return err + } + } + + return nil } // CreateMbsAndProcessCrossShardTransactionsDstMe creates miniblocks and processes cross shard transaction // with destination of current shard func (tc *transactionCoordinator) CreateMbsAndProcessCrossShardTransactionsDstMe( - hdr data.HeaderHandler, - maxTxRemaining uint32, - maxMbRemaining uint32, - round uint64, - haveTime func() bool, + hdr data.HeaderHandler, + maxTxRemaining uint32, + maxMbRemaining uint32, + round uint64, + haveTime func() bool, ) (block.MiniBlockSlice, uint32, bool) { - miniBlocks := make(block.MiniBlockSlice, 0) - nrTxAdded := uint32(0) - nrMBprocessed := 0 - - if hdr == nil || hdr.IsInterfaceNil() { - return miniBlocks, nrTxAdded, true - } - - crossMiniBlockHashes := hdr.GetMiniBlockHeadersWithDst(tc.shardCoordinator.SelfId()) - for key, senderShardId := range crossMiniBlockHashes { - if !haveTime() { - break - } - - if hdr.GetMiniBlockProcessed([]byte(key)) { - nrMBprocessed++ - continue - } - - miniVal, _ := tc.miniBlockPool.Peek([]byte(key)) - if miniVal == nil { - go tc.onRequestMiniBlock(senderShardId, []byte(key)) - continue - } - - miniBlock, ok := miniVal.(*block.MiniBlock) - if !ok { - continue - } - - preproc := tc.getPreProcessor(miniBlock.Type) - if preproc == nil || preproc.IsInterfaceNil() { - continue - } - - // overflow would happen if processing would continue - txOverFlow := nrTxAdded+uint32(len(miniBlock.TxHashes)) > maxTxRemaining - if txOverFlow { - return miniBlocks, nrTxAdded, false - } - - requestedTxs := preproc.RequestTransactionsForMiniBlock(*miniBlock) - if requestedTxs > 0 { - continue - } - - err := tc.processCompleteMiniBlock(preproc, miniBlock, round, haveTime) - if err != nil { - continue - } - - // all txs processed, add to processed miniblocks - miniBlocks = append(miniBlocks, miniBlock) - nrTxAdded = nrTxAdded + uint32(len(miniBlock.TxHashes)) - nrMBprocessed++ - - mbOverFlow := uint32(len(miniBlocks)) >= maxMbRemaining - if mbOverFlow { - return miniBlocks, nrTxAdded, false - } - } - - allMBsProcessed := nrMBprocessed == len(crossMiniBlockHashes) - return miniBlocks, nrTxAdded, allMBsProcessed + miniBlocks := make(block.MiniBlockSlice, 0) + nrTxAdded := uint32(0) + nrMBprocessed := 0 + + if hdr == nil || hdr.IsInterfaceNil() { + return miniBlocks, nrTxAdded, true + } + + crossMiniBlockHashes := hdr.GetMiniBlockHeadersWithDst(tc.shardCoordinator.SelfId()) + for key, senderShardId := range crossMiniBlockHashes { + if !haveTime() { + break + } + + if hdr.GetMiniBlockProcessed([]byte(key)) { + nrMBprocessed++ + continue + } + + miniVal, _ := tc.miniBlockPool.Peek([]byte(key)) + if miniVal == nil { + go tc.onRequestMiniBlock(senderShardId, []byte(key)) + continue + } + + miniBlock, ok := miniVal.(*block.MiniBlock) + if !ok { + continue + } + + preproc := 
tc.getPreProcessor(miniBlock.Type) + if preproc == nil || preproc.IsInterfaceNil() { + continue + } + + // overflow would happen if processing would continue + txOverFlow := nrTxAdded+uint32(len(miniBlock.TxHashes)) > maxTxRemaining + if txOverFlow { + return miniBlocks, nrTxAdded, false + } + + requestedTxs := preproc.RequestTransactionsForMiniBlock(*miniBlock) + if requestedTxs > 0 { + continue + } + + err := tc.processCompleteMiniBlock(preproc, miniBlock, round, haveTime) + if err != nil { + continue + } + + // all txs processed, add to processed miniblocks + miniBlocks = append(miniBlocks, miniBlock) + nrTxAdded = nrTxAdded + uint32(len(miniBlock.TxHashes)) + nrMBprocessed++ + + mbOverFlow := uint32(len(miniBlocks)) >= maxMbRemaining + if mbOverFlow { + return miniBlocks, nrTxAdded, false + } + } + + allMBsProcessed := nrMBprocessed == len(crossMiniBlockHashes) + return miniBlocks, nrTxAdded, allMBsProcessed } // CreateMbsAndProcessTransactionsFromMe creates miniblocks and processes transactions from pool func (tc *transactionCoordinator) CreateMbsAndProcessTransactionsFromMe( - maxTxSpaceRemained uint32, - maxMbSpaceRemained uint32, - round uint64, - haveTime func() bool, + maxTxSpaceRemained uint32, + maxMbSpaceRemained uint32, + round uint64, + haveTime func() bool, ) block.MiniBlockSlice { - txPreProc := tc.getPreProcessor(block.TxBlock) - if txPreProc == nil || txPreProc.IsInterfaceNil() { - return nil - } - - miniBlocks := make(block.MiniBlockSlice, 0) - txSpaceRemained := int(maxTxSpaceRemained) - - newMBAdded := true - for newMBAdded { - newMBAdded = false - - for shardId := uint32(0); shardId < tc.shardCoordinator.NumberOfShards(); shardId++ { - if txSpaceRemained <= 0 { - break - } - - mbSpaceRemained := int(maxMbSpaceRemained) - len(miniBlocks) - if mbSpaceRemained <= 0 { - break - } - - miniBlock, err := txPreProc.CreateAndProcessMiniBlock( - tc.shardCoordinator.SelfId(), - shardId, - txSpaceRemained, - haveTime, - round) - if err != nil { - continue - } - - if len(miniBlock.TxHashes) > 0 { - txSpaceRemained -= len(miniBlock.TxHashes) - miniBlocks = append(miniBlocks, miniBlock) - newMBAdded = true - } - } - } - - interMBs := tc.processAddedInterimTransactions() - if len(interMBs) > 0 { - miniBlocks = append(miniBlocks, interMBs...) - } - - rewardsMBs := tc.createRewardsMiniBlocks() - if len(interMBs) > 0 { - miniBlocks = append(miniBlocks, rewardsMBs...) - } - - return miniBlocks + txPreProc := tc.getPreProcessor(block.TxBlock) + if txPreProc == nil || txPreProc.IsInterfaceNil() { + return nil + } + + miniBlocks := make(block.MiniBlockSlice, 0) + txSpaceRemained := int(maxTxSpaceRemained) + + newMBAdded := true + for newMBAdded { + newMBAdded = false + + for shardId := uint32(0); shardId < tc.shardCoordinator.NumberOfShards(); shardId++ { + if txSpaceRemained <= 0 { + break + } + + mbSpaceRemained := int(maxMbSpaceRemained) - len(miniBlocks) + if mbSpaceRemained <= 0 { + break + } + + miniBlock, err := txPreProc.CreateAndProcessMiniBlock( + tc.shardCoordinator.SelfId(), + shardId, + txSpaceRemained, + haveTime, + round) + if err != nil { + continue + } + + if len(miniBlock.TxHashes) > 0 { + txSpaceRemained -= len(miniBlock.TxHashes) + miniBlocks = append(miniBlocks, miniBlock) + newMBAdded = true + } + } + } + + interMBs := tc.processAddedInterimTransactions() + if len(interMBs) > 0 { + miniBlocks = append(miniBlocks, interMBs...) + } + + rewardsMBs := tc.createRewardsMiniBlocks() + if len(rewardsMBs) > 0 { + miniBlocks = append(miniBlocks, rewardsMBs...) 
+ } + + return miniBlocks } func (tc *transactionCoordinator) createRewardsMiniBlocks() block.MiniBlockSlice { - // add rewards transactions to separate miniBlocks - interimProc := tc.getInterimProcessor(block.RewardsBlock) - if interimProc == nil { - return nil - } - - miniBlocks := make(block.MiniBlockSlice, 0) - rewardsMbs := interimProc.CreateAllInterMiniBlocks() - for key, mb := range rewardsMbs { - mb.ReceiverShardID = key - mb.SenderShardID = tc.shardCoordinator.SelfId() - mb.Type = block.RewardsBlock - - miniBlocks = append(miniBlocks, mb) - } - - return miniBlocks + // add rewards transactions to separate miniBlocks + interimProc := tc.getInterimProcessor(block.RewardsBlock) + if interimProc == nil { + return nil + } + + miniBlocks := make(block.MiniBlockSlice, 0) + rewardsMbs := interimProc.CreateAllInterMiniBlocks() + for _, mb := range rewardsMbs { + miniBlocks = append(miniBlocks, mb) + } + + return miniBlocks } func (tc *transactionCoordinator) processAddedInterimTransactions() block.MiniBlockSlice { - miniBlocks := make(block.MiniBlockSlice, 0) - - // processing has to be done in order, as the order of different type of transactions over the same account is strict - for _, blockType := range tc.keysInterimProcs { - if blockType == block.RewardsBlock { - // this has to be processed last - continue - } - - interimProc := tc.getInterimProcessor(blockType) - if interimProc == nil { - // this will never be reached as keysInterimProcs are the actual keys from the interimMap - continue - } - - currMbs := interimProc.CreateAllInterMiniBlocks() - for _, value := range currMbs { - miniBlocks = append(miniBlocks, value) - } - } - - return miniBlocks + miniBlocks := make(block.MiniBlockSlice, 0) + + // processing has to be done in order, as the order of different type of transactions over the same account is strict + for _, blockType := range tc.keysInterimProcs { + if blockType == block.RewardsBlock { + // this has to be processed last + continue + } + + interimProc := tc.getInterimProcessor(blockType) + if interimProc == nil { + // this will never be reached as keysInterimProcs are the actual keys from the interimMap + continue + } + + currMbs := interimProc.CreateAllInterMiniBlocks() + for _, value := range currMbs { + miniBlocks = append(miniBlocks, value) + } + } + + return miniBlocks } // CreateBlockStarted initializes necessary data for preprocessors at block create or block process func (tc *transactionCoordinator) CreateBlockStarted() { - tc.mutPreProcessor.RLock() - for _, value := range tc.txPreProcessors { - value.CreateBlockStarted() - } - tc.mutPreProcessor.RUnlock() - - tc.mutInterimProcessors.RLock() - for _, value := range tc.interimProcessors { - value.CreateBlockStarted() - } - tc.mutInterimProcessors.RUnlock() + tc.mutPreProcessor.RLock() + for _, value := range tc.txPreProcessors { + value.CreateBlockStarted() + } + tc.mutPreProcessor.RUnlock() + + tc.mutInterimProcessors.RLock() + for _, value := range tc.interimProcessors { + value.CreateBlockStarted() + } + tc.mutInterimProcessors.RUnlock() } func (tc *transactionCoordinator) getPreProcessor(blockType block.Type) process.PreProcessor { - tc.mutPreProcessor.RLock() - preprocessor, exists := tc.txPreProcessors[blockType] - tc.mutPreProcessor.RUnlock() + tc.mutPreProcessor.RLock() + preprocessor, exists := tc.txPreProcessors[blockType] + tc.mutPreProcessor.RUnlock() - if !exists { - return nil - } + if !exists { + return nil + } - return preprocessor + return preprocessor } func (tc *transactionCoordinator) 
getInterimProcessor(blockType block.Type) process.IntermediateTransactionHandler { - tc.mutInterimProcessors.RLock() - interProcessor, exists := tc.interimProcessors[blockType] - tc.mutInterimProcessors.RUnlock() + tc.mutInterimProcessors.RLock() + interProcessor, exists := tc.interimProcessors[blockType] + tc.mutInterimProcessors.RUnlock() - if !exists { - return nil - } + if !exists { + return nil + } - return interProcessor + return interProcessor } func createBroadcastTopic(shardC sharding.Coordinator, destShId uint32, mbType block.Type) (string, error) { - var baseTopic string - - switch mbType { - case block.TxBlock: - baseTopic = factory.TransactionTopic - case block.PeerBlock: - baseTopic = factory.PeerChBodyTopic - case block.SmartContractResultBlock: - baseTopic = factory.UnsignedTransactionTopic - case block.RewardsBlock: - baseTopic = factory.RewardsTransactionTopic - default: - return "", process.ErrUnknownBlockType - } - - transactionTopic := baseTopic + - shardC.CommunicationIdentifier(destShId) - - return transactionTopic, nil + var baseTopic string + + switch mbType { + case block.TxBlock: + baseTopic = factory.TransactionTopic + case block.PeerBlock: + baseTopic = factory.PeerChBodyTopic + case block.SmartContractResultBlock: + baseTopic = factory.UnsignedTransactionTopic + case block.RewardsBlock: + baseTopic = factory.RewardsTransactionTopic + default: + return "", process.ErrUnknownBlockType + } + + transactionTopic := baseTopic + + shardC.CommunicationIdentifier(destShId) + + return transactionTopic, nil } // CreateMarshalizedData creates marshalized data for broadcasting func (tc *transactionCoordinator) CreateMarshalizedData(body block.Body) (map[uint32]block.MiniBlockSlice, map[string][][]byte) { - mrsTxs := make(map[string][][]byte) - bodies := make(map[uint32]block.MiniBlockSlice) - - for i := 0; i < len(body); i++ { - miniblock := body[i] - receiverShardId := miniblock.ReceiverShardID - if receiverShardId == tc.shardCoordinator.SelfId() { // not taking into account miniblocks for current shard - continue - } - - broadcastTopic, err := createBroadcastTopic(tc.shardCoordinator, receiverShardId, miniblock.Type) - if err != nil { - log.Debug(err.Error()) - continue - } - - preproc := tc.getPreProcessor(miniblock.Type) - if preproc == nil || preproc.IsInterfaceNil() { - continue - } - - bodies[receiverShardId] = append(bodies[receiverShardId], miniblock) - - currMrsTxs, err := preproc.CreateMarshalizedData(miniblock.TxHashes) - if err != nil { - log.Debug(err.Error()) - continue - } - - if len(currMrsTxs) > 0 { - mrsTxs[broadcastTopic] = append(mrsTxs[broadcastTopic], currMrsTxs...) - } - - interimProc := tc.getInterimProcessor(miniblock.Type) - if interimProc == nil || interimProc.IsInterfaceNil() { - continue - } - - currMrsInterTxs, err := interimProc.CreateMarshalizedData(miniblock.TxHashes) - if err != nil { - log.Debug(err.Error()) - continue - } - - if len(currMrsInterTxs) > 0 { - mrsTxs[broadcastTopic] = append(mrsTxs[broadcastTopic], currMrsInterTxs...) 
- } - } - - return bodies, mrsTxs + mrsTxs := make(map[string][][]byte) + bodies := make(map[uint32]block.MiniBlockSlice) + + for i := 0; i < len(body); i++ { + miniblock := body[i] + receiverShardId := miniblock.ReceiverShardID + if receiverShardId == tc.shardCoordinator.SelfId() { // not taking into account miniblocks for current shard + continue + } + + broadcastTopic, err := createBroadcastTopic(tc.shardCoordinator, receiverShardId, miniblock.Type) + if err != nil { + log.Debug(err.Error()) + continue + } + + preproc := tc.getPreProcessor(miniblock.Type) + if preproc == nil || preproc.IsInterfaceNil() { + continue + } + + bodies[receiverShardId] = append(bodies[receiverShardId], miniblock) + + currMrsTxs, err := preproc.CreateMarshalizedData(miniblock.TxHashes) + if err != nil { + log.Debug(err.Error()) + continue + } + + if len(currMrsTxs) > 0 { + mrsTxs[broadcastTopic] = append(mrsTxs[broadcastTopic], currMrsTxs...) + } + + interimProc := tc.getInterimProcessor(miniblock.Type) + if interimProc == nil || interimProc.IsInterfaceNil() { + continue + } + + currMrsInterTxs, err := interimProc.CreateMarshalizedData(miniblock.TxHashes) + if err != nil { + log.Debug(err.Error()) + continue + } + + if len(currMrsInterTxs) > 0 { + mrsTxs[broadcastTopic] = append(mrsTxs[broadcastTopic], currMrsInterTxs...) + } + } + + return bodies, mrsTxs } // GetAllCurrentUsedTxs returns the cached transaction data for current round func (tc *transactionCoordinator) GetAllCurrentUsedTxs(blockType block.Type) map[string]data.TransactionHandler { - txPool := make(map[string]data.TransactionHandler, 0) - interTxPool := make(map[string]data.TransactionHandler, 0) + txPool := make(map[string]data.TransactionHandler, 0) + interTxPool := make(map[string]data.TransactionHandler, 0) - preProc := tc.getPreProcessor(blockType) - if preProc != nil { - txPool = preProc.GetAllCurrentUsedTxs() - } + preProc := tc.getPreProcessor(blockType) + if preProc != nil { + txPool = preProc.GetAllCurrentUsedTxs() + } - interProc := tc.getInterimProcessor(blockType) - if interProc != nil { - interTxPool = interProc.GetAllCurrentFinishedTxs() - } + interProc := tc.getInterimProcessor(blockType) + if interProc != nil { + interTxPool = interProc.GetAllCurrentFinishedTxs() + } - for hash, tx := range interTxPool { - txPool[hash] = tx - } + for hash, tx := range interTxPool { + txPool[hash] = tx + } - return txPool + return txPool } // RequestMiniBlocks request miniblocks if missing func (tc *transactionCoordinator) RequestMiniBlocks(header data.HeaderHandler) { - if header == nil || header.IsInterfaceNil() { - return - } - - crossMiniBlockHashes := header.GetMiniBlockHeadersWithDst(tc.shardCoordinator.SelfId()) - for key, senderShardId := range crossMiniBlockHashes { - obj, _ := tc.miniBlockPool.Peek([]byte(key)) - if obj == nil { - go tc.onRequestMiniBlock(senderShardId, []byte(key)) - } - } + if header == nil || header.IsInterfaceNil() { + return + } + + crossMiniBlockHashes := header.GetMiniBlockHeadersWithDst(tc.shardCoordinator.SelfId()) + for key, senderShardId := range crossMiniBlockHashes { + obj, _ := tc.miniBlockPool.Peek([]byte(key)) + if obj == nil { + go tc.onRequestMiniBlock(senderShardId, []byte(key)) + } + } } // receivedMiniBlock is a callback function when a new miniblock was received // it will further ask for missing transactions func (tc *transactionCoordinator) receivedMiniBlock(miniBlockHash []byte) { - val, ok := tc.miniBlockPool.Peek(miniBlockHash) - if !ok { - return - } - - miniBlock, ok := val.(block.MiniBlock) 
- if !ok { - return - } - - preproc := tc.getPreProcessor(miniBlock.Type) - if preproc == nil || preproc.IsInterfaceNil() { - return - } - - _ = preproc.RequestTransactionsForMiniBlock(miniBlock) + val, ok := tc.miniBlockPool.Peek(miniBlockHash) + if !ok { + return + } + + miniBlock, ok := val.(block.MiniBlock) + if !ok { + return + } + + preproc := tc.getPreProcessor(miniBlock.Type) + if preproc == nil || preproc.IsInterfaceNil() { + return + } + + _ = preproc.RequestTransactionsForMiniBlock(miniBlock) } // processMiniBlockComplete - all transactions must be processed together, otherwise error func (tc *transactionCoordinator) processCompleteMiniBlock( - preproc process.PreProcessor, - miniBlock *block.MiniBlock, - round uint64, - haveTime func() bool, + preproc process.PreProcessor, + miniBlock *block.MiniBlock, + round uint64, + haveTime func() bool, ) error { - snapshot := tc.accounts.JournalLen() - err := preproc.ProcessMiniBlock(miniBlock, haveTime, round) - if err != nil { - log.Debug(err.Error()) - errAccountState := tc.accounts.RevertToSnapshot(snapshot) - if errAccountState != nil { - // TODO: evaluate if reloading the trie from disk will might solve the problem - log.Error(errAccountState.Error()) - } + snapshot := tc.accounts.JournalLen() + err := preproc.ProcessMiniBlock(miniBlock, haveTime, round) + if err != nil { + log.Debug(err.Error()) + errAccountState := tc.accounts.RevertToSnapshot(snapshot) + if errAccountState != nil { + // TODO: evaluate if reloading the trie from disk will might solve the problem + log.Error(errAccountState.Error()) + } - return err - } + return err + } - return nil + return nil } // VerifyCreatedBlockTransactions checks whether the created transactions are the same as the one proposed func (tc *transactionCoordinator) VerifyCreatedBlockTransactions(body block.Body) error { - tc.mutInterimProcessors.RLock() - defer tc.mutInterimProcessors.RUnlock() - errMutex := sync.Mutex{} - var errFound error - // TODO: think if it is good in parallel or it is needed in sequences - wg := sync.WaitGroup{} - wg.Add(len(tc.interimProcessors)) - - for key, interimProc := range tc.interimProcessors { - if key == block.RewardsBlock { - // this has to be processed last - wg.Done() - continue - } - - go func(intermediateProcessor process.IntermediateTransactionHandler) { - err := intermediateProcessor.VerifyInterMiniBlocks(body) - if err != nil { - errMutex.Lock() - errFound = err - errMutex.Unlock() - } - wg.Done() - }(interimProc) - } - - wg.Wait() - - if errFound != nil { - return errFound - } - - interimProc := tc.getInterimProcessor(block.RewardsBlock) - if interimProc == nil { - return nil - } - - return interimProc.VerifyInterMiniBlocks(body) + tc.mutInterimProcessors.RLock() + defer tc.mutInterimProcessors.RUnlock() + errMutex := sync.Mutex{} + var errFound error + // TODO: think if it is good in parallel or it is needed in sequences + wg := sync.WaitGroup{} + wg.Add(len(tc.interimProcessors)) + + for key, interimProc := range tc.interimProcessors { + if key == block.RewardsBlock { + // this has to be processed last + wg.Done() + continue + } + + go func(intermediateProcessor process.IntermediateTransactionHandler) { + err := intermediateProcessor.VerifyInterMiniBlocks(body) + if err != nil { + errMutex.Lock() + errFound = err + errMutex.Unlock() + } + wg.Done() + }(interimProc) + } + + wg.Wait() + + if errFound != nil { + return errFound + } + + interimProc := tc.getInterimProcessor(block.RewardsBlock) + if interimProc == nil { + return nil + } + + return 
interimProc.VerifyInterMiniBlocks(body) } // IsInterfaceNil returns true if there is no value under the interface func (tc *transactionCoordinator) IsInterfaceNil() bool { - if tc == nil { - return true - } - return false + if tc == nil { + return true + } + return false } diff --git a/process/interface.go b/process/interface.go index f7f3220965d..c7603d78f6b 100644 --- a/process/interface.go +++ b/process/interface.go @@ -1,377 +1,378 @@ package process import ( - "math/big" - "time" - - "github.com/ElrondNetwork/elrond-go/data" - "github.com/ElrondNetwork/elrond-go/data/block" - "github.com/ElrondNetwork/elrond-go/data/rewardTx" - "github.com/ElrondNetwork/elrond-go/data/smartContractResult" - "github.com/ElrondNetwork/elrond-go/data/state" - "github.com/ElrondNetwork/elrond-go/data/transaction" - "github.com/ElrondNetwork/elrond-go/p2p" - "github.com/ElrondNetwork/elrond-go/process/smartContract/hooks" - "github.com/ElrondNetwork/elrond-go/sharding" - "github.com/ElrondNetwork/elrond-go/storage" - vmcommon "github.com/ElrondNetwork/elrond-vm-common" + "math/big" + "time" + + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" + "github.com/ElrondNetwork/elrond-go/data/smartContractResult" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/data/transaction" + "github.com/ElrondNetwork/elrond-go/p2p" + "github.com/ElrondNetwork/elrond-go/process/smartContract/hooks" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/storage" + vmcommon "github.com/ElrondNetwork/elrond-vm-common" ) // TransactionProcessor is the main interface for transaction execution engine type TransactionProcessor interface { - ProcessTransaction(transaction *transaction.Transaction, round uint64) error - IsInterfaceNil() bool + ProcessTransaction(transaction *transaction.Transaction, round uint64) error + IsInterfaceNil() bool } // RewardTransactionProcessor is the interface for reward transaction execution engine type RewardTransactionProcessor interface { - ProcessRewardTransaction(rewardTx *rewardTx.RewardTx) error + ProcessRewardTransaction(rewardTx *rewardTx.RewardTx) error IsInterfaceNil() bool } // SmartContractResultProcessor is the main interface for smart contract result execution engine type SmartContractResultProcessor interface { - ProcessSmartContractResult(scr *smartContractResult.SmartContractResult) error - IsInterfaceNil() bool + ProcessSmartContractResult(scr *smartContractResult.SmartContractResult) error + IsInterfaceNil() bool } // TxTypeHandler is an interface to calculate the transaction type type TxTypeHandler interface { - ComputeTransactionType(tx data.TransactionHandler) (TransactionType, error) - IsInterfaceNil() bool + ComputeTransactionType(tx data.TransactionHandler) (TransactionType, error) + IsInterfaceNil() bool } // TxValidator can determine if a provided transaction handler is valid or not from the process point of view type TxValidator interface { - IsTxValidForProcessing(txHandler data.TransactionHandler) bool - IsInterfaceNil() bool + IsTxValidForProcessing(txHandler data.TransactionHandler) bool + IsInterfaceNil() bool } // HeaderValidator can determine if a provided header handler is valid or not from the process point of view type HeaderValidator interface { - IsHeaderValidForProcessing(headerHandler data.HeaderHandler) bool - IsInterfaceNil() bool + IsHeaderValidForProcessing(headerHandler data.HeaderHandler) 
bool + IsInterfaceNil() bool } // TransactionCoordinator is an interface to coordinate transaction processing using multiple processors type TransactionCoordinator interface { - RequestMiniBlocks(header data.HeaderHandler) - RequestBlockTransactions(body block.Body) - IsDataPreparedForProcessing(haveTime func() time.Duration) error + RequestMiniBlocks(header data.HeaderHandler) + RequestBlockTransactions(body block.Body) + IsDataPreparedForProcessing(haveTime func() time.Duration) error - SaveBlockDataToStorage(body block.Body) error - RestoreBlockDataFromStorage(body block.Body) (int, map[int][][]byte, error) - RemoveBlockDataFromPool(body block.Body) error + SaveBlockDataToStorage(body block.Body) error + RestoreBlockDataFromStorage(body block.Body) (int, map[int][][]byte, error) + RemoveBlockDataFromPool(body block.Body) error - ProcessBlockTransaction(body block.Body, round uint64, haveTime func() time.Duration) error + ProcessBlockTransaction(body block.Body, round uint64, haveTime func() time.Duration) error - CreateBlockStarted() - CreateMbsAndProcessCrossShardTransactionsDstMe(header data.HeaderHandler, maxTxSpaceRemained uint32, maxMbSpaceRemained uint32, round uint64, haveTime func() bool) (block.MiniBlockSlice, uint32, bool) - CreateMbsAndProcessTransactionsFromMe(maxTxSpaceRemained uint32, maxMbSpaceRemained uint32, round uint64, haveTime func() bool) block.MiniBlockSlice + CreateBlockStarted() + CreateMbsAndProcessCrossShardTransactionsDstMe(header data.HeaderHandler, maxTxSpaceRemained uint32, maxMbSpaceRemained uint32, round uint64, haveTime func() bool) (block.MiniBlockSlice, uint32, bool) + CreateMbsAndProcessTransactionsFromMe(maxTxSpaceRemained uint32, maxMbSpaceRemained uint32, round uint64, haveTime func() bool) block.MiniBlockSlice - CreateMarshalizedData(body block.Body) (map[uint32]block.MiniBlockSlice, map[string][][]byte) + CreateMarshalizedData(body block.Body) (map[uint32]block.MiniBlockSlice, map[string][][]byte) - GetAllCurrentUsedTxs(blockType block.Type) map[string]data.TransactionHandler + GetAllCurrentUsedTxs(blockType block.Type) map[string]data.TransactionHandler - VerifyCreatedBlockTransactions(body block.Body) error - IsInterfaceNil() bool + VerifyCreatedBlockTransactions(body block.Body) error + IsInterfaceNil() bool } // SmartContractProcessor is the main interface for the smart contract caller engine type SmartContractProcessor interface { - ComputeTransactionType(tx *transaction.Transaction) (TransactionType, error) - ExecuteSmartContractTransaction(tx *transaction.Transaction, acntSrc, acntDst state.AccountHandler, round uint64) error - DeploySmartContract(tx *transaction.Transaction, acntSrc state.AccountHandler, round uint64) error - IsInterfaceNil() bool + ComputeTransactionType(tx *transaction.Transaction) (TransactionType, error) + ExecuteSmartContractTransaction(tx *transaction.Transaction, acntSrc, acntDst state.AccountHandler, round uint64) error + DeploySmartContract(tx *transaction.Transaction, acntSrc state.AccountHandler, round uint64) error + IsInterfaceNil() bool } // IntermediateTransactionHandler handles transactions which are not resolved in only one step type IntermediateTransactionHandler interface { - AddIntermediateTransactions(txs []data.TransactionHandler) error - CreateAllInterMiniBlocks() map[uint32]*block.MiniBlock - VerifyInterMiniBlocks(body block.Body) error - CreateMarshalizedData(txHashes [][]byte) ([][]byte, error) - SaveCurrentIntermediateTxToStorage() error - GetAllCurrentFinishedTxs() 
map[string]data.TransactionHandler - CreateBlockStarted() - IsInterfaceNil() bool + AddIntermediateTransactions(txs []data.TransactionHandler) error + CreateAllInterMiniBlocks() map[uint32]*block.MiniBlock + VerifyInterMiniBlocks(body block.Body) error + CreateMarshalizedData(txHashes [][]byte) ([][]byte, error) + SaveCurrentIntermediateTxToStorage() error + GetAllCurrentFinishedTxs() map[string]data.TransactionHandler + CreateBlockStarted() + IsInterfaceNil() bool } // TransactionVerifier interface validates if the transaction is good and if it should be processed type TransactionVerifier interface { - IsTransactionValid(tx data.TransactionHandler) error + IsTransactionValid(tx data.TransactionHandler) error } // UnsignedTxHandler creates and verifies unsigned transactions for current round type UnsignedTxHandler interface { - CleanProcessedUTxs() - ProcessTransactionFee(cost *big.Int) - CreateAllUTxs() []data.TransactionHandler - VerifyCreatedUTxs() error - AddRewardTxFromBlock(tx data.TransactionHandler) + CleanProcessedUTxs() + ProcessTransactionFee(cost *big.Int) + CreateAllUTxs() []data.TransactionHandler + VerifyCreatedUTxs() error + AddRewardTxFromBlock(tx data.TransactionHandler) + IsInterfaceNil() bool } // SpecialAddressHandler responds with needed special addresses type SpecialAddressHandler interface { - SetElrondCommunityAddress(elrond []byte) - ElrondCommunityAddress() []byte - SetConsensusRewardAddresses(consensusRewardAddresses []string) - ConsensusRewardAddresses() []string - LeaderAddress() []byte - BurnAddress() []byte - ShardIdForAddress([]byte) (uint32, error) - IsInterfaceNil() bool + SetElrondCommunityAddress(elrond []byte) + ElrondCommunityAddress() []byte + SetConsensusRewardAddresses(consensusRewardAddresses []string) + ConsensusRewardAddresses() []string + LeaderAddress() []byte + BurnAddress() []byte + ShardIdForAddress([]byte) (uint32, error) + IsInterfaceNil() bool } // PreProcessor is an interface used to prepare and process transaction data type PreProcessor interface { - CreateBlockStarted() - IsDataPrepared(requestedTxs int, haveTime func() time.Duration) error + CreateBlockStarted() + IsDataPrepared(requestedTxs int, haveTime func() time.Duration) error - RemoveTxBlockFromPools(body block.Body, miniBlockPool storage.Cacher) error - RestoreTxBlockIntoPools(body block.Body, miniBlockPool storage.Cacher) (int, map[int][]byte, error) - SaveTxBlockToStorage(body block.Body) error + RemoveTxBlockFromPools(body block.Body, miniBlockPool storage.Cacher) error + RestoreTxBlockIntoPools(body block.Body, miniBlockPool storage.Cacher) (int, map[int][]byte, error) + SaveTxBlockToStorage(body block.Body) error - ProcessBlockTransactions(body block.Body, round uint64, haveTime func() time.Duration) error - RequestBlockTransactions(body block.Body) int + ProcessBlockTransactions(body block.Body, round uint64, haveTime func() time.Duration) error + RequestBlockTransactions(body block.Body) int - CreateMarshalizedData(txHashes [][]byte) ([][]byte, error) + CreateMarshalizedData(txHashes [][]byte) ([][]byte, error) - RequestTransactionsForMiniBlock(mb block.MiniBlock) int - ProcessMiniBlock(miniBlock *block.MiniBlock, haveTime func() bool, round uint64) error - CreateAndProcessMiniBlock(sndShardId, dstShardId uint32, spaceRemained int, haveTime func() bool, round uint64) (*block.MiniBlock, error) + RequestTransactionsForMiniBlock(mb block.MiniBlock) int + ProcessMiniBlock(miniBlock *block.MiniBlock, haveTime func() bool, round uint64) error + 
CreateAndProcessMiniBlock(sndShardId, dstShardId uint32, spaceRemained int, haveTime func() bool, round uint64) (*block.MiniBlock, error) - GetAllCurrentUsedTxs() map[string]data.TransactionHandler - IsInterfaceNil() bool + GetAllCurrentUsedTxs() map[string]data.TransactionHandler + IsInterfaceNil() bool } // BlockProcessor is the main interface for block execution engine type BlockProcessor interface { - ProcessBlock(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler, haveTime func() time.Duration) error - CommitBlock(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler) error - RevertAccountState() - CreateBlockBody(round uint64, haveTime func() bool) (data.BodyHandler, error) - RestoreBlockIntoPools(header data.HeaderHandler, body data.BodyHandler) error - CreateBlockHeader(body data.BodyHandler, round uint64, haveTime func() bool) (data.HeaderHandler, error) - MarshalizedDataToBroadcast(header data.HeaderHandler, body data.BodyHandler) (map[uint32][]byte, map[string][][]byte, error) - DecodeBlockBody(dta []byte) data.BodyHandler - DecodeBlockHeader(dta []byte) data.HeaderHandler - AddLastNotarizedHdr(shardId uint32, processedHdr data.HeaderHandler) - SetConsensusRewardAddresses(consensusRewardAddresses []string) + ProcessBlock(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler, haveTime func() time.Duration) error + CommitBlock(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler) error + RevertAccountState() + CreateBlockBody(round uint64, haveTime func() bool) (data.BodyHandler, error) + RestoreBlockIntoPools(header data.HeaderHandler, body data.BodyHandler) error + CreateBlockHeader(body data.BodyHandler, round uint64, haveTime func() bool) (data.HeaderHandler, error) + MarshalizedDataToBroadcast(header data.HeaderHandler, body data.BodyHandler) (map[uint32][]byte, map[string][][]byte, error) + DecodeBlockBody(dta []byte) data.BodyHandler + DecodeBlockHeader(dta []byte) data.HeaderHandler + AddLastNotarizedHdr(shardId uint32, processedHdr data.HeaderHandler) + SetConsensusRewardAddresses(consensusRewardAddresses []string) IsInterfaceNil() bool } // Checker provides functionality to checks the integrity and validity of a data structure type Checker interface { - // IntegrityAndValidity does both validity and integrity checks on the data structure - IntegrityAndValidity(coordinator sharding.Coordinator) error - // Integrity checks only the integrity of the data - Integrity(coordinator sharding.Coordinator) error - // IsInterfaceNil returns true if there is no value under the interface - IsInterfaceNil() bool + // IntegrityAndValidity does both validity and integrity checks on the data structure + IntegrityAndValidity(coordinator sharding.Coordinator) error + // Integrity checks only the integrity of the data + Integrity(coordinator sharding.Coordinator) error + // IsInterfaceNil returns true if there is no value under the interface + IsInterfaceNil() bool } // SigVerifier provides functionality to verify a signature of a signed data structure that holds also the verifying parameters type SigVerifier interface { - VerifySig() error + VerifySig() error } // SignedDataValidator provides functionality to check the validity and signature of a data structure type SignedDataValidator interface { - SigVerifier - Checker + SigVerifier + Checker } // HashAccesser interface provides functionality over hashable objects type HashAccesser interface { - SetHash([]byte) - Hash() []byte + 
SetHash([]byte) + Hash() []byte } // InterceptedBlockBody interface provides functionality over intercepted blocks type InterceptedBlockBody interface { - Checker - HashAccesser - GetUnderlyingObject() interface{} + Checker + HashAccesser + GetUnderlyingObject() interface{} } // Bootstrapper is an interface that defines the behaviour of a struct that is able // to synchronize the node type Bootstrapper interface { - AddSyncStateListener(func(isSyncing bool)) - ShouldSync() bool - StopSync() - StartSync() - IsInterfaceNil() bool + AddSyncStateListener(func(isSyncing bool)) + ShouldSync() bool + StopSync() + StartSync() + IsInterfaceNil() bool } // ForkDetector is an interface that defines the behaviour of a struct that is able // to detect forks type ForkDetector interface { - AddHeader(header data.HeaderHandler, headerHash []byte, state BlockHeaderState, finalHeader data.HeaderHandler, finalHeaderHash []byte) error - RemoveHeaders(nonce uint64, hash []byte) - CheckFork() (forkDetected bool, nonce uint64, hash []byte) - GetHighestFinalBlockNonce() uint64 - ProbableHighestNonce() uint64 - ResetProbableHighestNonceIfNeeded() - IsInterfaceNil() bool + AddHeader(header data.HeaderHandler, headerHash []byte, state BlockHeaderState, finalHeader data.HeaderHandler, finalHeaderHash []byte) error + RemoveHeaders(nonce uint64, hash []byte) + CheckFork() (forkDetected bool, nonce uint64, hash []byte) + GetHighestFinalBlockNonce() uint64 + ProbableHighestNonce() uint64 + ResetProbableHighestNonceIfNeeded() + IsInterfaceNil() bool } // InterceptorsContainer defines an interceptors holder data type with basic functionality type InterceptorsContainer interface { - Get(key string) (Interceptor, error) - Add(key string, val Interceptor) error - AddMultiple(keys []string, interceptors []Interceptor) error - Replace(key string, val Interceptor) error - Remove(key string) - Len() int - IsInterfaceNil() bool + Get(key string) (Interceptor, error) + Add(key string, val Interceptor) error + AddMultiple(keys []string, interceptors []Interceptor) error + Replace(key string, val Interceptor) error + Remove(key string) + Len() int + IsInterfaceNil() bool } // InterceptorsContainerFactory defines the functionality to create an interceptors container type InterceptorsContainerFactory interface { - Create() (InterceptorsContainer, error) - IsInterfaceNil() bool + Create() (InterceptorsContainer, error) + IsInterfaceNil() bool } // PreProcessorsContainer defines an PreProcessors holder data type with basic functionality type PreProcessorsContainer interface { - Get(key block.Type) (PreProcessor, error) - Add(key block.Type, val PreProcessor) error - AddMultiple(keys []block.Type, preprocessors []PreProcessor) error - Replace(key block.Type, val PreProcessor) error - Remove(key block.Type) - Len() int - Keys() []block.Type - IsInterfaceNil() bool + Get(key block.Type) (PreProcessor, error) + Add(key block.Type, val PreProcessor) error + AddMultiple(keys []block.Type, preprocessors []PreProcessor) error + Replace(key block.Type, val PreProcessor) error + Remove(key block.Type) + Len() int + Keys() []block.Type + IsInterfaceNil() bool } // PreProcessorsContainerFactory defines the functionality to create an PreProcessors container type PreProcessorsContainerFactory interface { - Create() (PreProcessorsContainer, error) - IsInterfaceNil() bool + Create() (PreProcessorsContainer, error) + IsInterfaceNil() bool } // IntermediateProcessorContainer defines an IntermediateProcessor holder data type with basic functionality type 
IntermediateProcessorContainer interface { - Get(key block.Type) (IntermediateTransactionHandler, error) - Add(key block.Type, val IntermediateTransactionHandler) error - AddMultiple(keys []block.Type, preprocessors []IntermediateTransactionHandler) error - Replace(key block.Type, val IntermediateTransactionHandler) error - Remove(key block.Type) - Len() int - Keys() []block.Type - IsInterfaceNil() bool + Get(key block.Type) (IntermediateTransactionHandler, error) + Add(key block.Type, val IntermediateTransactionHandler) error + AddMultiple(keys []block.Type, preprocessors []IntermediateTransactionHandler) error + Replace(key block.Type, val IntermediateTransactionHandler) error + Remove(key block.Type) + Len() int + Keys() []block.Type + IsInterfaceNil() bool } // IntermediateProcessorsContainerFactory defines the functionality to create an IntermediateProcessors container type IntermediateProcessorsContainerFactory interface { - Create() (IntermediateProcessorContainer, error) - IsInterfaceNil() bool + Create() (IntermediateProcessorContainer, error) + IsInterfaceNil() bool } // VirtualMachinesContainer defines a virtual machine holder data type with basic functionality type VirtualMachinesContainer interface { - Get(key []byte) (vmcommon.VMExecutionHandler, error) - Add(key []byte, val vmcommon.VMExecutionHandler) error - AddMultiple(keys [][]byte, vms []vmcommon.VMExecutionHandler) error - Replace(key []byte, val vmcommon.VMExecutionHandler) error - Remove(key []byte) - Len() int - Keys() [][]byte - IsInterfaceNil() bool + Get(key []byte) (vmcommon.VMExecutionHandler, error) + Add(key []byte, val vmcommon.VMExecutionHandler) error + AddMultiple(keys [][]byte, vms []vmcommon.VMExecutionHandler) error + Replace(key []byte, val vmcommon.VMExecutionHandler) error + Remove(key []byte) + Len() int + Keys() [][]byte + IsInterfaceNil() bool } // VirtualMachinesContainerFactory defines the functionality to create a virtual machine container type VirtualMachinesContainerFactory interface { - Create() (VirtualMachinesContainer, error) - VMAccountsDB() *hooks.VMAccountsDB - IsInterfaceNil() bool + Create() (VirtualMachinesContainer, error) + VMAccountsDB() *hooks.VMAccountsDB + IsInterfaceNil() bool } // Interceptor defines what a data interceptor should do // It should also adhere to the p2p.MessageProcessor interface so it can wire to a p2p.Messenger type Interceptor interface { - ProcessReceivedMessage(message p2p.MessageP2P) error - IsInterfaceNil() bool + ProcessReceivedMessage(message p2p.MessageP2P) error + IsInterfaceNil() bool } // MessageHandler defines the functionality needed by structs to send data to other peers type MessageHandler interface { - ConnectedPeersOnTopic(topic string) []p2p.PeerID - SendToConnectedPeer(topic string, buff []byte, peerID p2p.PeerID) error - IsInterfaceNil() bool + ConnectedPeersOnTopic(topic string) []p2p.PeerID + SendToConnectedPeer(topic string, buff []byte, peerID p2p.PeerID) error + IsInterfaceNil() bool } // TopicHandler defines the functionality needed by structs to manage topics and message processors type TopicHandler interface { - HasTopic(name string) bool - CreateTopic(name string, createChannelForTopic bool) error - RegisterMessageProcessor(topic string, handler p2p.MessageProcessor) error + HasTopic(name string) bool + CreateTopic(name string, createChannelForTopic bool) error + RegisterMessageProcessor(topic string, handler p2p.MessageProcessor) error } // TopicMessageHandler defines the functionality needed by structs to manage topics, 
message processors and to send data // to other peers type TopicMessageHandler interface { - MessageHandler - TopicHandler + MessageHandler + TopicHandler } // DataPacker can split a large slice of byte slices in smaller packets type DataPacker interface { - PackDataInChunks(data [][]byte, limit int) ([][]byte, error) - IsInterfaceNil() bool + PackDataInChunks(data [][]byte, limit int) ([][]byte, error) + IsInterfaceNil() bool } // BlocksTracker defines the functionality to track all the notarised blocks type BlocksTracker interface { - UnnotarisedBlocks() []data.HeaderHandler - RemoveNotarisedBlocks(headerHandler data.HeaderHandler) error - AddBlock(headerHandler data.HeaderHandler) - SetBlockBroadcastRound(nonce uint64, round int64) - BlockBroadcastRound(nonce uint64) int64 - IsInterfaceNil() bool + UnnotarisedBlocks() []data.HeaderHandler + RemoveNotarisedBlocks(headerHandler data.HeaderHandler) error + AddBlock(headerHandler data.HeaderHandler) + SetBlockBroadcastRound(nonce uint64, round int64) + BlockBroadcastRound(nonce uint64) int64 + IsInterfaceNil() bool } // RequestHandler defines the methods through which request to data can be made type RequestHandler interface { - RequestHeaderByNonce(shardId uint32, nonce uint64) - RequestTransaction(shardId uint32, txHashes [][]byte) - RequestUnsignedTransactions(destShardID uint32, scrHashes [][]byte) - RequestRewardTransactions(destShardID uint32, txHashes [][]byte) - RequestMiniBlock(shardId uint32, miniblockHash []byte) - RequestHeader(shardId uint32, hash []byte) - IsInterfaceNil() bool + RequestHeaderByNonce(shardId uint32, nonce uint64) + RequestTransaction(shardId uint32, txHashes [][]byte) + RequestUnsignedTransactions(destShardID uint32, scrHashes [][]byte) + RequestRewardTransactions(destShardID uint32, txHashes [][]byte) + RequestMiniBlock(shardId uint32, miniblockHash []byte) + RequestHeader(shardId uint32, hash []byte) + IsInterfaceNil() bool } // ArgumentsParser defines the functionality to parse transaction data into arguments and code for smart contracts type ArgumentsParser interface { - GetArguments() ([]*big.Int, error) - GetCode() ([]byte, error) - GetFunction() (string, error) - ParseData(data string) error + GetArguments() ([]*big.Int, error) + GetCode() ([]byte, error) + GetFunction() (string, error) + ParseData(data string) error - CreateDataFromStorageUpdate(storageUpdates []*vmcommon.StorageUpdate) string - GetStorageUpdates(data string) ([]*vmcommon.StorageUpdate, error) - IsInterfaceNil() bool + CreateDataFromStorageUpdate(storageUpdates []*vmcommon.StorageUpdate) string + GetStorageUpdates(data string) ([]*vmcommon.StorageUpdate, error) + IsInterfaceNil() bool } // TemporaryAccountsHandler defines the functionality to create temporary accounts and pass to VM. 
// This holder will contain usually one account from shard X that calls a SC in shard Y // so when executing the code in shard Y, this impl will hold an ephemeral copy of the sender account from shard X type TemporaryAccountsHandler interface { - AddTempAccount(address []byte, balance *big.Int, nonce uint64) - CleanTempAccounts() - TempAccount(address []byte) state.AccountHandler - IsInterfaceNil() bool + AddTempAccount(address []byte, balance *big.Int, nonce uint64) + CleanTempAccounts() + TempAccount(address []byte) state.AccountHandler + IsInterfaceNil() bool } // BlockSizeThrottler defines the functionality of adapting the node to the network speed/latency when it should send a // block to its peers which should be received in a limited time frame type BlockSizeThrottler interface { - MaxItemsToAdd() uint32 - Add(round uint64, items uint32) - Succeed(round uint64) - ComputeMaxItems() - IsInterfaceNil() bool + MaxItemsToAdd() uint32 + Add(round uint64, items uint32) + Succeed(round uint64) + ComputeMaxItems() + IsInterfaceNil() bool } diff --git a/process/mock/poolsHolderMock.go b/process/mock/poolsHolderMock.go index 733cbca1ca0..f3f9a30c576 100644 --- a/process/mock/poolsHolderMock.go +++ b/process/mock/poolsHolderMock.go @@ -88,7 +88,7 @@ func (phm *PoolsHolderMock) SetUnsignedTransactions(scrs dataRetriever.ShardedDa } // IsInterfaceNil returns true if there is no value under the interface -func (phf *PoolsHolderFake) IsInterfaceNil() bool { +func (phf *PoolsHolderMock) IsInterfaceNil() bool { if phf == nil { return true } diff --git a/process/mock/rewardTxProcessorMock.go b/process/mock/rewardTxProcessorMock.go index 737fa2f7b38..1746f5108d9 100644 --- a/process/mock/rewardTxProcessorMock.go +++ b/process/mock/rewardTxProcessorMock.go @@ -1,17 +1,24 @@ package mock import ( - "github.com/ElrondNetwork/elrond-go/data/rewardTx" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" ) type RewardTxProcessorMock struct { - ProcessRewardTransactionCalled func(rTx *rewardTx.RewardTx) error + ProcessRewardTransactionCalled func(rTx *rewardTx.RewardTx) error } func (scrp *RewardTxProcessorMock) ProcessRewardTransaction(rTx *rewardTx.RewardTx) error { - if scrp.ProcessRewardTransactionCalled == nil { - return nil - } + if scrp.ProcessRewardTransactionCalled == nil { + return nil + } - return scrp.ProcessRewardTransactionCalled(rTx) + return scrp.ProcessRewardTransactionCalled(rTx) +} + +func (scrp *RewardTxProcessorMock) IsInterfaceNil() bool { + if scrp == nil { + return true + } + return false } diff --git a/process/mock/unsignedTxHandlerMock.go b/process/mock/unsignedTxHandlerMock.go index 7097c4a31e8..7e7175bdbff 100644 --- a/process/mock/unsignedTxHandlerMock.go +++ b/process/mock/unsignedTxHandlerMock.go @@ -51,3 +51,11 @@ func (ut *UnsignedTxHandlerMock) VerifyCreatedUTxs() error { } return ut.VerifyCreatedUTxsCalled() } + +// IsInterfaceNil returns true if there is no value under the interface +func (ut *UnsignedTxHandlerMock) IsInterfaceNil() bool { + if ut == nil { + return true + } + return false +} diff --git a/process/rewardTransaction/process.go b/process/rewardTransaction/process.go index e6b172730be..ad2fcc0d849 100644 --- a/process/rewardTransaction/process.go +++ b/process/rewardTransaction/process.go @@ -1,93 +1,101 @@ package rewardTransaction import ( - "math/big" + "math/big" - "github.com/ElrondNetwork/elrond-go/data/rewardTx" - "github.com/ElrondNetwork/elrond-go/data/state" - "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/sharding" 
+ "github.com/ElrondNetwork/elrond-go/data/rewardTx" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/sharding" ) type rewardTxProcessor struct { - accounts state.AccountsAdapter - adrConv state.AddressConverter - shardCoordinator sharding.Coordinator + accounts state.AccountsAdapter + adrConv state.AddressConverter + shardCoordinator sharding.Coordinator } // NewRewardTxProcessor creates a rewardTxProcessor instance // TODO: add unit tests func NewRewardTxProcessor( - accountsDB state.AccountsAdapter, - adrConv state.AddressConverter, - coordinator sharding.Coordinator, + accountsDB state.AccountsAdapter, + adrConv state.AddressConverter, + coordinator sharding.Coordinator, ) (*rewardTxProcessor, error) { - if accountsDB == nil { - return nil, process.ErrNilAccountsAdapter - } - if adrConv == nil { - return nil, process.ErrNilAddressConverter - } - if coordinator == nil { - return nil, process.ErrNilShardCoordinator - } - - return &rewardTxProcessor{ - accounts: accountsDB, - adrConv: adrConv, - shardCoordinator: coordinator, - }, nil + if accountsDB == nil { + return nil, process.ErrNilAccountsAdapter + } + if adrConv == nil { + return nil, process.ErrNilAddressConverter + } + if coordinator == nil { + return nil, process.ErrNilShardCoordinator + } + + return &rewardTxProcessor{ + accounts: accountsDB, + adrConv: adrConv, + shardCoordinator: coordinator, + }, nil } func (rtp *rewardTxProcessor) getAccountFromAddress(address []byte) (state.AccountHandler, error) { - adrSrc, err := rtp.adrConv.CreateAddressFromPublicKeyBytes(address) - if err != nil { - return nil, err - } - - shardForCurrentNode := rtp.shardCoordinator.SelfId() - shardForSrc := rtp.shardCoordinator.ComputeId(adrSrc) - if shardForCurrentNode != shardForSrc { - return nil, nil - } - - acnt, err := rtp.accounts.GetAccountWithJournal(adrSrc) - if err != nil { - return nil, err - } - - return acnt, nil + adrSrc, err := rtp.adrConv.CreateAddressFromPublicKeyBytes(address) + if err != nil { + return nil, err + } + + shardForCurrentNode := rtp.shardCoordinator.SelfId() + shardForSrc := rtp.shardCoordinator.ComputeId(adrSrc) + if shardForCurrentNode != shardForSrc { + return nil, nil + } + + acnt, err := rtp.accounts.GetAccountWithJournal(adrSrc) + if err != nil { + return nil, err + } + + return acnt, nil } // ProcessRewardTransaction updates the account state from the reward transaction func (rtp *rewardTxProcessor) ProcessRewardTransaction(rTx *rewardTx.RewardTx) error { - if rTx == nil { - return process.ErrNilRewardTransaction - } - - accHandler, err := rtp.getAccountFromAddress(rTx.RcvAddr) - if err != nil { - return err - } - if accHandler == nil || accHandler.IsInterfaceNil() { - return process.ErrNilSCDestAccount - } - - rewardAcc, ok := accHandler.(*state.Account) - if !ok { - return process.ErrWrongTypeAssertion - } - - if rTx.Value == nil { - return process.ErrNilValueFromRewardTransaction - } - - operation := big.NewInt(0) - operation = operation.Add(rTx.Value, rewardAcc.Balance) - err = rewardAcc.SetBalanceWithJournal(operation) - if err != nil { - return err - } - - return nil + if rTx == nil { + return process.ErrNilRewardTransaction + } + + accHandler, err := rtp.getAccountFromAddress(rTx.RcvAddr) + if err != nil { + return err + } + if accHandler == nil || accHandler.IsInterfaceNil() { + return process.ErrNilSCDestAccount + } + + rewardAcc, ok := accHandler.(*state.Account) + if !ok { + return process.ErrWrongTypeAssertion + } + 
+ if rTx.Value == nil { + return process.ErrNilValueFromRewardTransaction + } + + operation := big.NewInt(0) + operation = operation.Add(rTx.Value, rewardAcc.Balance) + err = rewardAcc.SetBalanceWithJournal(operation) + if err != nil { + return err + } + + return nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (rtp *rewardTxProcessor) IsInterfaceNil() bool { + if rtp == nil { + return true + } + return false } diff --git a/process/transaction/export_test.go b/process/transaction/export_test.go index 1ae97c9e1aa..8593e051f1f 100644 --- a/process/transaction/export_test.go +++ b/process/transaction/export_test.go @@ -1,42 +1,43 @@ package transaction import ( - "github.com/ElrondNetwork/elrond-go/data/state" - "github.com/ElrondNetwork/elrond-go/data/transaction" - "math/big" + "math/big" + + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/data/transaction" ) type TxProcessor *txProcessor func (txProc *txProcessor) GetAddresses(tx *transaction.Transaction) (adrSrc, adrDst state.AddressContainer, err error) { - return txProc.getAddresses(tx) + return txProc.getAddresses(tx) } func (txProc *txProcessor) GetAccounts(adrSrc, adrDst state.AddressContainer, ) (acntSrc, acntDst *state.Account, err error) { - return txProc.getAccounts(adrSrc, adrDst) + return txProc.getAccounts(adrSrc, adrDst) } func (txProc *txProcessor) CheckTxValues(tx *transaction.Transaction, acntSnd state.AccountHandler) error { - return txProc.checkTxValues(tx, acntSnd) + return txProc.checkTxValues(tx, acntSnd) } func (txProc *txProcessor) MoveBalances(acntSrc, acntDst *state.Account, value *big.Int) error { - return txProc.moveBalances(acntSrc, acntDst, value) + return txProc.moveBalances(acntSrc, acntDst, value) } func (txProc *txProcessor) IncreaseNonce(acntSrc *state.Account) error { - return txProc.increaseNonce(acntSrc) + return txProc.increaseNonce(acntSrc) } func (txProc *txProcessor) SetMinTxFee(minTxFee uint64) { - mutTxFee.Lock() - minTxFee = minTxFee - mutTxFee.Unlock() + mutTxFee.Lock() + minTxFee = minTxFee + mutTxFee.Unlock() } func (txProc *txProcessor) SetMinGasPrice(minGasPrice uint64) { - mutTxFee.Lock() - minGasPrice = minGasPrice - mutTxFee.Unlock() + mutTxFee.Lock() + minGasPrice = minGasPrice + mutTxFee.Unlock() } From 47298c4739b74de53b169ebedba0985f80a182e6 Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Mon, 9 Sep 2019 15:16:03 +0300 Subject: [PATCH 088/234] process: add nil checks rewardsHandler --- process/block/preprocess/rewardsHandler.go | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/process/block/preprocess/rewardsHandler.go b/process/block/preprocess/rewardsHandler.go index 0e70271ceca..ebccc90bec7 100644 --- a/process/block/preprocess/rewardsHandler.go +++ b/process/block/preprocess/rewardsHandler.go @@ -1,7 +1,6 @@ package preprocess import ( - "github.com/ElrondNetwork/elrond-go/sharding" "math/big" "sync" @@ -12,6 +11,7 @@ import ( "github.com/ElrondNetwork/elrond-go/hashing" "github.com/ElrondNetwork/elrond-go/marshal" "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/sharding" ) const communityPercentage = 0.1 // 1 = 100%, 0 = 0% @@ -39,13 +39,16 @@ func NewRewardTxHandler( hasher hashing.Hasher, marshalizer marshal.Marshalizer, ) (*rewardsHandler, error) { - if address == nil { + if address == nil || address.IsInterfaceNil() { return nil, process.ErrNilSpecialAddressHandler } - if hasher == nil { + if shardCoordinator == nil || 
shardCoordinator.IsInterfaceNil() { + return nil, process.ErrNilShardCoordinator + } + if hasher == nil || hasher.IsInterfaceNil() { return nil, process.ErrNilHasher } - if marshalizer == nil { + if marshalizer == nil || marshalizer.IsInterfaceNil() { return nil, process.ErrNilMarshalizer } From 71efc22bd65c4cf72f174e0ad84f070b1b497a82 Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Mon, 9 Sep 2019 19:02:11 +0300 Subject: [PATCH 089/234] process: fix rewardsHandler unit tests --- process/block/preprocess/export_test.go | 54 +- process/block/preprocess/rewardsHandler.go | 594 ++++++++-------- .../block/preprocess/rewardsHandler_test.go | 650 ++++++++++-------- process/mock/specialAddressHandlerMock.go | 95 +-- 4 files changed, 729 insertions(+), 664 deletions(-) diff --git a/process/block/preprocess/export_test.go b/process/block/preprocess/export_test.go index 0c628d58857..a51829514ce 100644 --- a/process/block/preprocess/export_test.go +++ b/process/block/preprocess/export_test.go @@ -1,53 +1,53 @@ package preprocess func (txs *transactions) ReceivedTransaction(txHash []byte) { - txs.receivedTransaction(txHash) + txs.receivedTransaction(txHash) } func (txs *transactions) AddTxHashToRequestedList(txHash []byte) { - txs.txsForCurrBlock.mutTxsForBlock.Lock() - defer txs.txsForCurrBlock.mutTxsForBlock.Unlock() + txs.txsForCurrBlock.mutTxsForBlock.Lock() + defer txs.txsForCurrBlock.mutTxsForBlock.Unlock() - if txs.txsForCurrBlock.txHashAndInfo == nil { - txs.txsForCurrBlock.txHashAndInfo = make(map[string]*txInfo) - } - txs.txsForCurrBlock.txHashAndInfo[string(txHash)] = &txInfo{txShardInfo: &txShardInfo{}} + if txs.txsForCurrBlock.txHashAndInfo == nil { + txs.txsForCurrBlock.txHashAndInfo = make(map[string]*txInfo) + } + txs.txsForCurrBlock.txHashAndInfo[string(txHash)] = &txInfo{txShardInfo: &txShardInfo{}} } func (txs *transactions) IsTxHashRequested(txHash []byte) bool { - txs.txsForCurrBlock.mutTxsForBlock.Lock() - defer txs.txsForCurrBlock.mutTxsForBlock.Unlock() + txs.txsForCurrBlock.mutTxsForBlock.Lock() + defer txs.txsForCurrBlock.mutTxsForBlock.Unlock() - return txs.txsForCurrBlock.txHashAndInfo[string(txHash)].tx == nil || - txs.txsForCurrBlock.txHashAndInfo[string(txHash)].tx.IsInterfaceNil() + return txs.txsForCurrBlock.txHashAndInfo[string(txHash)].tx == nil || + txs.txsForCurrBlock.txHashAndInfo[string(txHash)].tx.IsInterfaceNil() } func (txs *transactions) SetMissingTxs(missingTxs int) { - txs.txsForCurrBlock.mutTxsForBlock.Lock() - txs.txsForCurrBlock.missingTxs = missingTxs - txs.txsForCurrBlock.mutTxsForBlock.Unlock() + txs.txsForCurrBlock.mutTxsForBlock.Lock() + txs.txsForCurrBlock.missingTxs = missingTxs + txs.txsForCurrBlock.mutTxsForBlock.Unlock() } func (scr *smartContractResults) AddScrHashToRequestedList(txHash []byte) { - scr.scrForBlock.mutTxsForBlock.Lock() - defer scr.scrForBlock.mutTxsForBlock.Unlock() + scr.scrForBlock.mutTxsForBlock.Lock() + defer scr.scrForBlock.mutTxsForBlock.Unlock() - if scr.scrForBlock.txHashAndInfo == nil { - scr.scrForBlock.txHashAndInfo = make(map[string]*txInfo) - } - scr.scrForBlock.txHashAndInfo[string(txHash)] = &txInfo{txShardInfo: &txShardInfo{}} + if scr.scrForBlock.txHashAndInfo == nil { + scr.scrForBlock.txHashAndInfo = make(map[string]*txInfo) + } + scr.scrForBlock.txHashAndInfo[string(txHash)] = &txInfo{txShardInfo: &txShardInfo{}} } func (scr *smartContractResults) IsScrHashRequested(txHash []byte) bool { - scr.scrForBlock.mutTxsForBlock.Lock() - defer scr.scrForBlock.mutTxsForBlock.Unlock() + 
scr.scrForBlock.mutTxsForBlock.Lock() + defer scr.scrForBlock.mutTxsForBlock.Unlock() - return scr.scrForBlock.txHashAndInfo[string(txHash)].tx == nil || - scr.scrForBlock.txHashAndInfo[string(txHash)].tx.IsInterfaceNil() + return scr.scrForBlock.txHashAndInfo[string(txHash)].tx == nil || + scr.scrForBlock.txHashAndInfo[string(txHash)].tx.IsInterfaceNil() } func (scr *smartContractResults) SetMissingScr(missingTxs int) { - scr.scrForBlock.mutTxsForBlock.Lock() - scr.scrForBlock.missingTxs = missingTxs - scr.scrForBlock.mutTxsForBlock.Unlock() + scr.scrForBlock.mutTxsForBlock.Lock() + scr.scrForBlock.missingTxs = missingTxs + scr.scrForBlock.mutTxsForBlock.Unlock() } diff --git a/process/block/preprocess/rewardsHandler.go b/process/block/preprocess/rewardsHandler.go index 3182b4e818d..5e700a16106 100644 --- a/process/block/preprocess/rewardsHandler.go +++ b/process/block/preprocess/rewardsHandler.go @@ -1,19 +1,19 @@ package preprocess import ( - "math/big" - "sync" - - "github.com/ElrondNetwork/elrond-go/core" - "github.com/ElrondNetwork/elrond-go/data" - "github.com/ElrondNetwork/elrond-go/data/block" - "github.com/ElrondNetwork/elrond-go/data/rewardTx" - "github.com/ElrondNetwork/elrond-go/data/state" - "github.com/ElrondNetwork/elrond-go/dataRetriever" - "github.com/ElrondNetwork/elrond-go/hashing" - "github.com/ElrondNetwork/elrond-go/marshal" - "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/sharding" + "math/big" + "sync" + + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/sharding" ) // MinGasPrice is the minimal gas price to be paid for any transaction @@ -24,392 +24,398 @@ var MinGasPrice = uint64(0) var MinTxFee = uint64(0) const communityPercentage = 0.1 // 1 = 100%, 0 = 0% -const leaderPercentage = 0.4 // 1 = 100%, 0 = 0% -const burnPercentage = 0.5 // 1 = 100%, 0 = 0% +const leaderPercentage = 0.5 // 1 = 100%, 0 = 0% +const burnPercentage = 0.4 // 1 = 100%, 0 = 0% // TODO: Replace with valid reward value var rewardValue = big.NewInt(1000) type rewardsHandler struct { - address process.SpecialAddressHandler - hasher hashing.Hasher - marshalizer marshal.Marshalizer - shardCoordinator sharding.Coordinator - adrConv state.AddressConverter - store dataRetriever.StorageService - rewardTxPool dataRetriever.ShardedDataCacherNotifier - - mutGenRewardTxs sync.RWMutex - protocolRewards []data.TransactionHandler - feeRewards []data.TransactionHandler - - mut sync.Mutex - accumulatedFees *big.Int - rewardTxsForBlock map[string]*rewardTx.RewardTx + address process.SpecialAddressHandler + hasher hashing.Hasher + marshalizer marshal.Marshalizer + shardCoordinator sharding.Coordinator + adrConv state.AddressConverter + store dataRetriever.StorageService + rewardTxPool dataRetriever.ShardedDataCacherNotifier + + mutGenRewardTxs sync.RWMutex + protocolRewards []data.TransactionHandler + feeRewards []data.TransactionHandler + + mut sync.Mutex + accumulatedFees *big.Int + rewardTxsForBlock map[string]*rewardTx.RewardTx } // NewRewardTxHandler constructor for the reward transaction handler func NewRewardTxHandler( - address 
process.SpecialAddressHandler, - hasher hashing.Hasher, - marshalizer marshal.Marshalizer, - shardCoordinator sharding.Coordinator, - adrConv state.AddressConverter, - store dataRetriever.StorageService, - rewardTxPool dataRetriever.ShardedDataCacherNotifier, + address process.SpecialAddressHandler, + hasher hashing.Hasher, + marshalizer marshal.Marshalizer, + shardCoordinator sharding.Coordinator, + adrConv state.AddressConverter, + store dataRetriever.StorageService, + rewardTxPool dataRetriever.ShardedDataCacherNotifier, ) (*rewardsHandler, error) { - if address == nil { - return nil, process.ErrNilSpecialAddressHandler - } - if shardCoordinator == nil { - return nil, process.ErrNilShardCoordinator - } - if hasher == nil { - return nil, process.ErrNilHasher - } - if marshalizer == nil { - return nil, process.ErrNilMarshalizer - } - if store == nil { - return nil, process.ErrNilStorage - } - - rtxh := &rewardsHandler{ - address: address, - shardCoordinator: shardCoordinator, - adrConv: adrConv, - hasher: hasher, - marshalizer: marshalizer, - store: store, - rewardTxPool: rewardTxPool, - } - - rtxh.accumulatedFees = big.NewInt(0) - rtxh.rewardTxsForBlock = make(map[string]*rewardTx.RewardTx) - - return rtxh, nil + if address == nil || address.IsInterfaceNil() { + return nil, process.ErrNilSpecialAddressHandler + } + if hasher == nil || hasher.IsInterfaceNil() { + return nil, process.ErrNilHasher + } + if marshalizer == nil || marshalizer.IsInterfaceNil() { + return nil, process.ErrNilMarshalizer + } + if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { + return nil, process.ErrNilShardCoordinator + } + if adrConv == nil || adrConv.IsInterfaceNil() { + return nil, process.ErrNilAddressConverter + } + if store == nil || store.IsInterfaceNil() { + return nil, process.ErrNilStorage + } + if rewardTxPool == nil || rewardTxPool.IsInterfaceNil() { + return nil, process.ErrNilRewardTxDataPool + } + + rtxh := &rewardsHandler{ + address: address, + shardCoordinator: shardCoordinator, + adrConv: adrConv, + hasher: hasher, + marshalizer: marshalizer, + store: store, + rewardTxPool: rewardTxPool, + } + + rtxh.accumulatedFees = big.NewInt(0) + rtxh.rewardTxsForBlock = make(map[string]*rewardTx.RewardTx) + + return rtxh, nil } // SaveCurrentIntermediateTxToStorage saves current cached data into storage - already saved for txs func (rtxh *rewardsHandler) SaveCurrentIntermediateTxToStorage() error { - rtxh.mut.Lock() - defer rtxh.mut.Unlock() - - for _, rTx := range rtxh.rewardTxsForBlock { - buff, err := rtxh.marshalizer.Marshal(rTx) - if err != nil { - return err - } - - errNotCritical := rtxh.store.Put(dataRetriever.RewardTransactionUnit, rtxh.hasher.Compute(string(buff)), buff) - if errNotCritical != nil { - log.Error(errNotCritical.Error()) - } - } - - return nil + rtxh.mut.Lock() + defer rtxh.mut.Unlock() + + for _, rTx := range rtxh.rewardTxsForBlock { + buff, err := rtxh.marshalizer.Marshal(rTx) + if err != nil { + return err + } + + errNotCritical := rtxh.store.Put(dataRetriever.RewardTransactionUnit, rtxh.hasher.Compute(string(buff)), buff) + if errNotCritical != nil { + log.Error(errNotCritical.Error()) + } + } + + return nil } // AddIntermediateTransactions adds intermediate transactions to local cache func (rtxh *rewardsHandler) AddIntermediateTransactions(txs []data.TransactionHandler) error { - rtxh.mut.Lock() - defer rtxh.mut.Unlock() + rtxh.mut.Lock() + defer rtxh.mut.Unlock() - for i := 0; i < len(txs); i++ { - addedRewardTx, ok := txs[i].(*rewardTx.RewardTx) - if !ok { - 
return process.ErrWrongTypeAssertion - } + for i := 0; i < len(txs); i++ { + addedRewardTx, ok := txs[i].(*rewardTx.RewardTx) + if !ok { + return process.ErrWrongTypeAssertion + } - if addedRewardTx.ShardId != rtxh.shardCoordinator.SelfId() { - continue - } + if addedRewardTx.ShardId != rtxh.shardCoordinator.SelfId() { + continue + } - rewardTxHash, err := core.CalculateHash(rtxh.marshalizer, rtxh.hasher, txs[i]) - if err != nil { - return err - } + rewardTxHash, err := core.CalculateHash(rtxh.marshalizer, rtxh.hasher, txs[i]) + if err != nil { + return err + } - rtxh.rewardTxsForBlock[string(rewardTxHash)] = addedRewardTx - } + rtxh.rewardTxsForBlock[string(rewardTxHash)] = addedRewardTx + } - return nil + return nil } func (rtxh *rewardsHandler) getShardIdsFromAddress(addr []byte) (uint32, error) { - address, err := rtxh.adrConv.CreateAddressFromPublicKeyBytes(addr) - if err != nil { - return rtxh.shardCoordinator.NumberOfShards(), err - } - shardId := rtxh.shardCoordinator.ComputeId(address) + address, err := rtxh.adrConv.CreateAddressFromPublicKeyBytes(addr) + if err != nil { + return rtxh.shardCoordinator.NumberOfShards(), err + } + shardId := rtxh.shardCoordinator.ComputeId(address) - return shardId, nil + return shardId, nil } // CreateAllInterMiniBlocks creates miniblocks from process transactions func (rtxh *rewardsHandler) CreateAllInterMiniBlocks() map[uint32]*block.MiniBlock { - rtxh.mutGenRewardTxs.Lock() - calculatedRewardTxs := make([]data.TransactionHandler, 0) - rtxh.feeRewards = rtxh.createRewardFromFees() - rtxh.addTransactionsToPool(rtxh.feeRewards) - calculatedRewardTxs = append(calculatedRewardTxs, rtxh.protocolRewards...) - calculatedRewardTxs = append(calculatedRewardTxs, rtxh.feeRewards...) - rtxh.mutGenRewardTxs.Unlock() + rtxh.mutGenRewardTxs.Lock() + calculatedRewardTxs := make([]data.TransactionHandler, 0) + rtxh.feeRewards = rtxh.createRewardFromFees() + rtxh.addTransactionsToPool(rtxh.feeRewards) + calculatedRewardTxs = append(calculatedRewardTxs, rtxh.protocolRewards...) + calculatedRewardTxs = append(calculatedRewardTxs, rtxh.feeRewards...) 
+ rtxh.mutGenRewardTxs.Unlock() - miniBlocks := rtxh.miniblocksFromRewardTxs(calculatedRewardTxs) + miniBlocks := rtxh.miniblocksFromRewardTxs(calculatedRewardTxs) - return miniBlocks + return miniBlocks } func (rtxh *rewardsHandler) addTransactionsToPool(rewardTxs []data.TransactionHandler) { - for _, rTx := range rewardTxs { - dstShId, err := rtxh.address.ShardIdForAddress(rTx.GetRecvAddress()) - if err != nil { - log.Debug(err.Error()) - } - - txHash, err := core.CalculateHash(rtxh.marshalizer, rtxh.hasher, rTx) - if err != nil { - log.Debug(err.Error()) - } - - // add the reward transaction to the the pool so that the processor can find it - cacheId := process.ShardCacherIdentifier(rtxh.shardCoordinator.SelfId(), dstShId) - rtxh.rewardTxPool.AddData(txHash, rTx, cacheId) - } + for _, rTx := range rewardTxs { + dstShId, err := rtxh.address.ShardIdForAddress(rTx.GetRecvAddress()) + if err != nil { + log.Debug(err.Error()) + } + + txHash, err := core.CalculateHash(rtxh.marshalizer, rtxh.hasher, rTx) + if err != nil { + log.Debug(err.Error()) + } + + // add the reward transaction to the the pool so that the processor can find it + cacheId := process.ShardCacherIdentifier(rtxh.shardCoordinator.SelfId(), dstShId) + rtxh.rewardTxPool.AddData(txHash, rTx, cacheId) + } } func (rtxh *rewardsHandler) miniblocksFromRewardTxs( - rewardTxs []data.TransactionHandler, + rewardTxs []data.TransactionHandler, ) map[uint32]*block.MiniBlock { - miniBlocks := make(map[uint32]*block.MiniBlock, 0) - - for _, rTx := range rewardTxs { - dstShId, err := rtxh.address.ShardIdForAddress(rTx.GetRecvAddress()) - if err != nil { - log.Debug(err.Error()) - continue - } - - txHash, err := core.CalculateHash(rtxh.marshalizer, rtxh.hasher, rTx) - if err != nil { - log.Debug(err.Error()) - continue - } - - var ok bool - var mb *block.MiniBlock - if mb, ok = miniBlocks[dstShId]; !ok { - mb = &block.MiniBlock{ - ReceiverShardID: dstShId, - } - } - - mb.TxHashes = append(mb.TxHashes, txHash) - miniBlocks[dstShId] = mb - } - - return miniBlocks + miniBlocks := make(map[uint32]*block.MiniBlock, 0) + + for _, rTx := range rewardTxs { + dstShId, err := rtxh.address.ShardIdForAddress(rTx.GetRecvAddress()) + if err != nil { + log.Debug(err.Error()) + continue + } + + txHash, err := core.CalculateHash(rtxh.marshalizer, rtxh.hasher, rTx) + if err != nil { + log.Debug(err.Error()) + continue + } + + var ok bool + var mb *block.MiniBlock + if mb, ok = miniBlocks[dstShId]; !ok { + mb = &block.MiniBlock{ + ReceiverShardID: dstShId, + } + } + + mb.TxHashes = append(mb.TxHashes, txHash) + miniBlocks[dstShId] = mb + } + + return miniBlocks } // VerifyInterMiniBlocks verifies if transaction fees were correctly handled for the block func (rtxh *rewardsHandler) VerifyInterMiniBlocks(body block.Body) error { - err := rtxh.verifyCreatedRewardsTxs() - return err + err := rtxh.verifyCreatedRewardsTxs() + return err } // CreateBlockStarted does the cleanup before creating a new block func (rtxh *rewardsHandler) CreateBlockStarted() { - rtxh.cleanCachedData() - rewardTxs := rtxh.createProtocolRewards() - rtxh.addTransactionsToPool(rewardTxs) + rtxh.cleanCachedData() + rewardTxs := rtxh.createProtocolRewards() + rtxh.addTransactionsToPool(rewardTxs) } // CreateMarshalizedData creates the marshalized data for broadcasting purposes func (rtxh *rewardsHandler) CreateMarshalizedData(txHashes [][]byte) ([][]byte, error) { - rtxh.mut.Lock() - defer rtxh.mut.Unlock() - - marshaledTxs := make([][]byte, 0) - for _, txHash := range txHashes { - rTx, ok := 
rtxh.rewardTxsForBlock[string(txHash)] - if !ok { - return nil, process.ErrRewardTxNotFound - } - - marshaledTx, err := rtxh.marshalizer.Marshal(rTx) - if err != nil { - return nil, process.ErrMarshalWithoutSuccess - } - marshaledTxs = append(marshaledTxs, marshaledTx) - } - - return marshaledTxs, nil + rtxh.mut.Lock() + defer rtxh.mut.Unlock() + + marshaledTxs := make([][]byte, 0) + for _, txHash := range txHashes { + rTx, ok := rtxh.rewardTxsForBlock[string(txHash)] + if !ok { + return nil, process.ErrRewardTxNotFound + } + + marshaledTx, err := rtxh.marshalizer.Marshal(rTx) + if err != nil { + return nil, process.ErrMarshalWithoutSuccess + } + marshaledTxs = append(marshaledTxs, marshaledTx) + } + + return marshaledTxs, nil } // ProcessTransactionFee adds the tx cost to the accumulated amount func (rtxh *rewardsHandler) ProcessTransactionFee(cost *big.Int) { - if cost == nil { - log.Debug(process.ErrNilValue.Error()) - return - } - - rtxh.mut.Lock() - rtxh.accumulatedFees = rtxh.accumulatedFees.Add(rtxh.accumulatedFees, cost) - rtxh.mut.Unlock() + if cost == nil { + log.Debug(process.ErrNilValue.Error()) + return + } + + rtxh.mut.Lock() + rtxh.accumulatedFees = rtxh.accumulatedFees.Add(rtxh.accumulatedFees, cost) + rtxh.mut.Unlock() } // cleanCachedData deletes the cached data func (rtxh *rewardsHandler) cleanCachedData() { - rtxh.mut.Lock() - rtxh.accumulatedFees = big.NewInt(0) - rtxh.rewardTxsForBlock = make(map[string]*rewardTx.RewardTx) - rtxh.mut.Unlock() - - rtxh.mutGenRewardTxs.Lock() - rtxh.feeRewards = make([]data.TransactionHandler, 0) - rtxh.protocolRewards = make([]data.TransactionHandler, 0) - rtxh.mutGenRewardTxs.Unlock() + rtxh.mut.Lock() + rtxh.accumulatedFees = big.NewInt(0) + rtxh.rewardTxsForBlock = make(map[string]*rewardTx.RewardTx) + rtxh.mut.Unlock() + + rtxh.mutGenRewardTxs.Lock() + rtxh.feeRewards = make([]data.TransactionHandler, 0) + rtxh.protocolRewards = make([]data.TransactionHandler, 0) + rtxh.mutGenRewardTxs.Unlock() } func getPercentageOfValue(value *big.Int, percentage float64) *big.Int { - x := new(big.Float).SetInt(value) - y := big.NewFloat(percentage) + x := new(big.Float).SetInt(value) + y := big.NewFloat(percentage) - z := new(big.Float).Mul(x, y) + z := new(big.Float).Mul(x, y) - op := big.NewInt(0) - result, _ := z.Int(op) + op := big.NewInt(0) + result, _ := z.Int(op) - return result + return result } func (rtxh *rewardsHandler) createLeaderTx() *rewardTx.RewardTx { - currTx := &rewardTx.RewardTx{} + currTx := &rewardTx.RewardTx{} - currTx.Value = getPercentageOfValue(rtxh.accumulatedFees, leaderPercentage) - currTx.RcvAddr = rtxh.address.LeaderAddress() - currTx.ShardId = rtxh.shardCoordinator.SelfId() - currTx.Epoch = rtxh.address.Epoch() - currTx.Round = rtxh.address.Round() + currTx.Value = getPercentageOfValue(rtxh.accumulatedFees, leaderPercentage) + currTx.RcvAddr = rtxh.address.LeaderAddress() + currTx.ShardId = rtxh.shardCoordinator.SelfId() + currTx.Epoch = rtxh.address.Epoch() + currTx.Round = rtxh.address.Round() - return currTx + return currTx } func (rtxh *rewardsHandler) createBurnTx() *rewardTx.RewardTx { - currTx := &rewardTx.RewardTx{} + currTx := &rewardTx.RewardTx{} - currTx.Value = getPercentageOfValue(rtxh.accumulatedFees, burnPercentage) - currTx.RcvAddr = rtxh.address.BurnAddress() - currTx.ShardId = rtxh.shardCoordinator.SelfId() - currTx.Epoch = rtxh.address.Epoch() - currTx.Round = rtxh.address.Round() + currTx.Value = getPercentageOfValue(rtxh.accumulatedFees, burnPercentage) + currTx.RcvAddr = 
rtxh.address.BurnAddress() + currTx.ShardId = rtxh.shardCoordinator.SelfId() + currTx.Epoch = rtxh.address.Epoch() + currTx.Round = rtxh.address.Round() - return currTx + return currTx } func (rtxh *rewardsHandler) createCommunityTx() *rewardTx.RewardTx { - currTx := &rewardTx.RewardTx{} + currTx := &rewardTx.RewardTx{} - currTx.Value = getPercentageOfValue(rtxh.accumulatedFees, communityPercentage) - currTx.RcvAddr = rtxh.address.ElrondCommunityAddress() - currTx.ShardId = rtxh.shardCoordinator.SelfId() - currTx.Epoch = rtxh.address.Epoch() - currTx.Round = rtxh.address.Round() + currTx.Value = getPercentageOfValue(rtxh.accumulatedFees, communityPercentage) + currTx.RcvAddr = rtxh.address.ElrondCommunityAddress() + currTx.ShardId = rtxh.shardCoordinator.SelfId() + currTx.Epoch = rtxh.address.Epoch() + currTx.Round = rtxh.address.Round() - return currTx + return currTx } // createRewardFromFees creates the reward transactions from accumulated fees // According to economic paper, out of the block fees 50% are burned, 40% go to the // leader and 10% go to Elrond community fund. func (rtxh *rewardsHandler) createRewardFromFees() []data.TransactionHandler { - rtxh.mut.Lock() - defer rtxh.mut.Unlock() + rtxh.mut.Lock() + defer rtxh.mut.Unlock() - if rtxh.accumulatedFees.Cmp(big.NewInt(1)) < 0 { - rtxh.accumulatedFees = big.NewInt(0) - return nil - } + if rtxh.accumulatedFees.Cmp(big.NewInt(1)) < 0 { + rtxh.accumulatedFees = big.NewInt(0) + return nil + } - leaderTx := rtxh.createLeaderTx() - communityTx := rtxh.createCommunityTx() - burnTx := rtxh.createBurnTx() + leaderTx := rtxh.createLeaderTx() + communityTx := rtxh.createCommunityTx() + burnTx := rtxh.createBurnTx() - currFeeTxs := make([]data.TransactionHandler, 0) - currFeeTxs = append(currFeeTxs, leaderTx, communityTx, burnTx) + currFeeTxs := make([]data.TransactionHandler, 0) + currFeeTxs = append(currFeeTxs, leaderTx, communityTx, burnTx) - return currFeeTxs + return currFeeTxs } // createProtocolRewards creates the protocol reward transactions func (rtxh *rewardsHandler) createProtocolRewards() []data.TransactionHandler { - consensusRewardAddresses := rtxh.address.ConsensusRewardAddresses() + consensusRewardAddresses := rtxh.address.ConsensusRewardAddresses() - consensusRewardTxs := make([]data.TransactionHandler, 0) - for _, address := range consensusRewardAddresses { - rTx := &rewardTx.RewardTx{} - rTx.Value = rewardValue - rTx.RcvAddr = []byte(address) - rTx.ShardId = rtxh.shardCoordinator.SelfId() - rTx.Epoch = rtxh.address.Epoch() - rTx.Round = rtxh.address.Round() + consensusRewardTxs := make([]data.TransactionHandler, 0) + for _, address := range consensusRewardAddresses { + rTx := &rewardTx.RewardTx{} + rTx.Value = rewardValue + rTx.RcvAddr = []byte(address) + rTx.ShardId = rtxh.shardCoordinator.SelfId() + rTx.Epoch = rtxh.address.Epoch() + rTx.Round = rtxh.address.Round() - consensusRewardTxs = append(consensusRewardTxs, rTx) - } + consensusRewardTxs = append(consensusRewardTxs, rTx) + } - rtxh.mutGenRewardTxs.Lock() - rtxh.protocolRewards = consensusRewardTxs - rtxh.mutGenRewardTxs.Unlock() + rtxh.mutGenRewardTxs.Lock() + rtxh.protocolRewards = consensusRewardTxs + rtxh.mutGenRewardTxs.Unlock() - return consensusRewardTxs + return consensusRewardTxs } // VerifyCreatedRewardsTxs verifies if the calculated rewards transactions and the block reward transactions are the same func (rtxh *rewardsHandler) verifyCreatedRewardsTxs() error { - calculatedRewardTxs := make([]data.TransactionHandler, 0) - rtxh.mutGenRewardTxs.RLock() - 
calculatedRewardTxs = append(calculatedRewardTxs, rtxh.protocolRewards...) - calculatedRewardTxs = append(calculatedRewardTxs, rtxh.feeRewards...) - rtxh.mutGenRewardTxs.RUnlock() - - rtxh.mut.Lock() - defer rtxh.mut.Unlock() - - totalFeesFromBlock := big.NewInt(0) - for _, rTx := range rtxh.rewardTxsForBlock { - totalFeesFromBlock = totalFeesFromBlock.Add(totalFeesFromBlock, rTx.GetValue()) - } - - totalCalculatedFees := big.NewInt(0) - for _, value := range calculatedRewardTxs { - totalCalculatedFees = totalCalculatedFees.Add(totalCalculatedFees, value.GetValue()) - - rewardTxHash, err := core.CalculateHash(rtxh.marshalizer, rtxh.hasher, value) - if err != nil { - return err - } - - txFromBlock, ok := rtxh.rewardTxsForBlock[string(rewardTxHash)] - if !ok { - return process.ErrRewardTxNotFound - } - if txFromBlock.GetValue().Cmp(value.GetValue()) != 0 { - return process.ErrRewardTxsDoNotMatch - } - } - - if totalCalculatedFees.Cmp(totalFeesFromBlock) != 0 { - return process.ErrTotalTxsFeesDoNotMatch - } - - return nil + calculatedRewardTxs := make([]data.TransactionHandler, 0) + rtxh.mutGenRewardTxs.RLock() + calculatedRewardTxs = append(calculatedRewardTxs, rtxh.protocolRewards...) + calculatedRewardTxs = append(calculatedRewardTxs, rtxh.feeRewards...) + rtxh.mutGenRewardTxs.RUnlock() + + rtxh.mut.Lock() + defer rtxh.mut.Unlock() + + totalFeesFromBlock := big.NewInt(0) + for _, rTx := range rtxh.rewardTxsForBlock { + totalFeesFromBlock = totalFeesFromBlock.Add(totalFeesFromBlock, rTx.GetValue()) + } + + totalCalculatedFees := big.NewInt(0) + for _, value := range calculatedRewardTxs { + totalCalculatedFees = totalCalculatedFees.Add(totalCalculatedFees, value.GetValue()) + + rewardTxHash, err := core.CalculateHash(rtxh.marshalizer, rtxh.hasher, value) + if err != nil { + return err + } + + txFromBlock, ok := rtxh.rewardTxsForBlock[string(rewardTxHash)] + if !ok { + return process.ErrRewardTxNotFound + } + if txFromBlock.GetValue().Cmp(value.GetValue()) != 0 { + return process.ErrRewardTxsDoNotMatch + } + } + + if totalCalculatedFees.Cmp(totalFeesFromBlock) != 0 { + return process.ErrTotalTxsFeesDoNotMatch + } + + return nil } // IsInterfaceNil returns true if there is no value under the interface func (rtxh *rewardsHandler) IsInterfaceNil() bool { - if rtxh == nil { - return true - } - return false + if rtxh == nil { + return true + } + return false } diff --git a/process/block/preprocess/rewardsHandler_test.go b/process/block/preprocess/rewardsHandler_test.go index 81cc957d4d5..40fde2482f0 100644 --- a/process/block/preprocess/rewardsHandler_test.go +++ b/process/block/preprocess/rewardsHandler_test.go @@ -1,346 +1,388 @@ package preprocess import ( - "github.com/ElrondNetwork/elrond-go/data" - "math/big" - "testing" - - "github.com/ElrondNetwork/elrond-go/data/rewardTx" - "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/process/mock" - "github.com/stretchr/testify/assert" + "github.com/ElrondNetwork/elrond-go/data" + "math/big" + "testing" + + "github.com/ElrondNetwork/elrond-go/data/rewardTx" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/mock" + "github.com/stretchr/testify/assert" ) -func TestNewRewardTxHandler_NilSpecialAddress(t *testing.T) { - t.Parallel() - - tdp := initDataPool() - th, err := NewRewardTxHandler( - nil, - &mock.HasherMock{}, - &mock.MarshalizerMock{}, - mock.NewMultiShardsCoordinatorMock(3), - &mock.AddressConverterMock{}, - &mock.ChainStorerMock{}, - tdp.RewardTransactions(), - ) - - 
assert.Nil(t, th) - assert.Equal(t, process.ErrNilSpecialAddressHandler, err) +func TestNewRewardTxHandler_NilSpecialAddressShouldErr(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + th, err := NewRewardTxHandler( + nil, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AddressConverterMock{}, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), + ) + + assert.Nil(t, th) + assert.Equal(t, process.ErrNilSpecialAddressHandler, err) +} + +func TestNewRewardTxHandler_NilHasher(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + th, err := NewRewardTxHandler( + &mock.SpecialAddressHandlerMock{}, + nil, + &mock.MarshalizerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AddressConverterMock{}, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), + ) + + assert.Nil(t, th) + assert.Equal(t, process.ErrNilHasher, err) +} + +func TestNewRewardTxHandler_NilMarshalizer(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + th, err := NewRewardTxHandler( + &mock.SpecialAddressHandlerMock{}, + &mock.HasherMock{}, + nil, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AddressConverterMock{}, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), + ) + + assert.Nil(t, th) + assert.Equal(t, process.ErrNilMarshalizer, err) } func TestNewRewardTxHandler_NilShardCoordinator(t *testing.T) { - t.Parallel() - - tdp := initDataPool() - th, err := NewRewardTxHandler( - &mock.SpecialAddressHandlerMock{}, - &mock.HasherMock{}, - &mock.MarshalizerMock{}, - nil, - &mock.AddressConverterMock{}, - &mock.ChainStorerMock{}, - tdp.RewardTransactions(), - ) - - assert.Nil(t, th) - assert.Equal(t, process.ErrNilShardCoordinator, err) + t.Parallel() + + tdp := initDataPool() + th, err := NewRewardTxHandler( + &mock.SpecialAddressHandlerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + nil, + &mock.AddressConverterMock{}, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), + ) + + assert.Nil(t, th) + assert.Equal(t, process.ErrNilShardCoordinator, err) } -func TestNewRewardTxHandler_NilHasher(t *testing.T) { - t.Parallel() - - tdp := initDataPool() - th, err := NewRewardTxHandler( - &mock.SpecialAddressHandlerMock{}, - nil, - &mock.MarshalizerMock{}, - mock.NewMultiShardsCoordinatorMock(3), - &mock.AddressConverterMock{}, - &mock.ChainStorerMock{}, - tdp.RewardTransactions(), - ) - - assert.Nil(t, th) - assert.Equal(t, process.ErrNilHasher, err) +func TestNewRewardTxHandler_NilAddressConverter(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + th, err := NewRewardTxHandler( + &mock.SpecialAddressHandlerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + nil, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), + ) + + assert.Nil(t, th) + assert.Equal(t, process.ErrNilAddressConverter, err) } -func TestNewRewardTxHandler_NilMarshalizer(t *testing.T) { - t.Parallel() - - tdp := initDataPool() - th, err := NewRewardTxHandler( - &mock.SpecialAddressHandlerMock{}, - &mock.HasherMock{}, - nil, - mock.NewMultiShardsCoordinatorMock(3), - &mock.AddressConverterMock{}, - &mock.ChainStorerMock{}, - tdp.RewardTransactions(), - ) - - assert.Nil(t, th) - assert.Equal(t, process.ErrNilMarshalizer, err) +func TestNewRewardTxHandler_NilChainStorer(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + th, err := NewRewardTxHandler( + &mock.SpecialAddressHandlerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AddressConverterMock{}, + 
nil, + tdp.RewardTransactions(), + ) + + assert.Nil(t, th) + assert.Equal(t, process.ErrNilStorage, err) +} + +func TestNewRewardTxHandler_NilRewardsPool(t *testing.T) { + t.Parallel() + + th, err := NewRewardTxHandler( + &mock.SpecialAddressHandlerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AddressConverterMock{}, + &mock.ChainStorerMock{}, + nil, + ) + + assert.Nil(t, th) + assert.NotNil(t, process.ErrNilRewardTxDataPool, err) } func TestNewRewardTxHandler_ValsOk(t *testing.T) { - t.Parallel() - - tdp := initDataPool() - th, err := NewRewardTxHandler( - &mock.SpecialAddressHandlerMock{}, - &mock.HasherMock{}, - &mock.MarshalizerMock{}, - mock.NewMultiShardsCoordinatorMock(3), - &mock.AddressConverterMock{}, - &mock.ChainStorerMock{}, - tdp.RewardTransactions(), - ) - - assert.Nil(t, err) - assert.NotNil(t, th) + t.Parallel() + + tdp := initDataPool() + th, err := NewRewardTxHandler( + &mock.SpecialAddressHandlerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AddressConverterMock{}, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), + ) + + assert.Nil(t, err) + assert.NotNil(t, th) } func TestRewardTxHandlerAddIntermediateTransactions(t *testing.T) { - t.Parallel() - - tdp := initDataPool() - th, err := NewRewardTxHandler( - &mock.SpecialAddressHandlerMock{}, - &mock.HasherMock{}, - &mock.MarshalizerMock{}, - mock.NewMultiShardsCoordinatorMock(3), - &mock.AddressConverterMock{}, - &mock.ChainStorerMock{}, - tdp.RewardTransactions(), - ) - - assert.Nil(t, err) - assert.NotNil(t, th) - - err = th.AddIntermediateTransactions(nil) - assert.Nil(t, err) + t.Parallel() + + tdp := initDataPool() + th, err := NewRewardTxHandler( + &mock.SpecialAddressHandlerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AddressConverterMock{}, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), + ) + + assert.Nil(t, err) + assert.NotNil(t, th) + + err = th.AddIntermediateTransactions(nil) + assert.Nil(t, err) } func TestRewardTxHandlerProcessTransactionFee(t *testing.T) { - t.Parallel() - - tdp := initDataPool() - th, err := NewRewardTxHandler( - &mock.SpecialAddressHandlerMock{}, - &mock.HasherMock{}, - &mock.MarshalizerMock{}, - mock.NewMultiShardsCoordinatorMock(3), - &mock.AddressConverterMock{}, - &mock.ChainStorerMock{}, - tdp.RewardTransactions(), - ) - - assert.Nil(t, err) - assert.NotNil(t, th) - - th.ProcessTransactionFee(nil) - assert.Equal(t, big.NewInt(0), th.accumulatedFees) - - th.ProcessTransactionFee(big.NewInt(10)) - assert.Equal(t, big.NewInt(10), th.accumulatedFees) - - th.ProcessTransactionFee(big.NewInt(100)) - assert.Equal(t, big.NewInt(110), th.accumulatedFees) + t.Parallel() + + tdp := initDataPool() + th, err := NewRewardTxHandler( + &mock.SpecialAddressHandlerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AddressConverterMock{}, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), + ) + + assert.Nil(t, err) + assert.NotNil(t, th) + + th.ProcessTransactionFee(nil) + assert.Equal(t, big.NewInt(0), th.accumulatedFees) + + th.ProcessTransactionFee(big.NewInt(10)) + assert.Equal(t, big.NewInt(10), th.accumulatedFees) + + th.ProcessTransactionFee(big.NewInt(100)) + assert.Equal(t, big.NewInt(110), th.accumulatedFees) } func TestRewardTxHandlerCleanProcessedUTxs(t *testing.T) { - t.Parallel() - - tdp := initDataPool() - th, err := NewRewardTxHandler( - 
&mock.SpecialAddressHandlerMock{}, - &mock.HasherMock{}, - &mock.MarshalizerMock{}, - mock.NewMultiShardsCoordinatorMock(3), - &mock.AddressConverterMock{}, - &mock.ChainStorerMock{}, - tdp.RewardTransactions(), - ) - - assert.Nil(t, err) - assert.NotNil(t, th) - - th.ProcessTransactionFee(big.NewInt(10)) - _ = th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{}}) - assert.Equal(t, big.NewInt(10), th.accumulatedFees) - assert.Equal(t, 1, len(th.rewardTxsForBlock)) - - th.cleanCachedData() - assert.Equal(t, big.NewInt(0), th.accumulatedFees) - assert.Equal(t, 0, len(th.rewardTxsForBlock)) + t.Parallel() + + tdp := initDataPool() + th, err := NewRewardTxHandler( + &mock.SpecialAddressHandlerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AddressConverterMock{}, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), + ) + + assert.Nil(t, err) + assert.NotNil(t, th) + + th.ProcessTransactionFee(big.NewInt(10)) + _ = th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{}}) + assert.Equal(t, big.NewInt(10), th.accumulatedFees) + assert.Equal(t, 1, len(th.rewardTxsForBlock)) + + th.cleanCachedData() + assert.Equal(t, big.NewInt(0), th.accumulatedFees) + assert.Equal(t, 0, len(th.rewardTxsForBlock)) } func TestRewardTxHandlerCreateAllUTxs(t *testing.T) { - t.Parallel() + t.Parallel() - tdp := initDataPool() - th, err := NewRewardTxHandler( - &mock.SpecialAddressHandlerMock{}, - &mock.HasherMock{}, - &mock.MarshalizerMock{}, - mock.NewMultiShardsCoordinatorMock(3), - &mock.AddressConverterMock{}, - &mock.ChainStorerMock{}, - tdp.RewardTransactions(), - ) + tdp := initDataPool() + th, err := NewRewardTxHandler( + &mock.SpecialAddressHandlerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AddressConverterMock{}, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), + ) - assert.Nil(t, err) - assert.NotNil(t, th) + assert.Nil(t, err) + assert.NotNil(t, th) - txs := th.createRewardFromFees() - assert.Equal(t, 0, len(txs)) + txs := th.createRewardFromFees() + assert.Equal(t, 0, len(txs)) - currTxFee := big.NewInt(50) - th.ProcessTransactionFee(currTxFee) + currTxFee := big.NewInt(50) + th.ProcessTransactionFee(currTxFee) - txs = th.createRewardFromFees() - assert.Equal(t, 3, len(txs)) + txs = th.createRewardFromFees() + assert.Equal(t, 3, len(txs)) - totalSum := txs[0].GetValue().Uint64() - totalSum += txs[1].GetValue().Uint64() - totalSum += txs[2].GetValue().Uint64() + totalSum := txs[0].GetValue().Uint64() + totalSum += txs[1].GetValue().Uint64() + totalSum += txs[2].GetValue().Uint64() - assert.Equal(t, currTxFee.Uint64(), totalSum) + assert.Equal(t, currTxFee.Uint64(), totalSum) } -func TestRewardTxHandlerVerifyCreatedRewardsTxs(t *testing.T) { - t.Parallel() - - tdp := initDataPool() - addr := &mock.SpecialAddressHandlerMock{} - th, err := NewRewardTxHandler( - addr, - &mock.HasherMock{}, - &mock.MarshalizerMock{}, - mock.NewMultiShardsCoordinatorMock(3), - &mock.AddressConverterMock{}, - &mock.ChainStorerMock{}, - tdp.RewardTransactions(), - ) - - assert.Nil(t, err) - assert.NotNil(t, th) - - err = th.verifyCreatedRewardsTxs() - assert.Nil(t, err) - - currTxFee := big.NewInt(50) - th.ProcessTransactionFee(currTxFee) - - err = th.verifyCreatedRewardsTxs() - assert.Equal(t, process.ErrRewardTxNotFound, err) - - badValue := big.NewInt(100) - _ = th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{Value: 
badValue}}) - - err = th.verifyCreatedRewardsTxs() - assert.Equal(t, process.ErrTotalTxsFeesDoNotMatch, err) - - th.cleanCachedData() - - currTxFee = big.NewInt(50) - halfCurrTxFee := big.NewInt(25) - th.ProcessTransactionFee(currTxFee) - _ = th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{Value: halfCurrTxFee}}) - - err = th.verifyCreatedRewardsTxs() - assert.Equal(t, process.ErrRewardTxNotFound, err) - - th.cleanCachedData() - - currTxFee = big.NewInt(50) - th.ProcessTransactionFee(currTxFee) - _ = th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{Value: big.NewInt(5), RcvAddr: addr.ElrondCommunityAddress()}}) - _ = th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{Value: big.NewInt(20), RcvAddr: addr.LeaderAddress()}}) - _ = th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{Value: big.NewInt(25), RcvAddr: addr.BurnAddress()}}) - - err = th.verifyCreatedRewardsTxs() - assert.Nil(t, err) +func TestRewardTxHandler_VerifyCreatedRewardsTxsRewardTxNotFound(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + adrConv := &mock.AddressConverterMock{} + shardCoordinator := mock.NewMultiShardsCoordinatorMock(3) + addr := mock.NewSpecialAddressHandlerMock(adrConv, shardCoordinator) + th, err := NewRewardTxHandler( + addr, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + shardCoordinator, + adrConv, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), + ) + + assert.Nil(t, err) + assert.NotNil(t, th) + + err = th.verifyCreatedRewardsTxs() + assert.Nil(t, err) + + currTxFee := big.NewInt(50) + th.ProcessTransactionFee(currTxFee) + _ = th.CreateAllInterMiniBlocks() + err = th.verifyCreatedRewardsTxs() + assert.Equal(t, process.ErrRewardTxNotFound, err) } -func TestRewardTxHandlerCreateAllInterMiniBlocks(t *testing.T) { - t.Parallel() - - shardCoordinator := mock.NewMultiShardsCoordinatorMock(3) - tdp := initDataPool() - th, err := NewRewardTxHandler( - &mock.SpecialAddressHandlerMock{ - AdrConv: &mock.AddressConverterMock{}, - ShardCoordinator: shardCoordinator}, - &mock.HasherMock{}, - &mock.MarshalizerMock{}, - shardCoordinator, - &mock.AddressConverterMock{}, - &mock.ChainStorerMock{}, - tdp.RewardTransactions(), - ) - - assert.Nil(t, err) - assert.NotNil(t, th) - - mbs := th.CreateAllInterMiniBlocks() - assert.Equal(t, 0, len(mbs)) - - currTxFee := big.NewInt(50) - th.ProcessTransactionFee(currTxFee) - - mbs = th.CreateAllInterMiniBlocks() - assert.Equal(t, 1, len(mbs)) +func TestRewardTxHandler_VerifyCreatedRewardsTxsTotalTxsFeesDoNotMatch(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + adrConv := &mock.AddressConverterMock{} + shardCoordinator := mock.NewMultiShardsCoordinatorMock(3) + addr := mock.NewSpecialAddressHandlerMock(adrConv, shardCoordinator) + th, err := NewRewardTxHandler( + addr, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + shardCoordinator, + adrConv, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), + ) + + assert.Nil(t, err) + assert.NotNil(t, th) + + err = th.verifyCreatedRewardsTxs() + assert.Nil(t, err) + + currTxFee := big.NewInt(50) + th.ProcessTransactionFee(currTxFee) + extraVal := big.NewInt(100) + _ = th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{Value: big.NewInt(5), RcvAddr: addr.ElrondCommunityAddress()}}) + _ = th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{Value: big.NewInt(25), RcvAddr: addr.LeaderAddress()}}) + _ = 
th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{Value: big.NewInt(20), RcvAddr: addr.BurnAddress()}}) + _ = th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{Value: extraVal, RcvAddr: addr.BurnAddress()}}) + _ = th.CreateAllInterMiniBlocks() + err = th.verifyCreatedRewardsTxs() + assert.Equal(t, process.ErrTotalTxsFeesDoNotMatch, err) } -func TestRewardTxHandlerVerifyInterMiniBlocks(t *testing.T) { - t.Parallel() - - tdp := initDataPool() - addr := &mock.SpecialAddressHandlerMock{} - th, err := NewRewardTxHandler( - addr, - &mock.HasherMock{}, - &mock.MarshalizerMock{}, - mock.NewMultiShardsCoordinatorMock(3), - &mock.AddressConverterMock{}, - &mock.ChainStorerMock{}, - tdp.RewardTransactions(), - ) - - assert.Nil(t, err) - assert.NotNil(t, th) - - err = th.VerifyInterMiniBlocks(nil) - assert.Nil(t, err) - - currTxFee := big.NewInt(50) - th.ProcessTransactionFee(currTxFee) - - err = th.VerifyInterMiniBlocks(nil) - assert.Equal(t, process.ErrRewardTxNotFound, err) - - badValue := big.NewInt(100) - _ = th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{Value: badValue}}) - - err = th.VerifyInterMiniBlocks(nil) - assert.Equal(t, process.ErrTotalTxsFeesDoNotMatch, err) - - th.cleanCachedData() - - currTxFee = big.NewInt(50) - halfCurrTxFee := big.NewInt(25) - th.ProcessTransactionFee(currTxFee) - _ = th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{Value: halfCurrTxFee}}) - - err = th.VerifyInterMiniBlocks(nil) - assert.Equal(t, process.ErrRewardTxNotFound, err) - - th.cleanCachedData() +func TestRewardTxHandlerVerifyCreatedRewardsTxsOK(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + adrConv := &mock.AddressConverterMock{} + shardCoordinator := mock.NewMultiShardsCoordinatorMock(3) + addr := mock.NewSpecialAddressHandlerMock(adrConv, shardCoordinator) + th, err := NewRewardTxHandler( + addr, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + shardCoordinator, + adrConv, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), + ) + + assert.Nil(t, err) + assert.NotNil(t, th) + + currTxFee := big.NewInt(50) + th.ProcessTransactionFee(currTxFee) + _ = th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{Value: big.NewInt(5), RcvAddr: addr.ElrondCommunityAddress()}}) + _ = th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{Value: big.NewInt(25), RcvAddr: addr.LeaderAddress()}}) + _ = th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{Value: big.NewInt(20), RcvAddr: addr.BurnAddress()}}) + _ = th.CreateAllInterMiniBlocks() + err = th.verifyCreatedRewardsTxs() + assert.Nil(t, err) +} - currTxFee = big.NewInt(50) - th.ProcessTransactionFee(currTxFee) - _ = th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{Value: big.NewInt(5), RcvAddr: addr.ElrondCommunityAddress()}}) - _ = th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{Value: big.NewInt(20), RcvAddr: addr.LeaderAddress()}}) +func TestRewardTxHandlerCreateAllInterMiniBlocksOK(t *testing.T) { + t.Parallel() + + shardCoordinator := mock.NewMultiShardsCoordinatorMock(3) + tdp := initDataPool() + th, err := NewRewardTxHandler( + &mock.SpecialAddressHandlerMock{ + AdrConv: &mock.AddressConverterMock{}, + ShardCoordinator: shardCoordinator}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + shardCoordinator, + &mock.AddressConverterMock{}, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), + ) + + assert.Nil(t, 
err) + assert.NotNil(t, th) + + mbs := th.CreateAllInterMiniBlocks() + assert.Equal(t, 0, len(mbs)) + + currTxFee := big.NewInt(50) + th.ProcessTransactionFee(currTxFee) + + mbs = th.CreateAllInterMiniBlocks() + assert.Equal(t, 1, len(mbs)) } diff --git a/process/mock/specialAddressHandlerMock.go b/process/mock/specialAddressHandlerMock.go index 597e4a3364d..d70311a2291 100644 --- a/process/mock/specialAddressHandlerMock.go +++ b/process/mock/specialAddressHandlerMock.go @@ -1,81 +1,98 @@ package mock import ( - "github.com/ElrondNetwork/elrond-go/data/state" - "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/sharding" ) type SpecialAddressHandlerMock struct { - ElrondCommunityAddressCalled func() []byte - LeaderAddressCalled func() []byte - BurnAddressCalled func() []byte - ShardIdForAddressCalled func([]byte) (uint32, error) - AdrConv state.AddressConverter - ShardCoordinator sharding.Coordinator - - addresses []string - epoch uint32 - round uint64 + ElrondCommunityAddressCalled func() []byte + LeaderAddressCalled func() []byte + BurnAddressCalled func() []byte + ShardIdForAddressCalled func([]byte) (uint32, error) + AdrConv state.AddressConverter + ShardCoordinator sharding.Coordinator + + addresses []string + epoch uint32 + round uint64 +} + +func NewSpecialAddressHandlerMock( + addrConv state.AddressConverter, + shardCoordinator sharding.Coordinator, +) *SpecialAddressHandlerMock { + return &SpecialAddressHandlerMock{ + ElrondCommunityAddressCalled: nil, + LeaderAddressCalled: nil, + BurnAddressCalled: nil, + ShardIdForAddressCalled: nil, + AdrConv: addrConv, + ShardCoordinator: shardCoordinator, + addresses: nil, + epoch: 0, + round: 0, + } } func (sh *SpecialAddressHandlerMock) SetElrondCommunityAddress(elrond []byte) { } func (sh *SpecialAddressHandlerMock) SetConsensusData(consensusRewardAddresses []string, round uint64, epoch uint32) { - sh.addresses = consensusRewardAddresses - sh.epoch = epoch - sh.round = round + sh.addresses = consensusRewardAddresses + sh.epoch = epoch + sh.round = round } func (sh *SpecialAddressHandlerMock) ConsensusRewardAddresses() []string { - return sh.addresses + return sh.addresses } func (sh *SpecialAddressHandlerMock) BurnAddress() []byte { - if sh.BurnAddressCalled == nil { - return []byte("burn") - } + if sh.BurnAddressCalled == nil { + return []byte("burn") + } - return sh.BurnAddressCalled() + return sh.BurnAddressCalled() } func (sh *SpecialAddressHandlerMock) ElrondCommunityAddress() []byte { - if sh.ElrondCommunityAddressCalled == nil { - return []byte("elrond") - } + if sh.ElrondCommunityAddressCalled == nil { + return []byte("elrond") + } - return sh.ElrondCommunityAddressCalled() + return sh.ElrondCommunityAddressCalled() } func (sh *SpecialAddressHandlerMock) Round() uint64 { - return sh.round + return sh.round } func (sh *SpecialAddressHandlerMock) Epoch() uint32 { - return sh.epoch + return sh.epoch } func (sh *SpecialAddressHandlerMock) LeaderAddress() []byte { - if sh.LeaderAddressCalled == nil { - return []byte("leader") - } + if sh.LeaderAddressCalled == nil { + return []byte("leader") + } - return sh.LeaderAddressCalled() + return sh.LeaderAddressCalled() } func (sh *SpecialAddressHandlerMock) ShardIdForAddress(addr []byte) (uint32, error) { - convAdr, err := sh.AdrConv.CreateAddressFromPublicKeyBytes(addr) - if err != nil { - return 0, err - } + convAdr, err := sh.AdrConv.CreateAddressFromPublicKeyBytes(addr) + if err != nil { + return 0, err + } 
-	return sh.ShardCoordinator.ComputeId(convAdr), nil
+	return sh.ShardCoordinator.ComputeId(convAdr), nil
 }
 
 // IsInterfaceNil returns true if there is no value under the interface
 func (sh *SpecialAddressHandlerMock) IsInterfaceNil() bool {
-	if sh == nil {
-		return true
-	}
-	return false
+	if sh == nil {
+		return true
+	}
+	return false
 }
 
From 4f66477a6895ec77e92e971577470553059011d8 Mon Sep 17 00:00:00 2001
From: AdoAdoAdo
Date: Tue, 10 Sep 2019 11:04:16 +0300
Subject: [PATCH 090/234] process: fix after merge

---
 data/address/specialAddresses.go              |    2 +-
 process/block/metablock.go                    | 2489 +++++++++--------
 .../block/preprocess/rewardTxPreProcessor.go  |    2 +-
 process/block/preprocess/rewardsHandler.go    |   10 +-
 process/block/shardblock.go                   |    2 +-
 process/coordinator/process.go                |    2 +-
 .../intermediateProcessorsContainerFactory.go |    1 -
 process/interface.go                          |    1 +
 process/transaction/process.go                |    3 -
 9 files changed, 1253 insertions(+), 1259 deletions(-)

diff --git a/data/address/specialAddresses.go b/data/address/specialAddresses.go
index e697f686735..33cc044bf24 100644
--- a/data/address/specialAddresses.go
+++ b/data/address/specialAddresses.go
@@ -62,7 +62,7 @@ func (sp *specialAddresses) BurnAddress() []byte {
 	return sp.burnAddress
 }
 
-// SetConsensusRewardAddresses sets the consensus rewards addresses for the round
+// SetConsensusData sets the consensus rewards addresses for the round
 func (sp *specialAddresses) SetConsensusData(consensusRewardAddresses []string, round uint64, epoch uint32) {
 	sp.consensusRewardAddresses = consensusRewardAddresses
 	sp.round = round
diff --git a/process/block/metablock.go b/process/block/metablock.go
index 891657d573a..6b1c98afdc0 100644
--- a/process/block/metablock.go
+++ b/process/block/metablock.go
@@ -1,28 +1,28 @@
 package block
 
 import (
-	"encoding/base64"
-	"fmt"
-	"sort"
-	"sync"
-	"time"
-
-	"github.com/ElrondNetwork/elrond-go/core"
-	"github.com/ElrondNetwork/elrond-go/core/serviceContainer"
-	"github.com/ElrondNetwork/elrond-go/data"
-	"github.com/ElrondNetwork/elrond-go/data/block"
-	"github.com/ElrondNetwork/elrond-go/data/state"
-	"github.com/ElrondNetwork/elrond-go/data/typeConverters"
-	"github.com/ElrondNetwork/elrond-go/dataRetriever"
-	"github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool"
-	"github.com/ElrondNetwork/elrond-go/display"
-	"github.com/ElrondNetwork/elrond-go/hashing"
-	"github.com/ElrondNetwork/elrond-go/marshal"
-	"github.com/ElrondNetwork/elrond-go/process"
-	"github.com/ElrondNetwork/elrond-go/process/throttle"
-	"github.com/ElrondNetwork/elrond-go/sharding"
-	"github.com/ElrondNetwork/elrond-go/statusHandler"
-	"github.com/ElrondNetwork/elrond-go/storage"
+	"encoding/base64"
+	"fmt"
+	"sort"
+	"sync"
+	"time"
+
+	"github.com/ElrondNetwork/elrond-go/core"
+	"github.com/ElrondNetwork/elrond-go/core/serviceContainer"
+	"github.com/ElrondNetwork/elrond-go/data"
+	"github.com/ElrondNetwork/elrond-go/data/block"
+	"github.com/ElrondNetwork/elrond-go/data/state"
+	"github.com/ElrondNetwork/elrond-go/data/typeConverters"
+	"github.com/ElrondNetwork/elrond-go/dataRetriever"
+	"github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool"
+	"github.com/ElrondNetwork/elrond-go/display"
+	"github.com/ElrondNetwork/elrond-go/hashing"
+	"github.com/ElrondNetwork/elrond-go/marshal"
+	"github.com/ElrondNetwork/elrond-go/process"
+	"github.com/ElrondNetwork/elrond-go/process/throttle"
+	"github.com/ElrondNetwork/elrond-go/sharding"
+	"github.com/ElrondNetwork/elrond-go/statusHandler"
+	"github.com/ElrondNetwork/elrond-go/storage"
 )
 
 var 
shardMBHeaderCounterMutex = sync.RWMutex{} @@ -31,1393 +31,1398 @@ var shardMBHeadersTotalProcessed = 0 // metaProcessor implements metaProcessor interface and actually it tries to execute block type metaProcessor struct { - *baseProcessor - core serviceContainer.Core - dataPool dataRetriever.MetaPoolsHolder + *baseProcessor + core serviceContainer.Core + dataPool dataRetriever.MetaPoolsHolder - currHighestShardHdrsNonces map[uint32]uint64 - requestedShardHdrsHashes map[string]bool - allNeededShardHdrsFound bool - mutRequestedShardHdrsHashes sync.RWMutex + currHighestShardHdrsNonces map[uint32]uint64 + requestedShardHdrsHashes map[string]bool + allNeededShardHdrsFound bool + mutRequestedShardHdrsHashes sync.RWMutex - shardsHeadersNonce *sync.Map + shardsHeadersNonce *sync.Map - nextKValidity uint32 + nextKValidity uint32 - chRcvAllHdrs chan bool + chRcvAllHdrs chan bool } // NewMetaProcessor creates a new metaProcessor object func NewMetaProcessor( - core serviceContainer.Core, - accounts state.AccountsAdapter, - dataPool dataRetriever.MetaPoolsHolder, - forkDetector process.ForkDetector, - shardCoordinator sharding.Coordinator, - nodesCoordinator sharding.NodesCoordinator, - specialAddressHandler process.SpecialAddressHandler, - hasher hashing.Hasher, - marshalizer marshal.Marshalizer, - store dataRetriever.StorageService, - startHeaders map[uint32]data.HeaderHandler, - requestHandler process.RequestHandler, - uint64Converter typeConverters.Uint64ByteSliceConverter, + core serviceContainer.Core, + accounts state.AccountsAdapter, + dataPool dataRetriever.MetaPoolsHolder, + forkDetector process.ForkDetector, + shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, + specialAddressHandler process.SpecialAddressHandler, + hasher hashing.Hasher, + marshalizer marshal.Marshalizer, + store dataRetriever.StorageService, + startHeaders map[uint32]data.HeaderHandler, + requestHandler process.RequestHandler, + uint64Converter typeConverters.Uint64ByteSliceConverter, ) (*metaProcessor, error) { - err := checkProcessorNilParameters( - accounts, - forkDetector, - hasher, - marshalizer, - store, - shardCoordinator, - nodesCoordinator, - specialAddressHandler, - uint64Converter) - if err != nil { - return nil, err - } - - if dataPool == nil || dataPool.IsInterfaceNil() { - return nil, process.ErrNilDataPoolHolder - } - if dataPool.ShardHeaders() == nil || dataPool.ShardHeaders().IsInterfaceNil() { - return nil, process.ErrNilHeadersDataPool - } - if requestHandler == nil || requestHandler.IsInterfaceNil() { - return nil, process.ErrNilRequestHandler - } - - blockSizeThrottler, err := throttle.NewBlockSizeThrottle() - if err != nil { - return nil, err - } - - base := &baseProcessor{ - accounts: accounts, - blockSizeThrottler: blockSizeThrottler, - forkDetector: forkDetector, - hasher: hasher, - marshalizer: marshalizer, - store: store, - shardCoordinator: shardCoordinator, - nodesCoordinator: nodesCoordinator, - specialAddressHandler: specialAddressHandler, - uint64Converter: uint64Converter, - onRequestHeaderHandler: requestHandler.RequestHeader, - onRequestHeaderHandlerByNonce: requestHandler.RequestHeaderByNonce, - appStatusHandler: statusHandler.NewNilStatusHandler(), - } - - err = base.setLastNotarizedHeadersSlice(startHeaders) - if err != nil { - return nil, err - } - - mp := metaProcessor{ - core: core, - baseProcessor: base, - dataPool: dataPool, - } - - mp.requestedShardHdrsHashes = make(map[string]bool) - - headerPool := mp.dataPool.ShardHeaders() - 
headerPool.RegisterHandler(mp.receivedHeader) - - mp.chRcvAllHdrs = make(chan bool) - - mp.nextKValidity = process.ShardBlockFinality - mp.allNeededShardHdrsFound = true - - mp.shardsHeadersNonce = &sync.Map{} - - return &mp, nil + err := checkProcessorNilParameters( + accounts, + forkDetector, + hasher, + marshalizer, + store, + shardCoordinator, + nodesCoordinator, + specialAddressHandler, + uint64Converter) + if err != nil { + return nil, err + } + + if dataPool == nil || dataPool.IsInterfaceNil() { + return nil, process.ErrNilDataPoolHolder + } + if dataPool.ShardHeaders() == nil || dataPool.ShardHeaders().IsInterfaceNil() { + return nil, process.ErrNilHeadersDataPool + } + if requestHandler == nil || requestHandler.IsInterfaceNil() { + return nil, process.ErrNilRequestHandler + } + + blockSizeThrottler, err := throttle.NewBlockSizeThrottle() + if err != nil { + return nil, err + } + + base := &baseProcessor{ + accounts: accounts, + blockSizeThrottler: blockSizeThrottler, + forkDetector: forkDetector, + hasher: hasher, + marshalizer: marshalizer, + store: store, + shardCoordinator: shardCoordinator, + nodesCoordinator: nodesCoordinator, + specialAddressHandler: specialAddressHandler, + uint64Converter: uint64Converter, + onRequestHeaderHandler: requestHandler.RequestHeader, + onRequestHeaderHandlerByNonce: requestHandler.RequestHeaderByNonce, + appStatusHandler: statusHandler.NewNilStatusHandler(), + } + + err = base.setLastNotarizedHeadersSlice(startHeaders) + if err != nil { + return nil, err + } + + mp := metaProcessor{ + core: core, + baseProcessor: base, + dataPool: dataPool, + } + + mp.requestedShardHdrsHashes = make(map[string]bool) + + headerPool := mp.dataPool.ShardHeaders() + headerPool.RegisterHandler(mp.receivedHeader) + + mp.chRcvAllHdrs = make(chan bool) + + mp.nextKValidity = process.ShardBlockFinality + mp.allNeededShardHdrsFound = true + + mp.shardsHeadersNonce = &sync.Map{} + + return &mp, nil } // ProcessBlock processes a block. 
It returns nil if all ok or the specific error func (mp *metaProcessor) ProcessBlock( - chainHandler data.ChainHandler, - headerHandler data.HeaderHandler, - bodyHandler data.BodyHandler, - haveTime func() time.Duration, + chainHandler data.ChainHandler, + headerHandler data.HeaderHandler, + bodyHandler data.BodyHandler, + haveTime func() time.Duration, ) error { - if haveTime == nil { - return process.ErrNilHaveTimeHandler - } - - err := mp.checkBlockValidity(chainHandler, headerHandler, bodyHandler) - if err != nil { - return err - } - - log.Debug(fmt.Sprintf("started processing block with round %d and nonce %d\n", - headerHandler.GetRound(), - headerHandler.GetNonce())) - - header, ok := headerHandler.(*block.MetaBlock) - if !ok { - return process.ErrWrongTypeAssertion - } - - requestedShardHdrs, requestedFinalShardHdrs := mp.requestShardHeaders(header) - - if haveTime() < 0 { - return process.ErrTimeIsOut - } - - if requestedShardHdrs > 0 || requestedFinalShardHdrs > 0 { - log.Info(fmt.Sprintf("requested %d missing shard headers and %d final shard headers\n", requestedShardHdrs, requestedFinalShardHdrs)) - err = mp.waitForBlockHeaders(haveTime()) - mp.mutRequestedShardHdrsHashes.Lock() - mp.allNeededShardHdrsFound = true - unreceivedShardHdrs := len(mp.requestedShardHdrsHashes) - mp.mutRequestedShardHdrsHashes.Unlock() - log.Info(fmt.Sprintf("received %d missing shard headers\n", int(requestedShardHdrs)-unreceivedShardHdrs)) - if err != nil { - return err - } - } - - if mp.accounts.JournalLen() != 0 { - return process.ErrAccountStateDirty - } - - defer func() { - go mp.checkAndRequestIfShardHeadersMissing(header.Round) - }() - - highestNonceHdrs, err := mp.checkShardHeadersValidity(header) - if err != nil { - return err - } - - err = mp.checkShardHeadersFinality(header, highestNonceHdrs) - if err != nil { - return err - } - - defer func() { - if err != nil { - mp.RevertAccountState() - } - }() - - err = mp.processBlockHeaders(header, header.Round, haveTime) - if err != nil { - return err - } - - if !mp.verifyStateRoot(header.GetRootHash()) { - err = process.ErrRootStateMissmatch - return err - } - - return nil + if haveTime == nil { + return process.ErrNilHaveTimeHandler + } + + err := mp.checkBlockValidity(chainHandler, headerHandler, bodyHandler) + if err != nil { + return err + } + + log.Debug(fmt.Sprintf("started processing block with round %d and nonce %d\n", + headerHandler.GetRound(), + headerHandler.GetNonce())) + + header, ok := headerHandler.(*block.MetaBlock) + if !ok { + return process.ErrWrongTypeAssertion + } + + requestedShardHdrs, requestedFinalShardHdrs := mp.requestShardHeaders(header) + + if haveTime() < 0 { + return process.ErrTimeIsOut + } + + if requestedShardHdrs > 0 || requestedFinalShardHdrs > 0 { + log.Info(fmt.Sprintf("requested %d missing shard headers and %d final shard headers\n", requestedShardHdrs, requestedFinalShardHdrs)) + err = mp.waitForBlockHeaders(haveTime()) + mp.mutRequestedShardHdrsHashes.Lock() + mp.allNeededShardHdrsFound = true + unreceivedShardHdrs := len(mp.requestedShardHdrsHashes) + mp.mutRequestedShardHdrsHashes.Unlock() + log.Info(fmt.Sprintf("received %d missing shard headers\n", int(requestedShardHdrs)-unreceivedShardHdrs)) + if err != nil { + return err + } + } + + if mp.accounts.JournalLen() != 0 { + return process.ErrAccountStateDirty + } + + defer func() { + go mp.checkAndRequestIfShardHeadersMissing(header.Round) + }() + + highestNonceHdrs, err := mp.checkShardHeadersValidity(header) + if err != nil { + return err + } + + err = 
mp.checkShardHeadersFinality(header, highestNonceHdrs) + if err != nil { + return err + } + + defer func() { + if err != nil { + mp.RevertAccountState() + } + }() + + err = mp.processBlockHeaders(header, header.Round, haveTime) + if err != nil { + return err + } + + if !mp.verifyStateRoot(header.GetRootHash()) { + err = process.ErrRootStateMissmatch + return err + } + + return nil +} + +// SetConsensusRewardAddresses - sets the reward addresses for the current consensus group +func (mp *metaProcessor) SetConsensusRewardAddresses(consensusRewardAddresses []string, round uint64) { + // TODO set the reward addresses for metachain consensus nodes } func (mp *metaProcessor) checkAndRequestIfShardHeadersMissing(round uint64) { - _, _, sortedHdrPerShard, err := mp.getOrderedHdrs(round) - if err != nil { - log.Debug(err.Error()) - return - } - - for i := uint32(0); i < mp.shardCoordinator.NumberOfShards(); i++ { - // map from *block.Header to dataHandler - sortedHdrs := make([]data.HeaderHandler, 0) - for j := 0; j < len(sortedHdrPerShard[i]); j++ { - sortedHdrs = append(sortedHdrs, sortedHdrPerShard[i][j]) - } - - err := mp.requestHeadersIfMissing(sortedHdrs, i, round) - if err != nil { - log.Debug(err.Error()) - continue - } - } - - return + _, _, sortedHdrPerShard, err := mp.getOrderedHdrs(round) + if err != nil { + log.Debug(err.Error()) + return + } + + for i := uint32(0); i < mp.shardCoordinator.NumberOfShards(); i++ { + // map from *block.Header to dataHandler + sortedHdrs := make([]data.HeaderHandler, 0) + for j := 0; j < len(sortedHdrPerShard[i]); j++ { + sortedHdrs = append(sortedHdrs, sortedHdrPerShard[i][j]) + } + + err := mp.requestHeadersIfMissing(sortedHdrs, i, round) + if err != nil { + log.Debug(err.Error()) + continue + } + } + + return } func (mp *metaProcessor) indexBlock(metaBlock *block.MetaBlock, headerPool map[string]*block.Header) { - if mp.core == nil || mp.core.Indexer() == nil { - return - } + if mp.core == nil || mp.core.Indexer() == nil { + return + } - // Update tps benchmarks in the DB - tpsBenchmark := mp.core.TPSBenchmark() - if tpsBenchmark != nil { - go mp.core.Indexer().UpdateTPS(tpsBenchmark) - } + // Update tps benchmarks in the DB + tpsBenchmark := mp.core.TPSBenchmark() + if tpsBenchmark != nil { + go mp.core.Indexer().UpdateTPS(tpsBenchmark) + } - //TODO: maybe index metablocks also? + //TODO: maybe index metablocks also? 
} // removeBlockInfoFromPool removes the block info from associated pools func (mp *metaProcessor) removeBlockInfoFromPool(header *block.MetaBlock) error { - if header == nil || header.IsInterfaceNil() { - return process.ErrNilMetaBlockHeader - } - - headerPool := mp.dataPool.ShardHeaders() - if headerPool == nil || headerPool.IsInterfaceNil() { - return process.ErrNilHeadersDataPool - } - - headerNoncesPool := mp.dataPool.HeadersNonces() - if headerNoncesPool == nil || headerNoncesPool.IsInterfaceNil() { - return process.ErrNilHeadersNoncesDataPool - } - - for i := 0; i < len(header.ShardInfo); i++ { - shardData := header.ShardInfo[i] - - obj, ok := headerPool.Peek(shardData.HeaderHash) - if !ok { - continue - } - - hdr, ok := obj.(*block.Header) - if !ok { - return process.ErrWrongTypeAssertion - } - - headerPool.Remove(shardData.HeaderHash) - headerNoncesPool.Remove(hdr.Nonce, hdr.ShardId) - } - - return nil + if header == nil || header.IsInterfaceNil() { + return process.ErrNilMetaBlockHeader + } + + headerPool := mp.dataPool.ShardHeaders() + if headerPool == nil || headerPool.IsInterfaceNil() { + return process.ErrNilHeadersDataPool + } + + headerNoncesPool := mp.dataPool.HeadersNonces() + if headerNoncesPool == nil || headerNoncesPool.IsInterfaceNil() { + return process.ErrNilHeadersNoncesDataPool + } + + for i := 0; i < len(header.ShardInfo); i++ { + shardData := header.ShardInfo[i] + + obj, ok := headerPool.Peek(shardData.HeaderHash) + if !ok { + continue + } + + hdr, ok := obj.(*block.Header) + if !ok { + return process.ErrWrongTypeAssertion + } + + headerPool.Remove(shardData.HeaderHash) + headerNoncesPool.Remove(hdr.Nonce, hdr.ShardId) + } + + return nil } // RestoreBlockIntoPools restores the block into associated pools func (mp *metaProcessor) RestoreBlockIntoPools(headerHandler data.HeaderHandler, bodyHandler data.BodyHandler) error { - if headerHandler == nil || headerHandler.IsInterfaceNil() { - return process.ErrNilMetaBlockHeader - } - - header, ok := headerHandler.(*block.MetaBlock) - if !ok { - return process.ErrWrongTypeAssertion - } - - headerPool := mp.dataPool.ShardHeaders() - if headerPool == nil || headerPool.IsInterfaceNil() { - return process.ErrNilHeadersDataPool - } - - headerNoncesPool := mp.dataPool.HeadersNonces() - if headerNoncesPool == nil || headerNoncesPool.IsInterfaceNil() { - return process.ErrNilHeadersNoncesDataPool - } - - hdrHashes := make([][]byte, 0) - for i := 0; i < len(header.ShardInfo); i++ { - shardData := header.ShardInfo[i] - hdrHashes = append(hdrHashes, shardData.HeaderHash) - } - - for _, hdrHash := range hdrHashes { - buff, err := mp.store.Get(dataRetriever.BlockHeaderUnit, hdrHash) - if err != nil { - log.Error(err.Error()) - continue - } - - hdr := block.Header{} - err = mp.marshalizer.Unmarshal(&hdr, buff) - if err != nil { - log.Error(err.Error()) - continue - } - - headerPool.Put(hdrHash, &hdr) - syncMap := &dataPool.ShardIdHashSyncMap{} - syncMap.Store(hdr.ShardId, hdrHash) - headerNoncesPool.Merge(hdr.Nonce, syncMap) - - err = mp.store.GetStorer(dataRetriever.BlockHeaderUnit).Remove(hdrHash) - if err != nil { - log.Error(err.Error()) - } - - nonceToByteSlice := mp.uint64Converter.ToByteSlice(hdr.Nonce) - err = mp.store.GetStorer(dataRetriever.ShardHdrNonceHashDataUnit).Remove(nonceToByteSlice) - if err != nil { - log.Error(err.Error()) - } - - shardMBHeaderCounterMutex.Lock() - shardMBHeadersTotalProcessed -= len(hdr.MiniBlockHeaders) - shardMBHeaderCounterMutex.Unlock() - } - - mp.removeLastNotarized() - - return nil + if 
headerHandler == nil || headerHandler.IsInterfaceNil() { + return process.ErrNilMetaBlockHeader + } + + header, ok := headerHandler.(*block.MetaBlock) + if !ok { + return process.ErrWrongTypeAssertion + } + + headerPool := mp.dataPool.ShardHeaders() + if headerPool == nil || headerPool.IsInterfaceNil() { + return process.ErrNilHeadersDataPool + } + + headerNoncesPool := mp.dataPool.HeadersNonces() + if headerNoncesPool == nil || headerNoncesPool.IsInterfaceNil() { + return process.ErrNilHeadersNoncesDataPool + } + + hdrHashes := make([][]byte, 0) + for i := 0; i < len(header.ShardInfo); i++ { + shardData := header.ShardInfo[i] + hdrHashes = append(hdrHashes, shardData.HeaderHash) + } + + for _, hdrHash := range hdrHashes { + buff, err := mp.store.Get(dataRetriever.BlockHeaderUnit, hdrHash) + if err != nil { + log.Error(err.Error()) + continue + } + + hdr := block.Header{} + err = mp.marshalizer.Unmarshal(&hdr, buff) + if err != nil { + log.Error(err.Error()) + continue + } + + headerPool.Put(hdrHash, &hdr) + syncMap := &dataPool.ShardIdHashSyncMap{} + syncMap.Store(hdr.ShardId, hdrHash) + headerNoncesPool.Merge(hdr.Nonce, syncMap) + + err = mp.store.GetStorer(dataRetriever.BlockHeaderUnit).Remove(hdrHash) + if err != nil { + log.Error(err.Error()) + } + + nonceToByteSlice := mp.uint64Converter.ToByteSlice(hdr.Nonce) + err = mp.store.GetStorer(dataRetriever.ShardHdrNonceHashDataUnit).Remove(nonceToByteSlice) + if err != nil { + log.Error(err.Error()) + } + + shardMBHeaderCounterMutex.Lock() + shardMBHeadersTotalProcessed -= len(hdr.MiniBlockHeaders) + shardMBHeaderCounterMutex.Unlock() + } + + mp.removeLastNotarized() + + return nil } // CreateBlockBody creates block body of metachain func (mp *metaProcessor) CreateBlockBody(round uint64, haveTime func() bool) (data.BodyHandler, error) { - log.Debug(fmt.Sprintf("started creating block body in round %d\n", round)) - mp.blockSizeThrottler.ComputeMaxItems() - return &block.MetaBlockBody{}, nil + log.Debug(fmt.Sprintf("started creating block body in round %d\n", round)) + mp.blockSizeThrottler.ComputeMaxItems() + return &block.MetaBlockBody{}, nil } func (mp *metaProcessor) processBlockHeaders(header *block.MetaBlock, round uint64, haveTime func() time.Duration) error { - hdrPool := mp.dataPool.ShardHeaders() - - msg := "" - - for i := 0; i < len(header.ShardInfo); i++ { - shardData := header.ShardInfo[i] - for j := 0; j < len(shardData.ShardMiniBlockHeaders); j++ { - if haveTime() < 0 { - return process.ErrTimeIsOut - } - - headerHash := shardData.HeaderHash - shardMiniBlockHeader := &shardData.ShardMiniBlockHeaders[j] - err := mp.checkAndProcessShardMiniBlockHeader( - headerHash, - shardMiniBlockHeader, - hdrPool, - round, - shardData.ShardId, - ) - if err != nil { - return err - } - - msg = fmt.Sprintf("%s\n%s", msg, core.ToB64(shardMiniBlockHeader.Hash)) - } - } - - if len(msg) > 0 { - log.Debug(fmt.Sprintf("the following miniblocks hashes were successfully processed:%s\n", msg)) - } - - return nil + hdrPool := mp.dataPool.ShardHeaders() + + msg := "" + + for i := 0; i < len(header.ShardInfo); i++ { + shardData := header.ShardInfo[i] + for j := 0; j < len(shardData.ShardMiniBlockHeaders); j++ { + if haveTime() < 0 { + return process.ErrTimeIsOut + } + + headerHash := shardData.HeaderHash + shardMiniBlockHeader := &shardData.ShardMiniBlockHeaders[j] + err := mp.checkAndProcessShardMiniBlockHeader( + headerHash, + shardMiniBlockHeader, + hdrPool, + round, + shardData.ShardId, + ) + if err != nil { + return err + } + + msg = 
fmt.Sprintf("%s\n%s", msg, core.ToB64(shardMiniBlockHeader.Hash)) + } + } + + if len(msg) > 0 { + log.Debug(fmt.Sprintf("the following miniblocks hashes were successfully processed:%s\n", msg)) + } + + return nil } // CommitBlock commits the block in the blockchain if everything was checked successfully func (mp *metaProcessor) CommitBlock( - chainHandler data.ChainHandler, - headerHandler data.HeaderHandler, - bodyHandler data.BodyHandler, + chainHandler data.ChainHandler, + headerHandler data.HeaderHandler, + bodyHandler data.BodyHandler, ) error { - var err error - defer func() { - if err != nil { - mp.RevertAccountState() - } - }() - - tempHeaderPool := make(map[string]*block.Header) - - err = checkForNils(chainHandler, headerHandler, bodyHandler) - if err != nil { - return err - } - - log.Debug(fmt.Sprintf("started committing block with round %d and nonce %d\n", - headerHandler.GetRound(), - headerHandler.GetNonce())) - - err = mp.checkBlockValidity(chainHandler, headerHandler, bodyHandler) - if err != nil { - return err - } - - header, ok := headerHandler.(*block.MetaBlock) - if !ok { - err = process.ErrWrongTypeAssertion - return err - } - - buff, err := mp.marshalizer.Marshal(header) - if err != nil { - return err - } - - headerHash := mp.hasher.Compute(string(buff)) - nonceToByteSlice := mp.uint64Converter.ToByteSlice(header.Nonce) - errNotCritical := mp.store.Put(dataRetriever.MetaHdrNonceHashDataUnit, nonceToByteSlice, headerHash) - log.LogIfError(errNotCritical) - - errNotCritical = mp.store.Put(dataRetriever.MetaBlockUnit, headerHash, buff) - log.LogIfError(errNotCritical) - - headerNoncePool := mp.dataPool.HeadersNonces() - if headerNoncePool == nil { - err = process.ErrNilDataPoolHolder - return err - } - - //TODO: Should be analyzed if put in pool is really necessary or not (right now there is no action of removing them) - syncMap := &dataPool.ShardIdHashSyncMap{} - syncMap.Store(headerHandler.GetShardID(), headerHash) - headerNoncePool.Merge(headerHandler.GetNonce(), syncMap) - - body, ok := bodyHandler.(*block.MetaBlockBody) - if !ok { - err = process.ErrWrongTypeAssertion - return err - } - - for i := 0; i < len(header.ShardInfo); i++ { - shardData := header.ShardInfo[i] - header, err := process.GetShardHeaderFromPool(shardData.HeaderHash, mp.dataPool.ShardHeaders()) - if header == nil { - return err - } - - mp.updateShardHeadersNonce(shardData.ShardId, header.Nonce) - - tempHeaderPool[string(shardData.HeaderHash)] = header - - buff, err = mp.marshalizer.Marshal(header) - if err != nil { - return err - } - - nonceToByteSlice := mp.uint64Converter.ToByteSlice(header.Nonce) - hdrNonceHashDataUnit := dataRetriever.ShardHdrNonceHashDataUnit + dataRetriever.UnitType(header.ShardId) - errNotCritical = mp.store.Put(hdrNonceHashDataUnit, nonceToByteSlice, shardData.HeaderHash) - log.LogIfError(errNotCritical) - - errNotCritical = mp.store.Put(dataRetriever.BlockHeaderUnit, shardData.HeaderHash, buff) - log.LogIfError(errNotCritical) - } - - mp.saveMetricCrossCheckBlockHeight() - - err = mp.saveLastNotarizedHeader(header) - if err != nil { - return err - } - - _, err = mp.accounts.Commit() - if err != nil { - return err - } - - log.Info(fmt.Sprintf("metaBlock with nonce %d and hash %s has been committed successfully\n", - header.Nonce, - core.ToB64(headerHash))) - - errNotCritical = mp.removeBlockInfoFromPool(header) - if errNotCritical != nil { - log.Info(errNotCritical.Error()) - } - - errNotCritical = mp.forkDetector.AddHeader(header, headerHash, process.BHProcessed, nil, nil) 
- if errNotCritical != nil { - log.Info(errNotCritical.Error()) - } - - hdrsToAttestFinality := mp.nextKValidity - mp.removeNotarizedHdrsBehindFinal(hdrsToAttestFinality) - - err = chainHandler.SetCurrentBlockBody(body) - if err != nil { - return err - } + var err error + defer func() { + if err != nil { + mp.RevertAccountState() + } + }() + + tempHeaderPool := make(map[string]*block.Header) + + err = checkForNils(chainHandler, headerHandler, bodyHandler) + if err != nil { + return err + } + + log.Debug(fmt.Sprintf("started committing block with round %d and nonce %d\n", + headerHandler.GetRound(), + headerHandler.GetNonce())) + + err = mp.checkBlockValidity(chainHandler, headerHandler, bodyHandler) + if err != nil { + return err + } + + header, ok := headerHandler.(*block.MetaBlock) + if !ok { + err = process.ErrWrongTypeAssertion + return err + } + + buff, err := mp.marshalizer.Marshal(header) + if err != nil { + return err + } + + headerHash := mp.hasher.Compute(string(buff)) + nonceToByteSlice := mp.uint64Converter.ToByteSlice(header.Nonce) + errNotCritical := mp.store.Put(dataRetriever.MetaHdrNonceHashDataUnit, nonceToByteSlice, headerHash) + log.LogIfError(errNotCritical) + + errNotCritical = mp.store.Put(dataRetriever.MetaBlockUnit, headerHash, buff) + log.LogIfError(errNotCritical) + + headerNoncePool := mp.dataPool.HeadersNonces() + if headerNoncePool == nil { + err = process.ErrNilDataPoolHolder + return err + } + + //TODO: Should be analyzed if put in pool is really necessary or not (right now there is no action of removing them) + syncMap := &dataPool.ShardIdHashSyncMap{} + syncMap.Store(headerHandler.GetShardID(), headerHash) + headerNoncePool.Merge(headerHandler.GetNonce(), syncMap) + + body, ok := bodyHandler.(*block.MetaBlockBody) + if !ok { + err = process.ErrWrongTypeAssertion + return err + } + + for i := 0; i < len(header.ShardInfo); i++ { + shardData := header.ShardInfo[i] + header, err := process.GetShardHeaderFromPool(shardData.HeaderHash, mp.dataPool.ShardHeaders()) + if header == nil { + return err + } + + mp.updateShardHeadersNonce(shardData.ShardId, header.Nonce) + + tempHeaderPool[string(shardData.HeaderHash)] = header + + buff, err = mp.marshalizer.Marshal(header) + if err != nil { + return err + } + + nonceToByteSlice := mp.uint64Converter.ToByteSlice(header.Nonce) + hdrNonceHashDataUnit := dataRetriever.ShardHdrNonceHashDataUnit + dataRetriever.UnitType(header.ShardId) + errNotCritical = mp.store.Put(hdrNonceHashDataUnit, nonceToByteSlice, shardData.HeaderHash) + log.LogIfError(errNotCritical) + + errNotCritical = mp.store.Put(dataRetriever.BlockHeaderUnit, shardData.HeaderHash, buff) + log.LogIfError(errNotCritical) + } + + mp.saveMetricCrossCheckBlockHeight() + + err = mp.saveLastNotarizedHeader(header) + if err != nil { + return err + } + + _, err = mp.accounts.Commit() + if err != nil { + return err + } + + log.Info(fmt.Sprintf("metaBlock with nonce %d and hash %s has been committed successfully\n", + header.Nonce, + core.ToB64(headerHash))) + + errNotCritical = mp.removeBlockInfoFromPool(header) + if errNotCritical != nil { + log.Info(errNotCritical.Error()) + } + + errNotCritical = mp.forkDetector.AddHeader(header, headerHash, process.BHProcessed, nil, nil) + if errNotCritical != nil { + log.Info(errNotCritical.Error()) + } + + hdrsToAttestFinality := mp.nextKValidity + mp.removeNotarizedHdrsBehindFinal(hdrsToAttestFinality) + + err = chainHandler.SetCurrentBlockBody(body) + if err != nil { + return err + } - err = 
chainHandler.SetCurrentBlockHeader(header) - if err != nil { - return err - } + err = chainHandler.SetCurrentBlockHeader(header) + if err != nil { + return err + } - chainHandler.SetCurrentBlockHeaderHash(headerHash) - - if mp.core != nil && mp.core.TPSBenchmark() != nil { - mp.core.TPSBenchmark().Update(header) - } + chainHandler.SetCurrentBlockHeaderHash(headerHash) + + if mp.core != nil && mp.core.TPSBenchmark() != nil { + mp.core.TPSBenchmark().Update(header) + } - mp.indexBlock(header, tempHeaderPool) + mp.indexBlock(header, tempHeaderPool) - go mp.displayMetaBlock(header) + go mp.displayMetaBlock(header) - mp.blockSizeThrottler.Succeed(header.Round) + mp.blockSizeThrottler.Succeed(header.Round) - return nil + return nil } func (mp *metaProcessor) updateShardHeadersNonce(key uint32, value uint64) { - valueStoredI, ok := mp.shardsHeadersNonce.Load(key) - if !ok { - mp.shardsHeadersNonce.Store(key, value) - return - } - - valueStored, ok := valueStoredI.(uint64) - if !ok { - mp.shardsHeadersNonce.Store(key, value) - return - } - - if valueStored < value { - mp.shardsHeadersNonce.Store(key, value) - } + valueStoredI, ok := mp.shardsHeadersNonce.Load(key) + if !ok { + mp.shardsHeadersNonce.Store(key, value) + return + } + + valueStored, ok := valueStoredI.(uint64) + if !ok { + mp.shardsHeadersNonce.Store(key, value) + return + } + + if valueStored < value { + mp.shardsHeadersNonce.Store(key, value) + } } func (mp *metaProcessor) saveMetricCrossCheckBlockHeight() { - crossCheckBlockHeight := "" - for i := uint32(0); i < mp.shardCoordinator.NumberOfShards(); i++ { - valueStoredI, ok := mp.shardsHeadersNonce.Load(i) - if !ok { - continue - } - - valueStored, ok := valueStoredI.(uint64) - if !ok { - continue - } - - if i > 0 { - crossCheckBlockHeight += ", " - } - - crossCheckBlockHeight += fmt.Sprintf("%d: %d", i, valueStored) - } - - mp.appStatusHandler.SetStringValue(core.MetricCrossCheckBlockHeight, crossCheckBlockHeight) + crossCheckBlockHeight := "" + for i := uint32(0); i < mp.shardCoordinator.NumberOfShards(); i++ { + valueStoredI, ok := mp.shardsHeadersNonce.Load(i) + if !ok { + continue + } + + valueStored, ok := valueStoredI.(uint64) + if !ok { + continue + } + + if i > 0 { + crossCheckBlockHeight += ", " + } + + crossCheckBlockHeight += fmt.Sprintf("%d: %d", i, valueStored) + } + + mp.appStatusHandler.SetStringValue(core.MetricCrossCheckBlockHeight, crossCheckBlockHeight) } func (mp *metaProcessor) saveLastNotarizedHeader(header *block.MetaBlock) error { - mp.mutNotarizedHdrs.Lock() - defer mp.mutNotarizedHdrs.Unlock() - - if mp.notarizedHdrs == nil { - return process.ErrNotarizedHdrsSliceIsNil - } - - tmpLastNotarizedHdrForShard := make(map[uint32]data.HeaderHandler, mp.shardCoordinator.NumberOfShards()) - for i := uint32(0); i < mp.shardCoordinator.NumberOfShards(); i++ { - tmpLastNotarizedHdrForShard[i] = mp.lastNotarizedHdrForShard(i) - } - - for i := 0; i < len(header.ShardInfo); i++ { - shardData := header.ShardInfo[i] - header, err := process.GetShardHeaderFromPool(shardData.HeaderHash, mp.dataPool.ShardHeaders()) - if err != nil { - return err - } - - if tmpLastNotarizedHdrForShard[header.ShardId].GetNonce() < header.Nonce { - tmpLastNotarizedHdrForShard[header.ShardId] = header - } - } - - for i := uint32(0); i < mp.shardCoordinator.NumberOfShards(); i++ { - mp.notarizedHdrs[i] = append(mp.notarizedHdrs[i], tmpLastNotarizedHdrForShard[i]) - DisplayLastNotarized(mp.marshalizer, mp.hasher, tmpLastNotarizedHdrForShard[i], i) - } - - return nil + mp.mutNotarizedHdrs.Lock() + 
defer mp.mutNotarizedHdrs.Unlock() + + if mp.notarizedHdrs == nil { + return process.ErrNotarizedHdrsSliceIsNil + } + + tmpLastNotarizedHdrForShard := make(map[uint32]data.HeaderHandler, mp.shardCoordinator.NumberOfShards()) + for i := uint32(0); i < mp.shardCoordinator.NumberOfShards(); i++ { + tmpLastNotarizedHdrForShard[i] = mp.lastNotarizedHdrForShard(i) + } + + for i := 0; i < len(header.ShardInfo); i++ { + shardData := header.ShardInfo[i] + header, err := process.GetShardHeaderFromPool(shardData.HeaderHash, mp.dataPool.ShardHeaders()) + if err != nil { + return err + } + + if tmpLastNotarizedHdrForShard[header.ShardId].GetNonce() < header.Nonce { + tmpLastNotarizedHdrForShard[header.ShardId] = header + } + } + + for i := uint32(0); i < mp.shardCoordinator.NumberOfShards(); i++ { + mp.notarizedHdrs[i] = append(mp.notarizedHdrs[i], tmpLastNotarizedHdrForShard[i]) + DisplayLastNotarized(mp.marshalizer, mp.hasher, tmpLastNotarizedHdrForShard[i], i) + } + + return nil } // gets all the headers from the metablock in sorted order per shard func (mp *metaProcessor) getSortedShardHdrsFromMetablock(metaBlock *block.MetaBlock) (map[uint32][]*block.Header, error) { - sortedShardHdrs := make(map[uint32][]*block.Header, mp.shardCoordinator.NumberOfShards()) - - requestedHeaders := 0 - for i := 0; i < len(metaBlock.ShardInfo); i++ { - shardData := metaBlock.ShardInfo[i] - header, err := process.GetShardHeaderFromPool(shardData.HeaderHash, mp.dataPool.ShardHeaders()) - if err != nil { - log.Debug(err.Error()) - requestedHeaders++ - go mp.onRequestHeaderHandler(shardData.ShardId, shardData.HeaderHash) - continue - } - - sortedShardHdrs[shardData.ShardId] = append(sortedShardHdrs[shardData.ShardId], header) - } - - if requestedHeaders > 0 { - return nil, process.ErrMissingHeader - } - - for shId := uint32(0); shId < mp.shardCoordinator.NumberOfShards(); shId++ { - hdrsForShard := sortedShardHdrs[shId] - if len(hdrsForShard) <= 1 { - continue - } - - sort.Slice(hdrsForShard, func(i, j int) bool { - return hdrsForShard[i].GetNonce() < hdrsForShard[j].GetNonce() - }) - } - - return sortedShardHdrs, nil + sortedShardHdrs := make(map[uint32][]*block.Header, mp.shardCoordinator.NumberOfShards()) + + requestedHeaders := 0 + for i := 0; i < len(metaBlock.ShardInfo); i++ { + shardData := metaBlock.ShardInfo[i] + header, err := process.GetShardHeaderFromPool(shardData.HeaderHash, mp.dataPool.ShardHeaders()) + if err != nil { + log.Debug(err.Error()) + requestedHeaders++ + go mp.onRequestHeaderHandler(shardData.ShardId, shardData.HeaderHash) + continue + } + + sortedShardHdrs[shardData.ShardId] = append(sortedShardHdrs[shardData.ShardId], header) + } + + if requestedHeaders > 0 { + return nil, process.ErrMissingHeader + } + + for shId := uint32(0); shId < mp.shardCoordinator.NumberOfShards(); shId++ { + hdrsForShard := sortedShardHdrs[shId] + if len(hdrsForShard) <= 1 { + continue + } + + sort.Slice(hdrsForShard, func(i, j int) bool { + return hdrsForShard[i].GetNonce() < hdrsForShard[j].GetNonce() + }) + } + + return sortedShardHdrs, nil } // check if shard headers were signed and constructed correctly and returns headers which has to be // checked for finality func (mp *metaProcessor) checkShardHeadersValidity(header *block.MetaBlock) (map[uint32]data.HeaderHandler, error) { - mp.mutNotarizedHdrs.RLock() - if mp.notarizedHdrs == nil { - mp.mutNotarizedHdrs.RUnlock() - return nil, process.ErrNotarizedHdrsSliceIsNil - } - - tmpLastNotarized := make(map[uint32]data.HeaderHandler, 
mp.shardCoordinator.NumberOfShards()) - for i := uint32(0); i < mp.shardCoordinator.NumberOfShards(); i++ { - tmpLastNotarized[i] = mp.lastNotarizedHdrForShard(i) - } - mp.mutNotarizedHdrs.RUnlock() - - sortedShardHdrs, err := mp.getSortedShardHdrsFromMetablock(header) - if err != nil { - return nil, err - } - - highestNonceHdrs := make(map[uint32]data.HeaderHandler, mp.shardCoordinator.NumberOfShards()) - for shId := uint32(0); shId < mp.shardCoordinator.NumberOfShards(); shId++ { - hdrsForShard := sortedShardHdrs[shId] - if len(hdrsForShard) == 0 { - continue - } - - for i := 0; i < len(hdrsForShard); i++ { - err := mp.isHdrConstructionValid(hdrsForShard[i], tmpLastNotarized[shId]) - if err != nil { - return nil, err - } - tmpLastNotarized[shId] = hdrsForShard[i] - highestNonceHdrs[shId] = hdrsForShard[i] - } - } - - return highestNonceHdrs, nil + mp.mutNotarizedHdrs.RLock() + if mp.notarizedHdrs == nil { + mp.mutNotarizedHdrs.RUnlock() + return nil, process.ErrNotarizedHdrsSliceIsNil + } + + tmpLastNotarized := make(map[uint32]data.HeaderHandler, mp.shardCoordinator.NumberOfShards()) + for i := uint32(0); i < mp.shardCoordinator.NumberOfShards(); i++ { + tmpLastNotarized[i] = mp.lastNotarizedHdrForShard(i) + } + mp.mutNotarizedHdrs.RUnlock() + + sortedShardHdrs, err := mp.getSortedShardHdrsFromMetablock(header) + if err != nil { + return nil, err + } + + highestNonceHdrs := make(map[uint32]data.HeaderHandler, mp.shardCoordinator.NumberOfShards()) + for shId := uint32(0); shId < mp.shardCoordinator.NumberOfShards(); shId++ { + hdrsForShard := sortedShardHdrs[shId] + if len(hdrsForShard) == 0 { + continue + } + + for i := 0; i < len(hdrsForShard); i++ { + err := mp.isHdrConstructionValid(hdrsForShard[i], tmpLastNotarized[shId]) + if err != nil { + return nil, err + } + tmpLastNotarized[shId] = hdrsForShard[i] + highestNonceHdrs[shId] = hdrsForShard[i] + } + } + + return highestNonceHdrs, nil } // check if shard headers are final by checking if newer headers were constructed upon them func (mp *metaProcessor) checkShardHeadersFinality(header *block.MetaBlock, highestNonceHdrs map[uint32]data.HeaderHandler) error { - if header == nil { - return process.ErrNilBlockHeader - } - - //TODO: change this to look at the pool where values are saved by prevHash. can be done after resolver is done - _, _, sortedHdrPerShard, err := mp.getOrderedHdrs(header.GetRound()) - if err != nil { - return err - } - - for index, lastVerifiedHdr := range highestNonceHdrs { - if index != lastVerifiedHdr.GetShardID() { - return process.ErrShardIdMissmatch - } - - // verify if there are "K" block after current to make this one final - nextBlocksVerified := uint32(0) - shId := lastVerifiedHdr.GetShardID() - for i := 0; i < len(sortedHdrPerShard[shId]); i++ { - if nextBlocksVerified >= mp.nextKValidity { - break - } - - // found a header with the next nonce - tmpHdr := sortedHdrPerShard[shId][i] - if tmpHdr.GetNonce() == lastVerifiedHdr.GetNonce()+1 { - err := mp.isHdrConstructionValid(tmpHdr, lastVerifiedHdr) - if err != nil { - continue - } - - lastVerifiedHdr = tmpHdr - nextBlocksVerified += 1 - } - } - - if nextBlocksVerified < mp.nextKValidity { - go mp.onRequestHeaderHandlerByNonce(lastVerifiedHdr.GetShardID(), lastVerifiedHdr.GetNonce()+1) - return process.ErrHeaderNotFinal - } - } - - return nil + if header == nil { + return process.ErrNilBlockHeader + } + + //TODO: change this to look at the pool where values are saved by prevHash. 
can be done after resolver is done + _, _, sortedHdrPerShard, err := mp.getOrderedHdrs(header.GetRound()) + if err != nil { + return err + } + + for index, lastVerifiedHdr := range highestNonceHdrs { + if index != lastVerifiedHdr.GetShardID() { + return process.ErrShardIdMissmatch + } + + // verify if there are "K" block after current to make this one final + nextBlocksVerified := uint32(0) + shId := lastVerifiedHdr.GetShardID() + for i := 0; i < len(sortedHdrPerShard[shId]); i++ { + if nextBlocksVerified >= mp.nextKValidity { + break + } + + // found a header with the next nonce + tmpHdr := sortedHdrPerShard[shId][i] + if tmpHdr.GetNonce() == lastVerifiedHdr.GetNonce()+1 { + err := mp.isHdrConstructionValid(tmpHdr, lastVerifiedHdr) + if err != nil { + continue + } + + lastVerifiedHdr = tmpHdr + nextBlocksVerified += 1 + } + } + + if nextBlocksVerified < mp.nextKValidity { + go mp.onRequestHeaderHandlerByNonce(lastVerifiedHdr.GetShardID(), lastVerifiedHdr.GetNonce()+1) + return process.ErrHeaderNotFinal + } + } + + return nil } func (mp *metaProcessor) isShardHeaderValidFinal(currHdr *block.Header, lastHdr *block.Header, sortedShardHdrs []*block.Header) (bool, []uint32) { - if currHdr == nil { - return false, nil - } - if sortedShardHdrs == nil { - return false, nil - } - if lastHdr == nil { - return false, nil - } - - err := mp.isHdrConstructionValid(currHdr, lastHdr) - if err != nil { - return false, nil - } - - // verify if there are "K" block after current to make this one final - lastVerifiedHdr := currHdr - nextBlocksVerified := uint32(0) - hdrIds := make([]uint32, 0) - for i := 0; i < len(sortedShardHdrs); i++ { - if nextBlocksVerified >= mp.nextKValidity { - return true, hdrIds - } - - // found a header with the next nonce - tmpHdr := sortedShardHdrs[i] - if tmpHdr.GetNonce() == lastVerifiedHdr.GetNonce()+1 { - err := mp.isHdrConstructionValid(tmpHdr, lastVerifiedHdr) - if err != nil { - continue - } - - lastVerifiedHdr = tmpHdr - nextBlocksVerified += 1 - hdrIds = append(hdrIds, uint32(i)) - } - } - - if nextBlocksVerified >= mp.nextKValidity { - return true, hdrIds - } - - return false, nil + if currHdr == nil { + return false, nil + } + if sortedShardHdrs == nil { + return false, nil + } + if lastHdr == nil { + return false, nil + } + + err := mp.isHdrConstructionValid(currHdr, lastHdr) + if err != nil { + return false, nil + } + + // verify if there are "K" block after current to make this one final + lastVerifiedHdr := currHdr + nextBlocksVerified := uint32(0) + hdrIds := make([]uint32, 0) + for i := 0; i < len(sortedShardHdrs); i++ { + if nextBlocksVerified >= mp.nextKValidity { + return true, hdrIds + } + + // found a header with the next nonce + tmpHdr := sortedShardHdrs[i] + if tmpHdr.GetNonce() == lastVerifiedHdr.GetNonce()+1 { + err := mp.isHdrConstructionValid(tmpHdr, lastVerifiedHdr) + if err != nil { + continue + } + + lastVerifiedHdr = tmpHdr + nextBlocksVerified += 1 + hdrIds = append(hdrIds, uint32(i)) + } + } + + if nextBlocksVerified >= mp.nextKValidity { + return true, hdrIds + } + + return false, nil } // receivedHeader is a call back function which is called when a new header // is added in the headers pool func (mp *metaProcessor) receivedHeader(headerHash []byte) { - shardHdrsCache := mp.dataPool.ShardHeaders() - if shardHdrsCache == nil { - return - } - - shardHdrsNoncesCache := mp.dataPool.HeadersNonces() - if shardHdrsNoncesCache == nil && mp.nextKValidity > 0 { - return - } - - obj, ok := shardHdrsCache.Peek(headerHash) - if !ok { - return - } - - 
header, ok := obj.(data.HeaderHandler) - if !ok { - return - } - - log.Debug(fmt.Sprintf("received header with hash %s and nonce %d from network\n", - core.ToB64(headerHash), - header.GetNonce())) - - mp.mutRequestedShardHdrsHashes.Lock() - - if !mp.allNeededShardHdrsFound { - if mp.requestedShardHdrsHashes[string(headerHash)] { - delete(mp.requestedShardHdrsHashes, string(headerHash)) - - if header.GetNonce() > mp.currHighestShardHdrsNonces[header.GetShardID()] { - mp.currHighestShardHdrsNonces[header.GetShardID()] = header.GetNonce() - } - } - - lenReqShardHdrsHashes := len(mp.requestedShardHdrsHashes) - areFinalAttestingHdrsInCache := false - if lenReqShardHdrsHashes == 0 { - requestedBlockHeaders := mp.requestFinalMissingHeaders() - if requestedBlockHeaders == 0 { - areFinalAttestingHdrsInCache = true - } else { - log.Info(fmt.Sprintf("requested %d missing final shard headers\n", requestedBlockHeaders)) - } - } - - mp.allNeededShardHdrsFound = lenReqShardHdrsHashes == 0 && areFinalAttestingHdrsInCache - - mp.mutRequestedShardHdrsHashes.Unlock() - - if lenReqShardHdrsHashes == 0 && areFinalAttestingHdrsInCache { - mp.chRcvAllHdrs <- true - } - } else { - mp.mutRequestedShardHdrsHashes.Unlock() - } + shardHdrsCache := mp.dataPool.ShardHeaders() + if shardHdrsCache == nil { + return + } + + shardHdrsNoncesCache := mp.dataPool.HeadersNonces() + if shardHdrsNoncesCache == nil && mp.nextKValidity > 0 { + return + } + + obj, ok := shardHdrsCache.Peek(headerHash) + if !ok { + return + } + + header, ok := obj.(data.HeaderHandler) + if !ok { + return + } + + log.Debug(fmt.Sprintf("received header with hash %s and nonce %d from network\n", + core.ToB64(headerHash), + header.GetNonce())) + + mp.mutRequestedShardHdrsHashes.Lock() + + if !mp.allNeededShardHdrsFound { + if mp.requestedShardHdrsHashes[string(headerHash)] { + delete(mp.requestedShardHdrsHashes, string(headerHash)) + + if header.GetNonce() > mp.currHighestShardHdrsNonces[header.GetShardID()] { + mp.currHighestShardHdrsNonces[header.GetShardID()] = header.GetNonce() + } + } + + lenReqShardHdrsHashes := len(mp.requestedShardHdrsHashes) + areFinalAttestingHdrsInCache := false + if lenReqShardHdrsHashes == 0 { + requestedBlockHeaders := mp.requestFinalMissingHeaders() + if requestedBlockHeaders == 0 { + areFinalAttestingHdrsInCache = true + } else { + log.Info(fmt.Sprintf("requested %d missing final shard headers\n", requestedBlockHeaders)) + } + } + + mp.allNeededShardHdrsFound = lenReqShardHdrsHashes == 0 && areFinalAttestingHdrsInCache + + mp.mutRequestedShardHdrsHashes.Unlock() + + if lenReqShardHdrsHashes == 0 && areFinalAttestingHdrsInCache { + mp.chRcvAllHdrs <- true + } + } else { + mp.mutRequestedShardHdrsHashes.Unlock() + } } // requestFinalMissingHeaders requests the headers needed to accept the current selected headers for processing the // current block. 
It requests the nextKValidity headers greater than the highest shard header, for each shard, related // to the block which should be processed func (mp *metaProcessor) requestFinalMissingHeaders() uint32 { - requestedBlockHeaders := uint32(0) - for shardId := uint32(0); shardId < mp.shardCoordinator.NumberOfShards(); shardId++ { - for i := mp.currHighestShardHdrsNonces[shardId] + 1; i <= mp.currHighestShardHdrsNonces[shardId]+uint64(mp.nextKValidity); i++ { - if mp.currHighestShardHdrsNonces[shardId] == uint64(0) { - continue - } - - _, _, err := process.GetShardHeaderFromPoolWithNonce( - i, - shardId, - mp.dataPool.ShardHeaders(), - mp.dataPool.HeadersNonces()) - if err != nil { - requestedBlockHeaders++ - go mp.onRequestHeaderHandlerByNonce(shardId, i) - } - } - } - - return requestedBlockHeaders + requestedBlockHeaders := uint32(0) + for shardId := uint32(0); shardId < mp.shardCoordinator.NumberOfShards(); shardId++ { + for i := mp.currHighestShardHdrsNonces[shardId] + 1; i <= mp.currHighestShardHdrsNonces[shardId]+uint64(mp.nextKValidity); i++ { + if mp.currHighestShardHdrsNonces[shardId] == uint64(0) { + continue + } + + _, _, err := process.GetShardHeaderFromPoolWithNonce( + i, + shardId, + mp.dataPool.ShardHeaders(), + mp.dataPool.HeadersNonces()) + if err != nil { + requestedBlockHeaders++ + go mp.onRequestHeaderHandlerByNonce(shardId, i) + } + } + } + + return requestedBlockHeaders } func (mp *metaProcessor) requestShardHeaders(metaBlock *block.MetaBlock) (uint32, uint32) { - mp.mutRequestedShardHdrsHashes.Lock() - - mp.allNeededShardHdrsFound = true - - if len(metaBlock.ShardInfo) == 0 { - mp.mutRequestedShardHdrsHashes.Unlock() - return 0, 0 - } - - missingHeaderHashes := mp.computeMissingHeaders(metaBlock) - - requestedBlockHeaders := uint32(0) - mp.requestedShardHdrsHashes = make(map[string]bool) - for shardId, headerHashes := range missingHeaderHashes { - for _, headerHash := range headerHashes { - requestedBlockHeaders++ - mp.requestedShardHdrsHashes[string(headerHash)] = true - go mp.onRequestHeaderHandler(shardId, headerHash) - } - } - - requestedFinalBlockHeaders := uint32(0) - if requestedBlockHeaders > 0 { - mp.allNeededShardHdrsFound = false - } else { - requestedFinalBlockHeaders = mp.requestFinalMissingHeaders() - if requestedFinalBlockHeaders > 0 { - mp.allNeededShardHdrsFound = false - } - } - - if !mp.allNeededShardHdrsFound { - process.EmptyChannel(mp.chRcvAllHdrs) - } - - mp.mutRequestedShardHdrsHashes.Unlock() - - return requestedBlockHeaders, requestedFinalBlockHeaders + mp.mutRequestedShardHdrsHashes.Lock() + + mp.allNeededShardHdrsFound = true + + if len(metaBlock.ShardInfo) == 0 { + mp.mutRequestedShardHdrsHashes.Unlock() + return 0, 0 + } + + missingHeaderHashes := mp.computeMissingHeaders(metaBlock) + + requestedBlockHeaders := uint32(0) + mp.requestedShardHdrsHashes = make(map[string]bool) + for shardId, headerHashes := range missingHeaderHashes { + for _, headerHash := range headerHashes { + requestedBlockHeaders++ + mp.requestedShardHdrsHashes[string(headerHash)] = true + go mp.onRequestHeaderHandler(shardId, headerHash) + } + } + + requestedFinalBlockHeaders := uint32(0) + if requestedBlockHeaders > 0 { + mp.allNeededShardHdrsFound = false + } else { + requestedFinalBlockHeaders = mp.requestFinalMissingHeaders() + if requestedFinalBlockHeaders > 0 { + mp.allNeededShardHdrsFound = false + } + } + + if !mp.allNeededShardHdrsFound { + process.EmptyChannel(mp.chRcvAllHdrs) + } + + mp.mutRequestedShardHdrsHashes.Unlock() + + return requestedBlockHeaders, 
requestedFinalBlockHeaders } func (mp *metaProcessor) computeMissingHeaders(metaBlock *block.MetaBlock) map[uint32][][]byte { - missingHeaders := make(map[uint32][][]byte) - mp.currHighestShardHdrsNonces = make(map[uint32]uint64, mp.shardCoordinator.NumberOfShards()) - for i := uint32(0); i < mp.shardCoordinator.NumberOfShards(); i++ { - mp.currHighestShardHdrsNonces[i] = uint64(0) - } - - for i := 0; i < len(metaBlock.ShardInfo); i++ { - shardData := metaBlock.ShardInfo[i] - hdr, err := process.GetShardHeaderFromPool( - shardData.HeaderHash, - mp.dataPool.ShardHeaders()) - if err != nil { - missingHeaders[shardData.ShardId] = append(missingHeaders[shardData.ShardId], shardData.HeaderHash) - continue - } - - if hdr.Nonce > mp.currHighestShardHdrsNonces[shardData.ShardId] { - mp.currHighestShardHdrsNonces[shardData.ShardId] = hdr.Nonce - } - } - - return missingHeaders + missingHeaders := make(map[uint32][][]byte) + mp.currHighestShardHdrsNonces = make(map[uint32]uint64, mp.shardCoordinator.NumberOfShards()) + for i := uint32(0); i < mp.shardCoordinator.NumberOfShards(); i++ { + mp.currHighestShardHdrsNonces[i] = uint64(0) + } + + for i := 0; i < len(metaBlock.ShardInfo); i++ { + shardData := metaBlock.ShardInfo[i] + hdr, err := process.GetShardHeaderFromPool( + shardData.HeaderHash, + mp.dataPool.ShardHeaders()) + if err != nil { + missingHeaders[shardData.ShardId] = append(missingHeaders[shardData.ShardId], shardData.HeaderHash) + continue + } + + if hdr.Nonce > mp.currHighestShardHdrsNonces[shardData.ShardId] { + mp.currHighestShardHdrsNonces[shardData.ShardId] = hdr.Nonce + } + } + + return missingHeaders } func (mp *metaProcessor) checkAndProcessShardMiniBlockHeader( - headerHash []byte, - shardMiniBlockHeader *block.ShardMiniBlockHeader, - hdrPool storage.Cacher, - round uint64, - shardId uint32, + headerHash []byte, + shardMiniBlockHeader *block.ShardMiniBlockHeader, + hdrPool storage.Cacher, + round uint64, + shardId uint32, ) error { - if hdrPool == nil || hdrPool.IsInterfaceNil() { - return process.ErrNilHeadersDataPool - } - // TODO: real processing has to be done here, using metachain state - return nil + if hdrPool == nil || hdrPool.IsInterfaceNil() { + return process.ErrNilHeadersDataPool + } + // TODO: real processing has to be done here, using metachain state + return nil } func (mp *metaProcessor) createShardInfo( - maxItemsInBlock uint32, - round uint64, - haveTime func() bool, + maxItemsInBlock uint32, + round uint64, + haveTime func() bool, ) ([]block.ShardData, error) { - shardInfo := make([]block.ShardData, 0) - lastPushedHdr := make(map[uint32]data.HeaderHandler, mp.shardCoordinator.NumberOfShards()) - - if mp.accounts.JournalLen() != 0 { - return nil, process.ErrAccountStateDirty - } - - if !haveTime() { - log.Info(fmt.Sprintf("time is up after entered in createShardInfo method\n")) - return shardInfo, nil - } - - hdrPool := mp.dataPool.ShardHeaders() - if hdrPool == nil { - return nil, process.ErrNilHeadersDataPool - } - - mbHdrs := uint32(0) - - timeBefore := time.Now() - orderedHdrs, orderedHdrHashes, sortedHdrPerShard, err := mp.getOrderedHdrs(round) - timeAfter := time.Now() - - if !haveTime() { - log.Info(fmt.Sprintf("time is up after ordered %d hdrs in %v sec\n", len(orderedHdrs), timeAfter.Sub(timeBefore).Seconds())) - return shardInfo, nil - } - - log.Debug(fmt.Sprintf("time elapsed to ordered %d hdrs: %v sec\n", len(orderedHdrs), timeAfter.Sub(timeBefore).Seconds())) - - if err != nil { - return nil, err - } - - log.Info(fmt.Sprintf("creating shard info has 
been started: have %d hdrs in pool\n", len(orderedHdrs))) - - // save last committed hdr for verification - mp.mutNotarizedHdrs.RLock() - if mp.notarizedHdrs == nil { - mp.mutNotarizedHdrs.RUnlock() - return nil, process.ErrNotarizedHdrsSliceIsNil - } - for shardId := uint32(0); shardId < mp.shardCoordinator.NumberOfShards(); shardId++ { - lastPushedHdr[shardId] = mp.lastNotarizedHdrForShard(shardId) - } - mp.mutNotarizedHdrs.RUnlock() - - for index := range orderedHdrs { - shId := orderedHdrs[index].ShardId - - lastHdr, ok := lastPushedHdr[shId].(*block.Header) - if !ok { - continue - } - - isFinal, _ := mp.isShardHeaderValidFinal(orderedHdrs[index], lastHdr, sortedHdrPerShard[shId]) - if !isFinal { - continue - } - - lastPushedHdr[shId] = orderedHdrs[index] - - shardData := block.ShardData{} - shardData.ShardMiniBlockHeaders = make([]block.ShardMiniBlockHeader, 0) - shardData.TxCount = orderedHdrs[index].TxCount - shardData.ShardId = orderedHdrs[index].ShardId - shardData.HeaderHash = orderedHdrHashes[index] - - snapshot := mp.accounts.JournalLen() - - for i := 0; i < len(orderedHdrs[index].MiniBlockHeaders); i++ { - if !haveTime() { - break - } - - shardMiniBlockHeader := block.ShardMiniBlockHeader{} - shardMiniBlockHeader.SenderShardId = orderedHdrs[index].MiniBlockHeaders[i].SenderShardID - shardMiniBlockHeader.ReceiverShardId = orderedHdrs[index].MiniBlockHeaders[i].ReceiverShardID - shardMiniBlockHeader.Hash = orderedHdrs[index].MiniBlockHeaders[i].Hash - shardMiniBlockHeader.TxCount = orderedHdrs[index].MiniBlockHeaders[i].TxCount - - // execute shard miniblock to change the trie root hash - err := mp.checkAndProcessShardMiniBlockHeader( - orderedHdrHashes[index], - &shardMiniBlockHeader, - hdrPool, - round, - shardData.ShardId, - ) - - if err != nil { - log.Error(err.Error()) - err = mp.accounts.RevertToSnapshot(snapshot) - if err != nil { - log.Error(err.Error()) - } - break - } - - shardData.ShardMiniBlockHeaders = append(shardData.ShardMiniBlockHeaders, shardMiniBlockHeader) - mbHdrs++ - - recordsAddedInHeader := mbHdrs + uint32(len(shardInfo)) - spaceRemained := int32(maxItemsInBlock) - int32(recordsAddedInHeader) - 1 - - if spaceRemained <= 0 { - log.Info(fmt.Sprintf("max hdrs accepted in one block is reached: added %d hdrs from %d hdrs\n", mbHdrs, len(orderedHdrs))) - - if len(shardData.ShardMiniBlockHeaders) == len(orderedHdrs[index].MiniBlockHeaders) { - shardInfo = append(shardInfo, shardData) - } - - log.Info(fmt.Sprintf("creating shard info has been finished: created %d shard data\n", len(shardInfo))) - return shardInfo, nil - } - } - - if !haveTime() { - log.Info(fmt.Sprintf("time is up: added %d hdrs from %d hdrs\n", mbHdrs, len(orderedHdrs))) - - if len(shardData.ShardMiniBlockHeaders) == len(orderedHdrs[index].MiniBlockHeaders) { - shardInfo = append(shardInfo, shardData) - } - - log.Info(fmt.Sprintf("creating shard info has been finished: created %d shard data\n", len(shardInfo))) - return shardInfo, nil - } - - if len(shardData.ShardMiniBlockHeaders) == len(orderedHdrs[index].MiniBlockHeaders) { - shardInfo = append(shardInfo, shardData) - } - } - - log.Info(fmt.Sprintf("creating shard info has been finished: created %d shard data\n", len(shardInfo))) - return shardInfo, nil + shardInfo := make([]block.ShardData, 0) + lastPushedHdr := make(map[uint32]data.HeaderHandler, mp.shardCoordinator.NumberOfShards()) + + if mp.accounts.JournalLen() != 0 { + return nil, process.ErrAccountStateDirty + } + + if !haveTime() { + log.Info(fmt.Sprintf("time is up after entered in 
createShardInfo method\n")) + return shardInfo, nil + } + + hdrPool := mp.dataPool.ShardHeaders() + if hdrPool == nil { + return nil, process.ErrNilHeadersDataPool + } + + mbHdrs := uint32(0) + + timeBefore := time.Now() + orderedHdrs, orderedHdrHashes, sortedHdrPerShard, err := mp.getOrderedHdrs(round) + timeAfter := time.Now() + + if !haveTime() { + log.Info(fmt.Sprintf("time is up after ordered %d hdrs in %v sec\n", len(orderedHdrs), timeAfter.Sub(timeBefore).Seconds())) + return shardInfo, nil + } + + log.Debug(fmt.Sprintf("time elapsed to ordered %d hdrs: %v sec\n", len(orderedHdrs), timeAfter.Sub(timeBefore).Seconds())) + + if err != nil { + return nil, err + } + + log.Info(fmt.Sprintf("creating shard info has been started: have %d hdrs in pool\n", len(orderedHdrs))) + + // save last committed hdr for verification + mp.mutNotarizedHdrs.RLock() + if mp.notarizedHdrs == nil { + mp.mutNotarizedHdrs.RUnlock() + return nil, process.ErrNotarizedHdrsSliceIsNil + } + for shardId := uint32(0); shardId < mp.shardCoordinator.NumberOfShards(); shardId++ { + lastPushedHdr[shardId] = mp.lastNotarizedHdrForShard(shardId) + } + mp.mutNotarizedHdrs.RUnlock() + + for index := range orderedHdrs { + shId := orderedHdrs[index].ShardId + + lastHdr, ok := lastPushedHdr[shId].(*block.Header) + if !ok { + continue + } + + isFinal, _ := mp.isShardHeaderValidFinal(orderedHdrs[index], lastHdr, sortedHdrPerShard[shId]) + if !isFinal { + continue + } + + lastPushedHdr[shId] = orderedHdrs[index] + + shardData := block.ShardData{} + shardData.ShardMiniBlockHeaders = make([]block.ShardMiniBlockHeader, 0) + shardData.TxCount = orderedHdrs[index].TxCount + shardData.ShardId = orderedHdrs[index].ShardId + shardData.HeaderHash = orderedHdrHashes[index] + + snapshot := mp.accounts.JournalLen() + + for i := 0; i < len(orderedHdrs[index].MiniBlockHeaders); i++ { + if !haveTime() { + break + } + + shardMiniBlockHeader := block.ShardMiniBlockHeader{} + shardMiniBlockHeader.SenderShardId = orderedHdrs[index].MiniBlockHeaders[i].SenderShardID + shardMiniBlockHeader.ReceiverShardId = orderedHdrs[index].MiniBlockHeaders[i].ReceiverShardID + shardMiniBlockHeader.Hash = orderedHdrs[index].MiniBlockHeaders[i].Hash + shardMiniBlockHeader.TxCount = orderedHdrs[index].MiniBlockHeaders[i].TxCount + + // execute shard miniblock to change the trie root hash + err := mp.checkAndProcessShardMiniBlockHeader( + orderedHdrHashes[index], + &shardMiniBlockHeader, + hdrPool, + round, + shardData.ShardId, + ) + + if err != nil { + log.Error(err.Error()) + err = mp.accounts.RevertToSnapshot(snapshot) + if err != nil { + log.Error(err.Error()) + } + break + } + + shardData.ShardMiniBlockHeaders = append(shardData.ShardMiniBlockHeaders, shardMiniBlockHeader) + mbHdrs++ + + recordsAddedInHeader := mbHdrs + uint32(len(shardInfo)) + spaceRemained := int32(maxItemsInBlock) - int32(recordsAddedInHeader) - 1 + + if spaceRemained <= 0 { + log.Info(fmt.Sprintf("max hdrs accepted in one block is reached: added %d hdrs from %d hdrs\n", mbHdrs, len(orderedHdrs))) + + if len(shardData.ShardMiniBlockHeaders) == len(orderedHdrs[index].MiniBlockHeaders) { + shardInfo = append(shardInfo, shardData) + } + + log.Info(fmt.Sprintf("creating shard info has been finished: created %d shard data\n", len(shardInfo))) + return shardInfo, nil + } + } + + if !haveTime() { + log.Info(fmt.Sprintf("time is up: added %d hdrs from %d hdrs\n", mbHdrs, len(orderedHdrs))) + + if len(shardData.ShardMiniBlockHeaders) == len(orderedHdrs[index].MiniBlockHeaders) { + shardInfo = 
append(shardInfo, shardData) + } + + log.Info(fmt.Sprintf("creating shard info has been finished: created %d shard data\n", len(shardInfo))) + return shardInfo, nil + } + + if len(shardData.ShardMiniBlockHeaders) == len(orderedHdrs[index].MiniBlockHeaders) { + shardInfo = append(shardInfo, shardData) + } + } + + log.Info(fmt.Sprintf("creating shard info has been finished: created %d shard data\n", len(shardInfo))) + return shardInfo, nil } func (mp *metaProcessor) createPeerInfo() ([]block.PeerData, error) { - // TODO: to be implemented - peerInfo := make([]block.PeerData, 0) - return peerInfo, nil + // TODO: to be implemented + peerInfo := make([]block.PeerData, 0) + return peerInfo, nil } // CreateBlockHeader creates a miniblock header list given a block body func (mp *metaProcessor) CreateBlockHeader(bodyHandler data.BodyHandler, round uint64, haveTime func() bool) (data.HeaderHandler, error) { - log.Debug(fmt.Sprintf("started creating block header in round %d\n", round)) - // TODO: add PrevRandSeed and RandSeed when BLS signing is completed - header := &block.MetaBlock{ - ShardInfo: make([]block.ShardData, 0), - PeerInfo: make([]block.PeerData, 0), - PrevRandSeed: make([]byte, 0), - RandSeed: make([]byte, 0), - } - - defer func() { - go mp.checkAndRequestIfShardHeadersMissing(round) - }() - - shardInfo, err := mp.createShardInfo(mp.blockSizeThrottler.MaxItemsToAdd(), round, haveTime) - if err != nil { - return nil, err - } - - peerInfo, err := mp.createPeerInfo() - if err != nil { - return nil, err - } - - header.ShardInfo = shardInfo - header.PeerInfo = peerInfo - header.RootHash = mp.getRootHash() - header.TxCount = getTxCount(shardInfo) - - mp.blockSizeThrottler.Add( - round, - core.Max(header.ItemsInBody(), header.ItemsInHeader())) - - return header, nil + log.Debug(fmt.Sprintf("started creating block header in round %d\n", round)) + // TODO: add PrevRandSeed and RandSeed when BLS signing is completed + header := &block.MetaBlock{ + ShardInfo: make([]block.ShardData, 0), + PeerInfo: make([]block.PeerData, 0), + PrevRandSeed: make([]byte, 0), + RandSeed: make([]byte, 0), + } + + defer func() { + go mp.checkAndRequestIfShardHeadersMissing(round) + }() + + shardInfo, err := mp.createShardInfo(mp.blockSizeThrottler.MaxItemsToAdd(), round, haveTime) + if err != nil { + return nil, err + } + + peerInfo, err := mp.createPeerInfo() + if err != nil { + return nil, err + } + + header.ShardInfo = shardInfo + header.PeerInfo = peerInfo + header.RootHash = mp.getRootHash() + header.TxCount = getTxCount(shardInfo) + + mp.blockSizeThrottler.Add( + round, + core.Max(header.ItemsInBody(), header.ItemsInHeader())) + + return header, nil } func (mp *metaProcessor) waitForBlockHeaders(waitTime time.Duration) error { - select { - case <-mp.chRcvAllHdrs: - return nil - case <-time.After(waitTime): - return process.ErrTimeIsOut - } + select { + case <-mp.chRcvAllHdrs: + return nil + case <-time.After(waitTime): + return process.ErrTimeIsOut + } } func (mp *metaProcessor) displayMetaBlock(header *block.MetaBlock) { - if header == nil { - return - } + if header == nil { + return + } - headerHash, err := core.CalculateHash(mp.marshalizer, mp.hasher, header) - if err != nil { - log.Error(err.Error()) - return - } + headerHash, err := core.CalculateHash(mp.marshalizer, mp.hasher, header) + if err != nil { + log.Error(err.Error()) + return + } - mp.displayLogInfo(header, headerHash) + mp.displayLogInfo(header, headerHash) } func (mp *metaProcessor) displayLogInfo( - header *block.MetaBlock, - headerHash 
[]byte, + header *block.MetaBlock, + headerHash []byte, ) { - dispHeader, dispLines := createDisplayableMetaHeader(header) - - tblString, err := display.CreateTableString(dispHeader, dispLines) - if err != nil { - log.Error(err.Error()) - return - } - - shardMBHeaderCounterMutex.RLock() - tblString = tblString + fmt.Sprintf("\nHeader hash: %s\n\nTotal shard MB headers "+ - "processed until now: %d. Total shard MB headers processed for this block: %d. Total shard headers remained in pool: %d\n", - core.ToB64(headerHash), - shardMBHeadersTotalProcessed, - shardMBHeadersCurrentBlockProcessed, - mp.getHeadersCountInPool()) - shardMBHeaderCounterMutex.RUnlock() - - log.Info(tblString) + dispHeader, dispLines := createDisplayableMetaHeader(header) + + tblString, err := display.CreateTableString(dispHeader, dispLines) + if err != nil { + log.Error(err.Error()) + return + } + + shardMBHeaderCounterMutex.RLock() + tblString = tblString + fmt.Sprintf("\nHeader hash: %s\n\nTotal shard MB headers "+ + "processed until now: %d. Total shard MB headers processed for this block: %d. Total shard headers remained in pool: %d\n", + core.ToB64(headerHash), + shardMBHeadersTotalProcessed, + shardMBHeadersCurrentBlockProcessed, + mp.getHeadersCountInPool()) + shardMBHeaderCounterMutex.RUnlock() + + log.Info(tblString) } func createDisplayableMetaHeader( - header *block.MetaBlock, + header *block.MetaBlock, ) ([]string, []*display.LineData) { - tableHeader := []string{"Part", "Parameter", "Value"} + tableHeader := []string{"Part", "Parameter", "Value"} - lines := displayHeader(header) + lines := displayHeader(header) - metaLines := make([]*display.LineData, 0) - metaLines = append(metaLines, display.NewLineData(false, []string{ - "Header", - "Block type", - "MetaBlock"})) - metaLines = append(metaLines, lines...) + metaLines := make([]*display.LineData, 0) + metaLines = append(metaLines, display.NewLineData(false, []string{ + "Header", + "Block type", + "MetaBlock"})) + metaLines = append(metaLines, lines...) 
- metaLines = displayShardInfo(metaLines, header) - return tableHeader, metaLines + metaLines = displayShardInfo(metaLines, header) + return tableHeader, metaLines } func displayShardInfo(lines []*display.LineData, header *block.MetaBlock) []*display.LineData { - shardMBHeaderCounterMutex.Lock() - shardMBHeadersCurrentBlockProcessed = 0 - shardMBHeaderCounterMutex.Unlock() - - for i := 0; i < len(header.ShardInfo); i++ { - shardData := header.ShardInfo[i] - - lines = append(lines, display.NewLineData(false, []string{ - fmt.Sprintf("ShardData_%d", shardData.ShardId), - "Header hash", - base64.StdEncoding.EncodeToString(shardData.HeaderHash)})) - - if shardData.ShardMiniBlockHeaders == nil || len(shardData.ShardMiniBlockHeaders) == 0 { - lines = append(lines, display.NewLineData(false, []string{ - "", "ShardMiniBlockHeaders", ""})) - } - - shardMBHeaderCounterMutex.Lock() - shardMBHeadersCurrentBlockProcessed += len(shardData.ShardMiniBlockHeaders) - shardMBHeadersTotalProcessed += len(shardData.ShardMiniBlockHeaders) - shardMBHeaderCounterMutex.Unlock() - - for j := 0; j < len(shardData.ShardMiniBlockHeaders); j++ { - if j == 0 || j >= len(shardData.ShardMiniBlockHeaders)-1 { - senderShard := shardData.ShardMiniBlockHeaders[j].SenderShardId - receiverShard := shardData.ShardMiniBlockHeaders[j].ReceiverShardId - lines = append(lines, display.NewLineData(false, []string{ - "", - fmt.Sprintf("%d ShardMiniBlockHeaderHash_%d_%d", j+1, senderShard, receiverShard), - core.ToB64(shardData.ShardMiniBlockHeaders[j].Hash)})) - } else if j == 1 { - lines = append(lines, display.NewLineData(false, []string{ - "", - fmt.Sprintf("..."), - fmt.Sprintf("...")})) - } - } - - lines[len(lines)-1].HorizontalRuleAfter = true - } - - return lines + shardMBHeaderCounterMutex.Lock() + shardMBHeadersCurrentBlockProcessed = 0 + shardMBHeaderCounterMutex.Unlock() + + for i := 0; i < len(header.ShardInfo); i++ { + shardData := header.ShardInfo[i] + + lines = append(lines, display.NewLineData(false, []string{ + fmt.Sprintf("ShardData_%d", shardData.ShardId), + "Header hash", + base64.StdEncoding.EncodeToString(shardData.HeaderHash)})) + + if shardData.ShardMiniBlockHeaders == nil || len(shardData.ShardMiniBlockHeaders) == 0 { + lines = append(lines, display.NewLineData(false, []string{ + "", "ShardMiniBlockHeaders", ""})) + } + + shardMBHeaderCounterMutex.Lock() + shardMBHeadersCurrentBlockProcessed += len(shardData.ShardMiniBlockHeaders) + shardMBHeadersTotalProcessed += len(shardData.ShardMiniBlockHeaders) + shardMBHeaderCounterMutex.Unlock() + + for j := 0; j < len(shardData.ShardMiniBlockHeaders); j++ { + if j == 0 || j >= len(shardData.ShardMiniBlockHeaders)-1 { + senderShard := shardData.ShardMiniBlockHeaders[j].SenderShardId + receiverShard := shardData.ShardMiniBlockHeaders[j].ReceiverShardId + lines = append(lines, display.NewLineData(false, []string{ + "", + fmt.Sprintf("%d ShardMiniBlockHeaderHash_%d_%d", j+1, senderShard, receiverShard), + core.ToB64(shardData.ShardMiniBlockHeaders[j].Hash)})) + } else if j == 1 { + lines = append(lines, display.NewLineData(false, []string{ + "", + fmt.Sprintf("..."), + fmt.Sprintf("...")})) + } + } + + lines[len(lines)-1].HorizontalRuleAfter = true + } + + return lines } // MarshalizedDataToBroadcast prepares underlying data into a marshalized object according to destination func (mp *metaProcessor) MarshalizedDataToBroadcast( - header data.HeaderHandler, - bodyHandler data.BodyHandler, + header data.HeaderHandler, + bodyHandler data.BodyHandler, ) (map[uint32][]byte, 
map[string][][]byte, error) { - mrsData := make(map[uint32][]byte) - mrsTxs := make(map[string][][]byte) + mrsData := make(map[uint32][]byte) + mrsTxs := make(map[string][][]byte) - // send headers which can validate the current header + // send headers which can validate the current header - return mrsData, mrsTxs, nil + return mrsData, mrsTxs, nil } func (mp *metaProcessor) getOrderedHdrs(round uint64) ([]*block.Header, [][]byte, map[uint32][]*block.Header, error) { - hdrStore := mp.dataPool.ShardHeaders() - if hdrStore == nil { - return nil, nil, nil, process.ErrNilCacher - } - - hashAndBlockMap := make(map[uint32][]*hashAndHdr, mp.shardCoordinator.NumberOfShards()) - headersMap := make(map[uint32][]*block.Header) - headers := make([]*block.Header, 0) - hdrHashes := make([][]byte, 0) - - mp.mutNotarizedHdrs.RLock() - if mp.notarizedHdrs == nil { - mp.mutNotarizedHdrs.RUnlock() - return nil, nil, nil, process.ErrNotarizedHdrsSliceIsNil - } - - // get keys and arrange them into shards - for _, key := range hdrStore.Keys() { - val, _ := hdrStore.Peek(key) - if val == nil { - continue - } - - hdr, ok := val.(*block.Header) - if !ok { - continue - } - - if hdr.GetRound() > round { - continue - } - - currShardId := hdr.ShardId - if mp.lastNotarizedHdrForShard(currShardId) == nil { - continue - } - - if hdr.GetRound() <= mp.lastNotarizedHdrForShard(currShardId).GetRound() { - continue - } - - if hdr.GetNonce() <= mp.lastNotarizedHdrForShard(currShardId).GetNonce() { - continue - } - - hashAndBlockMap[currShardId] = append(hashAndBlockMap[currShardId], - &hashAndHdr{hdr: hdr, hash: key}) - } - mp.mutNotarizedHdrs.RUnlock() - - // sort headers for each shard - maxHdrLen := 0 - for shardId := uint32(0); shardId < mp.shardCoordinator.NumberOfShards(); shardId++ { - hdrsForShard := hashAndBlockMap[shardId] - if len(hdrsForShard) == 0 { - continue - } - - sort.Slice(hdrsForShard, func(i, j int) bool { - return hdrsForShard[i].hdr.GetNonce() < hdrsForShard[j].hdr.GetNonce() - }) - - tmpHdrLen := len(hdrsForShard) - if maxHdrLen < tmpHdrLen { - maxHdrLen = tmpHdrLen - } - } - - // copy from map to lists - equality between number of headers per shard - for i := 0; i < maxHdrLen; i++ { - for shardId := uint32(0); shardId < mp.shardCoordinator.NumberOfShards(); shardId++ { - hdrsForShard := hashAndBlockMap[shardId] - if i >= len(hdrsForShard) { - continue - } - - hdr, ok := hdrsForShard[i].hdr.(*block.Header) - if !ok { - continue - } - - headers = append(headers, hdr) - hdrHashes = append(hdrHashes, hdrsForShard[i].hash) - headersMap[shardId] = append(headersMap[shardId], hdr) - } - } - - return headers, hdrHashes, headersMap, nil + hdrStore := mp.dataPool.ShardHeaders() + if hdrStore == nil { + return nil, nil, nil, process.ErrNilCacher + } + + hashAndBlockMap := make(map[uint32][]*hashAndHdr, mp.shardCoordinator.NumberOfShards()) + headersMap := make(map[uint32][]*block.Header) + headers := make([]*block.Header, 0) + hdrHashes := make([][]byte, 0) + + mp.mutNotarizedHdrs.RLock() + if mp.notarizedHdrs == nil { + mp.mutNotarizedHdrs.RUnlock() + return nil, nil, nil, process.ErrNotarizedHdrsSliceIsNil + } + + // get keys and arrange them into shards + for _, key := range hdrStore.Keys() { + val, _ := hdrStore.Peek(key) + if val == nil { + continue + } + + hdr, ok := val.(*block.Header) + if !ok { + continue + } + + if hdr.GetRound() > round { + continue + } + + currShardId := hdr.ShardId + if mp.lastNotarizedHdrForShard(currShardId) == nil { + continue + } + + if hdr.GetRound() <= 
mp.lastNotarizedHdrForShard(currShardId).GetRound() { + continue + } + + if hdr.GetNonce() <= mp.lastNotarizedHdrForShard(currShardId).GetNonce() { + continue + } + + hashAndBlockMap[currShardId] = append(hashAndBlockMap[currShardId], + &hashAndHdr{hdr: hdr, hash: key}) + } + mp.mutNotarizedHdrs.RUnlock() + + // sort headers for each shard + maxHdrLen := 0 + for shardId := uint32(0); shardId < mp.shardCoordinator.NumberOfShards(); shardId++ { + hdrsForShard := hashAndBlockMap[shardId] + if len(hdrsForShard) == 0 { + continue + } + + sort.Slice(hdrsForShard, func(i, j int) bool { + return hdrsForShard[i].hdr.GetNonce() < hdrsForShard[j].hdr.GetNonce() + }) + + tmpHdrLen := len(hdrsForShard) + if maxHdrLen < tmpHdrLen { + maxHdrLen = tmpHdrLen + } + } + + // copy from map to lists - equality between number of headers per shard + for i := 0; i < maxHdrLen; i++ { + for shardId := uint32(0); shardId < mp.shardCoordinator.NumberOfShards(); shardId++ { + hdrsForShard := hashAndBlockMap[shardId] + if i >= len(hdrsForShard) { + continue + } + + hdr, ok := hdrsForShard[i].hdr.(*block.Header) + if !ok { + continue + } + + headers = append(headers, hdr) + hdrHashes = append(hdrHashes, hdrsForShard[i].hash) + headersMap[shardId] = append(headersMap[shardId], hdr) + } + } + + return headers, hdrHashes, headersMap, nil } func getTxCount(shardInfo []block.ShardData) uint32 { - txs := uint32(0) - for i := 0; i < len(shardInfo); i++ { - for j := 0; j < len(shardInfo[i].ShardMiniBlockHeaders); j++ { - txs += shardInfo[i].ShardMiniBlockHeaders[j].TxCount - } - } - - return txs + txs := uint32(0) + for i := 0; i < len(shardInfo); i++ { + for j := 0; j < len(shardInfo[i].ShardMiniBlockHeaders); j++ { + txs += shardInfo[i].ShardMiniBlockHeaders[j].TxCount + } + } + + return txs } func (mp *metaProcessor) getHeadersCountInPool() int { - headerPool := mp.dataPool.ShardHeaders() - if headerPool == nil { - log.Error(process.ErrNilHeadersDataPool.Error()) - return -1 - } + headerPool := mp.dataPool.ShardHeaders() + if headerPool == nil { + log.Error(process.ErrNilHeadersDataPool.Error()) + return -1 + } - return headerPool.Len() + return headerPool.Len() } // DecodeBlockBody method decodes block body from a given byte array func (mp *metaProcessor) DecodeBlockBody(dta []byte) data.BodyHandler { - if dta == nil { - return nil - } + if dta == nil { + return nil + } - var body block.MetaBlockBody + var body block.MetaBlockBody - err := mp.marshalizer.Unmarshal(&body, dta) - if err != nil { - log.Error(err.Error()) - return nil - } + err := mp.marshalizer.Unmarshal(&body, dta) + if err != nil { + log.Error(err.Error()) + return nil + } - return &body + return &body } // DecodeBlockHeader method decodes block header from a given byte array func (mp *metaProcessor) DecodeBlockHeader(dta []byte) data.HeaderHandler { - if dta == nil { - return nil - } + if dta == nil { + return nil + } - var header block.MetaBlock + var header block.MetaBlock - err := mp.marshalizer.Unmarshal(&header, dta) - if err != nil { - log.Error(err.Error()) - return nil - } + err := mp.marshalizer.Unmarshal(&header, dta) + if err != nil { + log.Error(err.Error()) + return nil + } - return &header + return &header } // IsInterfaceNil returns true if there is no value under the interface func (mp *metaProcessor) IsInterfaceNil() bool { - if mp == nil { - return true - } - return false + if mp == nil { + return true + } + return false } diff --git a/process/block/preprocess/rewardTxPreProcessor.go b/process/block/preprocess/rewardTxPreProcessor.go index 
44b39f032b5..5f4e9c742df 100644 --- a/process/block/preprocess/rewardTxPreProcessor.go +++ b/process/block/preprocess/rewardTxPreProcessor.go @@ -172,7 +172,7 @@ func (rtp *rewardTxPreprocessor) ProcessBlockTransactions(body block.Body, round // basic validation already done in interceptors for i := 0; i < len(body); i++ { miniBlock := body[i] - if miniBlock.Type != block.RewardsBlockType { + if miniBlock.Type != block.RewardsBlock { continue } diff --git a/process/block/preprocess/rewardsHandler.go b/process/block/preprocess/rewardsHandler.go index 225d98e76f4..517f99304f1 100644 --- a/process/block/preprocess/rewardsHandler.go +++ b/process/block/preprocess/rewardsHandler.go @@ -420,7 +420,7 @@ func (rtxh *rewardsHandler) GetAllCurrentFinishedTxs() map[string]data.Transacti rtxh.mut.Lock() rewardTxPool := make(map[string]data.TransactionHandler) - for txHash, txInfo := range rtxh.rewardTxsFromBlock { + for txHash, txInfo := range rtxh.rewardTxsForBlock { senderShard := txInfo.ShardId receiverShard, err := rtxh.address.ShardIdForAddress(txInfo.RcvAddr) @@ -439,11 +439,3 @@ func (rtxh *rewardsHandler) GetAllCurrentFinishedTxs() map[string]data.Transacti return rewardTxPool } - -// IsInterfaceNil returns true if there is no value under the interface -func (rtxh *rewardsHandler) IsInterfaceNil() bool { - if rtxh == nil { - return true - } - return false -} diff --git a/process/block/shardblock.go b/process/block/shardblock.go index cc2bfb40c3b..a81f9d39263 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -444,7 +444,7 @@ func (sp *shardProcessor) indexBlockIfNeeded( txPool := sp.txCoordinator.GetAllCurrentUsedTxs(block.TxBlock) scPool := sp.txCoordinator.GetAllCurrentUsedTxs(block.SmartContractResultBlock) - rewardPool := sp.txCoordinator.GetAllCurrentUsedTxs(block.RewardsBlockType) + rewardPool := sp.txCoordinator.GetAllCurrentUsedTxs(block.RewardsBlock) for hash, tx := range scPool { diff --git a/process/coordinator/process.go b/process/coordinator/process.go index 08c60678172..4db7cfafb1f 100644 --- a/process/coordinator/process.go +++ b/process/coordinator/process.go @@ -528,7 +528,7 @@ func (tc *transactionCoordinator) CreateMbsAndProcessTransactionsFromMe( log.Error("could not create reward mini-blocks") } - rewardsPreProc := tc.getPreProcessor(block.RewardsBlockType) + rewardsPreProc := tc.getPreProcessor(block.RewardsBlock) for _, mb := range rewardMb { err := tc.processCompleteMiniBlock(rewardsPreProc, mb, round, haveTime) if err != nil { diff --git a/process/factory/shard/intermediateProcessorsContainerFactory.go b/process/factory/shard/intermediateProcessorsContainerFactory.go index aea53e55a26..53de299d85b 100644 --- a/process/factory/shard/intermediateProcessorsContainerFactory.go +++ b/process/factory/shard/intermediateProcessorsContainerFactory.go @@ -109,7 +109,6 @@ func (ppcm *intermediateProcessorsContainerFactory) createSmartContractResultsIn func (ppcm *intermediateProcessorsContainerFactory) createRewardsTxIntermediateProcessor() (process.IntermediateTransactionHandler, error) { irp, err := preprocess.NewRewardTxHandler( ppcm.specialAddressHandler, - ppcm.shardCoordinator, ppcm.hasher, ppcm.marshalizer, ppcm.shardCoordinator, diff --git a/process/interface.go b/process/interface.go index 006c3e47d01..740cfb23151 100644 --- a/process/interface.go +++ b/process/interface.go @@ -119,6 +119,7 @@ type SpecialAddressHandler interface { SetElrondCommunityAddress(elrond []byte) ElrondCommunityAddress() []byte SetConsensusData(consensusRewardAddresses 
[]string, round uint64, epoch uint32) + ConsensusRewardAddresses() []string LeaderAddress() []byte BurnAddress() []byte ShardIdForAddress([]byte) (uint32, error) diff --git a/process/transaction/process.go b/process/transaction/process.go index 2562a4c9226..86638aa9807 100644 --- a/process/transaction/process.go +++ b/process/transaction/process.go @@ -6,14 +6,11 @@ import ( "sync" "github.com/ElrondNetwork/elrond-go/core/logger" - "github.com/ElrondNetwork/elrond-go/data" - "github.com/ElrondNetwork/elrond-go/data/rewardTx" "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/data/transaction" "github.com/ElrondNetwork/elrond-go/hashing" "github.com/ElrondNetwork/elrond-go/marshal" "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/process/block/preprocess" "github.com/ElrondNetwork/elrond-go/sharding" ) From 20c2caa2c303e559da1f1833c2650420098f8393 Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Tue, 10 Sep 2019 11:33:47 +0300 Subject: [PATCH 091/234] format code --- cmd/node/factory/structs.go | 3158 +++++++-------- config/config.go | 208 +- consensus/mock/blockProcessorMock.go | 52 +- .../spos/commonSubround/subroundStartRound.go | 326 +- data/address/specialAddresses.go | 116 +- .../shard/resolversContainerFactory.go | 918 ++--- .../shard/resolversContainerFactory_test.go | 702 ++-- integrationTests/mock/blockProcessorMock.go | 100 +- .../mock/specialAddressHandlerMock.go | 77 +- .../interceptedHeadersSigVerification_test.go | 292 +- .../smartContract/testInitilalizer.go | 1588 ++++---- .../interceptedResolvedBulkTx_test.go | 638 ++-- .../transaction/interceptedResolvedTx_test.go | 344 +- integrationTests/testInitializer.go | 1634 ++++---- integrationTests/testProcessorNode.go | 1398 +++---- .../testProcessorNodeWithMultisigner.go | 326 +- node/mock/blockProcessorStub.go | 62 +- process/block/baseProcess.go | 818 ++-- process/block/displayBlock.go | 346 +- .../block/preprocess/rewardTxPreProcessor.go | 108 +- process/block/preprocess/rewardsHandler.go | 6 +- .../block/preprocess/rewardsHandler_test.go | 12 +- process/block/preprocess/transactions_test.go | 1448 +++---- process/block/shardblock.go | 2793 +++++++------- process/block/shardblock_test.go | 30 +- process/coordinator/process.go | 96 +- process/coordinator/process_test.go | 3386 ++++++++--------- process/errors.go | 2 +- .../shard/interceptorsContainerFactory.go | 770 ++-- .../interceptorsContainerFactory_test.go | 1116 +++--- .../intermediateProcessorsContainerFactory.go | 196 +- ...rmediateProcessorsContainerFactory_test.go | 228 +- process/interface.go | 36 +- process/mock/blockProcessorMock.go | 64 +- .../interceptedRewardTransaction.go | 194 +- process/rewardTransaction/interceptor.go | 238 +- process/rewardTransaction/process.go | 76 +- process/smartContract/process.go | 1344 +++---- process/transaction/process.go | 62 +- 39 files changed, 12653 insertions(+), 12655 deletions(-) diff --git a/cmd/node/factory/structs.go b/cmd/node/factory/structs.go index 0944078118d..ab109a1418e 100644 --- a/cmd/node/factory/structs.go +++ b/cmd/node/factory/structs.go @@ -1,1850 +1,1850 @@ package factory import ( - "context" - "crypto/ecdsa" - "crypto/rand" - "encoding/hex" - "errors" - "fmt" - "io" - "math/big" - "path/filepath" - "time" - - "github.com/ElrondNetwork/elrond-go/config" - "github.com/ElrondNetwork/elrond-go/consensus" - "github.com/ElrondNetwork/elrond-go/consensus/round" - "github.com/ElrondNetwork/elrond-go/core" - 
"github.com/ElrondNetwork/elrond-go/core/genesis" - "github.com/ElrondNetwork/elrond-go/core/logger" - "github.com/ElrondNetwork/elrond-go/core/partitioning" - "github.com/ElrondNetwork/elrond-go/core/serviceContainer" - "github.com/ElrondNetwork/elrond-go/crypto" - "github.com/ElrondNetwork/elrond-go/crypto/signing" - "github.com/ElrondNetwork/elrond-go/crypto/signing/kyber" - blsMultiSig "github.com/ElrondNetwork/elrond-go/crypto/signing/kyber/multisig" - "github.com/ElrondNetwork/elrond-go/crypto/signing/kyber/singlesig" - "github.com/ElrondNetwork/elrond-go/crypto/signing/multisig" - "github.com/ElrondNetwork/elrond-go/data" - "github.com/ElrondNetwork/elrond-go/data/address" - dataBlock "github.com/ElrondNetwork/elrond-go/data/block" - "github.com/ElrondNetwork/elrond-go/data/blockchain" - "github.com/ElrondNetwork/elrond-go/data/state" - "github.com/ElrondNetwork/elrond-go/data/state/addressConverters" - factoryState "github.com/ElrondNetwork/elrond-go/data/state/factory" - "github.com/ElrondNetwork/elrond-go/data/trie" - "github.com/ElrondNetwork/elrond-go/data/typeConverters" - "github.com/ElrondNetwork/elrond-go/data/typeConverters/uint64ByteSlice" - "github.com/ElrondNetwork/elrond-go/dataRetriever" - "github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool" - "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/containers" - metafactoryDataRetriever "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/metachain" - shardfactoryDataRetriever "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/shard" - "github.com/ElrondNetwork/elrond-go/dataRetriever/requestHandlers" - "github.com/ElrondNetwork/elrond-go/dataRetriever/shardedData" - "github.com/ElrondNetwork/elrond-go/hashing" - "github.com/ElrondNetwork/elrond-go/hashing/blake2b" - "github.com/ElrondNetwork/elrond-go/hashing/sha256" - "github.com/ElrondNetwork/elrond-go/marshal" - "github.com/ElrondNetwork/elrond-go/ntp" - "github.com/ElrondNetwork/elrond-go/p2p" - "github.com/ElrondNetwork/elrond-go/p2p/libp2p" - factoryP2P "github.com/ElrondNetwork/elrond-go/p2p/libp2p/factory" - "github.com/ElrondNetwork/elrond-go/p2p/loadBalancer" - "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/process/block" - "github.com/ElrondNetwork/elrond-go/process/coordinator" - "github.com/ElrondNetwork/elrond-go/process/factory" - "github.com/ElrondNetwork/elrond-go/process/factory/metachain" - "github.com/ElrondNetwork/elrond-go/process/factory/shard" - "github.com/ElrondNetwork/elrond-go/process/rewardTransaction" - "github.com/ElrondNetwork/elrond-go/process/smartContract" - processSync "github.com/ElrondNetwork/elrond-go/process/sync" - "github.com/ElrondNetwork/elrond-go/process/track" - "github.com/ElrondNetwork/elrond-go/process/transaction" - "github.com/ElrondNetwork/elrond-go/sharding" - "github.com/ElrondNetwork/elrond-go/statusHandler" - factoryViews "github.com/ElrondNetwork/elrond-go/statusHandler/factory" - "github.com/ElrondNetwork/elrond-go/statusHandler/view" - "github.com/ElrondNetwork/elrond-go/storage" - "github.com/ElrondNetwork/elrond-go/storage/memorydb" - "github.com/ElrondNetwork/elrond-go/storage/storageUnit" - "github.com/btcsuite/btcd/btcec" - libp2pCrypto "github.com/libp2p/go-libp2p-core/crypto" - "github.com/urfave/cli" + "context" + "crypto/ecdsa" + "crypto/rand" + "encoding/hex" + "errors" + "fmt" + "io" + "math/big" + "path/filepath" + "time" + + "github.com/ElrondNetwork/elrond-go/config" + "github.com/ElrondNetwork/elrond-go/consensus" + 
"github.com/ElrondNetwork/elrond-go/consensus/round" + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/core/genesis" + "github.com/ElrondNetwork/elrond-go/core/logger" + "github.com/ElrondNetwork/elrond-go/core/partitioning" + "github.com/ElrondNetwork/elrond-go/core/serviceContainer" + "github.com/ElrondNetwork/elrond-go/crypto" + "github.com/ElrondNetwork/elrond-go/crypto/signing" + "github.com/ElrondNetwork/elrond-go/crypto/signing/kyber" + blsMultiSig "github.com/ElrondNetwork/elrond-go/crypto/signing/kyber/multisig" + "github.com/ElrondNetwork/elrond-go/crypto/signing/kyber/singlesig" + "github.com/ElrondNetwork/elrond-go/crypto/signing/multisig" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/address" + dataBlock "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/blockchain" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/data/state/addressConverters" + factoryState "github.com/ElrondNetwork/elrond-go/data/state/factory" + "github.com/ElrondNetwork/elrond-go/data/trie" + "github.com/ElrondNetwork/elrond-go/data/typeConverters" + "github.com/ElrondNetwork/elrond-go/data/typeConverters/uint64ByteSlice" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool" + "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/containers" + metafactoryDataRetriever "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/metachain" + shardfactoryDataRetriever "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/shard" + "github.com/ElrondNetwork/elrond-go/dataRetriever/requestHandlers" + "github.com/ElrondNetwork/elrond-go/dataRetriever/shardedData" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/hashing/blake2b" + "github.com/ElrondNetwork/elrond-go/hashing/sha256" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/ntp" + "github.com/ElrondNetwork/elrond-go/p2p" + "github.com/ElrondNetwork/elrond-go/p2p/libp2p" + factoryP2P "github.com/ElrondNetwork/elrond-go/p2p/libp2p/factory" + "github.com/ElrondNetwork/elrond-go/p2p/loadBalancer" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/block" + "github.com/ElrondNetwork/elrond-go/process/coordinator" + "github.com/ElrondNetwork/elrond-go/process/factory" + "github.com/ElrondNetwork/elrond-go/process/factory/metachain" + "github.com/ElrondNetwork/elrond-go/process/factory/shard" + "github.com/ElrondNetwork/elrond-go/process/rewardTransaction" + "github.com/ElrondNetwork/elrond-go/process/smartContract" + processSync "github.com/ElrondNetwork/elrond-go/process/sync" + "github.com/ElrondNetwork/elrond-go/process/track" + "github.com/ElrondNetwork/elrond-go/process/transaction" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/statusHandler" + factoryViews "github.com/ElrondNetwork/elrond-go/statusHandler/factory" + "github.com/ElrondNetwork/elrond-go/statusHandler/view" + "github.com/ElrondNetwork/elrond-go/storage" + "github.com/ElrondNetwork/elrond-go/storage/memorydb" + "github.com/ElrondNetwork/elrond-go/storage/storageUnit" + "github.com/btcsuite/btcd/btcec" + libp2pCrypto "github.com/libp2p/go-libp2p-core/crypto" + "github.com/urfave/cli" ) const ( - // BlsHashSize specifies the hash size for using bls scheme - BlsHashSize = 16 + // BlsHashSize specifies the hash size for using bls 
scheme + BlsHashSize = 16 - // BlsConsensusType specifies te signature scheme used in the consensus - BlsConsensusType = "bls" + // BlsConsensusType specifies te signature scheme used in the consensus + BlsConsensusType = "bls" - // BnConsensusType specifies te signature scheme used in the consensus - BnConsensusType = "bn" + // BnConsensusType specifies te signature scheme used in the consensus + BnConsensusType = "bn" - // MaxTxsToRequest specifies the maximum number of txs to request - MaxTxsToRequest = 100 + // MaxTxsToRequest specifies the maximum number of txs to request + MaxTxsToRequest = 100 ) var log = logger.DefaultLogger() // Network struct holds the network components of the Elrond protocol type Network struct { - NetMessenger p2p.Messenger + NetMessenger p2p.Messenger } // Core struct holds the core components of the Elrond protocol type Core struct { - Hasher hashing.Hasher - Marshalizer marshal.Marshalizer - Trie data.Trie - Uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter - StatusHandler core.AppStatusHandler + Hasher hashing.Hasher + Marshalizer marshal.Marshalizer + Trie data.Trie + Uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter + StatusHandler core.AppStatusHandler } // State struct holds the state components of the Elrond protocol type State struct { - AddressConverter state.AddressConverter - AccountsAdapter state.AccountsAdapter - InBalanceForShard map[string]*big.Int + AddressConverter state.AddressConverter + AccountsAdapter state.AccountsAdapter + InBalanceForShard map[string]*big.Int } // Data struct holds the data components of the Elrond protocol type Data struct { - Blkc data.ChainHandler - Store dataRetriever.StorageService - Datapool dataRetriever.PoolsHolder - MetaDatapool dataRetriever.MetaPoolsHolder + Blkc data.ChainHandler + Store dataRetriever.StorageService + Datapool dataRetriever.PoolsHolder + MetaDatapool dataRetriever.MetaPoolsHolder } // Crypto struct holds the crypto components of the Elrond protocol type Crypto struct { - TxSingleSigner crypto.SingleSigner - SingleSigner crypto.SingleSigner - MultiSigner crypto.MultiSigner - TxSignKeyGen crypto.KeyGenerator - TxSignPrivKey crypto.PrivateKey - TxSignPubKey crypto.PublicKey - InitialPubKeys map[uint32][]string + TxSingleSigner crypto.SingleSigner + SingleSigner crypto.SingleSigner + MultiSigner crypto.MultiSigner + TxSignKeyGen crypto.KeyGenerator + TxSignPrivKey crypto.PrivateKey + TxSignPubKey crypto.PublicKey + InitialPubKeys map[uint32][]string } // Process struct holds the process components of the Elrond protocol type Process struct { - InterceptorsContainer process.InterceptorsContainer - ResolversFinder dataRetriever.ResolversFinder - Rounder consensus.Rounder - ForkDetector process.ForkDetector - BlockProcessor process.BlockProcessor - BlockTracker process.BlocksTracker + InterceptorsContainer process.InterceptorsContainer + ResolversFinder dataRetriever.ResolversFinder + Rounder consensus.Rounder + ForkDetector process.ForkDetector + BlockProcessor process.BlockProcessor + BlockTracker process.BlocksTracker } type coreComponentsFactoryArgs struct { - config *config.Config - uniqueID string + config *config.Config + uniqueID string } // NewCoreComponentsFactoryArgs initializes the arguments necessary for creating the core components func NewCoreComponentsFactoryArgs(config *config.Config, uniqueID string) *coreComponentsFactoryArgs { - return &coreComponentsFactoryArgs{ - config: config, - uniqueID: uniqueID, - } + return &coreComponentsFactoryArgs{ + 
config: config, + uniqueID: uniqueID, + } } // CoreComponentsFactory creates the core components func CoreComponentsFactory(args *coreComponentsFactoryArgs) (*Core, error) { - hasher, err := getHasherFromConfig(args.config) - if err != nil { - return nil, errors.New("could not create hasher: " + err.Error()) - } - - marshalizer, err := getMarshalizerFromConfig(args.config) - if err != nil { - return nil, errors.New("could not create marshalizer: " + err.Error()) - } - - merkleTrie, err := getTrie(args.config.AccountsTrieStorage, marshalizer, hasher, args.uniqueID) - if err != nil { - return nil, errors.New("error creating trie: " + err.Error()) - } - uint64ByteSliceConverter := uint64ByteSlice.NewBigEndianConverter() - - return &Core{ - Hasher: hasher, - Marshalizer: marshalizer, - Trie: merkleTrie, - Uint64ByteSliceConverter: uint64ByteSliceConverter, - StatusHandler: statusHandler.NewNilStatusHandler(), - }, nil + hasher, err := getHasherFromConfig(args.config) + if err != nil { + return nil, errors.New("could not create hasher: " + err.Error()) + } + + marshalizer, err := getMarshalizerFromConfig(args.config) + if err != nil { + return nil, errors.New("could not create marshalizer: " + err.Error()) + } + + merkleTrie, err := getTrie(args.config.AccountsTrieStorage, marshalizer, hasher, args.uniqueID) + if err != nil { + return nil, errors.New("error creating trie: " + err.Error()) + } + uint64ByteSliceConverter := uint64ByteSlice.NewBigEndianConverter() + + return &Core{ + Hasher: hasher, + Marshalizer: marshalizer, + Trie: merkleTrie, + Uint64ByteSliceConverter: uint64ByteSliceConverter, + StatusHandler: statusHandler.NewNilStatusHandler(), + }, nil } type stateComponentsFactoryArgs struct { - config *config.Config - genesisConfig *sharding.Genesis - shardCoordinator sharding.Coordinator - core *Core + config *config.Config + genesisConfig *sharding.Genesis + shardCoordinator sharding.Coordinator + core *Core } // NewStateComponentsFactoryArgs initializes the arguments necessary for creating the state components func NewStateComponentsFactoryArgs( - config *config.Config, - genesisConfig *sharding.Genesis, - shardCoordinator sharding.Coordinator, - core *Core, + config *config.Config, + genesisConfig *sharding.Genesis, + shardCoordinator sharding.Coordinator, + core *Core, ) *stateComponentsFactoryArgs { - return &stateComponentsFactoryArgs{ - config: config, - genesisConfig: genesisConfig, - shardCoordinator: shardCoordinator, - core: core, - } + return &stateComponentsFactoryArgs{ + config: config, + genesisConfig: genesisConfig, + shardCoordinator: shardCoordinator, + core: core, + } } // StateComponentsFactory creates the state components func StateComponentsFactory(args *stateComponentsFactoryArgs) (*State, error) { - addressConverter, err := addressConverters.NewPlainAddressConverter( - args.config.Address.Length, - args.config.Address.Prefix, - ) - - if err != nil { - return nil, errors.New("could not create address converter: " + err.Error()) - } - - accountFactory, err := factoryState.NewAccountFactoryCreator(factoryState.UserAccount) - if err != nil { - return nil, errors.New("could not create account factory: " + err.Error()) - } - - accountsAdapter, err := state.NewAccountsDB(args.core.Trie, args.core.Hasher, args.core.Marshalizer, accountFactory) - if err != nil { - return nil, errors.New("could not create accounts adapter: " + err.Error()) - } - - inBalanceForShard, err := args.genesisConfig.InitialNodesBalances(args.shardCoordinator, addressConverter) - if err != nil { - 
return nil, errors.New("initial balances could not be processed " + err.Error()) - } - - return &State{ - AddressConverter: addressConverter, - AccountsAdapter: accountsAdapter, - InBalanceForShard: inBalanceForShard, - }, nil + addressConverter, err := addressConverters.NewPlainAddressConverter( + args.config.Address.Length, + args.config.Address.Prefix, + ) + + if err != nil { + return nil, errors.New("could not create address converter: " + err.Error()) + } + + accountFactory, err := factoryState.NewAccountFactoryCreator(factoryState.UserAccount) + if err != nil { + return nil, errors.New("could not create account factory: " + err.Error()) + } + + accountsAdapter, err := state.NewAccountsDB(args.core.Trie, args.core.Hasher, args.core.Marshalizer, accountFactory) + if err != nil { + return nil, errors.New("could not create accounts adapter: " + err.Error()) + } + + inBalanceForShard, err := args.genesisConfig.InitialNodesBalances(args.shardCoordinator, addressConverter) + if err != nil { + return nil, errors.New("initial balances could not be processed " + err.Error()) + } + + return &State{ + AddressConverter: addressConverter, + AccountsAdapter: accountsAdapter, + InBalanceForShard: inBalanceForShard, + }, nil } type dataComponentsFactoryArgs struct { - config *config.Config - shardCoordinator sharding.Coordinator - core *Core - uniqueID string + config *config.Config + shardCoordinator sharding.Coordinator + core *Core + uniqueID string } // NewDataComponentsFactoryArgs initializes the arguments necessary for creating the data components func NewDataComponentsFactoryArgs( - config *config.Config, - shardCoordinator sharding.Coordinator, - core *Core, - uniqueID string, + config *config.Config, + shardCoordinator sharding.Coordinator, + core *Core, + uniqueID string, ) *dataComponentsFactoryArgs { - return &dataComponentsFactoryArgs{ - config: config, - shardCoordinator: shardCoordinator, - core: core, - uniqueID: uniqueID, - } + return &dataComponentsFactoryArgs{ + config: config, + shardCoordinator: shardCoordinator, + core: core, + uniqueID: uniqueID, + } } // DataComponentsFactory creates the data components func DataComponentsFactory(args *dataComponentsFactoryArgs) (*Data, error) { - var datapool dataRetriever.PoolsHolder - var metaDatapool dataRetriever.MetaPoolsHolder - blkc, err := createBlockChainFromConfig(args.config, args.shardCoordinator, args.core.StatusHandler) - if err != nil { - return nil, errors.New("could not create block chain: " + err.Error()) - } - - store, err := createDataStoreFromConfig(args.config, args.shardCoordinator, args.uniqueID) - if err != nil { - return nil, errors.New("could not create local data store: " + err.Error()) - } - - if args.shardCoordinator.SelfId() < args.shardCoordinator.NumberOfShards() { - datapool, err = createShardDataPoolFromConfig(args.config, args.core.Uint64ByteSliceConverter) - if err != nil { - return nil, errors.New("could not create shard data pools: " + err.Error()) - } - } - if args.shardCoordinator.SelfId() == sharding.MetachainShardId { - metaDatapool, err = createMetaDataPoolFromConfig(args.config, args.core.Uint64ByteSliceConverter) - if err != nil { - return nil, errors.New("could not create shard data pools: " + err.Error()) - } - } - if datapool == nil && metaDatapool == nil { - return nil, errors.New("could not create data pools: ") - } - - return &Data{ - Blkc: blkc, - Store: store, - Datapool: datapool, - MetaDatapool: metaDatapool, - }, nil + var datapool dataRetriever.PoolsHolder + var metaDatapool 
dataRetriever.MetaPoolsHolder + blkc, err := createBlockChainFromConfig(args.config, args.shardCoordinator, args.core.StatusHandler) + if err != nil { + return nil, errors.New("could not create block chain: " + err.Error()) + } + + store, err := createDataStoreFromConfig(args.config, args.shardCoordinator, args.uniqueID) + if err != nil { + return nil, errors.New("could not create local data store: " + err.Error()) + } + + if args.shardCoordinator.SelfId() < args.shardCoordinator.NumberOfShards() { + datapool, err = createShardDataPoolFromConfig(args.config, args.core.Uint64ByteSliceConverter) + if err != nil { + return nil, errors.New("could not create shard data pools: " + err.Error()) + } + } + if args.shardCoordinator.SelfId() == sharding.MetachainShardId { + metaDatapool, err = createMetaDataPoolFromConfig(args.config, args.core.Uint64ByteSliceConverter) + if err != nil { + return nil, errors.New("could not create shard data pools: " + err.Error()) + } + } + if datapool == nil && metaDatapool == nil { + return nil, errors.New("could not create data pools: ") + } + + return &Data{ + Blkc: blkc, + Store: store, + Datapool: datapool, + MetaDatapool: metaDatapool, + }, nil } type cryptoComponentsFactoryArgs struct { - ctx *cli.Context - config *config.Config - nodesConfig *sharding.NodesSetup - shardCoordinator sharding.Coordinator - keyGen crypto.KeyGenerator - privKey crypto.PrivateKey - log *logger.Logger - initialBalancesSkPemFileName string - txSignSkName string - txSignSkIndexName string + ctx *cli.Context + config *config.Config + nodesConfig *sharding.NodesSetup + shardCoordinator sharding.Coordinator + keyGen crypto.KeyGenerator + privKey crypto.PrivateKey + log *logger.Logger + initialBalancesSkPemFileName string + txSignSkName string + txSignSkIndexName string } // NewCryptoComponentsFactoryArgs initializes the arguments necessary for creating the crypto components func NewCryptoComponentsFactoryArgs( - ctx *cli.Context, - config *config.Config, - nodesConfig *sharding.NodesSetup, - shardCoordinator sharding.Coordinator, - keyGen crypto.KeyGenerator, - privKey crypto.PrivateKey, - log *logger.Logger, - initialBalancesSkPemFileName string, - txSignSkName string, - txSignSkIndexName string, + ctx *cli.Context, + config *config.Config, + nodesConfig *sharding.NodesSetup, + shardCoordinator sharding.Coordinator, + keyGen crypto.KeyGenerator, + privKey crypto.PrivateKey, + log *logger.Logger, + initialBalancesSkPemFileName string, + txSignSkName string, + txSignSkIndexName string, ) *cryptoComponentsFactoryArgs { - return &cryptoComponentsFactoryArgs{ - ctx: ctx, - config: config, - nodesConfig: nodesConfig, - shardCoordinator: shardCoordinator, - keyGen: keyGen, - privKey: privKey, - log: log, - initialBalancesSkPemFileName: initialBalancesSkPemFileName, - txSignSkName: txSignSkName, - txSignSkIndexName: txSignSkIndexName, - } + return &cryptoComponentsFactoryArgs{ + ctx: ctx, + config: config, + nodesConfig: nodesConfig, + shardCoordinator: shardCoordinator, + keyGen: keyGen, + privKey: privKey, + log: log, + initialBalancesSkPemFileName: initialBalancesSkPemFileName, + txSignSkName: txSignSkName, + txSignSkIndexName: txSignSkIndexName, + } } // CryptoComponentsFactory creates the crypto components func CryptoComponentsFactory(args *cryptoComponentsFactoryArgs) (*Crypto, error) { - initialPubKeys := args.nodesConfig.InitialNodesPubKeys() - txSingleSigner := &singlesig.SchnorrSigner{} - singleSigner, err := createSingleSigner(args.config) - if err != nil { - return nil, 
errors.New("could not create singleSigner: " + err.Error()) - } - - multisigHasher, err := getMultisigHasherFromConfig(args.config) - if err != nil { - return nil, errors.New("could not create multisig hasher: " + err.Error()) - } - - currentShardNodesPubKeys, err := args.nodesConfig.InitialNodesPubKeysForShard(args.shardCoordinator.SelfId()) - if err != nil { - return nil, errors.New("could not start creation of multiSigner: " + err.Error()) - } - - multiSigner, err := createMultiSigner(args.config, multisigHasher, currentShardNodesPubKeys, args.privKey, args.keyGen) - if err != nil { - return nil, err - } - - initialBalancesSkPemFileName := args.ctx.GlobalString(args.initialBalancesSkPemFileName) - txSignKeyGen, txSignPrivKey, txSignPubKey, err := GetSigningParams( - args.ctx, - args.log, - args.txSignSkName, - args.txSignSkIndexName, - initialBalancesSkPemFileName, - kyber.NewBlakeSHA256Ed25519()) - if err != nil { - return nil, err - } - args.log.Info("Starting with tx sign public key: " + GetPkEncoded(txSignPubKey)) - - return &Crypto{ - TxSingleSigner: txSingleSigner, - SingleSigner: singleSigner, - MultiSigner: multiSigner, - TxSignKeyGen: txSignKeyGen, - TxSignPrivKey: txSignPrivKey, - TxSignPubKey: txSignPubKey, - InitialPubKeys: initialPubKeys, - }, nil + initialPubKeys := args.nodesConfig.InitialNodesPubKeys() + txSingleSigner := &singlesig.SchnorrSigner{} + singleSigner, err := createSingleSigner(args.config) + if err != nil { + return nil, errors.New("could not create singleSigner: " + err.Error()) + } + + multisigHasher, err := getMultisigHasherFromConfig(args.config) + if err != nil { + return nil, errors.New("could not create multisig hasher: " + err.Error()) + } + + currentShardNodesPubKeys, err := args.nodesConfig.InitialNodesPubKeysForShard(args.shardCoordinator.SelfId()) + if err != nil { + return nil, errors.New("could not start creation of multiSigner: " + err.Error()) + } + + multiSigner, err := createMultiSigner(args.config, multisigHasher, currentShardNodesPubKeys, args.privKey, args.keyGen) + if err != nil { + return nil, err + } + + initialBalancesSkPemFileName := args.ctx.GlobalString(args.initialBalancesSkPemFileName) + txSignKeyGen, txSignPrivKey, txSignPubKey, err := GetSigningParams( + args.ctx, + args.log, + args.txSignSkName, + args.txSignSkIndexName, + initialBalancesSkPemFileName, + kyber.NewBlakeSHA256Ed25519()) + if err != nil { + return nil, err + } + args.log.Info("Starting with tx sign public key: " + GetPkEncoded(txSignPubKey)) + + return &Crypto{ + TxSingleSigner: txSingleSigner, + SingleSigner: singleSigner, + MultiSigner: multiSigner, + TxSignKeyGen: txSignKeyGen, + TxSignPrivKey: txSignPrivKey, + TxSignPubKey: txSignPubKey, + InitialPubKeys: initialPubKeys, + }, nil } // NetworkComponentsFactory creates the network components func NetworkComponentsFactory(p2pConfig *config.P2PConfig, log *logger.Logger, core *Core) (*Network, error) { - var randReader io.Reader - if p2pConfig.Node.Seed != "" { - randReader = NewSeedRandReader(core.Hasher.Compute(p2pConfig.Node.Seed)) - } else { - randReader = rand.Reader - } - - netMessenger, err := createNetMessenger(p2pConfig, log, randReader) - if err != nil { - return nil, err - } - - return &Network{ - NetMessenger: netMessenger, - }, nil + var randReader io.Reader + if p2pConfig.Node.Seed != "" { + randReader = NewSeedRandReader(core.Hasher.Compute(p2pConfig.Node.Seed)) + } else { + randReader = rand.Reader + } + + netMessenger, err := createNetMessenger(p2pConfig, log, randReader) + if err != nil { + 
return nil, err + } + + return &Network{ + NetMessenger: netMessenger, + }, nil } type processComponentsFactoryArgs struct { - genesisConfig *sharding.Genesis - economicsConfig *config.EconomicsConfig - nodesConfig *sharding.NodesSetup - syncer ntp.SyncTimer - shardCoordinator sharding.Coordinator - nodesCoordinator sharding.NodesCoordinator - data *Data - core *Core - crypto *Crypto - state *State - network *Network - coreServiceContainer serviceContainer.Core + genesisConfig *sharding.Genesis + economicsConfig *config.EconomicsConfig + nodesConfig *sharding.NodesSetup + syncer ntp.SyncTimer + shardCoordinator sharding.Coordinator + nodesCoordinator sharding.NodesCoordinator + data *Data + core *Core + crypto *Crypto + state *State + network *Network + coreServiceContainer serviceContainer.Core } // NewProcessComponentsFactoryArgs initializes the arguments necessary for creating the process components func NewProcessComponentsFactoryArgs( - genesisConfig *sharding.Genesis, - economicsConfig *config.EconomicsConfig, - nodesConfig *sharding.NodesSetup, - syncer ntp.SyncTimer, - shardCoordinator sharding.Coordinator, - nodesCoordinator sharding.NodesCoordinator, - data *Data, - core *Core, - crypto *Crypto, - state *State, - network *Network, - coreServiceContainer serviceContainer.Core, + genesisConfig *sharding.Genesis, + economicsConfig *config.EconomicsConfig, + nodesConfig *sharding.NodesSetup, + syncer ntp.SyncTimer, + shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, + data *Data, + core *Core, + crypto *Crypto, + state *State, + network *Network, + coreServiceContainer serviceContainer.Core, ) *processComponentsFactoryArgs { - return &processComponentsFactoryArgs{ - genesisConfig: genesisConfig, - economicsConfig: economicsConfig, - nodesConfig: nodesConfig, - syncer: syncer, - shardCoordinator: shardCoordinator, - nodesCoordinator: nodesCoordinator, - data: data, - core: core, - crypto: crypto, - state: state, - network: network, - coreServiceContainer: coreServiceContainer, - } + return &processComponentsFactoryArgs{ + genesisConfig: genesisConfig, + economicsConfig: economicsConfig, + nodesConfig: nodesConfig, + syncer: syncer, + shardCoordinator: shardCoordinator, + nodesCoordinator: nodesCoordinator, + data: data, + core: core, + crypto: crypto, + state: state, + network: network, + coreServiceContainer: coreServiceContainer, + } } // ProcessComponentsFactory creates the process components func ProcessComponentsFactory(args *processComponentsFactoryArgs) (*Process, error) { - interceptorContainerFactory, resolversContainerFactory, err := newInterceptorAndResolverContainerFactory( - args.shardCoordinator, args.nodesCoordinator, args.data, args.core, args.crypto, args.state, args.network) - if err != nil { - return nil, err - } - - //TODO refactor all these factory calls - interceptorsContainer, err := interceptorContainerFactory.Create() - if err != nil { - return nil, err - } - - resolversContainer, err := resolversContainerFactory.Create() - if err != nil { - return nil, err - } - - resolversFinder, err := containers.NewResolversFinder(resolversContainer, args.shardCoordinator) - if err != nil { - return nil, err - } - - rounder, err := round.NewRound( - time.Unix(args.nodesConfig.StartTime, 0), - args.syncer.CurrentTime(), - time.Millisecond*time.Duration(args.nodesConfig.RoundDuration), - args.syncer) - if err != nil { - return nil, err - } - - forkDetector, err := processSync.NewBasicForkDetector(rounder) - if err != nil { - return nil, err 
- } - - shardsGenesisBlocks, err := generateGenesisHeadersAndApplyInitialBalances( - args.core, - args.state, - args.shardCoordinator, - args.nodesConfig, - args.genesisConfig, - ) - if err != nil { - return nil, err - } - - err = prepareGenesisBlock(args, shardsGenesisBlocks) - if err != nil { - return nil, err - } - - blockProcessor, blockTracker, err := newBlockProcessorAndTracker( - resolversFinder, - args.shardCoordinator, - args.nodesCoordinator, - args.economicsConfig, - args.data, - args.core, - args.state, - forkDetector, - shardsGenesisBlocks, - args.nodesConfig, - args.coreServiceContainer, - ) - - if err != nil { - return nil, err - } - - return &Process{ - InterceptorsContainer: interceptorsContainer, - ResolversFinder: resolversFinder, - Rounder: rounder, - ForkDetector: forkDetector, - BlockProcessor: blockProcessor, - BlockTracker: blockTracker, - }, nil + interceptorContainerFactory, resolversContainerFactory, err := newInterceptorAndResolverContainerFactory( + args.shardCoordinator, args.nodesCoordinator, args.data, args.core, args.crypto, args.state, args.network) + if err != nil { + return nil, err + } + + //TODO refactor all these factory calls + interceptorsContainer, err := interceptorContainerFactory.Create() + if err != nil { + return nil, err + } + + resolversContainer, err := resolversContainerFactory.Create() + if err != nil { + return nil, err + } + + resolversFinder, err := containers.NewResolversFinder(resolversContainer, args.shardCoordinator) + if err != nil { + return nil, err + } + + rounder, err := round.NewRound( + time.Unix(args.nodesConfig.StartTime, 0), + args.syncer.CurrentTime(), + time.Millisecond*time.Duration(args.nodesConfig.RoundDuration), + args.syncer) + if err != nil { + return nil, err + } + + forkDetector, err := processSync.NewBasicForkDetector(rounder) + if err != nil { + return nil, err + } + + shardsGenesisBlocks, err := generateGenesisHeadersAndApplyInitialBalances( + args.core, + args.state, + args.shardCoordinator, + args.nodesConfig, + args.genesisConfig, + ) + if err != nil { + return nil, err + } + + err = prepareGenesisBlock(args, shardsGenesisBlocks) + if err != nil { + return nil, err + } + + blockProcessor, blockTracker, err := newBlockProcessorAndTracker( + resolversFinder, + args.shardCoordinator, + args.nodesCoordinator, + args.economicsConfig, + args.data, + args.core, + args.state, + forkDetector, + shardsGenesisBlocks, + args.nodesConfig, + args.coreServiceContainer, + ) + + if err != nil { + return nil, err + } + + return &Process{ + InterceptorsContainer: interceptorsContainer, + ResolversFinder: resolversFinder, + Rounder: rounder, + ForkDetector: forkDetector, + BlockProcessor: blockProcessor, + BlockTracker: blockTracker, + }, nil } func prepareGenesisBlock(args *processComponentsFactoryArgs, shardsGenesisBlocks map[uint32]data.HeaderHandler) error { - genesisBlock, ok := shardsGenesisBlocks[args.shardCoordinator.SelfId()] - if !ok { - return errors.New("genesis block does not exists") - } - - genesisBlockHash, err := core.CalculateHash(args.core.Marshalizer, args.core.Hasher, genesisBlock) - if err != nil { - return err - } - - err = args.data.Blkc.SetGenesisHeader(genesisBlock) - if err != nil { - return err - } - - args.data.Blkc.SetGenesisHeaderHash(genesisBlockHash) - - marshalizedBlock, err := args.core.Marshalizer.Marshal(genesisBlock) - if err != nil { - return err - } - - if args.shardCoordinator.SelfId() == sharding.MetachainShardId { - errNotCritical := args.data.Store.Put(dataRetriever.MetaBlockUnit, 
genesisBlockHash, marshalizedBlock) - log.LogIfError(errNotCritical) - - } else { - errNotCritical := args.data.Store.Put(dataRetriever.BlockHeaderUnit, genesisBlockHash, marshalizedBlock) - log.LogIfError(errNotCritical) - } - - return nil + genesisBlock, ok := shardsGenesisBlocks[args.shardCoordinator.SelfId()] + if !ok { + return errors.New("genesis block does not exists") + } + + genesisBlockHash, err := core.CalculateHash(args.core.Marshalizer, args.core.Hasher, genesisBlock) + if err != nil { + return err + } + + err = args.data.Blkc.SetGenesisHeader(genesisBlock) + if err != nil { + return err + } + + args.data.Blkc.SetGenesisHeaderHash(genesisBlockHash) + + marshalizedBlock, err := args.core.Marshalizer.Marshal(genesisBlock) + if err != nil { + return err + } + + if args.shardCoordinator.SelfId() == sharding.MetachainShardId { + errNotCritical := args.data.Store.Put(dataRetriever.MetaBlockUnit, genesisBlockHash, marshalizedBlock) + log.LogIfError(errNotCritical) + + } else { + errNotCritical := args.data.Store.Put(dataRetriever.BlockHeaderUnit, genesisBlockHash, marshalizedBlock) + log.LogIfError(errNotCritical) + } + + return nil } type seedRandReader struct { - index int - seed []byte + index int + seed []byte } // NewSeedRandReader will return a new instance of a seed-based reader func NewSeedRandReader(seed []byte) *seedRandReader { - return &seedRandReader{seed: seed, index: 0} + return &seedRandReader{seed: seed, index: 0} } func (srr *seedRandReader) Read(p []byte) (n int, err error) { - if srr.seed == nil { - return 0, errors.New("nil seed") - } - if len(srr.seed) == 0 { - return 0, errors.New("empty seed") - } - if p == nil { - return 0, errors.New("nil buffer") - } - if len(p) == 0 { - return 0, errors.New("empty buffer") - } - - for i := 0; i < len(p); i++ { - p[i] = srr.seed[srr.index] - - srr.index++ - srr.index = srr.index % len(srr.seed) - } - - return len(p), nil + if srr.seed == nil { + return 0, errors.New("nil seed") + } + if len(srr.seed) == 0 { + return 0, errors.New("empty seed") + } + if p == nil { + return 0, errors.New("nil buffer") + } + if len(p) == 0 { + return 0, errors.New("empty buffer") + } + + for i := 0; i < len(p); i++ { + p[i] = srr.seed[srr.index] + + srr.index++ + srr.index = srr.index % len(srr.seed) + } + + return len(p), nil } // CreateStatusHandlerPresenter will return an instance of PresenterStatusHandler func CreateStatusHandlerPresenter() view.Presenter { - presenterStatusHandlerFactory := factoryViews.NewPresenterFactory() + presenterStatusHandlerFactory := factoryViews.NewPresenterFactory() - return presenterStatusHandlerFactory.Create() + return presenterStatusHandlerFactory.Create() } // CreateViews will start an termui console and will return an object if cannot create and start termuiConsole func CreateViews(presenter view.Presenter) ([]factoryViews.Viewer, error) { - viewsFactory, err := factoryViews.NewViewsFactory(presenter) - if err != nil { - return nil, err - } - - views, err := viewsFactory.Create() - if err != nil { - return nil, err - } - - for _, v := range views { - err = v.Start() - if err != nil { - return nil, err - } - } - - return views, nil + viewsFactory, err := factoryViews.NewViewsFactory(presenter) + if err != nil { + return nil, err + } + + views, err := viewsFactory.Create() + if err != nil { + return nil, err + } + + for _, v := range views { + err = v.Start() + if err != nil { + return nil, err + } + } + + return views, nil } func getHasherFromConfig(cfg *config.Config) (hashing.Hasher, error) { - switch 
cfg.Hasher.Type { - case "sha256": - return sha256.Sha256{}, nil - case "blake2b": - return blake2b.Blake2b{}, nil - } - - return nil, errors.New("no hasher provided in config file") + switch cfg.Hasher.Type { + case "sha256": + return sha256.Sha256{}, nil + case "blake2b": + return blake2b.Blake2b{}, nil + } + + return nil, errors.New("no hasher provided in config file") } func getMarshalizerFromConfig(cfg *config.Config) (marshal.Marshalizer, error) { - switch cfg.Marshalizer.Type { - case "json": - return &marshal.JsonMarshalizer{}, nil - } + switch cfg.Marshalizer.Type { + case "json": + return &marshal.JsonMarshalizer{}, nil + } - return nil, errors.New("no marshalizer provided in config file") + return nil, errors.New("no marshalizer provided in config file") } func getTrie( - cfg config.StorageConfig, - marshalizer marshal.Marshalizer, - hasher hashing.Hasher, - uniqueID string, + cfg config.StorageConfig, + marshalizer marshal.Marshalizer, + hasher hashing.Hasher, + uniqueID string, ) (data.Trie, error) { - accountsTrieStorage, err := storageUnit.NewStorageUnitFromConf( - getCacherFromConfig(cfg.Cache), - getDBFromConfig(cfg.DB, uniqueID), - getBloomFromConfig(cfg.Bloom), - ) - if err != nil { - return nil, errors.New("error creating accountsTrieStorage: " + err.Error()) - } + accountsTrieStorage, err := storageUnit.NewStorageUnitFromConf( + getCacherFromConfig(cfg.Cache), + getDBFromConfig(cfg.DB, uniqueID), + getBloomFromConfig(cfg.Bloom), + ) + if err != nil { + return nil, errors.New("error creating accountsTrieStorage: " + err.Error()) + } - return trie.NewTrie(accountsTrieStorage, marshalizer, hasher) + return trie.NewTrie(accountsTrieStorage, marshalizer, hasher) } func createBlockChainFromConfig(config *config.Config, coordinator sharding.Coordinator, ash core.AppStatusHandler) (data.ChainHandler, error) { - badBlockCache, err := storageUnit.NewCache( - storageUnit.CacheType(config.BadBlocksCache.Type), - config.BadBlocksCache.Size, - config.BadBlocksCache.Shards) - if err != nil { - return nil, err - } - - if coordinator == nil { - return nil, state.ErrNilShardCoordinator - } - - if coordinator.SelfId() < coordinator.NumberOfShards() { - blockChain, err := blockchain.NewBlockChain(badBlockCache) - if err != nil { - return nil, err - } - - err = blockChain.SetAppStatusHandler(ash) - if err != nil { - return nil, err - } - - return blockChain, nil - } - if coordinator.SelfId() == sharding.MetachainShardId { - blockChain, err := blockchain.NewMetaChain(badBlockCache) - if err != nil { - return nil, err - } - - err = blockChain.SetAppStatusHandler(ash) - if err != nil { - return nil, err - } - - return blockChain, nil - } - return nil, errors.New("can not create blockchain") + badBlockCache, err := storageUnit.NewCache( + storageUnit.CacheType(config.BadBlocksCache.Type), + config.BadBlocksCache.Size, + config.BadBlocksCache.Shards) + if err != nil { + return nil, err + } + + if coordinator == nil { + return nil, state.ErrNilShardCoordinator + } + + if coordinator.SelfId() < coordinator.NumberOfShards() { + blockChain, err := blockchain.NewBlockChain(badBlockCache) + if err != nil { + return nil, err + } + + err = blockChain.SetAppStatusHandler(ash) + if err != nil { + return nil, err + } + + return blockChain, nil + } + if coordinator.SelfId() == sharding.MetachainShardId { + blockChain, err := blockchain.NewMetaChain(badBlockCache) + if err != nil { + return nil, err + } + + err = blockChain.SetAppStatusHandler(ash) + if err != nil { + return nil, err + } + + return 
blockChain, nil + } + return nil, errors.New("can not create blockchain") } func createDataStoreFromConfig( - config *config.Config, - shardCoordinator sharding.Coordinator, - uniqueID string, + config *config.Config, + shardCoordinator sharding.Coordinator, + uniqueID string, ) (dataRetriever.StorageService, error) { - if shardCoordinator.SelfId() < shardCoordinator.NumberOfShards() { - return createShardDataStoreFromConfig(config, shardCoordinator, uniqueID) - } - if shardCoordinator.SelfId() == sharding.MetachainShardId { - return createMetaChainDataStoreFromConfig(config, shardCoordinator, uniqueID) - } - return nil, errors.New("can not create data store") + if shardCoordinator.SelfId() < shardCoordinator.NumberOfShards() { + return createShardDataStoreFromConfig(config, shardCoordinator, uniqueID) + } + if shardCoordinator.SelfId() == sharding.MetachainShardId { + return createMetaChainDataStoreFromConfig(config, shardCoordinator, uniqueID) + } + return nil, errors.New("can not create data store") } func createShardDataStoreFromConfig( - config *config.Config, - shardCoordinator sharding.Coordinator, - uniqueID string, + config *config.Config, + shardCoordinator sharding.Coordinator, + uniqueID string, ) (dataRetriever.StorageService, error) { - var headerUnit *storageUnit.Unit - var peerBlockUnit *storageUnit.Unit - var miniBlockUnit *storageUnit.Unit - var txUnit *storageUnit.Unit - var metachainHeaderUnit *storageUnit.Unit - var unsignedTxUnit *storageUnit.Unit - var rewardTxUnit *storageUnit.Unit - var metaHdrHashNonceUnit *storageUnit.Unit - var shardHdrHashNonceUnit *storageUnit.Unit - var err error - - defer func() { - // cleanup - if err != nil { - if headerUnit != nil { - _ = headerUnit.DestroyUnit() - } - if peerBlockUnit != nil { - _ = peerBlockUnit.DestroyUnit() - } - if miniBlockUnit != nil { - _ = miniBlockUnit.DestroyUnit() - } - if txUnit != nil { - _ = txUnit.DestroyUnit() - } - if unsignedTxUnit != nil { - _ = unsignedTxUnit.DestroyUnit() - } - if rewardTxUnit != nil { - _ = rewardTxUnit.DestroyUnit() - } - if metachainHeaderUnit != nil { - _ = metachainHeaderUnit.DestroyUnit() - } - if metaHdrHashNonceUnit != nil { - _ = metaHdrHashNonceUnit.DestroyUnit() - } - if shardHdrHashNonceUnit != nil { - _ = shardHdrHashNonceUnit.DestroyUnit() - } - } - }() - - txUnit, err = storageUnit.NewStorageUnitFromConf( - getCacherFromConfig(config.TxStorage.Cache), - getDBFromConfig(config.TxStorage.DB, uniqueID), - getBloomFromConfig(config.TxStorage.Bloom)) - if err != nil { - return nil, err - } - - unsignedTxUnit, err = storageUnit.NewStorageUnitFromConf( - getCacherFromConfig(config.UnsignedTransactionStorage.Cache), - getDBFromConfig(config.UnsignedTransactionStorage.DB, uniqueID), - getBloomFromConfig(config.UnsignedTransactionStorage.Bloom)) - if err != nil { - return nil, err - } - - rewardTxUnit, err = storageUnit.NewStorageUnitFromConf( - getCacherFromConfig(config.RewardTxStorage.Cache), - getDBFromConfig(config.RewardTxStorage.DB, uniqueID), - getBloomFromConfig(config.RewardTxStorage.Bloom)) - if err != nil { - return nil, err - } - - miniBlockUnit, err = storageUnit.NewStorageUnitFromConf( - getCacherFromConfig(config.MiniBlocksStorage.Cache), - getDBFromConfig(config.MiniBlocksStorage.DB, uniqueID), - getBloomFromConfig(config.MiniBlocksStorage.Bloom)) - if err != nil { - return nil, err - } - - peerBlockUnit, err = storageUnit.NewStorageUnitFromConf( - getCacherFromConfig(config.PeerBlockBodyStorage.Cache), - getDBFromConfig(config.PeerBlockBodyStorage.DB, 
uniqueID), - getBloomFromConfig(config.PeerBlockBodyStorage.Bloom)) - if err != nil { - return nil, err - } - - headerUnit, err = storageUnit.NewStorageUnitFromConf( - getCacherFromConfig(config.BlockHeaderStorage.Cache), - getDBFromConfig(config.BlockHeaderStorage.DB, uniqueID), - getBloomFromConfig(config.BlockHeaderStorage.Bloom)) - if err != nil { - return nil, err - } - - metachainHeaderUnit, err = storageUnit.NewStorageUnitFromConf( - getCacherFromConfig(config.MetaBlockStorage.Cache), - getDBFromConfig(config.MetaBlockStorage.DB, uniqueID), - getBloomFromConfig(config.MetaBlockStorage.Bloom)) - if err != nil { - return nil, err - } - - metaHdrHashNonceUnit, err = storageUnit.NewStorageUnitFromConf( - getCacherFromConfig(config.MetaHdrNonceHashStorage.Cache), - getDBFromConfig(config.MetaHdrNonceHashStorage.DB, uniqueID), - getBloomFromConfig(config.MetaHdrNonceHashStorage.Bloom), - ) - if err != nil { - return nil, err - } - - shardHdrHashNonceUnit, err = storageUnit.NewShardedStorageUnitFromConf( - getCacherFromConfig(config.ShardHdrNonceHashStorage.Cache), - getDBFromConfig(config.ShardHdrNonceHashStorage.DB, uniqueID), - getBloomFromConfig(config.ShardHdrNonceHashStorage.Bloom), - shardCoordinator.SelfId(), - ) - if err != nil { - return nil, err - } - - store := dataRetriever.NewChainStorer() - store.AddStorer(dataRetriever.TransactionUnit, txUnit) - store.AddStorer(dataRetriever.MiniBlockUnit, miniBlockUnit) - store.AddStorer(dataRetriever.PeerChangesUnit, peerBlockUnit) - store.AddStorer(dataRetriever.BlockHeaderUnit, headerUnit) - store.AddStorer(dataRetriever.MetaBlockUnit, metachainHeaderUnit) - store.AddStorer(dataRetriever.UnsignedTransactionUnit, unsignedTxUnit) - store.AddStorer(dataRetriever.RewardTransactionUnit, rewardTxUnit) - store.AddStorer(dataRetriever.MetaHdrNonceHashDataUnit, metaHdrHashNonceUnit) - hdrNonceHashDataUnit := dataRetriever.ShardHdrNonceHashDataUnit + dataRetriever.UnitType(shardCoordinator.SelfId()) - store.AddStorer(hdrNonceHashDataUnit, shardHdrHashNonceUnit) - - return store, err + var headerUnit *storageUnit.Unit + var peerBlockUnit *storageUnit.Unit + var miniBlockUnit *storageUnit.Unit + var txUnit *storageUnit.Unit + var metachainHeaderUnit *storageUnit.Unit + var unsignedTxUnit *storageUnit.Unit + var rewardTxUnit *storageUnit.Unit + var metaHdrHashNonceUnit *storageUnit.Unit + var shardHdrHashNonceUnit *storageUnit.Unit + var err error + + defer func() { + // cleanup + if err != nil { + if headerUnit != nil { + _ = headerUnit.DestroyUnit() + } + if peerBlockUnit != nil { + _ = peerBlockUnit.DestroyUnit() + } + if miniBlockUnit != nil { + _ = miniBlockUnit.DestroyUnit() + } + if txUnit != nil { + _ = txUnit.DestroyUnit() + } + if unsignedTxUnit != nil { + _ = unsignedTxUnit.DestroyUnit() + } + if rewardTxUnit != nil { + _ = rewardTxUnit.DestroyUnit() + } + if metachainHeaderUnit != nil { + _ = metachainHeaderUnit.DestroyUnit() + } + if metaHdrHashNonceUnit != nil { + _ = metaHdrHashNonceUnit.DestroyUnit() + } + if shardHdrHashNonceUnit != nil { + _ = shardHdrHashNonceUnit.DestroyUnit() + } + } + }() + + txUnit, err = storageUnit.NewStorageUnitFromConf( + getCacherFromConfig(config.TxStorage.Cache), + getDBFromConfig(config.TxStorage.DB, uniqueID), + getBloomFromConfig(config.TxStorage.Bloom)) + if err != nil { + return nil, err + } + + unsignedTxUnit, err = storageUnit.NewStorageUnitFromConf( + getCacherFromConfig(config.UnsignedTransactionStorage.Cache), + getDBFromConfig(config.UnsignedTransactionStorage.DB, uniqueID), + 
getBloomFromConfig(config.UnsignedTransactionStorage.Bloom)) + if err != nil { + return nil, err + } + + rewardTxUnit, err = storageUnit.NewStorageUnitFromConf( + getCacherFromConfig(config.RewardTxStorage.Cache), + getDBFromConfig(config.RewardTxStorage.DB, uniqueID), + getBloomFromConfig(config.RewardTxStorage.Bloom)) + if err != nil { + return nil, err + } + + miniBlockUnit, err = storageUnit.NewStorageUnitFromConf( + getCacherFromConfig(config.MiniBlocksStorage.Cache), + getDBFromConfig(config.MiniBlocksStorage.DB, uniqueID), + getBloomFromConfig(config.MiniBlocksStorage.Bloom)) + if err != nil { + return nil, err + } + + peerBlockUnit, err = storageUnit.NewStorageUnitFromConf( + getCacherFromConfig(config.PeerBlockBodyStorage.Cache), + getDBFromConfig(config.PeerBlockBodyStorage.DB, uniqueID), + getBloomFromConfig(config.PeerBlockBodyStorage.Bloom)) + if err != nil { + return nil, err + } + + headerUnit, err = storageUnit.NewStorageUnitFromConf( + getCacherFromConfig(config.BlockHeaderStorage.Cache), + getDBFromConfig(config.BlockHeaderStorage.DB, uniqueID), + getBloomFromConfig(config.BlockHeaderStorage.Bloom)) + if err != nil { + return nil, err + } + + metachainHeaderUnit, err = storageUnit.NewStorageUnitFromConf( + getCacherFromConfig(config.MetaBlockStorage.Cache), + getDBFromConfig(config.MetaBlockStorage.DB, uniqueID), + getBloomFromConfig(config.MetaBlockStorage.Bloom)) + if err != nil { + return nil, err + } + + metaHdrHashNonceUnit, err = storageUnit.NewStorageUnitFromConf( + getCacherFromConfig(config.MetaHdrNonceHashStorage.Cache), + getDBFromConfig(config.MetaHdrNonceHashStorage.DB, uniqueID), + getBloomFromConfig(config.MetaHdrNonceHashStorage.Bloom), + ) + if err != nil { + return nil, err + } + + shardHdrHashNonceUnit, err = storageUnit.NewShardedStorageUnitFromConf( + getCacherFromConfig(config.ShardHdrNonceHashStorage.Cache), + getDBFromConfig(config.ShardHdrNonceHashStorage.DB, uniqueID), + getBloomFromConfig(config.ShardHdrNonceHashStorage.Bloom), + shardCoordinator.SelfId(), + ) + if err != nil { + return nil, err + } + + store := dataRetriever.NewChainStorer() + store.AddStorer(dataRetriever.TransactionUnit, txUnit) + store.AddStorer(dataRetriever.MiniBlockUnit, miniBlockUnit) + store.AddStorer(dataRetriever.PeerChangesUnit, peerBlockUnit) + store.AddStorer(dataRetriever.BlockHeaderUnit, headerUnit) + store.AddStorer(dataRetriever.MetaBlockUnit, metachainHeaderUnit) + store.AddStorer(dataRetriever.UnsignedTransactionUnit, unsignedTxUnit) + store.AddStorer(dataRetriever.RewardTransactionUnit, rewardTxUnit) + store.AddStorer(dataRetriever.MetaHdrNonceHashDataUnit, metaHdrHashNonceUnit) + hdrNonceHashDataUnit := dataRetriever.ShardHdrNonceHashDataUnit + dataRetriever.UnitType(shardCoordinator.SelfId()) + store.AddStorer(hdrNonceHashDataUnit, shardHdrHashNonceUnit) + + return store, err } func createMetaChainDataStoreFromConfig( - config *config.Config, - shardCoordinator sharding.Coordinator, - uniqueID string, + config *config.Config, + shardCoordinator sharding.Coordinator, + uniqueID string, ) (dataRetriever.StorageService, error) { - var peerDataUnit, shardDataUnit, metaBlockUnit, headerUnit, metaHdrHashNonceUnit *storageUnit.Unit - var shardHdrHashNonceUnits []*storageUnit.Unit - var err error - - defer func() { - // cleanup - if err != nil { - if peerDataUnit != nil { - _ = peerDataUnit.DestroyUnit() - } - if shardDataUnit != nil { - _ = shardDataUnit.DestroyUnit() - } - if metaBlockUnit != nil { - _ = metaBlockUnit.DestroyUnit() - } - if headerUnit != nil { - 
_ = headerUnit.DestroyUnit() - } - if metaHdrHashNonceUnit != nil { - _ = metaHdrHashNonceUnit.DestroyUnit() - } - if shardHdrHashNonceUnits != nil { - for i := uint32(0); i < shardCoordinator.NumberOfShards(); i++ { - _ = shardHdrHashNonceUnits[i].DestroyUnit() - } - } - } - }() - - metaBlockUnit, err = storageUnit.NewStorageUnitFromConf( - getCacherFromConfig(config.MetaBlockStorage.Cache), - getDBFromConfig(config.MetaBlockStorage.DB, uniqueID), - getBloomFromConfig(config.MetaBlockStorage.Bloom)) - if err != nil { - return nil, err - } - - shardDataUnit, err = storageUnit.NewStorageUnitFromConf( - getCacherFromConfig(config.ShardDataStorage.Cache), - getDBFromConfig(config.ShardDataStorage.DB, uniqueID), - getBloomFromConfig(config.ShardDataStorage.Bloom)) - if err != nil { - return nil, err - } - - peerDataUnit, err = storageUnit.NewStorageUnitFromConf( - getCacherFromConfig(config.PeerDataStorage.Cache), - getDBFromConfig(config.PeerDataStorage.DB, uniqueID), - getBloomFromConfig(config.PeerDataStorage.Bloom)) - if err != nil { - return nil, err - } - - headerUnit, err = storageUnit.NewStorageUnitFromConf( - getCacherFromConfig(config.BlockHeaderStorage.Cache), - getDBFromConfig(config.BlockHeaderStorage.DB, uniqueID), - getBloomFromConfig(config.BlockHeaderStorage.Bloom)) - if err != nil { - return nil, err - } - - metaHdrHashNonceUnit, err = storageUnit.NewStorageUnitFromConf( - getCacherFromConfig(config.MetaHdrNonceHashStorage.Cache), - getDBFromConfig(config.MetaHdrNonceHashStorage.DB, uniqueID), - getBloomFromConfig(config.MetaHdrNonceHashStorage.Bloom), - ) - if err != nil { - return nil, err - } - - shardHdrHashNonceUnits = make([]*storageUnit.Unit, shardCoordinator.NumberOfShards()) - for i := uint32(0); i < shardCoordinator.NumberOfShards(); i++ { - shardHdrHashNonceUnits[i], err = storageUnit.NewShardedStorageUnitFromConf( - getCacherFromConfig(config.ShardHdrNonceHashStorage.Cache), - getDBFromConfig(config.ShardHdrNonceHashStorage.DB, uniqueID), - getBloomFromConfig(config.ShardHdrNonceHashStorage.Bloom), - i, - ) - if err != nil { - return nil, err - } - } - - store := dataRetriever.NewChainStorer() - store.AddStorer(dataRetriever.MetaBlockUnit, metaBlockUnit) - store.AddStorer(dataRetriever.MetaShardDataUnit, shardDataUnit) - store.AddStorer(dataRetriever.MetaPeerDataUnit, peerDataUnit) - store.AddStorer(dataRetriever.BlockHeaderUnit, headerUnit) - store.AddStorer(dataRetriever.MetaHdrNonceHashDataUnit, metaHdrHashNonceUnit) - for i := uint32(0); i < shardCoordinator.NumberOfShards(); i++ { - hdrNonceHashDataUnit := dataRetriever.ShardHdrNonceHashDataUnit + dataRetriever.UnitType(i) - store.AddStorer(hdrNonceHashDataUnit, shardHdrHashNonceUnits[i]) - } - - return store, err + var peerDataUnit, shardDataUnit, metaBlockUnit, headerUnit, metaHdrHashNonceUnit *storageUnit.Unit + var shardHdrHashNonceUnits []*storageUnit.Unit + var err error + + defer func() { + // cleanup + if err != nil { + if peerDataUnit != nil { + _ = peerDataUnit.DestroyUnit() + } + if shardDataUnit != nil { + _ = shardDataUnit.DestroyUnit() + } + if metaBlockUnit != nil { + _ = metaBlockUnit.DestroyUnit() + } + if headerUnit != nil { + _ = headerUnit.DestroyUnit() + } + if metaHdrHashNonceUnit != nil { + _ = metaHdrHashNonceUnit.DestroyUnit() + } + if shardHdrHashNonceUnits != nil { + for i := uint32(0); i < shardCoordinator.NumberOfShards(); i++ { + _ = shardHdrHashNonceUnits[i].DestroyUnit() + } + } + } + }() + + metaBlockUnit, err = storageUnit.NewStorageUnitFromConf( + 
getCacherFromConfig(config.MetaBlockStorage.Cache), + getDBFromConfig(config.MetaBlockStorage.DB, uniqueID), + getBloomFromConfig(config.MetaBlockStorage.Bloom)) + if err != nil { + return nil, err + } + + shardDataUnit, err = storageUnit.NewStorageUnitFromConf( + getCacherFromConfig(config.ShardDataStorage.Cache), + getDBFromConfig(config.ShardDataStorage.DB, uniqueID), + getBloomFromConfig(config.ShardDataStorage.Bloom)) + if err != nil { + return nil, err + } + + peerDataUnit, err = storageUnit.NewStorageUnitFromConf( + getCacherFromConfig(config.PeerDataStorage.Cache), + getDBFromConfig(config.PeerDataStorage.DB, uniqueID), + getBloomFromConfig(config.PeerDataStorage.Bloom)) + if err != nil { + return nil, err + } + + headerUnit, err = storageUnit.NewStorageUnitFromConf( + getCacherFromConfig(config.BlockHeaderStorage.Cache), + getDBFromConfig(config.BlockHeaderStorage.DB, uniqueID), + getBloomFromConfig(config.BlockHeaderStorage.Bloom)) + if err != nil { + return nil, err + } + + metaHdrHashNonceUnit, err = storageUnit.NewStorageUnitFromConf( + getCacherFromConfig(config.MetaHdrNonceHashStorage.Cache), + getDBFromConfig(config.MetaHdrNonceHashStorage.DB, uniqueID), + getBloomFromConfig(config.MetaHdrNonceHashStorage.Bloom), + ) + if err != nil { + return nil, err + } + + shardHdrHashNonceUnits = make([]*storageUnit.Unit, shardCoordinator.NumberOfShards()) + for i := uint32(0); i < shardCoordinator.NumberOfShards(); i++ { + shardHdrHashNonceUnits[i], err = storageUnit.NewShardedStorageUnitFromConf( + getCacherFromConfig(config.ShardHdrNonceHashStorage.Cache), + getDBFromConfig(config.ShardHdrNonceHashStorage.DB, uniqueID), + getBloomFromConfig(config.ShardHdrNonceHashStorage.Bloom), + i, + ) + if err != nil { + return nil, err + } + } + + store := dataRetriever.NewChainStorer() + store.AddStorer(dataRetriever.MetaBlockUnit, metaBlockUnit) + store.AddStorer(dataRetriever.MetaShardDataUnit, shardDataUnit) + store.AddStorer(dataRetriever.MetaPeerDataUnit, peerDataUnit) + store.AddStorer(dataRetriever.BlockHeaderUnit, headerUnit) + store.AddStorer(dataRetriever.MetaHdrNonceHashDataUnit, metaHdrHashNonceUnit) + for i := uint32(0); i < shardCoordinator.NumberOfShards(); i++ { + hdrNonceHashDataUnit := dataRetriever.ShardHdrNonceHashDataUnit + dataRetriever.UnitType(i) + store.AddStorer(hdrNonceHashDataUnit, shardHdrHashNonceUnits[i]) + } + + return store, err } func createShardDataPoolFromConfig( - config *config.Config, - uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter, + config *config.Config, + uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter, ) (dataRetriever.PoolsHolder, error) { - log.Info("creatingShardDataPool from config") - - txPool, err := shardedData.NewShardedData(getCacherFromConfig(config.TxDataPool)) - if err != nil { - log.Info("error creating txpool") - return nil, err - } - - uTxPool, err := shardedData.NewShardedData(getCacherFromConfig(config.UnsignedTransactionDataPool)) - if err != nil { - log.Info("error creating smart contract result pool") - return nil, err - } - - rewardTxPool, err := shardedData.NewShardedData(getCacherFromConfig(config.RewardTransactionDataPool)) - if err != nil { - log.Info("error creating reward transaction pool") - return nil, err - } - - cacherCfg := getCacherFromConfig(config.BlockHeaderDataPool) - hdrPool, err := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - if err != nil { - log.Info("error creating hdrpool") - return nil, err - } - - cacherCfg = 
getCacherFromConfig(config.MetaBlockBodyDataPool) - metaBlockBody, err := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - if err != nil { - log.Info("error creating metaBlockBody") - return nil, err - } - - cacherCfg = getCacherFromConfig(config.BlockHeaderNoncesDataPool) - hdrNoncesCacher, err := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - if err != nil { - log.Info("error creating hdrNoncesCacher") - return nil, err - } - hdrNonces, err := dataPool.NewNonceSyncMapCacher(hdrNoncesCacher, uint64ByteSliceConverter) - if err != nil { - log.Info("error creating hdrNonces") - return nil, err - } - - cacherCfg = getCacherFromConfig(config.TxBlockBodyDataPool) - txBlockBody, err := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - if err != nil { - log.Info("error creating txBlockBody") - return nil, err - } - - cacherCfg = getCacherFromConfig(config.PeerBlockBodyDataPool) - peerChangeBlockBody, err := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - if err != nil { - log.Info("error creating peerChangeBlockBody") - return nil, err - } - - return dataPool.NewShardedDataPool( - txPool, - uTxPool, - rewardTxPool, - hdrPool, - hdrNonces, - txBlockBody, - peerChangeBlockBody, - metaBlockBody, - ) + log.Info("creatingShardDataPool from config") + + txPool, err := shardedData.NewShardedData(getCacherFromConfig(config.TxDataPool)) + if err != nil { + log.Info("error creating txpool") + return nil, err + } + + uTxPool, err := shardedData.NewShardedData(getCacherFromConfig(config.UnsignedTransactionDataPool)) + if err != nil { + log.Info("error creating smart contract result pool") + return nil, err + } + + rewardTxPool, err := shardedData.NewShardedData(getCacherFromConfig(config.RewardTransactionDataPool)) + if err != nil { + log.Info("error creating reward transaction pool") + return nil, err + } + + cacherCfg := getCacherFromConfig(config.BlockHeaderDataPool) + hdrPool, err := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) + if err != nil { + log.Info("error creating hdrpool") + return nil, err + } + + cacherCfg = getCacherFromConfig(config.MetaBlockBodyDataPool) + metaBlockBody, err := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) + if err != nil { + log.Info("error creating metaBlockBody") + return nil, err + } + + cacherCfg = getCacherFromConfig(config.BlockHeaderNoncesDataPool) + hdrNoncesCacher, err := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) + if err != nil { + log.Info("error creating hdrNoncesCacher") + return nil, err + } + hdrNonces, err := dataPool.NewNonceSyncMapCacher(hdrNoncesCacher, uint64ByteSliceConverter) + if err != nil { + log.Info("error creating hdrNonces") + return nil, err + } + + cacherCfg = getCacherFromConfig(config.TxBlockBodyDataPool) + txBlockBody, err := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) + if err != nil { + log.Info("error creating txBlockBody") + return nil, err + } + + cacherCfg = getCacherFromConfig(config.PeerBlockBodyDataPool) + peerChangeBlockBody, err := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) + if err != nil { + log.Info("error creating peerChangeBlockBody") + return nil, err + } + + return dataPool.NewShardedDataPool( + txPool, + uTxPool, + rewardTxPool, + hdrPool, + hdrNonces, + txBlockBody, + peerChangeBlockBody, + metaBlockBody, + ) } func createMetaDataPoolFromConfig( - config *config.Config, - 
uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter, + config *config.Config, + uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter, ) (dataRetriever.MetaPoolsHolder, error) { - cacherCfg := getCacherFromConfig(config.MetaBlockBodyDataPool) - metaBlockBody, err := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - if err != nil { - log.Info("error creating metaBlockBody") - return nil, err - } - - miniBlockHashes, err := shardedData.NewShardedData(getCacherFromConfig(config.MiniBlockHeaderHashesDataPool)) - if err != nil { - log.Info("error creating miniBlockHashes") - return nil, err - } - - cacherCfg = getCacherFromConfig(config.ShardHeadersDataPool) - shardHeaders, err := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - if err != nil { - log.Info("error creating shardHeaders") - return nil, err - } - - headersNoncesCacher, err := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - if err != nil { - log.Info("error creating shard headers nonces pool") - return nil, err - } - headersNonces, err := dataPool.NewNonceSyncMapCacher(headersNoncesCacher, uint64ByteSliceConverter) - if err != nil { - log.Info("error creating shard headers nonces pool") - return nil, err - } - - return dataPool.NewMetaDataPool(metaBlockBody, miniBlockHashes, shardHeaders, headersNonces) + cacherCfg := getCacherFromConfig(config.MetaBlockBodyDataPool) + metaBlockBody, err := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) + if err != nil { + log.Info("error creating metaBlockBody") + return nil, err + } + + miniBlockHashes, err := shardedData.NewShardedData(getCacherFromConfig(config.MiniBlockHeaderHashesDataPool)) + if err != nil { + log.Info("error creating miniBlockHashes") + return nil, err + } + + cacherCfg = getCacherFromConfig(config.ShardHeadersDataPool) + shardHeaders, err := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) + if err != nil { + log.Info("error creating shardHeaders") + return nil, err + } + + headersNoncesCacher, err := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) + if err != nil { + log.Info("error creating shard headers nonces pool") + return nil, err + } + headersNonces, err := dataPool.NewNonceSyncMapCacher(headersNoncesCacher, uint64ByteSliceConverter) + if err != nil { + log.Info("error creating shard headers nonces pool") + return nil, err + } + + return dataPool.NewMetaDataPool(metaBlockBody, miniBlockHashes, shardHeaders, headersNonces) } func createSingleSigner(config *config.Config) (crypto.SingleSigner, error) { - switch config.Consensus.Type { - case BlsConsensusType: - return &singlesig.BlsSingleSigner{}, nil - case BnConsensusType: - return &singlesig.SchnorrSigner{}, nil - } - - return nil, errors.New("no consensus type provided in config file") + switch config.Consensus.Type { + case BlsConsensusType: + return &singlesig.BlsSingleSigner{}, nil + case BnConsensusType: + return &singlesig.SchnorrSigner{}, nil + } + + return nil, errors.New("no consensus type provided in config file") } func getMultisigHasherFromConfig(cfg *config.Config) (hashing.Hasher, error) { - if cfg.Consensus.Type == BlsConsensusType && cfg.MultisigHasher.Type != "blake2b" { - return nil, errors.New("wrong multisig hasher provided for bls consensus type") - } - - switch cfg.MultisigHasher.Type { - case "sha256": - return sha256.Sha256{}, nil - case "blake2b": - if cfg.Consensus.Type == BlsConsensusType { - return blake2b.Blake2b{HashSize: 
BlsHashSize}, nil - } - return blake2b.Blake2b{}, nil - } - - return nil, errors.New("no multisig hasher provided in config file") + if cfg.Consensus.Type == BlsConsensusType && cfg.MultisigHasher.Type != "blake2b" { + return nil, errors.New("wrong multisig hasher provided for bls consensus type") + } + + switch cfg.MultisigHasher.Type { + case "sha256": + return sha256.Sha256{}, nil + case "blake2b": + if cfg.Consensus.Type == BlsConsensusType { + return blake2b.Blake2b{HashSize: BlsHashSize}, nil + } + return blake2b.Blake2b{}, nil + } + + return nil, errors.New("no multisig hasher provided in config file") } func createMultiSigner( - config *config.Config, - hasher hashing.Hasher, - pubKeys []string, - privateKey crypto.PrivateKey, - keyGen crypto.KeyGenerator, + config *config.Config, + hasher hashing.Hasher, + pubKeys []string, + privateKey crypto.PrivateKey, + keyGen crypto.KeyGenerator, ) (crypto.MultiSigner, error) { - switch config.Consensus.Type { - case BlsConsensusType: - blsSigner := &blsMultiSig.KyberMultiSignerBLS{} - return multisig.NewBLSMultisig(blsSigner, hasher, pubKeys, privateKey, keyGen, uint16(0)) - case BnConsensusType: - return multisig.NewBelNevMultisig(hasher, pubKeys, privateKey, keyGen, uint16(0)) - } + switch config.Consensus.Type { + case BlsConsensusType: + blsSigner := &blsMultiSig.KyberMultiSignerBLS{} + return multisig.NewBLSMultisig(blsSigner, hasher, pubKeys, privateKey, keyGen, uint16(0)) + case BnConsensusType: + return multisig.NewBelNevMultisig(hasher, pubKeys, privateKey, keyGen, uint16(0)) + } - return nil, errors.New("no consensus type provided in config file") + return nil, errors.New("no consensus type provided in config file") } func createNetMessenger( - p2pConfig *config.P2PConfig, - log *logger.Logger, - randReader io.Reader, + p2pConfig *config.P2PConfig, + log *logger.Logger, + randReader io.Reader, ) (p2p.Messenger, error) { - if p2pConfig.Node.Port < 0 { - return nil, errors.New("cannot start node on port < 0") - } + if p2pConfig.Node.Port < 0 { + return nil, errors.New("cannot start node on port < 0") + } - pDiscoveryFactory := factoryP2P.NewPeerDiscovererCreator(*p2pConfig) - pDiscoverer, err := pDiscoveryFactory.CreatePeerDiscoverer() + pDiscoveryFactory := factoryP2P.NewPeerDiscovererCreator(*p2pConfig) + pDiscoverer, err := pDiscoveryFactory.CreatePeerDiscoverer() - if err != nil { - return nil, err - } + if err != nil { + return nil, err + } - log.Info(fmt.Sprintf("Starting with peer discovery: %s", pDiscoverer.Name())) + log.Info(fmt.Sprintf("Starting with peer discovery: %s", pDiscoverer.Name())) - prvKey, _ := ecdsa.GenerateKey(btcec.S256(), randReader) - sk := (*libp2pCrypto.Secp256k1PrivateKey)(prvKey) + prvKey, _ := ecdsa.GenerateKey(btcec.S256(), randReader) + sk := (*libp2pCrypto.Secp256k1PrivateKey)(prvKey) - nm, err := libp2p.NewNetworkMessenger( - context.Background(), - p2pConfig.Node.Port, - sk, - nil, - loadBalancer.NewOutgoingChannelLoadBalancer(), - pDiscoverer, - libp2p.ListenAddrWithIp4AndTcp, - ) - if err != nil { - return nil, err - } + nm, err := libp2p.NewNetworkMessenger( + context.Background(), + p2pConfig.Node.Port, + sk, + nil, + loadBalancer.NewOutgoingChannelLoadBalancer(), + pDiscoverer, + libp2p.ListenAddrWithIp4AndTcp, + ) + if err != nil { + return nil, err + } - return nm, nil + return nm, nil } func newInterceptorAndResolverContainerFactory( - shardCoordinator sharding.Coordinator, - nodesCoordinator sharding.NodesCoordinator, - data *Data, - core *Core, - crypto *Crypto, - state *State, - 
network *Network, + shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, + data *Data, + core *Core, + crypto *Crypto, + state *State, + network *Network, ) (process.InterceptorsContainerFactory, dataRetriever.ResolversContainerFactory, error) { - if shardCoordinator.SelfId() < shardCoordinator.NumberOfShards() { - return newShardInterceptorAndResolverContainerFactory( - shardCoordinator, - nodesCoordinator, - data, - core, - crypto, - state, - network, - ) - } - if shardCoordinator.SelfId() == sharding.MetachainShardId { - return newMetaInterceptorAndResolverContainerFactory( - shardCoordinator, - nodesCoordinator, - data, - core, - crypto, - network, - ) - } - - return nil, nil, errors.New("could not create interceptor and resolver container factory") + if shardCoordinator.SelfId() < shardCoordinator.NumberOfShards() { + return newShardInterceptorAndResolverContainerFactory( + shardCoordinator, + nodesCoordinator, + data, + core, + crypto, + state, + network, + ) + } + if shardCoordinator.SelfId() == sharding.MetachainShardId { + return newMetaInterceptorAndResolverContainerFactory( + shardCoordinator, + nodesCoordinator, + data, + core, + crypto, + network, + ) + } + + return nil, nil, errors.New("could not create interceptor and resolver container factory") } func newShardInterceptorAndResolverContainerFactory( - shardCoordinator sharding.Coordinator, - nodesCoordinator sharding.NodesCoordinator, - data *Data, - core *Core, - crypto *Crypto, - state *State, - network *Network, + shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, + data *Data, + core *Core, + crypto *Crypto, + state *State, + network *Network, ) (process.InterceptorsContainerFactory, dataRetriever.ResolversContainerFactory, error) { - //TODO add a real chronology validator and remove null chronology validator - interceptorContainerFactory, err := shard.NewInterceptorsContainerFactory( - shardCoordinator, - nodesCoordinator, - network.NetMessenger, - data.Store, - core.Marshalizer, - core.Hasher, - crypto.TxSignKeyGen, - crypto.TxSingleSigner, - crypto.MultiSigner, - data.Datapool, - state.AddressConverter, - ) - if err != nil { - return nil, nil, err - } - - dataPacker, err := partitioning.NewSizeDataPacker(core.Marshalizer) - if err != nil { - return nil, nil, err - } - - resolversContainerFactory, err := shardfactoryDataRetriever.NewResolversContainerFactory( - shardCoordinator, - network.NetMessenger, - data.Store, - core.Marshalizer, - data.Datapool, - core.Uint64ByteSliceConverter, - dataPacker, - ) - if err != nil { - return nil, nil, err - } - - return interceptorContainerFactory, resolversContainerFactory, nil + //TODO add a real chronology validator and remove null chronology validator + interceptorContainerFactory, err := shard.NewInterceptorsContainerFactory( + shardCoordinator, + nodesCoordinator, + network.NetMessenger, + data.Store, + core.Marshalizer, + core.Hasher, + crypto.TxSignKeyGen, + crypto.TxSingleSigner, + crypto.MultiSigner, + data.Datapool, + state.AddressConverter, + ) + if err != nil { + return nil, nil, err + } + + dataPacker, err := partitioning.NewSizeDataPacker(core.Marshalizer) + if err != nil { + return nil, nil, err + } + + resolversContainerFactory, err := shardfactoryDataRetriever.NewResolversContainerFactory( + shardCoordinator, + network.NetMessenger, + data.Store, + core.Marshalizer, + data.Datapool, + core.Uint64ByteSliceConverter, + dataPacker, + ) + if err != nil { + return nil, nil, err + } + + return 
interceptorContainerFactory, resolversContainerFactory, nil } func newMetaInterceptorAndResolverContainerFactory( - shardCoordinator sharding.Coordinator, - nodesCoordinator sharding.NodesCoordinator, - data *Data, - core *Core, - crypto *Crypto, - network *Network, + shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, + data *Data, + core *Core, + crypto *Crypto, + network *Network, ) (process.InterceptorsContainerFactory, dataRetriever.ResolversContainerFactory, error) { - //TODO add a real chronology validator and remove null chronology validator - interceptorContainerFactory, err := metachain.NewInterceptorsContainerFactory( - shardCoordinator, - nodesCoordinator, - network.NetMessenger, - data.Store, - core.Marshalizer, - core.Hasher, - crypto.MultiSigner, - data.MetaDatapool, - ) - if err != nil { - return nil, nil, err - } - resolversContainerFactory, err := metafactoryDataRetriever.NewResolversContainerFactory( - shardCoordinator, - network.NetMessenger, - data.Store, - core.Marshalizer, - data.MetaDatapool, - core.Uint64ByteSliceConverter, - ) - if err != nil { - return nil, nil, err - } - return interceptorContainerFactory, resolversContainerFactory, nil + //TODO add a real chronology validator and remove null chronology validator + interceptorContainerFactory, err := metachain.NewInterceptorsContainerFactory( + shardCoordinator, + nodesCoordinator, + network.NetMessenger, + data.Store, + core.Marshalizer, + core.Hasher, + crypto.MultiSigner, + data.MetaDatapool, + ) + if err != nil { + return nil, nil, err + } + resolversContainerFactory, err := metafactoryDataRetriever.NewResolversContainerFactory( + shardCoordinator, + network.NetMessenger, + data.Store, + core.Marshalizer, + data.MetaDatapool, + core.Uint64ByteSliceConverter, + ) + if err != nil { + return nil, nil, err + } + return interceptorContainerFactory, resolversContainerFactory, nil } func generateGenesisHeadersAndApplyInitialBalances( - coreComponents *Core, - stateComponents *State, - shardCoordinator sharding.Coordinator, - nodesSetup *sharding.NodesSetup, - genesisConfig *sharding.Genesis, + coreComponents *Core, + stateComponents *State, + shardCoordinator sharding.Coordinator, + nodesSetup *sharding.NodesSetup, + genesisConfig *sharding.Genesis, ) (map[uint32]data.HeaderHandler, error) { - //TODO change this rudimentary startup for metachain nodes - // Talk between Adrian, Robert and Iulian, did not want it to be discarded: - // -------------------------------------------------------------------- - // Adrian: "This looks like a workaround as the metchain should not deal with individual accounts, but shards data. - // What I was thinking was that the genesis on metachain (or pre-genesis block) is the nodes allocation to shards, - // with 0 state root for every shard, as there is no balance yet. - // Then the shards start operating as they get the initial node allocation, maybe we can do consensus on the - // genesis as well, I think this would be actually good as then everything is signed and agreed upon. - // The genesis shard blocks need to be then just the state root, I think we already have that in genesis, - // so shard nodes can go ahead with individually creating the block, but then run consensus on this. - // Then this block is sent to metachain who updates the state root of every shard and creates the metablock for - // the genesis of each of the shards (this is actually the same thing that would happen at new epoch start)." 
- - shardsGenesisBlocks := make(map[uint32]data.HeaderHandler) - - for shardId := uint32(0); shardId < shardCoordinator.NumberOfShards(); shardId++ { - isCurrentShard := shardId == shardCoordinator.SelfId() - if isCurrentShard { - continue - } - - newShardCoordinator, account, err := createInMemoryShardCoordinatorAndAccount( - coreComponents, - shardCoordinator.NumberOfShards(), - shardId, - ) - if err != nil { - return nil, err - } - - genesisBlock, err := createGenesisBlockAndApplyInitialBalances( - account, - newShardCoordinator, - stateComponents.AddressConverter, - genesisConfig, - uint64(nodesSetup.StartTime), - ) - if err != nil { - return nil, err - } - - shardsGenesisBlocks[shardId] = genesisBlock - } - - genesisBlockForCurrentShard, err := createGenesisBlockAndApplyInitialBalances( - stateComponents.AccountsAdapter, - shardCoordinator, - stateComponents.AddressConverter, - genesisConfig, - uint64(nodesSetup.StartTime), - ) - if err != nil { - return nil, err - } - - shardsGenesisBlocks[shardCoordinator.SelfId()] = genesisBlockForCurrentShard - - genesisBlock, err := genesis.CreateMetaGenesisBlock( - uint64(nodesSetup.StartTime), - nodesSetup.InitialNodesPubKeys(), - ) - - if err != nil { - return nil, err - } - - shardsGenesisBlocks[sharding.MetachainShardId] = genesisBlock - - return shardsGenesisBlocks, nil + //TODO change this rudimentary startup for metachain nodes + // Talk between Adrian, Robert and Iulian, did not want it to be discarded: + // -------------------------------------------------------------------- + // Adrian: "This looks like a workaround as the metchain should not deal with individual accounts, but shards data. + // What I was thinking was that the genesis on metachain (or pre-genesis block) is the nodes allocation to shards, + // with 0 state root for every shard, as there is no balance yet. + // Then the shards start operating as they get the initial node allocation, maybe we can do consensus on the + // genesis as well, I think this would be actually good as then everything is signed and agreed upon. + // The genesis shard blocks need to be then just the state root, I think we already have that in genesis, + // so shard nodes can go ahead with individually creating the block, but then run consensus on this. + // Then this block is sent to metachain who updates the state root of every shard and creates the metablock for + // the genesis of each of the shards (this is actually the same thing that would happen at new epoch start)." 
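// Note on the block added below (descriptive only, not part of the diff): it collects one
// genesis header per shard in shardsGenesisBlocks, keyed by shard ID. Every shard other than
// the node's own gets a throw-away in-memory shard coordinator and accounts adapter
// (createInMemoryShardCoordinatorAndAccount) before its genesis block is built and stored;
// the node's own shard reuses stateComponents.AccountsAdapter; and the metachain entry,
// produced by genesis.CreateMetaGenesisBlock from the initial nodes' public keys, is stored
// under sharding.MetachainShardId.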
+ + shardsGenesisBlocks := make(map[uint32]data.HeaderHandler) + + for shardId := uint32(0); shardId < shardCoordinator.NumberOfShards(); shardId++ { + isCurrentShard := shardId == shardCoordinator.SelfId() + if isCurrentShard { + continue + } + + newShardCoordinator, account, err := createInMemoryShardCoordinatorAndAccount( + coreComponents, + shardCoordinator.NumberOfShards(), + shardId, + ) + if err != nil { + return nil, err + } + + genesisBlock, err := createGenesisBlockAndApplyInitialBalances( + account, + newShardCoordinator, + stateComponents.AddressConverter, + genesisConfig, + uint64(nodesSetup.StartTime), + ) + if err != nil { + return nil, err + } + + shardsGenesisBlocks[shardId] = genesisBlock + } + + genesisBlockForCurrentShard, err := createGenesisBlockAndApplyInitialBalances( + stateComponents.AccountsAdapter, + shardCoordinator, + stateComponents.AddressConverter, + genesisConfig, + uint64(nodesSetup.StartTime), + ) + if err != nil { + return nil, err + } + + shardsGenesisBlocks[shardCoordinator.SelfId()] = genesisBlockForCurrentShard + + genesisBlock, err := genesis.CreateMetaGenesisBlock( + uint64(nodesSetup.StartTime), + nodesSetup.InitialNodesPubKeys(), + ) + + if err != nil { + return nil, err + } + + shardsGenesisBlocks[sharding.MetachainShardId] = genesisBlock + + return shardsGenesisBlocks, nil } func createGenesisBlockAndApplyInitialBalances( - accounts state.AccountsAdapter, - shardCoordinator sharding.Coordinator, - addressConverter state.AddressConverter, - genesisConfig *sharding.Genesis, - startTime uint64, + accounts state.AccountsAdapter, + shardCoordinator sharding.Coordinator, + addressConverter state.AddressConverter, + genesisConfig *sharding.Genesis, + startTime uint64, ) (data.HeaderHandler, error) { - initialBalances, err := genesisConfig.InitialNodesBalances(shardCoordinator, addressConverter) - if err != nil { - return nil, err - } - - return genesis.CreateShardGenesisBlockFromInitialBalances( - accounts, - shardCoordinator, - addressConverter, - initialBalances, - startTime, - ) + initialBalances, err := genesisConfig.InitialNodesBalances(shardCoordinator, addressConverter) + if err != nil { + return nil, err + } + + return genesis.CreateShardGenesisBlockFromInitialBalances( + accounts, + shardCoordinator, + addressConverter, + initialBalances, + startTime, + ) } func createInMemoryShardCoordinatorAndAccount( - coreComponents *Core, - numOfShards uint32, - shardId uint32, + coreComponents *Core, + numOfShards uint32, + shardId uint32, ) (sharding.Coordinator, state.AccountsAdapter, error) { - newShardCoordinator, err := sharding.NewMultiShardCoordinator(numOfShards, shardId) - if err != nil { - return nil, nil, err - } + newShardCoordinator, err := sharding.NewMultiShardCoordinator(numOfShards, shardId) + if err != nil { + return nil, nil, err + } - accountFactory, err := factoryState.NewAccountFactoryCreator(factoryState.UserAccount) - if err != nil { - return nil, nil, err - } + accountFactory, err := factoryState.NewAccountFactoryCreator(factoryState.UserAccount) + if err != nil { + return nil, nil, err + } - accounts := generateInMemoryAccountsAdapter( - accountFactory, - coreComponents.Hasher, - coreComponents.Marshalizer, - ) + accounts := generateInMemoryAccountsAdapter( + accountFactory, + coreComponents.Hasher, + coreComponents.Marshalizer, + ) - return newShardCoordinator, accounts, nil + return newShardCoordinator, accounts, nil } func newBlockProcessorAndTracker( - resolversFinder dataRetriever.ResolversFinder, - shardCoordinator 
sharding.Coordinator, - nodesCoordinator sharding.NodesCoordinator, - economicsConfig *config.EconomicsConfig, - data *Data, - core *Core, - state *State, - forkDetector process.ForkDetector, - shardsGenesisBlocks map[uint32]data.HeaderHandler, - nodesConfig *sharding.NodesSetup, - coreServiceContainer serviceContainer.Core, + resolversFinder dataRetriever.ResolversFinder, + shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, + economicsConfig *config.EconomicsConfig, + data *Data, + core *Core, + state *State, + forkDetector process.ForkDetector, + shardsGenesisBlocks map[uint32]data.HeaderHandler, + nodesConfig *sharding.NodesSetup, + coreServiceContainer serviceContainer.Core, ) (process.BlockProcessor, process.BlocksTracker, error) { - if economicsConfig.CommunityAddress == "" || economicsConfig.BurnAddress == "" { - return nil, nil, errors.New("rewards configuration missing") - } - - communityAddress, err := hex.DecodeString(economicsConfig.CommunityAddress) - if err != nil { - return nil, nil, err - } - - burnAddress, err := hex.DecodeString(economicsConfig.BurnAddress) - if err != nil { - return nil, nil, err - } - - specialAddressHolder, err := address.NewSpecialAddressHolder( - communityAddress, - burnAddress, - state.AddressConverter, - shardCoordinator) - if err != nil { - return nil, nil, err - } - - // TODO: remove nodesConfig as no longer needed with nodes coordinator available - if shardCoordinator.SelfId() < shardCoordinator.NumberOfShards() { - return newShardBlockProcessorAndTracker( - resolversFinder, - shardCoordinator, - nodesCoordinator, - specialAddressHolder, - data, - core, - state, - forkDetector, - shardsGenesisBlocks, - nodesConfig, - coreServiceContainer, - ) - } - if shardCoordinator.SelfId() == sharding.MetachainShardId { - return newMetaBlockProcessorAndTracker( - resolversFinder, - shardCoordinator, - nodesCoordinator, - specialAddressHolder, - data, - core, - state, - forkDetector, - shardsGenesisBlocks, - coreServiceContainer, - ) - } - - return nil, nil, errors.New("could not create block processor and tracker") + if economicsConfig.CommunityAddress == "" || economicsConfig.BurnAddress == "" { + return nil, nil, errors.New("rewards configuration missing") + } + + communityAddress, err := hex.DecodeString(economicsConfig.CommunityAddress) + if err != nil { + return nil, nil, err + } + + burnAddress, err := hex.DecodeString(economicsConfig.BurnAddress) + if err != nil { + return nil, nil, err + } + + specialAddressHolder, err := address.NewSpecialAddressHolder( + communityAddress, + burnAddress, + state.AddressConverter, + shardCoordinator) + if err != nil { + return nil, nil, err + } + + // TODO: remove nodesConfig as no longer needed with nodes coordinator available + if shardCoordinator.SelfId() < shardCoordinator.NumberOfShards() { + return newShardBlockProcessorAndTracker( + resolversFinder, + shardCoordinator, + nodesCoordinator, + specialAddressHolder, + data, + core, + state, + forkDetector, + shardsGenesisBlocks, + nodesConfig, + coreServiceContainer, + ) + } + if shardCoordinator.SelfId() == sharding.MetachainShardId { + return newMetaBlockProcessorAndTracker( + resolversFinder, + shardCoordinator, + nodesCoordinator, + specialAddressHolder, + data, + core, + state, + forkDetector, + shardsGenesisBlocks, + coreServiceContainer, + ) + } + + return nil, nil, errors.New("could not create block processor and tracker") } func newShardBlockProcessorAndTracker( - resolversFinder dataRetriever.ResolversFinder, - 
shardCoordinator sharding.Coordinator, - nodesCoordinator sharding.NodesCoordinator, - specialAddressHandler process.SpecialAddressHandler, - data *Data, - core *Core, - state *State, - forkDetector process.ForkDetector, - shardsGenesisBlocks map[uint32]data.HeaderHandler, - nodesConfig *sharding.NodesSetup, - coreServiceContainer serviceContainer.Core, + resolversFinder dataRetriever.ResolversFinder, + shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, + specialAddressHandler process.SpecialAddressHandler, + data *Data, + core *Core, + state *State, + forkDetector process.ForkDetector, + shardsGenesisBlocks map[uint32]data.HeaderHandler, + nodesConfig *sharding.NodesSetup, + coreServiceContainer serviceContainer.Core, ) (process.BlockProcessor, process.BlocksTracker, error) { - argsParser, err := smartContract.NewAtArgumentParser() - if err != nil { - return nil, nil, err - } - - vmFactory, err := shard.NewVMContainerFactory(state.AccountsAdapter, state.AddressConverter) - if err != nil { - return nil, nil, err - } - - vmContainer, err := vmFactory.Create() - if err != nil { - return nil, nil, err - } - - interimProcFactory, err := shard.NewIntermediateProcessorsContainerFactory( - shardCoordinator, - core.Marshalizer, - core.Hasher, - state.AddressConverter, - specialAddressHandler, - data.Store, - data.Datapool, - ) - if err != nil { - return nil, nil, err - } - - interimProcContainer, err := interimProcFactory.Create() - if err != nil { - return nil, nil, err - } - - scForwarder, err := interimProcContainer.Get(dataBlock.SmartContractResultBlock) - if err != nil { - return nil, nil, err - } - - rewardsTxInterim, err := interimProcContainer.Get(dataBlock.RewardsBlock) - if err != nil { - return nil, nil, err - } - - rewardsTxHandler, ok := rewardsTxInterim.(process.TransactionFeeHandler) - if !ok { - return nil, nil, process.ErrWrongTypeAssertion - } - - scProcessor, err := smartContract.NewSmartContractProcessor( - vmContainer, - argsParser, - core.Hasher, - core.Marshalizer, - state.AccountsAdapter, - vmFactory.VMAccountsDB(), - state.AddressConverter, - shardCoordinator, - scForwarder, - rewardsTxHandler, - ) - if err != nil { - return nil, nil, err - } - - requestHandler, err := requestHandlers.NewShardResolverRequestHandler( - resolversFinder, - factory.TransactionTopic, - factory.UnsignedTransactionTopic, - factory.RewardsTransactionTopic, - factory.MiniBlocksTopic, - factory.MetachainBlocksTopic, - MaxTxsToRequest, - ) - if err != nil { - return nil, nil, err - } - - rewardsTxProcessor, err := rewardTransaction.NewRewardTxProcessor( - state.AccountsAdapter, - state.AddressConverter, - shardCoordinator, - rewardsTxInterim, - ) - if err != nil { - return nil, nil, err - } - - txTypeHandler, err := coordinator.NewTxTypeHandler(state.AddressConverter, shardCoordinator, state.AccountsAdapter) - if err != nil { - return nil, nil, err - } - - transactionProcessor, err := transaction.NewTxProcessor( - state.AccountsAdapter, - core.Hasher, - state.AddressConverter, - core.Marshalizer, - shardCoordinator, - scProcessor, - rewardsTxHandler, - txTypeHandler, - ) - if err != nil { - return nil, nil, errors.New("could not create transaction processor: " + err.Error()) - } - - blockTracker, err := track.NewShardBlockTracker( - data.Datapool, - core.Marshalizer, - shardCoordinator, - data.Store, - ) - if err != nil { - return nil, nil, err - } - - preProcFactory, err := shard.NewPreProcessorsContainerFactory( - shardCoordinator, - data.Store, - core.Marshalizer, 
- core.Hasher, - data.Datapool, - state.AddressConverter, - state.AccountsAdapter, - requestHandler, - transactionProcessor, - scProcessor, - scProcessor, - rewardsTxProcessor, - ) - if err != nil { - return nil, nil, err - } - - preProcContainer, err := preProcFactory.Create() - if err != nil { - return nil, nil, err - } - - txCoordinator, err := coordinator.NewTransactionCoordinator( - shardCoordinator, - state.AccountsAdapter, - data.Datapool, - requestHandler, - preProcContainer, - interimProcContainer, - ) - if err != nil { - return nil, nil, err - } - - blockProcessor, err := block.NewShardProcessor( - coreServiceContainer, - data.Datapool, - data.Store, - core.Hasher, - core.Marshalizer, - state.AccountsAdapter, - shardCoordinator, - nodesCoordinator, - specialAddressHandler, - forkDetector, - blockTracker, - shardsGenesisBlocks, - requestHandler, - txCoordinator, - core.Uint64ByteSliceConverter, - ) - if err != nil { - return nil, nil, errors.New("could not create block processor: " + err.Error()) - } - - err = blockProcessor.SetAppStatusHandler(core.StatusHandler) - if err != nil { - return nil, nil, err - } - - return blockProcessor, blockTracker, nil + argsParser, err := smartContract.NewAtArgumentParser() + if err != nil { + return nil, nil, err + } + + vmFactory, err := shard.NewVMContainerFactory(state.AccountsAdapter, state.AddressConverter) + if err != nil { + return nil, nil, err + } + + vmContainer, err := vmFactory.Create() + if err != nil { + return nil, nil, err + } + + interimProcFactory, err := shard.NewIntermediateProcessorsContainerFactory( + shardCoordinator, + core.Marshalizer, + core.Hasher, + state.AddressConverter, + specialAddressHandler, + data.Store, + data.Datapool, + ) + if err != nil { + return nil, nil, err + } + + interimProcContainer, err := interimProcFactory.Create() + if err != nil { + return nil, nil, err + } + + scForwarder, err := interimProcContainer.Get(dataBlock.SmartContractResultBlock) + if err != nil { + return nil, nil, err + } + + rewardsTxInterim, err := interimProcContainer.Get(dataBlock.RewardsBlock) + if err != nil { + return nil, nil, err + } + + rewardsTxHandler, ok := rewardsTxInterim.(process.TransactionFeeHandler) + if !ok { + return nil, nil, process.ErrWrongTypeAssertion + } + + scProcessor, err := smartContract.NewSmartContractProcessor( + vmContainer, + argsParser, + core.Hasher, + core.Marshalizer, + state.AccountsAdapter, + vmFactory.VMAccountsDB(), + state.AddressConverter, + shardCoordinator, + scForwarder, + rewardsTxHandler, + ) + if err != nil { + return nil, nil, err + } + + requestHandler, err := requestHandlers.NewShardResolverRequestHandler( + resolversFinder, + factory.TransactionTopic, + factory.UnsignedTransactionTopic, + factory.RewardsTransactionTopic, + factory.MiniBlocksTopic, + factory.MetachainBlocksTopic, + MaxTxsToRequest, + ) + if err != nil { + return nil, nil, err + } + + rewardsTxProcessor, err := rewardTransaction.NewRewardTxProcessor( + state.AccountsAdapter, + state.AddressConverter, + shardCoordinator, + rewardsTxInterim, + ) + if err != nil { + return nil, nil, err + } + + txTypeHandler, err := coordinator.NewTxTypeHandler(state.AddressConverter, shardCoordinator, state.AccountsAdapter) + if err != nil { + return nil, nil, err + } + + transactionProcessor, err := transaction.NewTxProcessor( + state.AccountsAdapter, + core.Hasher, + state.AddressConverter, + core.Marshalizer, + shardCoordinator, + scProcessor, + rewardsTxHandler, + txTypeHandler, + ) + if err != nil { + return nil, nil, 
errors.New("could not create transaction processor: " + err.Error()) + } + + blockTracker, err := track.NewShardBlockTracker( + data.Datapool, + core.Marshalizer, + shardCoordinator, + data.Store, + ) + if err != nil { + return nil, nil, err + } + + preProcFactory, err := shard.NewPreProcessorsContainerFactory( + shardCoordinator, + data.Store, + core.Marshalizer, + core.Hasher, + data.Datapool, + state.AddressConverter, + state.AccountsAdapter, + requestHandler, + transactionProcessor, + scProcessor, + scProcessor, + rewardsTxProcessor, + ) + if err != nil { + return nil, nil, err + } + + preProcContainer, err := preProcFactory.Create() + if err != nil { + return nil, nil, err + } + + txCoordinator, err := coordinator.NewTransactionCoordinator( + shardCoordinator, + state.AccountsAdapter, + data.Datapool, + requestHandler, + preProcContainer, + interimProcContainer, + ) + if err != nil { + return nil, nil, err + } + + blockProcessor, err := block.NewShardProcessor( + coreServiceContainer, + data.Datapool, + data.Store, + core.Hasher, + core.Marshalizer, + state.AccountsAdapter, + shardCoordinator, + nodesCoordinator, + specialAddressHandler, + forkDetector, + blockTracker, + shardsGenesisBlocks, + requestHandler, + txCoordinator, + core.Uint64ByteSliceConverter, + ) + if err != nil { + return nil, nil, errors.New("could not create block processor: " + err.Error()) + } + + err = blockProcessor.SetAppStatusHandler(core.StatusHandler) + if err != nil { + return nil, nil, err + } + + return blockProcessor, blockTracker, nil } func newMetaBlockProcessorAndTracker( - resolversFinder dataRetriever.ResolversFinder, - shardCoordinator sharding.Coordinator, - nodesCoordinator sharding.NodesCoordinator, - specialAddressHandler process.SpecialAddressHandler, - data *Data, - core *Core, - state *State, - forkDetector process.ForkDetector, - shardsGenesisBlocks map[uint32]data.HeaderHandler, - coreServiceContainer serviceContainer.Core, + resolversFinder dataRetriever.ResolversFinder, + shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, + specialAddressHandler process.SpecialAddressHandler, + data *Data, + core *Core, + state *State, + forkDetector process.ForkDetector, + shardsGenesisBlocks map[uint32]data.HeaderHandler, + coreServiceContainer serviceContainer.Core, ) (process.BlockProcessor, process.BlocksTracker, error) { - requestHandler, err := requestHandlers.NewMetaResolverRequestHandler( - resolversFinder, - factory.ShardHeadersForMetachainTopic, - ) - - if err != nil { - return nil, nil, err - } - - blockTracker, err := track.NewMetaBlockTracker() - if err != nil { - return nil, nil, err - } - - metaProcessor, err := block.NewMetaProcessor( - coreServiceContainer, - state.AccountsAdapter, - data.MetaDatapool, - forkDetector, - shardCoordinator, - nodesCoordinator, - specialAddressHandler, - core.Hasher, - core.Marshalizer, - data.Store, - shardsGenesisBlocks, - requestHandler, - core.Uint64ByteSliceConverter, - ) - if err != nil { - return nil, nil, errors.New("could not create block processor: " + err.Error()) - } - - err = metaProcessor.SetAppStatusHandler(core.StatusHandler) - if err != nil { - return nil, nil, err - } - - return metaProcessor, blockTracker, nil + requestHandler, err := requestHandlers.NewMetaResolverRequestHandler( + resolversFinder, + factory.ShardHeadersForMetachainTopic, + ) + + if err != nil { + return nil, nil, err + } + + blockTracker, err := track.NewMetaBlockTracker() + if err != nil { + return nil, nil, err + } + + metaProcessor, 
err := block.NewMetaProcessor( + coreServiceContainer, + state.AccountsAdapter, + data.MetaDatapool, + forkDetector, + shardCoordinator, + nodesCoordinator, + specialAddressHandler, + core.Hasher, + core.Marshalizer, + data.Store, + shardsGenesisBlocks, + requestHandler, + core.Uint64ByteSliceConverter, + ) + if err != nil { + return nil, nil, errors.New("could not create block processor: " + err.Error()) + } + + err = metaProcessor.SetAppStatusHandler(core.StatusHandler) + if err != nil { + return nil, nil, err + } + + return metaProcessor, blockTracker, nil } func getCacherFromConfig(cfg config.CacheConfig) storageUnit.CacheConfig { - return storageUnit.CacheConfig{ - Size: cfg.Size, - Type: storageUnit.CacheType(cfg.Type), - Shards: cfg.Shards, - } + return storageUnit.CacheConfig{ + Size: cfg.Size, + Type: storageUnit.CacheType(cfg.Type), + Shards: cfg.Shards, + } } func getDBFromConfig(cfg config.DBConfig, uniquePath string) storageUnit.DBConfig { - return storageUnit.DBConfig{ - FilePath: filepath.Join(uniquePath, cfg.FilePath), - Type: storageUnit.DBType(cfg.Type), - MaxBatchSize: cfg.MaxBatchSize, - BatchDelaySeconds: cfg.BatchDelaySeconds, - MaxOpenFiles: cfg.MaxOpenFiles, - } + return storageUnit.DBConfig{ + FilePath: filepath.Join(uniquePath, cfg.FilePath), + Type: storageUnit.DBType(cfg.Type), + MaxBatchSize: cfg.MaxBatchSize, + BatchDelaySeconds: cfg.BatchDelaySeconds, + MaxOpenFiles: cfg.MaxOpenFiles, + } } func getBloomFromConfig(cfg config.BloomFilterConfig) storageUnit.BloomConfig { - var hashFuncs []storageUnit.HasherType - if cfg.HashFunc != nil { - hashFuncs = make([]storageUnit.HasherType, 0) - for _, hf := range cfg.HashFunc { - hashFuncs = append(hashFuncs, storageUnit.HasherType(hf)) - } - } - - return storageUnit.BloomConfig{ - Size: cfg.Size, - HashFunc: hashFuncs, - } + var hashFuncs []storageUnit.HasherType + if cfg.HashFunc != nil { + hashFuncs = make([]storageUnit.HasherType, 0) + for _, hf := range cfg.HashFunc { + hashFuncs = append(hashFuncs, storageUnit.HasherType(hf)) + } + } + + return storageUnit.BloomConfig{ + Size: cfg.Size, + HashFunc: hashFuncs, + } } func generateInMemoryAccountsAdapter( - accountFactory state.AccountFactory, - hasher hashing.Hasher, - marshalizer marshal.Marshalizer, + accountFactory state.AccountFactory, + hasher hashing.Hasher, + marshalizer marshal.Marshalizer, ) state.AccountsAdapter { - tr, _ := trie.NewTrie(createMemUnit(), marshalizer, hasher) - adb, _ := state.NewAccountsDB(tr, sha256.Sha256{}, marshalizer, accountFactory) + tr, _ := trie.NewTrie(createMemUnit(), marshalizer, hasher) + adb, _ := state.NewAccountsDB(tr, sha256.Sha256{}, marshalizer, accountFactory) - return adb + return adb } func createMemUnit() storage.Storer { - cache, _ := storageUnit.NewCache(storageUnit.LRUCache, 10, 1) - persist, _ := memorydb.New() - unit, _ := storageUnit.NewStorageUnit(cache, persist) - return unit + cache, _ := storageUnit.NewCache(storageUnit.LRUCache, 10, 1) + persist, _ := memorydb.New() + unit, _ := storageUnit.NewStorageUnit(cache, persist) + return unit } // GetSigningParams returns a key generator, a private key, and a public key func GetSigningParams( - ctx *cli.Context, - log *logger.Logger, - skName string, - skIndexName string, - skPemFileName string, - suite crypto.Suite, + ctx *cli.Context, + log *logger.Logger, + skName string, + skIndexName string, + skPemFileName string, + suite crypto.Suite, ) (keyGen crypto.KeyGenerator, privKey crypto.PrivateKey, pubKey crypto.PublicKey, err error) { - sk, err := getSk(ctx, 
log, skName, skIndexName, skPemFileName) - if err != nil { - return nil, nil, nil, err - } + sk, err := getSk(ctx, log, skName, skIndexName, skPemFileName) + if err != nil { + return nil, nil, nil, err + } - keyGen = signing.NewKeyGenerator(suite) + keyGen = signing.NewKeyGenerator(suite) - privKey, err = keyGen.PrivateKeyFromByteArray(sk) - if err != nil { - return nil, nil, nil, err - } + privKey, err = keyGen.PrivateKeyFromByteArray(sk) + if err != nil { + return nil, nil, nil, err + } - pubKey = privKey.GeneratePublic() + pubKey = privKey.GeneratePublic() - return keyGen, privKey, pubKey, err + return keyGen, privKey, pubKey, err } // GetPkEncoded returns the encoded public key func GetPkEncoded(pubKey crypto.PublicKey) string { - pk, err := pubKey.ToByteArray() - if err != nil { - return err.Error() - } + pk, err := pubKey.ToByteArray() + if err != nil { + return err.Error() + } - return encodeAddress(pk) + return encodeAddress(pk) } func encodeAddress(address []byte) string { - return hex.EncodeToString(address) + return hex.EncodeToString(address) } func decodeAddress(address string) ([]byte, error) { - return hex.DecodeString(address) + return hex.DecodeString(address) } func getSk( - ctx *cli.Context, - log *logger.Logger, - skName string, - skIndexName string, - skPemFileName string, + ctx *cli.Context, + log *logger.Logger, + skName string, + skIndexName string, + skPemFileName string, ) ([]byte, error) { - //if flag is defined, it shall overwrite what was read from pem file - if ctx.GlobalIsSet(skName) { - encodedSk := []byte(ctx.GlobalString(skName)) - return decodeAddress(string(encodedSk)) - } + //if flag is defined, it shall overwrite what was read from pem file + if ctx.GlobalIsSet(skName) { + encodedSk := []byte(ctx.GlobalString(skName)) + return decodeAddress(string(encodedSk)) + } - skIndex := ctx.GlobalInt(skIndexName) - encodedSk, err := core.LoadSkFromPemFile(skPemFileName, log, skIndex) - if err != nil { - return nil, err - } + skIndex := ctx.GlobalInt(skIndexName) + encodedSk, err := core.LoadSkFromPemFile(skPemFileName, log, skIndex) + if err != nil { + return nil, err + } - return decodeAddress(string(encodedSk)) + return decodeAddress(string(encodedSk)) } diff --git a/config/config.go b/config/config.go index c10f42ec108..60a45ef4d75 100644 --- a/config/config.go +++ b/config/config.go @@ -4,192 +4,192 @@ import "time" // CacheConfig will map the json cache configuration type CacheConfig struct { - Size uint32 `json:"size"` - Type string `json:"type"` - Shards uint32 `json:"shards"` + Size uint32 `json:"size"` + Type string `json:"type"` + Shards uint32 `json:"shards"` } // DBConfig will map the json db configuration type DBConfig struct { - FilePath string `json:"file"` - Type string `json:"type"` - BatchDelaySeconds int `json:"batchDelaySeconds"` - MaxBatchSize int `json:"maxBatchSize"` - MaxOpenFiles int `json:"maxOpenFiles"` + FilePath string `json:"file"` + Type string `json:"type"` + BatchDelaySeconds int `json:"batchDelaySeconds"` + MaxBatchSize int `json:"maxBatchSize"` + MaxOpenFiles int `json:"maxOpenFiles"` } // BloomFilterConfig will map the json bloom filter configuration type BloomFilterConfig struct { - Size uint `json:"size"` - HashFunc []string `json:"hashFunc"` + Size uint `json:"size"` + HashFunc []string `json:"hashFunc"` } // StorageConfig will map the json storage unit configuration type StorageConfig struct { - Cache CacheConfig `json:"cache"` - DB DBConfig `json:"db"` - Bloom BloomFilterConfig `json:"bloom"` + Cache CacheConfig 
`json:"cache"` + DB DBConfig `json:"db"` + Bloom BloomFilterConfig `json:"bloom"` } // LoggerConfig will map the json logger configuration type LoggerConfig struct { - Path string `json:"path"` - StackTraceDepth int `json:"stackTraceDepth"` + Path string `json:"path"` + StackTraceDepth int `json:"stackTraceDepth"` } // AddressConfig will map the json address configuration type AddressConfig struct { - Length int `json:"length"` - Prefix string `json:"prefix"` + Length int `json:"length"` + Prefix string `json:"prefix"` } // TypeConfig will map the json string type configuration type TypeConfig struct { - Type string `json:"type"` + Type string `json:"type"` } // NTPConfig will hold the configuration for NTP queries type NTPConfig struct { - Host string - Port int - Timeout time.Duration - Version int + Host string + Port int + Timeout time.Duration + Version int } // EconomicsConfig will hold the reward configuration type EconomicsConfig struct { - CommunityAddress string - BurnAddress string + CommunityAddress string + BurnAddress string } // Config will hold the entire application configuration parameters type Config struct { - MiniBlocksStorage StorageConfig - PeerBlockBodyStorage StorageConfig - BlockHeaderStorage StorageConfig - TxStorage StorageConfig - UnsignedTransactionStorage StorageConfig - RewardTxStorage StorageConfig - ShardHdrNonceHashStorage StorageConfig - MetaHdrNonceHashStorage StorageConfig - - ShardDataStorage StorageConfig - MetaBlockStorage StorageConfig - PeerDataStorage StorageConfig - - AccountsTrieStorage StorageConfig - BadBlocksCache CacheConfig - - TxBlockBodyDataPool CacheConfig - StateBlockBodyDataPool CacheConfig - PeerBlockBodyDataPool CacheConfig - BlockHeaderDataPool CacheConfig - BlockHeaderNoncesDataPool CacheConfig - TxDataPool CacheConfig - UnsignedTransactionDataPool CacheConfig - RewardTransactionDataPool CacheConfig - MetaBlockBodyDataPool CacheConfig - - MiniBlockHeaderHashesDataPool CacheConfig - ShardHeadersDataPool CacheConfig - MetaHeaderNoncesDataPool CacheConfig - - Logger LoggerConfig - Address AddressConfig - Hasher TypeConfig - MultisigHasher TypeConfig - Marshalizer TypeConfig - - ResourceStats ResourceStatsConfig - Heartbeat HeartbeatConfig - GeneralSettings GeneralSettingsConfig - Consensus TypeConfig - Explorer ExplorerConfig - - NTPConfig NTPConfig - EconomicsConfig EconomicsConfig + MiniBlocksStorage StorageConfig + PeerBlockBodyStorage StorageConfig + BlockHeaderStorage StorageConfig + TxStorage StorageConfig + UnsignedTransactionStorage StorageConfig + RewardTxStorage StorageConfig + ShardHdrNonceHashStorage StorageConfig + MetaHdrNonceHashStorage StorageConfig + + ShardDataStorage StorageConfig + MetaBlockStorage StorageConfig + PeerDataStorage StorageConfig + + AccountsTrieStorage StorageConfig + BadBlocksCache CacheConfig + + TxBlockBodyDataPool CacheConfig + StateBlockBodyDataPool CacheConfig + PeerBlockBodyDataPool CacheConfig + BlockHeaderDataPool CacheConfig + BlockHeaderNoncesDataPool CacheConfig + TxDataPool CacheConfig + UnsignedTransactionDataPool CacheConfig + RewardTransactionDataPool CacheConfig + MetaBlockBodyDataPool CacheConfig + + MiniBlockHeaderHashesDataPool CacheConfig + ShardHeadersDataPool CacheConfig + MetaHeaderNoncesDataPool CacheConfig + + Logger LoggerConfig + Address AddressConfig + Hasher TypeConfig + MultisigHasher TypeConfig + Marshalizer TypeConfig + + ResourceStats ResourceStatsConfig + Heartbeat HeartbeatConfig + GeneralSettings GeneralSettingsConfig + Consensus TypeConfig + Explorer 
ExplorerConfig + + NTPConfig NTPConfig + EconomicsConfig EconomicsConfig } // NodeConfig will hold basic p2p settings type NodeConfig struct { - Port int - Seed string + Port int + Seed string } // MdnsPeerDiscoveryConfig will hold the mdns discovery config settings type MdnsPeerDiscoveryConfig struct { - Enabled bool - RefreshIntervalInSec int - ServiceTag string + Enabled bool + RefreshIntervalInSec int + ServiceTag string } // KadDhtPeerDiscoveryConfig will hold the kad-dht discovery config settings type KadDhtPeerDiscoveryConfig struct { - Enabled bool - RefreshIntervalInSec int - RandezVous string - InitialPeerList []string + Enabled bool + RefreshIntervalInSec int + RandezVous string + InitialPeerList []string } // P2PConfig will hold all the P2P settings type P2PConfig struct { - Node NodeConfig - MdnsPeerDiscovery MdnsPeerDiscoveryConfig - KadDhtPeerDiscovery KadDhtPeerDiscoveryConfig + Node NodeConfig + MdnsPeerDiscovery MdnsPeerDiscoveryConfig + KadDhtPeerDiscovery KadDhtPeerDiscoveryConfig } // ResourceStatsConfig will hold all resource stats settings type ResourceStatsConfig struct { - Enabled bool - RefreshIntervalInSec int + Enabled bool + RefreshIntervalInSec int } // HeartbeatConfig will hold all heartbeat settings type HeartbeatConfig struct { - Enabled bool - MinTimeToWaitBetweenBroadcastsInSec int - MaxTimeToWaitBetweenBroadcastsInSec int - DurationInSecToConsiderUnresponsive int + Enabled bool + MinTimeToWaitBetweenBroadcastsInSec int + MaxTimeToWaitBetweenBroadcastsInSec int + DurationInSecToConsiderUnresponsive int } // GeneralSettingsConfig will hold the general settings for a node type GeneralSettingsConfig struct { - DestinationShardAsObserver string - NetworkID string - StatusPollingIntervalSec int - NodeDisplayName string + DestinationShardAsObserver string + NetworkID string + StatusPollingIntervalSec int + NodeDisplayName string } // ExplorerConfig will hold the configuration for the explorer indexer type ExplorerConfig struct { - Enabled bool - IndexerURL string + Enabled bool + IndexerURL string } // ServersConfig will hold all the confidential settings for servers type ServersConfig struct { - ElasticSearch ElasticSearchConfig - Prometheus PrometheusConfig + ElasticSearch ElasticSearchConfig + Prometheus PrometheusConfig } // PrometheusConfig will hold configuration for prometheus, such as the join URL type PrometheusConfig struct { - PrometheusBaseURL string - JoinRoute string - StatusRoute string + PrometheusBaseURL string + JoinRoute string + StatusRoute string } // ElasticSearchConfig will hold the configuration for the elastic search type ElasticSearchConfig struct { - Username string - Password string + Username string + Password string } // FacadeConfig will hold different configuration option that will be passed to the main ElrondFacade type FacadeConfig struct { - RestApiPort string - PprofEnabled bool - Prometheus bool - PrometheusJoinURL string - PrometheusJobName string + RestApiPort string + PprofEnabled bool + Prometheus bool + PrometheusJoinURL string + PrometheusJobName string } diff --git a/consensus/mock/blockProcessorMock.go b/consensus/mock/blockProcessorMock.go index d65be438b78..d703d363073 100644 --- a/consensus/mock/blockProcessorMock.go +++ b/consensus/mock/blockProcessorMock.go @@ -1,70 +1,70 @@ package mock import ( - "math/big" - "time" + "math/big" + "time" - "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data" ) // BlockProcessorMock mocks the implementation for a blockProcessor type 
BlockProcessorMock struct { - ProcessBlockCalled func(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler, haveTime func() time.Duration) error - CommitBlockCalled func(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler) error - RevertAccountStateCalled func() - CreateGenesisBlockCalled func(balances map[string]*big.Int) (data.HeaderHandler, error) - CreateBlockCalled func(round uint64, haveTime func() bool) (data.BodyHandler, error) - RestoreBlockIntoPoolsCalled func(header data.HeaderHandler, body data.BodyHandler) error - SetOnRequestTransactionCalled func(f func(destShardID uint32, txHash []byte)) - CreateBlockHeaderCalled func(body data.BodyHandler, round uint64, haveTime func() bool) (data.HeaderHandler, error) - MarshalizedDataToBroadcastCalled func(header data.HeaderHandler, body data.BodyHandler) (map[uint32][]byte, map[string][][]byte, error) - DecodeBlockBodyCalled func(dta []byte) data.BodyHandler - DecodeBlockHeaderCalled func(dta []byte) data.HeaderHandler - AddLastNotarizedHdrCalled func(shardId uint32, processedHdr data.HeaderHandler) + ProcessBlockCalled func(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler, haveTime func() time.Duration) error + CommitBlockCalled func(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler) error + RevertAccountStateCalled func() + CreateGenesisBlockCalled func(balances map[string]*big.Int) (data.HeaderHandler, error) + CreateBlockCalled func(round uint64, haveTime func() bool) (data.BodyHandler, error) + RestoreBlockIntoPoolsCalled func(header data.HeaderHandler, body data.BodyHandler) error + SetOnRequestTransactionCalled func(f func(destShardID uint32, txHash []byte)) + CreateBlockHeaderCalled func(body data.BodyHandler, round uint64, haveTime func() bool) (data.HeaderHandler, error) + MarshalizedDataToBroadcastCalled func(header data.HeaderHandler, body data.BodyHandler) (map[uint32][]byte, map[string][][]byte, error) + DecodeBlockBodyCalled func(dta []byte) data.BodyHandler + DecodeBlockHeaderCalled func(dta []byte) data.HeaderHandler + AddLastNotarizedHdrCalled func(shardId uint32, processedHdr data.HeaderHandler) } // ProcessBlock mocks pocessing a block func (blProcMock *BlockProcessorMock) ProcessBlock(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler, haveTime func() time.Duration) error { - return blProcMock.ProcessBlockCalled(blockChain, header, body, haveTime) + return blProcMock.ProcessBlockCalled(blockChain, header, body, haveTime) } // CommitBlock mocks the commit of a block func (blProcMock *BlockProcessorMock) CommitBlock(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler) error { - return blProcMock.CommitBlockCalled(blockChain, header, body) + return blProcMock.CommitBlockCalled(blockChain, header, body) } // RevertAccountState mocks revert of the accounts state func (blProcMock *BlockProcessorMock) RevertAccountState() { - blProcMock.RevertAccountStateCalled() + blProcMock.RevertAccountStateCalled() } // CreateBlockBody mocks the creation of a transaction block body func (blProcMock *BlockProcessorMock) CreateBlockBody(round uint64, haveTime func() bool) (data.BodyHandler, error) { - return blProcMock.CreateBlockCalled(round, haveTime) + return blProcMock.CreateBlockCalled(round, haveTime) } func (blProcMock *BlockProcessorMock) RestoreBlockIntoPools(header data.HeaderHandler, body data.BodyHandler) error { - return 
blProcMock.RestoreBlockIntoPoolsCalled(header, body) + return blProcMock.RestoreBlockIntoPoolsCalled(header, body) } func (blProcMock BlockProcessorMock) CreateBlockHeader(body data.BodyHandler, round uint64, haveTime func() bool) (data.HeaderHandler, error) { - return blProcMock.CreateBlockHeaderCalled(body, round, haveTime) + return blProcMock.CreateBlockHeaderCalled(body, round, haveTime) } func (blProcMock BlockProcessorMock) MarshalizedDataToBroadcast(header data.HeaderHandler, body data.BodyHandler) (map[uint32][]byte, map[string][][]byte, error) { - return blProcMock.MarshalizedDataToBroadcastCalled(header, body) + return blProcMock.MarshalizedDataToBroadcastCalled(header, body) } func (blProcMock BlockProcessorMock) DecodeBlockBody(dta []byte) data.BodyHandler { - return blProcMock.DecodeBlockBodyCalled(dta) + return blProcMock.DecodeBlockBodyCalled(dta) } func (blProcMock BlockProcessorMock) DecodeBlockHeader(dta []byte) data.HeaderHandler { - return blProcMock.DecodeBlockHeaderCalled(dta) + return blProcMock.DecodeBlockHeaderCalled(dta) } func (blProcMock BlockProcessorMock) AddLastNotarizedHdr(shardId uint32, processedHdr data.HeaderHandler) { - blProcMock.AddLastNotarizedHdrCalled(shardId, processedHdr) + blProcMock.AddLastNotarizedHdrCalled(shardId, processedHdr) } func (blProcMock BlockProcessorMock) SetConsensusRewardAddresses(consensusRewardAddresses []string, round uint64) { @@ -76,4 +76,4 @@ func (blProcMock *BlockProcessorMock) IsInterfaceNil() bool { return true } return false -} \ No newline at end of file +} diff --git a/consensus/spos/commonSubround/subroundStartRound.go b/consensus/spos/commonSubround/subroundStartRound.go index 4c68e915350..3272ade3ce5 100644 --- a/consensus/spos/commonSubround/subroundStartRound.go +++ b/consensus/spos/commonSubround/subroundStartRound.go @@ -1,239 +1,239 @@ package commonSubround import ( - "encoding/hex" - "fmt" - "time" - - "github.com/ElrondNetwork/elrond-go/consensus/spos" - "github.com/ElrondNetwork/elrond-go/core" - "github.com/ElrondNetwork/elrond-go/core/logger" - "github.com/ElrondNetwork/elrond-go/statusHandler" + "encoding/hex" + "fmt" + "time" + + "github.com/ElrondNetwork/elrond-go/consensus/spos" + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/core/logger" + "github.com/ElrondNetwork/elrond-go/statusHandler" ) var log = logger.DefaultLogger() // SubroundStartRound defines the data needed by the subround StartRound type SubroundStartRound struct { - *spos.Subround - processingThresholdPercentage int - getSubroundName func(subroundId int) string - executeStoredMessages func() - broadcastUnnotarisedBlocks func() + *spos.Subround + processingThresholdPercentage int + getSubroundName func(subroundId int) string + executeStoredMessages func() + broadcastUnnotarisedBlocks func() - appStatusHandler core.AppStatusHandler + appStatusHandler core.AppStatusHandler } // NewSubroundStartRound creates a SubroundStartRound object func NewSubroundStartRound( - baseSubround *spos.Subround, - extend func(subroundId int), - processingThresholdPercentage int, - getSubroundName func(subroundId int) string, - executeStoredMessages func(), - broadcastUnnotarisedBlocks func(), + baseSubround *spos.Subround, + extend func(subroundId int), + processingThresholdPercentage int, + getSubroundName func(subroundId int) string, + executeStoredMessages func(), + broadcastUnnotarisedBlocks func(), ) (*SubroundStartRound, error) { - err := checkNewSubroundStartRoundParams( - baseSubround, - broadcastUnnotarisedBlocks, 
- ) - if err != nil { - return nil, err - } - - srStartRound := SubroundStartRound{ - baseSubround, - processingThresholdPercentage, - getSubroundName, - executeStoredMessages, - broadcastUnnotarisedBlocks, - statusHandler.NewNilStatusHandler(), - } - srStartRound.Job = srStartRound.doStartRoundJob - srStartRound.Check = srStartRound.doStartRoundConsensusCheck - srStartRound.Extend = extend - - return &srStartRound, nil + err := checkNewSubroundStartRoundParams( + baseSubround, + broadcastUnnotarisedBlocks, + ) + if err != nil { + return nil, err + } + + srStartRound := SubroundStartRound{ + baseSubround, + processingThresholdPercentage, + getSubroundName, + executeStoredMessages, + broadcastUnnotarisedBlocks, + statusHandler.NewNilStatusHandler(), + } + srStartRound.Job = srStartRound.doStartRoundJob + srStartRound.Check = srStartRound.doStartRoundConsensusCheck + srStartRound.Extend = extend + + return &srStartRound, nil } func checkNewSubroundStartRoundParams( - baseSubround *spos.Subround, - broadcastUnnotarisedBlocks func(), + baseSubround *spos.Subround, + broadcastUnnotarisedBlocks func(), ) error { - if baseSubround == nil { - return spos.ErrNilSubround - } - if baseSubround.ConsensusState == nil { - return spos.ErrNilConsensusState - } - if broadcastUnnotarisedBlocks == nil { - return spos.ErrNilBroadcastUnnotarisedBlocks - } - - err := spos.ValidateConsensusCore(baseSubround.ConsensusCoreHandler) - - return err + if baseSubround == nil { + return spos.ErrNilSubround + } + if baseSubround.ConsensusState == nil { + return spos.ErrNilConsensusState + } + if broadcastUnnotarisedBlocks == nil { + return spos.ErrNilBroadcastUnnotarisedBlocks + } + + err := spos.ValidateConsensusCore(baseSubround.ConsensusCoreHandler) + + return err } // SetAppStatusHandler method set appStatusHandler func (sr *SubroundStartRound) SetAppStatusHandler(ash core.AppStatusHandler) error { - if ash == nil || ash.IsInterfaceNil() { - return spos.ErrNilAppStatusHandler - } + if ash == nil || ash.IsInterfaceNil() { + return spos.ErrNilAppStatusHandler + } - sr.appStatusHandler = ash - return nil + sr.appStatusHandler = ash + return nil } // doStartRoundJob method does the job of the subround StartRound func (sr *SubroundStartRound) doStartRoundJob() bool { - sr.ResetConsensusState() - sr.RoundIndex = sr.Rounder().Index() - sr.RoundTimeStamp = sr.Rounder().TimeStamp() - return true + sr.ResetConsensusState() + sr.RoundIndex = sr.Rounder().Index() + sr.RoundTimeStamp = sr.Rounder().TimeStamp() + return true } // doStartRoundConsensusCheck method checks if the consensus is achieved in the subround StartRound func (sr *SubroundStartRound) doStartRoundConsensusCheck() bool { - if sr.RoundCanceled { - return false - } + if sr.RoundCanceled { + return false + } - if sr.Status(sr.Current()) == spos.SsFinished { - return true - } + if sr.Status(sr.Current()) == spos.SsFinished { + return true + } - if sr.initCurrentRound() { - return true - } + if sr.initCurrentRound() { + return true + } - return false + return false } func (sr *SubroundStartRound) initCurrentRound() bool { - if sr.BootStrapper().ShouldSync() { // if node is not synchronized yet, it has to continue the bootstrapping mechanism - return false - } - sr.appStatusHandler.SetStringValue(core.MetricConsensusRoundState, "") + if sr.BootStrapper().ShouldSync() { // if node is not synchronized yet, it has to continue the bootstrapping mechanism + return false + } + sr.appStatusHandler.SetStringValue(core.MetricConsensusRoundState, "") - err := 
sr.generateNextConsensusGroup(sr.Rounder().Index()) - if err != nil { - log.Error(err.Error()) + err := sr.generateNextConsensusGroup(sr.Rounder().Index()) + if err != nil { + log.Error(err.Error()) - sr.RoundCanceled = true + sr.RoundCanceled = true - return false - } + return false + } - leader, err := sr.GetLeader() - if err != nil { - log.Error(err.Error()) + leader, err := sr.GetLeader() + if err != nil { + log.Error(err.Error()) - sr.RoundCanceled = true + sr.RoundCanceled = true - return false - } + return false + } - msg := "" - if leader == sr.SelfPubKey() { - sr.appStatusHandler.Increment(core.MetricCountLeader) - sr.appStatusHandler.SetStringValue(core.MetricConsensusRoundState, "proposed") - sr.appStatusHandler.SetStringValue(core.MetricConsensusState, "proposer") - msg = " (my turn)" - } + msg := "" + if leader == sr.SelfPubKey() { + sr.appStatusHandler.Increment(core.MetricCountLeader) + sr.appStatusHandler.SetStringValue(core.MetricConsensusRoundState, "proposed") + sr.appStatusHandler.SetStringValue(core.MetricConsensusState, "proposer") + msg = " (my turn)" + } - log.Info(fmt.Sprintf("%sStep 0: preparing for this round with leader %s%s\n", - sr.SyncTimer().FormattedCurrentTime(), core.GetTrimmedPk(hex.EncodeToString([]byte(leader))), msg)) + log.Info(fmt.Sprintf("%sStep 0: preparing for this round with leader %s%s\n", + sr.SyncTimer().FormattedCurrentTime(), core.GetTrimmedPk(hex.EncodeToString([]byte(leader))), msg)) - pubKeys := sr.ConsensusGroup() + pubKeys := sr.ConsensusGroup() - selfIndex, err := sr.SelfConsensusGroupIndex() - if err != nil { - log.Info(fmt.Sprintf("%scanceled round %d in subround %s, not in the consensus group\n", - sr.SyncTimer().FormattedCurrentTime(), sr.Rounder().Index(), sr.getSubroundName(sr.Current()))) + selfIndex, err := sr.SelfConsensusGroupIndex() + if err != nil { + log.Info(fmt.Sprintf("%scanceled round %d in subround %s, not in the consensus group\n", + sr.SyncTimer().FormattedCurrentTime(), sr.Rounder().Index(), sr.getSubroundName(sr.Current()))) - sr.RoundCanceled = true + sr.RoundCanceled = true - sr.appStatusHandler.SetStringValue(core.MetricConsensusState, "not in consensus group") + sr.appStatusHandler.SetStringValue(core.MetricConsensusState, "not in consensus group") - return false - } + return false + } - sr.appStatusHandler.Increment(core.MetricCountConsensus) - sr.appStatusHandler.SetStringValue(core.MetricConsensusState, "participant") + sr.appStatusHandler.Increment(core.MetricCountConsensus) + sr.appStatusHandler.SetStringValue(core.MetricConsensusState, "participant") - err = sr.MultiSigner().Reset(pubKeys, uint16(selfIndex)) - if err != nil { - log.Error(err.Error()) + err = sr.MultiSigner().Reset(pubKeys, uint16(selfIndex)) + if err != nil { + log.Error(err.Error()) - sr.RoundCanceled = true + sr.RoundCanceled = true - return false - } + return false + } - startTime := time.Time{} - startTime = sr.RoundTimeStamp - maxTime := sr.Rounder().TimeDuration() * time.Duration(sr.processingThresholdPercentage) / 100 - if sr.Rounder().RemainingTime(startTime, maxTime) < 0 { - log.Info(fmt.Sprintf("%scanceled round %d in subround %s, time is out\n", - sr.SyncTimer().FormattedCurrentTime(), sr.Rounder().Index(), sr.getSubroundName(sr.Current()))) + startTime := time.Time{} + startTime = sr.RoundTimeStamp + maxTime := sr.Rounder().TimeDuration() * time.Duration(sr.processingThresholdPercentage) / 100 + if sr.Rounder().RemainingTime(startTime, maxTime) < 0 { + log.Info(fmt.Sprintf("%scanceled round %d in subround %s, time is out\n", 
+ sr.SyncTimer().FormattedCurrentTime(), sr.Rounder().Index(), sr.getSubroundName(sr.Current()))) - sr.RoundCanceled = true + sr.RoundCanceled = true - return false - } + return false + } - sr.SetStatus(sr.Current(), spos.SsFinished) + sr.SetStatus(sr.Current(), spos.SsFinished) - if leader == sr.SelfPubKey() { - //TODO: Should be analyzed if call of sr.broadcastUnnotarisedBlocks() is still necessary - } + if leader == sr.SelfPubKey() { + //TODO: Should be analyzed if call of sr.broadcastUnnotarisedBlocks() is still necessary + } - // execute stored messages which were received in this new round but before this initialisation - go sr.executeStoredMessages() + // execute stored messages which were received in this new round but before this initialisation + go sr.executeStoredMessages() - return true + return true } func (sr *SubroundStartRound) generateNextConsensusGroup(roundIndex int64) error { - currentHeader := sr.Blockchain().GetCurrentBlockHeader() - if currentHeader == nil { - currentHeader = sr.Blockchain().GetGenesisHeader() - if currentHeader == nil { - return spos.ErrNilHeader - } - } + currentHeader := sr.Blockchain().GetCurrentBlockHeader() + if currentHeader == nil { + currentHeader = sr.Blockchain().GetGenesisHeader() + if currentHeader == nil { + return spos.ErrNilHeader + } + } - randomSeed := currentHeader.GetRandSeed() + randomSeed := currentHeader.GetRandSeed() - log.Info(fmt.Sprintf("random source used to determine the next consensus group is: %s\n", - core.ToB64(randomSeed)), - ) + log.Info(fmt.Sprintf("random source used to determine the next consensus group is: %s\n", + core.ToB64(randomSeed)), + ) - shardId := sr.ShardCoordinator().SelfId() + shardId := sr.ShardCoordinator().SelfId() - nextConsensusGroup, rewardsAddresses, err := sr.GetNextConsensusGroup( - randomSeed, - uint64(sr.RoundIndex), - shardId, - sr.NodesCoordinator(), - ) - if err != nil { - return err - } + nextConsensusGroup, rewardsAddresses, err := sr.GetNextConsensusGroup( + randomSeed, + uint64(sr.RoundIndex), + shardId, + sr.NodesCoordinator(), + ) + if err != nil { + return err + } - log.Info(fmt.Sprintf("consensus group for round %d is formed by next validators:\n", - roundIndex)) + log.Info(fmt.Sprintf("consensus group for round %d is formed by next validators:\n", + roundIndex)) - for i := 0; i < len(nextConsensusGroup); i++ { - log.Info(fmt.Sprintf("%s", core.GetTrimmedPk(hex.EncodeToString([]byte(nextConsensusGroup[i]))))) - } + for i := 0; i < len(nextConsensusGroup); i++ { + log.Info(fmt.Sprintf("%s", core.GetTrimmedPk(hex.EncodeToString([]byte(nextConsensusGroup[i]))))) + } - log.Info(fmt.Sprintf("\n")) + log.Info(fmt.Sprintf("\n")) - sr.SetConsensusGroup(nextConsensusGroup) + sr.SetConsensusGroup(nextConsensusGroup) - sr.BlockProcessor().SetConsensusRewardAddresses(rewardsAddresses, uint64(sr.RoundIndex)) + sr.BlockProcessor().SetConsensusRewardAddresses(rewardsAddresses, uint64(sr.RoundIndex)) - return nil + return nil } diff --git a/data/address/specialAddresses.go b/data/address/specialAddresses.go index 33cc044bf24..7e01117c570 100644 --- a/data/address/specialAddresses.go +++ b/data/address/specialAddresses.go @@ -1,110 +1,110 @@ package address import ( - "github.com/ElrondNetwork/elrond-go/data" - "github.com/ElrondNetwork/elrond-go/data/state" - "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/sharding" ) type specialAddresses struct { - elrond []byte - 
consensusRewardAddresses []string - burnAddress []byte - - epoch uint32 - round uint64 - adrConv state.AddressConverter - shardCoordinator sharding.Coordinator + elrond []byte + consensusRewardAddresses []string + burnAddress []byte + + epoch uint32 + round uint64 + adrConv state.AddressConverter + shardCoordinator sharding.Coordinator } // NewSpecialAddressHolder creates a special address holder func NewSpecialAddressHolder( - elrond []byte, - burnAddress []byte, - adrConv state.AddressConverter, - shardCoordinator sharding.Coordinator, + elrond []byte, + burnAddress []byte, + adrConv state.AddressConverter, + shardCoordinator sharding.Coordinator, ) (*specialAddresses, error) { - if elrond == nil { - return nil, data.ErrNilElrondAddress - } - if burnAddress == nil { - return nil, data.ErrNilBurnAddress - } - if adrConv == nil { - return nil, data.ErrNilAddressConverter - } - if shardCoordinator == nil { - return nil, data.ErrNilShardCoordinator - } - - sp := &specialAddresses{ - elrond: elrond, - burnAddress: burnAddress, - adrConv: adrConv, - shardCoordinator: shardCoordinator, - } - - return sp, nil + if elrond == nil { + return nil, data.ErrNilElrondAddress + } + if burnAddress == nil { + return nil, data.ErrNilBurnAddress + } + if adrConv == nil { + return nil, data.ErrNilAddressConverter + } + if shardCoordinator == nil { + return nil, data.ErrNilShardCoordinator + } + + sp := &specialAddresses{ + elrond: elrond, + burnAddress: burnAddress, + adrConv: adrConv, + shardCoordinator: shardCoordinator, + } + + return sp, nil } // SetElrondCommunityAddress sets elrond address func (sp *specialAddresses) SetElrondCommunityAddress(elrond []byte) { - sp.elrond = elrond + sp.elrond = elrond } // ElrondCommunityAddress provides elrond address func (sp *specialAddresses) ElrondCommunityAddress() []byte { - return sp.elrond + return sp.elrond } // BurnAddress provides burn address func (sp *specialAddresses) BurnAddress() []byte { - return sp.burnAddress + return sp.burnAddress } // SetConsensusData sets the consensus rewards addresses for the round func (sp *specialAddresses) SetConsensusData(consensusRewardAddresses []string, round uint64, epoch uint32) { - sp.consensusRewardAddresses = consensusRewardAddresses - sp.round = round - sp.epoch = epoch + sp.consensusRewardAddresses = consensusRewardAddresses + sp.round = round + sp.epoch = epoch } // LeaderAddress provides leader address func (sp *specialAddresses) LeaderAddress() []byte { - if len(sp.consensusRewardAddresses) == 0 { - return nil - } + if len(sp.consensusRewardAddresses) == 0 { + return nil + } - return []byte(sp.consensusRewardAddresses[0]) + return []byte(sp.consensusRewardAddresses[0]) } // ConsensusRewardAddresses provides the consensus reward addresses func (sp *specialAddresses) ConsensusRewardAddresses() []string { - return sp.consensusRewardAddresses + return sp.consensusRewardAddresses } func (sp *specialAddresses) Round() uint64 { - return sp.round + return sp.round } func (sp *specialAddresses) Epoch() uint32 { - return sp.epoch + return sp.epoch } // ShardIdForAddress calculates shard id for address func (sp *specialAddresses) ShardIdForAddress(pubKey []byte) (uint32, error) { - convAdr, err := sp.adrConv.CreateAddressFromPublicKeyBytes(pubKey) - if err != nil { - return 0, err - } + convAdr, err := sp.adrConv.CreateAddressFromPublicKeyBytes(pubKey) + if err != nil { + return 0, err + } - return sp.shardCoordinator.ComputeId(convAdr), nil + return sp.shardCoordinator.ComputeId(convAdr), nil } // IsInterfaceNil returns 
true if there is no value under the interface func (sp *specialAddresses) IsInterfaceNil() bool { - if sp == nil { - return true - } - return false + if sp == nil { + return true + } + return false } diff --git a/dataRetriever/factory/shard/resolversContainerFactory.go b/dataRetriever/factory/shard/resolversContainerFactory.go index 8c24880ec80..0c77183dbdc 100644 --- a/dataRetriever/factory/shard/resolversContainerFactory.go +++ b/dataRetriever/factory/shard/resolversContainerFactory.go @@ -1,542 +1,542 @@ package shard import ( - "github.com/ElrondNetwork/elrond-go/core/random" - "github.com/ElrondNetwork/elrond-go/data/typeConverters" - "github.com/ElrondNetwork/elrond-go/dataRetriever" - "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/containers" - "github.com/ElrondNetwork/elrond-go/dataRetriever/resolvers" - "github.com/ElrondNetwork/elrond-go/dataRetriever/resolvers/topicResolverSender" - "github.com/ElrondNetwork/elrond-go/marshal" - "github.com/ElrondNetwork/elrond-go/process/factory" - "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/core/random" + "github.com/ElrondNetwork/elrond-go/data/typeConverters" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/containers" + "github.com/ElrondNetwork/elrond-go/dataRetriever/resolvers" + "github.com/ElrondNetwork/elrond-go/dataRetriever/resolvers/topicResolverSender" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/process/factory" + "github.com/ElrondNetwork/elrond-go/sharding" ) const emptyExcludePeersOnTopic = "" type resolversContainerFactory struct { - shardCoordinator sharding.Coordinator - messenger dataRetriever.TopicMessageHandler - store dataRetriever.StorageService - marshalizer marshal.Marshalizer - dataPools dataRetriever.PoolsHolder - uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter - intRandomizer dataRetriever.IntRandomizer - dataPacker dataRetriever.DataPacker + shardCoordinator sharding.Coordinator + messenger dataRetriever.TopicMessageHandler + store dataRetriever.StorageService + marshalizer marshal.Marshalizer + dataPools dataRetriever.PoolsHolder + uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter + intRandomizer dataRetriever.IntRandomizer + dataPacker dataRetriever.DataPacker } // NewResolversContainerFactory creates a new container filled with topic resolvers func NewResolversContainerFactory( - shardCoordinator sharding.Coordinator, - messenger dataRetriever.TopicMessageHandler, - store dataRetriever.StorageService, - marshalizer marshal.Marshalizer, - dataPools dataRetriever.PoolsHolder, - uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter, - dataPacker dataRetriever.DataPacker, + shardCoordinator sharding.Coordinator, + messenger dataRetriever.TopicMessageHandler, + store dataRetriever.StorageService, + marshalizer marshal.Marshalizer, + dataPools dataRetriever.PoolsHolder, + uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter, + dataPacker dataRetriever.DataPacker, ) (*resolversContainerFactory, error) { - if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { - return nil, dataRetriever.ErrNilShardCoordinator - } - if messenger == nil || messenger.IsInterfaceNil() { - return nil, dataRetriever.ErrNilMessenger - } - if store == nil || store.IsInterfaceNil() { - return nil, dataRetriever.ErrNilTxStorage - } - if marshalizer == nil || marshalizer.IsInterfaceNil() { - return nil, 
dataRetriever.ErrNilMarshalizer - } - if dataPools == nil || dataPools.IsInterfaceNil() { - return nil, dataRetriever.ErrNilDataPoolHolder - } - if uint64ByteSliceConverter == nil || uint64ByteSliceConverter.IsInterfaceNil() { - return nil, dataRetriever.ErrNilUint64ByteSliceConverter - } - if dataPacker == nil || dataPacker.IsInterfaceNil() { - return nil, dataRetriever.ErrNilDataPacker - } - - return &resolversContainerFactory{ - shardCoordinator: shardCoordinator, - messenger: messenger, - store: store, - marshalizer: marshalizer, - dataPools: dataPools, - uint64ByteSliceConverter: uint64ByteSliceConverter, - intRandomizer: &random.ConcurrentSafeIntRandomizer{}, - dataPacker: dataPacker, - }, nil + if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { + return nil, dataRetriever.ErrNilShardCoordinator + } + if messenger == nil || messenger.IsInterfaceNil() { + return nil, dataRetriever.ErrNilMessenger + } + if store == nil || store.IsInterfaceNil() { + return nil, dataRetriever.ErrNilTxStorage + } + if marshalizer == nil || marshalizer.IsInterfaceNil() { + return nil, dataRetriever.ErrNilMarshalizer + } + if dataPools == nil || dataPools.IsInterfaceNil() { + return nil, dataRetriever.ErrNilDataPoolHolder + } + if uint64ByteSliceConverter == nil || uint64ByteSliceConverter.IsInterfaceNil() { + return nil, dataRetriever.ErrNilUint64ByteSliceConverter + } + if dataPacker == nil || dataPacker.IsInterfaceNil() { + return nil, dataRetriever.ErrNilDataPacker + } + + return &resolversContainerFactory{ + shardCoordinator: shardCoordinator, + messenger: messenger, + store: store, + marshalizer: marshalizer, + dataPools: dataPools, + uint64ByteSliceConverter: uint64ByteSliceConverter, + intRandomizer: &random.ConcurrentSafeIntRandomizer{}, + dataPacker: dataPacker, + }, nil } // Create returns an interceptor container that will hold all interceptors in the system func (rcf *resolversContainerFactory) Create() (dataRetriever.ResolversContainer, error) { - container := containers.NewResolversContainer() - - keys, resolverSlice, err := rcf.generateTxResolvers( - factory.TransactionTopic, - dataRetriever.TransactionUnit, - rcf.dataPools.Transactions(), - ) - if err != nil { - return nil, err - } - err = container.AddMultiple(keys, resolverSlice) - if err != nil { - return nil, err - } - - keys, resolverSlice, err = rcf.generateTxResolvers( - factory.UnsignedTransactionTopic, - dataRetriever.UnsignedTransactionUnit, - rcf.dataPools.UnsignedTransactions(), - ) - if err != nil { - return nil, err - } - err = container.AddMultiple(keys, resolverSlice) - if err != nil { - return nil, err - } - - keys, resolverSlice, err = rcf.generateTxResolvers( - factory.RewardsTransactionTopic, - dataRetriever.RewardTransactionUnit, - rcf.dataPools.RewardTransactions(), - ) - if err != nil { - return nil, err - } - err = container.AddMultiple(keys, resolverSlice) - if err != nil { - return nil, err - } - - keys, resolverSlice, err = rcf.generateHdrResolver() - if err != nil { - return nil, err - } - err = container.AddMultiple(keys, resolverSlice) - if err != nil { - return nil, err - } - - keys, resolverSlice, err = rcf.generateMiniBlocksResolvers() - if err != nil { - return nil, err - } - err = container.AddMultiple(keys, resolverSlice) - if err != nil { - return nil, err - } - - keys, resolverSlice, err = rcf.generatePeerChBlockBodyResolver() - if err != nil { - return nil, err - } - err = container.AddMultiple(keys, resolverSlice) - if err != nil { - return nil, err - } - - keys, resolverSlice, err = 
rcf.generateMetachainShardHeaderResolver() - if err != nil { - return nil, err - } - err = container.AddMultiple(keys, resolverSlice) - if err != nil { - return nil, err - } - - keys, resolverSlice, err = rcf.generateMetablockHeaderResolver() - if err != nil { - return nil, err - } - err = container.AddMultiple(keys, resolverSlice) - if err != nil { - return nil, err - } - - return container, nil + container := containers.NewResolversContainer() + + keys, resolverSlice, err := rcf.generateTxResolvers( + factory.TransactionTopic, + dataRetriever.TransactionUnit, + rcf.dataPools.Transactions(), + ) + if err != nil { + return nil, err + } + err = container.AddMultiple(keys, resolverSlice) + if err != nil { + return nil, err + } + + keys, resolverSlice, err = rcf.generateTxResolvers( + factory.UnsignedTransactionTopic, + dataRetriever.UnsignedTransactionUnit, + rcf.dataPools.UnsignedTransactions(), + ) + if err != nil { + return nil, err + } + err = container.AddMultiple(keys, resolverSlice) + if err != nil { + return nil, err + } + + keys, resolverSlice, err = rcf.generateTxResolvers( + factory.RewardsTransactionTopic, + dataRetriever.RewardTransactionUnit, + rcf.dataPools.RewardTransactions(), + ) + if err != nil { + return nil, err + } + err = container.AddMultiple(keys, resolverSlice) + if err != nil { + return nil, err + } + + keys, resolverSlice, err = rcf.generateHdrResolver() + if err != nil { + return nil, err + } + err = container.AddMultiple(keys, resolverSlice) + if err != nil { + return nil, err + } + + keys, resolverSlice, err = rcf.generateMiniBlocksResolvers() + if err != nil { + return nil, err + } + err = container.AddMultiple(keys, resolverSlice) + if err != nil { + return nil, err + } + + keys, resolverSlice, err = rcf.generatePeerChBlockBodyResolver() + if err != nil { + return nil, err + } + err = container.AddMultiple(keys, resolverSlice) + if err != nil { + return nil, err + } + + keys, resolverSlice, err = rcf.generateMetachainShardHeaderResolver() + if err != nil { + return nil, err + } + err = container.AddMultiple(keys, resolverSlice) + if err != nil { + return nil, err + } + + keys, resolverSlice, err = rcf.generateMetablockHeaderResolver() + if err != nil { + return nil, err + } + err = container.AddMultiple(keys, resolverSlice) + if err != nil { + return nil, err + } + + return container, nil } func (rcf *resolversContainerFactory) createTopicAndAssignHandler( - topicName string, - resolver dataRetriever.Resolver, - createChannel bool, + topicName string, + resolver dataRetriever.Resolver, + createChannel bool, ) (dataRetriever.Resolver, error) { - err := rcf.messenger.CreateTopic(topicName, createChannel) - if err != nil { - return nil, err - } + err := rcf.messenger.CreateTopic(topicName, createChannel) + if err != nil { + return nil, err + } - return resolver, rcf.messenger.RegisterMessageProcessor(topicName, resolver) + return resolver, rcf.messenger.RegisterMessageProcessor(topicName, resolver) } //------- Tx resolvers func (rcf *resolversContainerFactory) generateTxResolvers( - topic string, - unit dataRetriever.UnitType, - dataPool dataRetriever.ShardedDataCacherNotifier, + topic string, + unit dataRetriever.UnitType, + dataPool dataRetriever.ShardedDataCacherNotifier, ) ([]string, []dataRetriever.Resolver, error) { - shardC := rcf.shardCoordinator + shardC := rcf.shardCoordinator - noOfShards := shardC.NumberOfShards() + noOfShards := shardC.NumberOfShards() - keys := make([]string, noOfShards) - resolverSlice := make([]dataRetriever.Resolver, noOfShards) 
+ keys := make([]string, noOfShards) + resolverSlice := make([]dataRetriever.Resolver, noOfShards) - for idx := uint32(0); idx < noOfShards; idx++ { - identifierTx := topic + shardC.CommunicationIdentifier(idx) - excludePeersFromTopic := topic + shardC.CommunicationIdentifier(shardC.SelfId()) + for idx := uint32(0); idx < noOfShards; idx++ { + identifierTx := topic + shardC.CommunicationIdentifier(idx) + excludePeersFromTopic := topic + shardC.CommunicationIdentifier(shardC.SelfId()) - resolver, err := rcf.createTxResolver(identifierTx, excludePeersFromTopic, unit, dataPool) - if err != nil { - return nil, nil, err - } + resolver, err := rcf.createTxResolver(identifierTx, excludePeersFromTopic, unit, dataPool) + if err != nil { + return nil, nil, err + } - resolverSlice[idx] = resolver - keys[idx] = identifierTx - } + resolverSlice[idx] = resolver + keys[idx] = identifierTx + } - return keys, resolverSlice, nil + return keys, resolverSlice, nil } func (rcf *resolversContainerFactory) createTxResolver( - topic string, - excludedTopic string, - unit dataRetriever.UnitType, - dataPool dataRetriever.ShardedDataCacherNotifier, + topic string, + excludedTopic string, + unit dataRetriever.UnitType, + dataPool dataRetriever.ShardedDataCacherNotifier, ) (dataRetriever.Resolver, error) { - txStorer := rcf.store.GetStorer(unit) - - peerListCreator, err := topicResolverSender.NewDiffPeerListCreator(rcf.messenger, topic, excludedTopic) - if err != nil { - return nil, err - } - - //TODO instantiate topic sender resolver with the shard IDs for which this resolver is supposed to serve the data - // this will improve the serving of transactions as the searching will be done only on 2 sharded data units - resolverSender, err := topicResolverSender.NewTopicResolverSender( - rcf.messenger, - topic, - peerListCreator, - rcf.marshalizer, - rcf.intRandomizer, - uint32(0), - ) - if err != nil { - return nil, err - } - - resolver, err := resolvers.NewTxResolver( - resolverSender, - dataPool, - txStorer, - rcf.marshalizer, - rcf.dataPacker, - ) - if err != nil { - return nil, err - } - - //add on the request topic - return rcf.createTopicAndAssignHandler( - topic+resolverSender.TopicRequestSuffix(), - resolver, - false) + txStorer := rcf.store.GetStorer(unit) + + peerListCreator, err := topicResolverSender.NewDiffPeerListCreator(rcf.messenger, topic, excludedTopic) + if err != nil { + return nil, err + } + + //TODO instantiate topic sender resolver with the shard IDs for which this resolver is supposed to serve the data + // this will improve the serving of transactions as the searching will be done only on 2 sharded data units + resolverSender, err := topicResolverSender.NewTopicResolverSender( + rcf.messenger, + topic, + peerListCreator, + rcf.marshalizer, + rcf.intRandomizer, + uint32(0), + ) + if err != nil { + return nil, err + } + + resolver, err := resolvers.NewTxResolver( + resolverSender, + dataPool, + txStorer, + rcf.marshalizer, + rcf.dataPacker, + ) + if err != nil { + return nil, err + } + + //add on the request topic + return rcf.createTopicAndAssignHandler( + topic+resolverSender.TopicRequestSuffix(), + resolver, + false) } //------- Hdr resolver func (rcf *resolversContainerFactory) generateHdrResolver() ([]string, []dataRetriever.Resolver, error) { - shardC := rcf.shardCoordinator - - //only one intrashard header topic - identifierHdr := factory.HeadersTopic + shardC.CommunicationIdentifier(shardC.SelfId()) - - peerListCreator, err := topicResolverSender.NewDiffPeerListCreator(rcf.messenger, 
identifierHdr, emptyExcludePeersOnTopic) - if err != nil { - return nil, nil, err - } - - hdrStorer := rcf.store.GetStorer(dataRetriever.BlockHeaderUnit) - resolverSender, err := topicResolverSender.NewTopicResolverSender( - rcf.messenger, - identifierHdr, - peerListCreator, - rcf.marshalizer, - rcf.intRandomizer, - shardC.SelfId(), - ) - if err != nil { - return nil, nil, err - } - - hdrNonceHashDataUnit := dataRetriever.ShardHdrNonceHashDataUnit + dataRetriever.UnitType(shardC.SelfId()) - hdrNonceStore := rcf.store.GetStorer(hdrNonceHashDataUnit) - resolver, err := resolvers.NewHeaderResolver( - resolverSender, - rcf.dataPools.Headers(), - rcf.dataPools.HeadersNonces(), - hdrStorer, - hdrNonceStore, - rcf.marshalizer, - rcf.uint64ByteSliceConverter, - ) - if err != nil { - return nil, nil, err - } - //add on the request topic - _, err = rcf.createTopicAndAssignHandler( - identifierHdr+resolverSender.TopicRequestSuffix(), - resolver, - false) - if err != nil { - return nil, nil, err - } - - err = rcf.createTopicHeadersForMetachain() - if err != nil { - return nil, nil, err - } - - return []string{identifierHdr}, []dataRetriever.Resolver{resolver}, nil + shardC := rcf.shardCoordinator + + //only one intrashard header topic + identifierHdr := factory.HeadersTopic + shardC.CommunicationIdentifier(shardC.SelfId()) + + peerListCreator, err := topicResolverSender.NewDiffPeerListCreator(rcf.messenger, identifierHdr, emptyExcludePeersOnTopic) + if err != nil { + return nil, nil, err + } + + hdrStorer := rcf.store.GetStorer(dataRetriever.BlockHeaderUnit) + resolverSender, err := topicResolverSender.NewTopicResolverSender( + rcf.messenger, + identifierHdr, + peerListCreator, + rcf.marshalizer, + rcf.intRandomizer, + shardC.SelfId(), + ) + if err != nil { + return nil, nil, err + } + + hdrNonceHashDataUnit := dataRetriever.ShardHdrNonceHashDataUnit + dataRetriever.UnitType(shardC.SelfId()) + hdrNonceStore := rcf.store.GetStorer(hdrNonceHashDataUnit) + resolver, err := resolvers.NewHeaderResolver( + resolverSender, + rcf.dataPools.Headers(), + rcf.dataPools.HeadersNonces(), + hdrStorer, + hdrNonceStore, + rcf.marshalizer, + rcf.uint64ByteSliceConverter, + ) + if err != nil { + return nil, nil, err + } + //add on the request topic + _, err = rcf.createTopicAndAssignHandler( + identifierHdr+resolverSender.TopicRequestSuffix(), + resolver, + false) + if err != nil { + return nil, nil, err + } + + err = rcf.createTopicHeadersForMetachain() + if err != nil { + return nil, nil, err + } + + return []string{identifierHdr}, []dataRetriever.Resolver{resolver}, nil } func (rcf *resolversContainerFactory) createTopicHeadersForMetachain() error { - shardC := rcf.shardCoordinator - identifierHdr := factory.ShardHeadersForMetachainTopic + shardC.CommunicationIdentifier(sharding.MetachainShardId) + shardC := rcf.shardCoordinator + identifierHdr := factory.ShardHeadersForMetachainTopic + shardC.CommunicationIdentifier(sharding.MetachainShardId) - return rcf.messenger.CreateTopic(identifierHdr, true) + return rcf.messenger.CreateTopic(identifierHdr, true) } //------- MiniBlocks resolvers func (rcf *resolversContainerFactory) generateMiniBlocksResolvers() ([]string, []dataRetriever.Resolver, error) { - shardC := rcf.shardCoordinator - noOfShards := shardC.NumberOfShards() - keys := make([]string, noOfShards) - resolverSlice := make([]dataRetriever.Resolver, noOfShards) + shardC := rcf.shardCoordinator + noOfShards := shardC.NumberOfShards() + keys := make([]string, noOfShards) + resolverSlice := 
make([]dataRetriever.Resolver, noOfShards) - for idx := uint32(0); idx < noOfShards; idx++ { - identifierMiniBlocks := factory.MiniBlocksTopic + shardC.CommunicationIdentifier(idx) - excludePeersFromTopic := factory.MiniBlocksTopic + shardC.CommunicationIdentifier(shardC.SelfId()) + for idx := uint32(0); idx < noOfShards; idx++ { + identifierMiniBlocks := factory.MiniBlocksTopic + shardC.CommunicationIdentifier(idx) + excludePeersFromTopic := factory.MiniBlocksTopic + shardC.CommunicationIdentifier(shardC.SelfId()) - resolver, err := rcf.createMiniBlocksResolver(identifierMiniBlocks, excludePeersFromTopic) - if err != nil { - return nil, nil, err - } + resolver, err := rcf.createMiniBlocksResolver(identifierMiniBlocks, excludePeersFromTopic) + if err != nil { + return nil, nil, err + } - resolverSlice[idx] = resolver - keys[idx] = identifierMiniBlocks - } + resolverSlice[idx] = resolver + keys[idx] = identifierMiniBlocks + } - return keys, resolverSlice, nil + return keys, resolverSlice, nil } func (rcf *resolversContainerFactory) createMiniBlocksResolver(topic string, excludedTopic string) (dataRetriever.Resolver, error) { - miniBlocksStorer := rcf.store.GetStorer(dataRetriever.MiniBlockUnit) - - peerListCreator, err := topicResolverSender.NewDiffPeerListCreator(rcf.messenger, topic, excludedTopic) - if err != nil { - return nil, err - } - - resolverSender, err := topicResolverSender.NewTopicResolverSender( - rcf.messenger, - topic, - peerListCreator, - rcf.marshalizer, - rcf.intRandomizer, - uint32(0), - ) - if err != nil { - return nil, err - } - - txBlkResolver, err := resolvers.NewGenericBlockBodyResolver( - resolverSender, - rcf.dataPools.MiniBlocks(), - miniBlocksStorer, - rcf.marshalizer, - ) - if err != nil { - return nil, err - } - - //add on the request topic - return rcf.createTopicAndAssignHandler( - topic+resolverSender.TopicRequestSuffix(), - txBlkResolver, - false) + miniBlocksStorer := rcf.store.GetStorer(dataRetriever.MiniBlockUnit) + + peerListCreator, err := topicResolverSender.NewDiffPeerListCreator(rcf.messenger, topic, excludedTopic) + if err != nil { + return nil, err + } + + resolverSender, err := topicResolverSender.NewTopicResolverSender( + rcf.messenger, + topic, + peerListCreator, + rcf.marshalizer, + rcf.intRandomizer, + uint32(0), + ) + if err != nil { + return nil, err + } + + txBlkResolver, err := resolvers.NewGenericBlockBodyResolver( + resolverSender, + rcf.dataPools.MiniBlocks(), + miniBlocksStorer, + rcf.marshalizer, + ) + if err != nil { + return nil, err + } + + //add on the request topic + return rcf.createTopicAndAssignHandler( + topic+resolverSender.TopicRequestSuffix(), + txBlkResolver, + false) } //------- PeerChBlocks resolvers func (rcf *resolversContainerFactory) generatePeerChBlockBodyResolver() ([]string, []dataRetriever.Resolver, error) { - shardC := rcf.shardCoordinator - - //only one intrashard peer change blocks topic - identifierPeerCh := factory.PeerChBodyTopic + shardC.CommunicationIdentifier(shardC.SelfId()) - peerBlockBodyStorer := rcf.store.GetStorer(dataRetriever.PeerChangesUnit) - - peerListCreator, err := topicResolverSender.NewDiffPeerListCreator(rcf.messenger, identifierPeerCh, emptyExcludePeersOnTopic) - if err != nil { - return nil, nil, err - } - - resolverSender, err := topicResolverSender.NewTopicResolverSender( - rcf.messenger, - identifierPeerCh, - peerListCreator, - rcf.marshalizer, - rcf.intRandomizer, - shardC.SelfId(), - ) - if err != nil { - return nil, nil, err - } - - resolver, err := 
resolvers.NewGenericBlockBodyResolver( - resolverSender, - rcf.dataPools.MiniBlocks(), - peerBlockBodyStorer, - rcf.marshalizer, - ) - if err != nil { - return nil, nil, err - } - //add on the request topic - _, err = rcf.createTopicAndAssignHandler( - identifierPeerCh+resolverSender.TopicRequestSuffix(), - resolver, - false) - if err != nil { - return nil, nil, err - } - - return []string{identifierPeerCh}, []dataRetriever.Resolver{resolver}, nil + shardC := rcf.shardCoordinator + + //only one intrashard peer change blocks topic + identifierPeerCh := factory.PeerChBodyTopic + shardC.CommunicationIdentifier(shardC.SelfId()) + peerBlockBodyStorer := rcf.store.GetStorer(dataRetriever.PeerChangesUnit) + + peerListCreator, err := topicResolverSender.NewDiffPeerListCreator(rcf.messenger, identifierPeerCh, emptyExcludePeersOnTopic) + if err != nil { + return nil, nil, err + } + + resolverSender, err := topicResolverSender.NewTopicResolverSender( + rcf.messenger, + identifierPeerCh, + peerListCreator, + rcf.marshalizer, + rcf.intRandomizer, + shardC.SelfId(), + ) + if err != nil { + return nil, nil, err + } + + resolver, err := resolvers.NewGenericBlockBodyResolver( + resolverSender, + rcf.dataPools.MiniBlocks(), + peerBlockBodyStorer, + rcf.marshalizer, + ) + if err != nil { + return nil, nil, err + } + //add on the request topic + _, err = rcf.createTopicAndAssignHandler( + identifierPeerCh+resolverSender.TopicRequestSuffix(), + resolver, + false) + if err != nil { + return nil, nil, err + } + + return []string{identifierPeerCh}, []dataRetriever.Resolver{resolver}, nil } //------- MetachainShardHeaderResolvers func (rcf *resolversContainerFactory) generateMetachainShardHeaderResolver() ([]string, []dataRetriever.Resolver, error) { - shardC := rcf.shardCoordinator - - //only one metachain header topic - //example: shardHeadersForMetachain_0_META - identifierHdr := factory.ShardHeadersForMetachainTopic + shardC.CommunicationIdentifier(sharding.MetachainShardId) - peerListCreator, err := topicResolverSender.NewDiffPeerListCreator(rcf.messenger, identifierHdr, emptyExcludePeersOnTopic) - if err != nil { - return nil, nil, err - } - - hdrStorer := rcf.store.GetStorer(dataRetriever.BlockHeaderUnit) - resolverSender, err := topicResolverSender.NewTopicResolverSender( - rcf.messenger, - identifierHdr, - peerListCreator, - rcf.marshalizer, - rcf.intRandomizer, - shardC.SelfId(), - ) - if err != nil { - return nil, nil, err - } - - hdrNonceHashDataUnit := dataRetriever.ShardHdrNonceHashDataUnit + dataRetriever.UnitType(shardC.SelfId()) - hdrNonceStore := rcf.store.GetStorer(hdrNonceHashDataUnit) - resolver, err := resolvers.NewHeaderResolver( - resolverSender, - rcf.dataPools.Headers(), - rcf.dataPools.HeadersNonces(), - hdrStorer, - hdrNonceStore, - rcf.marshalizer, - rcf.uint64ByteSliceConverter, - ) - if err != nil { - return nil, nil, err - } - - //add on the request topic - _, err = rcf.createTopicAndAssignHandler( - identifierHdr+resolverSender.TopicRequestSuffix(), - resolver, - false) - if err != nil { - return nil, nil, err - } - - return []string{identifierHdr}, []dataRetriever.Resolver{resolver}, nil + shardC := rcf.shardCoordinator + + //only one metachain header topic + //example: shardHeadersForMetachain_0_META + identifierHdr := factory.ShardHeadersForMetachainTopic + shardC.CommunicationIdentifier(sharding.MetachainShardId) + peerListCreator, err := topicResolverSender.NewDiffPeerListCreator(rcf.messenger, identifierHdr, emptyExcludePeersOnTopic) + if err != nil { + return nil, nil, err + 
} + + hdrStorer := rcf.store.GetStorer(dataRetriever.BlockHeaderUnit) + resolverSender, err := topicResolverSender.NewTopicResolverSender( + rcf.messenger, + identifierHdr, + peerListCreator, + rcf.marshalizer, + rcf.intRandomizer, + shardC.SelfId(), + ) + if err != nil { + return nil, nil, err + } + + hdrNonceHashDataUnit := dataRetriever.ShardHdrNonceHashDataUnit + dataRetriever.UnitType(shardC.SelfId()) + hdrNonceStore := rcf.store.GetStorer(hdrNonceHashDataUnit) + resolver, err := resolvers.NewHeaderResolver( + resolverSender, + rcf.dataPools.Headers(), + rcf.dataPools.HeadersNonces(), + hdrStorer, + hdrNonceStore, + rcf.marshalizer, + rcf.uint64ByteSliceConverter, + ) + if err != nil { + return nil, nil, err + } + + //add on the request topic + _, err = rcf.createTopicAndAssignHandler( + identifierHdr+resolverSender.TopicRequestSuffix(), + resolver, + false) + if err != nil { + return nil, nil, err + } + + return []string{identifierHdr}, []dataRetriever.Resolver{resolver}, nil } //------- MetaBlockHeaderResolvers func (rcf *resolversContainerFactory) generateMetablockHeaderResolver() ([]string, []dataRetriever.Resolver, error) { - shardC := rcf.shardCoordinator - - //only one metachain header block topic - //this is: metachainBlocks - identifierHdr := factory.MetachainBlocksTopic - hdrStorer := rcf.store.GetStorer(dataRetriever.MetaBlockUnit) - - metaAndCrtShardTopic := factory.ShardHeadersForMetachainTopic + shardC.CommunicationIdentifier(sharding.MetachainShardId) - excludedPeersOnTopic := factory.TransactionTopic + shardC.CommunicationIdentifier(shardC.SelfId()) - - peerListCreator, err := topicResolverSender.NewDiffPeerListCreator(rcf.messenger, metaAndCrtShardTopic, excludedPeersOnTopic) - if err != nil { - return nil, nil, err - } - - resolverSender, err := topicResolverSender.NewTopicResolverSender( - rcf.messenger, - identifierHdr, - peerListCreator, - rcf.marshalizer, - rcf.intRandomizer, - sharding.MetachainShardId, - ) - if err != nil { - return nil, nil, err - } - - hdrNonceStore := rcf.store.GetStorer(dataRetriever.MetaHdrNonceHashDataUnit) - resolver, err := resolvers.NewHeaderResolver( - resolverSender, - rcf.dataPools.MetaBlocks(), - rcf.dataPools.HeadersNonces(), - hdrStorer, - hdrNonceStore, - rcf.marshalizer, - rcf.uint64ByteSliceConverter, - ) - if err != nil { - return nil, nil, err - } - - //add on the request topic - _, err = rcf.createTopicAndAssignHandler( - identifierHdr+resolverSender.TopicRequestSuffix(), - resolver, - false) - if err != nil { - return nil, nil, err - } - - return []string{identifierHdr}, []dataRetriever.Resolver{resolver}, nil + shardC := rcf.shardCoordinator + + //only one metachain header block topic + //this is: metachainBlocks + identifierHdr := factory.MetachainBlocksTopic + hdrStorer := rcf.store.GetStorer(dataRetriever.MetaBlockUnit) + + metaAndCrtShardTopic := factory.ShardHeadersForMetachainTopic + shardC.CommunicationIdentifier(sharding.MetachainShardId) + excludedPeersOnTopic := factory.TransactionTopic + shardC.CommunicationIdentifier(shardC.SelfId()) + + peerListCreator, err := topicResolverSender.NewDiffPeerListCreator(rcf.messenger, metaAndCrtShardTopic, excludedPeersOnTopic) + if err != nil { + return nil, nil, err + } + + resolverSender, err := topicResolverSender.NewTopicResolverSender( + rcf.messenger, + identifierHdr, + peerListCreator, + rcf.marshalizer, + rcf.intRandomizer, + sharding.MetachainShardId, + ) + if err != nil { + return nil, nil, err + } + + hdrNonceStore := 
rcf.store.GetStorer(dataRetriever.MetaHdrNonceHashDataUnit) + resolver, err := resolvers.NewHeaderResolver( + resolverSender, + rcf.dataPools.MetaBlocks(), + rcf.dataPools.HeadersNonces(), + hdrStorer, + hdrNonceStore, + rcf.marshalizer, + rcf.uint64ByteSliceConverter, + ) + if err != nil { + return nil, nil, err + } + + //add on the request topic + _, err = rcf.createTopicAndAssignHandler( + identifierHdr+resolverSender.TopicRequestSuffix(), + resolver, + false) + if err != nil { + return nil, nil, err + } + + return []string{identifierHdr}, []dataRetriever.Resolver{resolver}, nil } // IsInterfaceNil returns true if there is no value under the interface func (rcf *resolversContainerFactory) IsInterfaceNil() bool { - if rcf == nil { - return true - } - return false + if rcf == nil { + return true + } + return false } diff --git a/dataRetriever/factory/shard/resolversContainerFactory_test.go b/dataRetriever/factory/shard/resolversContainerFactory_test.go index aee73e043d8..c07453d1a55 100644 --- a/dataRetriever/factory/shard/resolversContainerFactory_test.go +++ b/dataRetriever/factory/shard/resolversContainerFactory_test.go @@ -1,431 +1,431 @@ package shard_test import ( - "errors" - "strings" - "testing" - - "github.com/ElrondNetwork/elrond-go/dataRetriever" - "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/shard" - "github.com/ElrondNetwork/elrond-go/dataRetriever/mock" - "github.com/ElrondNetwork/elrond-go/p2p" - "github.com/ElrondNetwork/elrond-go/process/factory" - "github.com/ElrondNetwork/elrond-go/storage" - "github.com/stretchr/testify/assert" + "errors" + "strings" + "testing" + + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/shard" + "github.com/ElrondNetwork/elrond-go/dataRetriever/mock" + "github.com/ElrondNetwork/elrond-go/p2p" + "github.com/ElrondNetwork/elrond-go/process/factory" + "github.com/ElrondNetwork/elrond-go/storage" + "github.com/stretchr/testify/assert" ) var errExpected = errors.New("expected error") func createStubTopicMessageHandler(matchStrToErrOnCreate string, matchStrToErrOnRegister string) dataRetriever.TopicMessageHandler { - tmhs := mock.NewTopicMessageHandlerStub() + tmhs := mock.NewTopicMessageHandlerStub() - tmhs.CreateTopicCalled = func(name string, createChannelForTopic bool) error { - if matchStrToErrOnCreate == "" { - return nil - } + tmhs.CreateTopicCalled = func(name string, createChannelForTopic bool) error { + if matchStrToErrOnCreate == "" { + return nil + } - if strings.Contains(name, matchStrToErrOnCreate) { - return errExpected - } + if strings.Contains(name, matchStrToErrOnCreate) { + return errExpected + } - return nil - } + return nil + } - tmhs.RegisterMessageProcessorCalled = func(topic string, handler p2p.MessageProcessor) error { - if matchStrToErrOnRegister == "" { - return nil - } + tmhs.RegisterMessageProcessorCalled = func(topic string, handler p2p.MessageProcessor) error { + if matchStrToErrOnRegister == "" { + return nil + } - if strings.Contains(topic, matchStrToErrOnRegister) { - return errExpected - } + if strings.Contains(topic, matchStrToErrOnRegister) { + return errExpected + } - return nil - } + return nil + } - return tmhs + return tmhs } func createDataPools() dataRetriever.PoolsHolder { - pools := &mock.PoolsHolderStub{} - pools.TransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{} - } - pools.HeadersCalled = func() storage.Cacher { - return &mock.CacherStub{} - } - pools.HeadersNoncesCalled = 
func() dataRetriever.Uint64SyncMapCacher { - return &mock.Uint64SyncMapCacherStub{} - } - pools.MiniBlocksCalled = func() storage.Cacher { - return &mock.CacherStub{} - } - pools.PeerChangesBlocksCalled = func() storage.Cacher { - return &mock.CacherStub{} - } - pools.MetaBlocksCalled = func() storage.Cacher { - return &mock.CacherStub{} - } - pools.UnsignedTransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{} - } - pools.RewardTransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{} - } - - return pools + pools := &mock.PoolsHolderStub{} + pools.TransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{} + } + pools.HeadersCalled = func() storage.Cacher { + return &mock.CacherStub{} + } + pools.HeadersNoncesCalled = func() dataRetriever.Uint64SyncMapCacher { + return &mock.Uint64SyncMapCacherStub{} + } + pools.MiniBlocksCalled = func() storage.Cacher { + return &mock.CacherStub{} + } + pools.PeerChangesBlocksCalled = func() storage.Cacher { + return &mock.CacherStub{} + } + pools.MetaBlocksCalled = func() storage.Cacher { + return &mock.CacherStub{} + } + pools.UnsignedTransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{} + } + pools.RewardTransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{} + } + + return pools } func createStore() dataRetriever.StorageService { - return &mock.ChainStorerMock{ - GetStorerCalled: func(unitType dataRetriever.UnitType) storage.Storer { - return &mock.StorerStub{} - }, - } + return &mock.ChainStorerMock{ + GetStorerCalled: func(unitType dataRetriever.UnitType) storage.Storer { + return &mock.StorerStub{} + }, + } } //------- NewResolversContainerFactory func TestNewResolversContainerFactory_NilShardCoordinatorShouldErr(t *testing.T) { - t.Parallel() - - rcf, err := shard.NewResolversContainerFactory( - nil, - createStubTopicMessageHandler("", ""), - createStore(), - &mock.MarshalizerMock{}, - createDataPools(), - &mock.Uint64ByteSliceConverterMock{}, - &mock.DataPackerStub{}, - ) - - assert.Nil(t, rcf) - assert.Equal(t, dataRetriever.ErrNilShardCoordinator, err) + t.Parallel() + + rcf, err := shard.NewResolversContainerFactory( + nil, + createStubTopicMessageHandler("", ""), + createStore(), + &mock.MarshalizerMock{}, + createDataPools(), + &mock.Uint64ByteSliceConverterMock{}, + &mock.DataPackerStub{}, + ) + + assert.Nil(t, rcf) + assert.Equal(t, dataRetriever.ErrNilShardCoordinator, err) } func TestNewResolversContainerFactory_NilMessengerShouldErr(t *testing.T) { - t.Parallel() - - rcf, err := shard.NewResolversContainerFactory( - mock.NewOneShardCoordinatorMock(), - nil, - createStore(), - &mock.MarshalizerMock{}, - createDataPools(), - &mock.Uint64ByteSliceConverterMock{}, - &mock.DataPackerStub{}, - ) - - assert.Nil(t, rcf) - assert.Equal(t, dataRetriever.ErrNilMessenger, err) + t.Parallel() + + rcf, err := shard.NewResolversContainerFactory( + mock.NewOneShardCoordinatorMock(), + nil, + createStore(), + &mock.MarshalizerMock{}, + createDataPools(), + &mock.Uint64ByteSliceConverterMock{}, + &mock.DataPackerStub{}, + ) + + assert.Nil(t, rcf) + assert.Equal(t, dataRetriever.ErrNilMessenger, err) } func TestNewResolversContainerFactory_NilBlockchainShouldErr(t *testing.T) { - t.Parallel() - - rcf, err := shard.NewResolversContainerFactory( - mock.NewOneShardCoordinatorMock(), - createStubTopicMessageHandler("", ""), - nil, 
- &mock.MarshalizerMock{}, - createDataPools(), - &mock.Uint64ByteSliceConverterMock{}, - &mock.DataPackerStub{}, - ) - - assert.Nil(t, rcf) - assert.Equal(t, dataRetriever.ErrNilTxStorage, err) + t.Parallel() + + rcf, err := shard.NewResolversContainerFactory( + mock.NewOneShardCoordinatorMock(), + createStubTopicMessageHandler("", ""), + nil, + &mock.MarshalizerMock{}, + createDataPools(), + &mock.Uint64ByteSliceConverterMock{}, + &mock.DataPackerStub{}, + ) + + assert.Nil(t, rcf) + assert.Equal(t, dataRetriever.ErrNilTxStorage, err) } func TestNewResolversContainerFactory_NilMarshalizerShouldErr(t *testing.T) { - t.Parallel() - - rcf, err := shard.NewResolversContainerFactory( - mock.NewOneShardCoordinatorMock(), - createStubTopicMessageHandler("", ""), - createStore(), - nil, - createDataPools(), - &mock.Uint64ByteSliceConverterMock{}, - &mock.DataPackerStub{}, - ) - - assert.Nil(t, rcf) - assert.Equal(t, dataRetriever.ErrNilMarshalizer, err) + t.Parallel() + + rcf, err := shard.NewResolversContainerFactory( + mock.NewOneShardCoordinatorMock(), + createStubTopicMessageHandler("", ""), + createStore(), + nil, + createDataPools(), + &mock.Uint64ByteSliceConverterMock{}, + &mock.DataPackerStub{}, + ) + + assert.Nil(t, rcf) + assert.Equal(t, dataRetriever.ErrNilMarshalizer, err) } func TestNewResolversContainerFactory_NilDataPoolShouldErr(t *testing.T) { - t.Parallel() - - rcf, err := shard.NewResolversContainerFactory( - mock.NewOneShardCoordinatorMock(), - createStubTopicMessageHandler("", ""), - createStore(), - &mock.MarshalizerMock{}, - nil, - &mock.Uint64ByteSliceConverterMock{}, - &mock.DataPackerStub{}, - ) - - assert.Nil(t, rcf) - assert.Equal(t, dataRetriever.ErrNilDataPoolHolder, err) + t.Parallel() + + rcf, err := shard.NewResolversContainerFactory( + mock.NewOneShardCoordinatorMock(), + createStubTopicMessageHandler("", ""), + createStore(), + &mock.MarshalizerMock{}, + nil, + &mock.Uint64ByteSliceConverterMock{}, + &mock.DataPackerStub{}, + ) + + assert.Nil(t, rcf) + assert.Equal(t, dataRetriever.ErrNilDataPoolHolder, err) } func TestNewResolversContainerFactory_NilUint64SliceConverterShouldErr(t *testing.T) { - t.Parallel() - - rcf, err := shard.NewResolversContainerFactory( - mock.NewOneShardCoordinatorMock(), - createStubTopicMessageHandler("", ""), - createStore(), - &mock.MarshalizerMock{}, - createDataPools(), - nil, - &mock.DataPackerStub{}, - ) - - assert.Nil(t, rcf) - assert.Equal(t, dataRetriever.ErrNilUint64ByteSliceConverter, err) + t.Parallel() + + rcf, err := shard.NewResolversContainerFactory( + mock.NewOneShardCoordinatorMock(), + createStubTopicMessageHandler("", ""), + createStore(), + &mock.MarshalizerMock{}, + createDataPools(), + nil, + &mock.DataPackerStub{}, + ) + + assert.Nil(t, rcf) + assert.Equal(t, dataRetriever.ErrNilUint64ByteSliceConverter, err) } func TestNewResolversContainerFactory_NilSliceSplitterShouldErr(t *testing.T) { - t.Parallel() - - rcf, err := shard.NewResolversContainerFactory( - mock.NewOneShardCoordinatorMock(), - createStubTopicMessageHandler("", ""), - createStore(), - &mock.MarshalizerMock{}, - createDataPools(), - &mock.Uint64ByteSliceConverterMock{}, - nil, - ) - - assert.Nil(t, rcf) - assert.Equal(t, dataRetriever.ErrNilDataPacker, err) + t.Parallel() + + rcf, err := shard.NewResolversContainerFactory( + mock.NewOneShardCoordinatorMock(), + createStubTopicMessageHandler("", ""), + createStore(), + &mock.MarshalizerMock{}, + createDataPools(), + &mock.Uint64ByteSliceConverterMock{}, + nil, + ) + + assert.Nil(t, rcf) + 
assert.Equal(t, dataRetriever.ErrNilDataPacker, err) } func TestNewResolversContainerFactory_ShouldWork(t *testing.T) { - t.Parallel() - - rcf, err := shard.NewResolversContainerFactory( - mock.NewOneShardCoordinatorMock(), - createStubTopicMessageHandler("", ""), - createStore(), - &mock.MarshalizerMock{}, - createDataPools(), - &mock.Uint64ByteSliceConverterMock{}, - &mock.DataPackerStub{}, - ) - - assert.NotNil(t, rcf) - assert.Nil(t, err) + t.Parallel() + + rcf, err := shard.NewResolversContainerFactory( + mock.NewOneShardCoordinatorMock(), + createStubTopicMessageHandler("", ""), + createStore(), + &mock.MarshalizerMock{}, + createDataPools(), + &mock.Uint64ByteSliceConverterMock{}, + &mock.DataPackerStub{}, + ) + + assert.NotNil(t, rcf) + assert.Nil(t, err) } //------- Create func TestResolversContainerFactory_CreateTopicCreationTxFailsShouldErr(t *testing.T) { - t.Parallel() - - rcf, _ := shard.NewResolversContainerFactory( - mock.NewOneShardCoordinatorMock(), - createStubTopicMessageHandler(factory.TransactionTopic, ""), - createStore(), - &mock.MarshalizerMock{}, - createDataPools(), - &mock.Uint64ByteSliceConverterMock{}, - &mock.DataPackerStub{}, - ) - - container, err := rcf.Create() - - assert.Nil(t, container) - assert.Equal(t, errExpected, err) + t.Parallel() + + rcf, _ := shard.NewResolversContainerFactory( + mock.NewOneShardCoordinatorMock(), + createStubTopicMessageHandler(factory.TransactionTopic, ""), + createStore(), + &mock.MarshalizerMock{}, + createDataPools(), + &mock.Uint64ByteSliceConverterMock{}, + &mock.DataPackerStub{}, + ) + + container, err := rcf.Create() + + assert.Nil(t, container) + assert.Equal(t, errExpected, err) } func TestResolversContainerFactory_CreateTopicCreationHdrFailsShouldErr(t *testing.T) { - t.Parallel() - - rcf, _ := shard.NewResolversContainerFactory( - mock.NewOneShardCoordinatorMock(), - createStubTopicMessageHandler(factory.HeadersTopic, ""), - createStore(), - &mock.MarshalizerMock{}, - createDataPools(), - &mock.Uint64ByteSliceConverterMock{}, - &mock.DataPackerStub{}, - ) - - container, err := rcf.Create() - - assert.Nil(t, container) - assert.Equal(t, errExpected, err) + t.Parallel() + + rcf, _ := shard.NewResolversContainerFactory( + mock.NewOneShardCoordinatorMock(), + createStubTopicMessageHandler(factory.HeadersTopic, ""), + createStore(), + &mock.MarshalizerMock{}, + createDataPools(), + &mock.Uint64ByteSliceConverterMock{}, + &mock.DataPackerStub{}, + ) + + container, err := rcf.Create() + + assert.Nil(t, container) + assert.Equal(t, errExpected, err) } func TestResolversContainerFactory_CreateTopicCreationMiniBlocksFailsShouldErr(t *testing.T) { - t.Parallel() - - rcf, _ := shard.NewResolversContainerFactory( - mock.NewOneShardCoordinatorMock(), - createStubTopicMessageHandler(factory.MiniBlocksTopic, ""), - createStore(), - &mock.MarshalizerMock{}, - createDataPools(), - &mock.Uint64ByteSliceConverterMock{}, - &mock.DataPackerStub{}, - ) - - container, err := rcf.Create() - - assert.Nil(t, container) - assert.Equal(t, errExpected, err) + t.Parallel() + + rcf, _ := shard.NewResolversContainerFactory( + mock.NewOneShardCoordinatorMock(), + createStubTopicMessageHandler(factory.MiniBlocksTopic, ""), + createStore(), + &mock.MarshalizerMock{}, + createDataPools(), + &mock.Uint64ByteSliceConverterMock{}, + &mock.DataPackerStub{}, + ) + + container, err := rcf.Create() + + assert.Nil(t, container) + assert.Equal(t, errExpected, err) } func TestResolversContainerFactory_CreateTopicCreationPeerChBlocksFailsShouldErr(t *testing.T) 
{ - t.Parallel() - - rcf, _ := shard.NewResolversContainerFactory( - mock.NewOneShardCoordinatorMock(), - createStubTopicMessageHandler(factory.PeerChBodyTopic, ""), - createStore(), - &mock.MarshalizerMock{}, - createDataPools(), - &mock.Uint64ByteSliceConverterMock{}, - &mock.DataPackerStub{}, - ) - - container, err := rcf.Create() - - assert.Nil(t, container) - assert.Equal(t, errExpected, err) + t.Parallel() + + rcf, _ := shard.NewResolversContainerFactory( + mock.NewOneShardCoordinatorMock(), + createStubTopicMessageHandler(factory.PeerChBodyTopic, ""), + createStore(), + &mock.MarshalizerMock{}, + createDataPools(), + &mock.Uint64ByteSliceConverterMock{}, + &mock.DataPackerStub{}, + ) + + container, err := rcf.Create() + + assert.Nil(t, container) + assert.Equal(t, errExpected, err) } func TestResolversContainerFactory_CreateRegisterTxFailsShouldErr(t *testing.T) { - t.Parallel() - - rcf, _ := shard.NewResolversContainerFactory( - mock.NewOneShardCoordinatorMock(), - createStubTopicMessageHandler("", factory.TransactionTopic), - createStore(), - &mock.MarshalizerMock{}, - createDataPools(), - &mock.Uint64ByteSliceConverterMock{}, - &mock.DataPackerStub{}, - ) - - container, err := rcf.Create() - - assert.Nil(t, container) - assert.Equal(t, errExpected, err) + t.Parallel() + + rcf, _ := shard.NewResolversContainerFactory( + mock.NewOneShardCoordinatorMock(), + createStubTopicMessageHandler("", factory.TransactionTopic), + createStore(), + &mock.MarshalizerMock{}, + createDataPools(), + &mock.Uint64ByteSliceConverterMock{}, + &mock.DataPackerStub{}, + ) + + container, err := rcf.Create() + + assert.Nil(t, container) + assert.Equal(t, errExpected, err) } func TestResolversContainerFactory_CreateRegisterHdrFailsShouldErr(t *testing.T) { - t.Parallel() - - rcf, _ := shard.NewResolversContainerFactory( - mock.NewOneShardCoordinatorMock(), - createStubTopicMessageHandler("", factory.HeadersTopic), - createStore(), - &mock.MarshalizerMock{}, - createDataPools(), - &mock.Uint64ByteSliceConverterMock{}, - &mock.DataPackerStub{}, - ) - - container, err := rcf.Create() - - assert.Nil(t, container) - assert.Equal(t, errExpected, err) + t.Parallel() + + rcf, _ := shard.NewResolversContainerFactory( + mock.NewOneShardCoordinatorMock(), + createStubTopicMessageHandler("", factory.HeadersTopic), + createStore(), + &mock.MarshalizerMock{}, + createDataPools(), + &mock.Uint64ByteSliceConverterMock{}, + &mock.DataPackerStub{}, + ) + + container, err := rcf.Create() + + assert.Nil(t, container) + assert.Equal(t, errExpected, err) } func TestResolversContainerFactory_CreateRegisterMiniBlocksFailsShouldErr(t *testing.T) { - t.Parallel() - - rcf, _ := shard.NewResolversContainerFactory( - mock.NewOneShardCoordinatorMock(), - createStubTopicMessageHandler("", factory.MiniBlocksTopic), - createStore(), - &mock.MarshalizerMock{}, - createDataPools(), - &mock.Uint64ByteSliceConverterMock{}, - &mock.DataPackerStub{}, - ) - - container, err := rcf.Create() - - assert.Nil(t, container) - assert.Equal(t, errExpected, err) + t.Parallel() + + rcf, _ := shard.NewResolversContainerFactory( + mock.NewOneShardCoordinatorMock(), + createStubTopicMessageHandler("", factory.MiniBlocksTopic), + createStore(), + &mock.MarshalizerMock{}, + createDataPools(), + &mock.Uint64ByteSliceConverterMock{}, + &mock.DataPackerStub{}, + ) + + container, err := rcf.Create() + + assert.Nil(t, container) + assert.Equal(t, errExpected, err) } func TestResolversContainerFactory_CreateRegisterPeerChBlocksFailsShouldErr(t *testing.T) { - 
t.Parallel() - - rcf, _ := shard.NewResolversContainerFactory( - mock.NewOneShardCoordinatorMock(), - createStubTopicMessageHandler("", factory.PeerChBodyTopic), - createStore(), - &mock.MarshalizerMock{}, - createDataPools(), - &mock.Uint64ByteSliceConverterMock{}, - &mock.DataPackerStub{}, - ) - - container, err := rcf.Create() - - assert.Nil(t, container) - assert.Equal(t, errExpected, err) + t.Parallel() + + rcf, _ := shard.NewResolversContainerFactory( + mock.NewOneShardCoordinatorMock(), + createStubTopicMessageHandler("", factory.PeerChBodyTopic), + createStore(), + &mock.MarshalizerMock{}, + createDataPools(), + &mock.Uint64ByteSliceConverterMock{}, + &mock.DataPackerStub{}, + ) + + container, err := rcf.Create() + + assert.Nil(t, container) + assert.Equal(t, errExpected, err) } func TestResolversContainerFactory_CreateShouldWork(t *testing.T) { - t.Parallel() - - rcf, _ := shard.NewResolversContainerFactory( - mock.NewOneShardCoordinatorMock(), - createStubTopicMessageHandler("", ""), - createStore(), - &mock.MarshalizerMock{}, - createDataPools(), - &mock.Uint64ByteSliceConverterMock{}, - &mock.DataPackerStub{}, - ) - - container, err := rcf.Create() - - assert.NotNil(t, container) - assert.Nil(t, err) + t.Parallel() + + rcf, _ := shard.NewResolversContainerFactory( + mock.NewOneShardCoordinatorMock(), + createStubTopicMessageHandler("", ""), + createStore(), + &mock.MarshalizerMock{}, + createDataPools(), + &mock.Uint64ByteSliceConverterMock{}, + &mock.DataPackerStub{}, + ) + + container, err := rcf.Create() + + assert.NotNil(t, container) + assert.Nil(t, err) } func TestResolversContainerFactory_With4ShardsShouldWork(t *testing.T) { - t.Parallel() - - noOfShards := 4 - - shardCoordinator := mock.NewMultipleShardsCoordinatorMock() - shardCoordinator.SetNoShards(uint32(noOfShards)) - shardCoordinator.CurrentShard = 1 - - rcf, _ := shard.NewResolversContainerFactory( - shardCoordinator, - createStubTopicMessageHandler("", ""), - createStore(), - &mock.MarshalizerMock{}, - createDataPools(), - &mock.Uint64ByteSliceConverterMock{}, - &mock.DataPackerStub{}, - ) - - container, _ := rcf.Create() - - numResolverSCRs := noOfShards - numResolverTxs := noOfShards - numResolverRewardTxs := noOfShards - numResolverHeaders := 1 - numResolverMiniBlocks := noOfShards - numResolverPeerChanges := 1 - numResolverMetachainShardHeaders := 1 - numResolverMetaBlockHeaders := 1 - totalResolvers := numResolverTxs + numResolverHeaders + numResolverMiniBlocks + numResolverPeerChanges + - numResolverMetachainShardHeaders + numResolverMetaBlockHeaders + numResolverSCRs + numResolverRewardTxs - - assert.Equal(t, totalResolvers, container.Len()) + t.Parallel() + + noOfShards := 4 + + shardCoordinator := mock.NewMultipleShardsCoordinatorMock() + shardCoordinator.SetNoShards(uint32(noOfShards)) + shardCoordinator.CurrentShard = 1 + + rcf, _ := shard.NewResolversContainerFactory( + shardCoordinator, + createStubTopicMessageHandler("", ""), + createStore(), + &mock.MarshalizerMock{}, + createDataPools(), + &mock.Uint64ByteSliceConverterMock{}, + &mock.DataPackerStub{}, + ) + + container, _ := rcf.Create() + + numResolverSCRs := noOfShards + numResolverTxs := noOfShards + numResolverRewardTxs := noOfShards + numResolverHeaders := 1 + numResolverMiniBlocks := noOfShards + numResolverPeerChanges := 1 + numResolverMetachainShardHeaders := 1 + numResolverMetaBlockHeaders := 1 + totalResolvers := numResolverTxs + numResolverHeaders + numResolverMiniBlocks + numResolverPeerChanges + + numResolverMetachainShardHeaders + 
numResolverMetaBlockHeaders + numResolverSCRs + numResolverRewardTxs + + assert.Equal(t, totalResolvers, container.Len()) } diff --git a/integrationTests/mock/blockProcessorMock.go b/integrationTests/mock/blockProcessorMock.go index 67338af603c..a4508db6a0c 100644 --- a/integrationTests/mock/blockProcessorMock.go +++ b/integrationTests/mock/blockProcessorMock.go @@ -1,108 +1,108 @@ package mock import ( - "time" + "time" - "github.com/ElrondNetwork/elrond-go/data" - "github.com/ElrondNetwork/elrond-go/data/block" - "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/marshal" ) // BlockProcessorMock mocks the implementation for a blockProcessor type BlockProcessorMock struct { - NrCommitBlockCalled uint32 - Marshalizer marshal.Marshalizer - ProcessBlockCalled func(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler, haveTime func() time.Duration) error - CommitBlockCalled func(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler) error - RevertAccountStateCalled func() - CreateBlockCalled func(round uint64, haveTime func() bool) (data.BodyHandler, error) - RestoreBlockIntoPoolsCalled func(header data.HeaderHandler, body data.BodyHandler) error - CreateBlockHeaderCalled func(body data.BodyHandler, round uint64, haveTime func() bool) (data.HeaderHandler, error) - MarshalizedDataToBroadcastCalled func(header data.HeaderHandler, body data.BodyHandler) (map[uint32][]byte, map[string][][]byte, error) - DecodeBlockBodyCalled func(dta []byte) data.BodyHandler - DecodeBlockHeaderCalled func(dta []byte) data.HeaderHandler - AddLastNotarizedHdrCalled func(shardId uint32, processedHdr data.HeaderHandler) - SetConsensusRewardAddressesCalled func([]string) + NrCommitBlockCalled uint32 + Marshalizer marshal.Marshalizer + ProcessBlockCalled func(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler, haveTime func() time.Duration) error + CommitBlockCalled func(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler) error + RevertAccountStateCalled func() + CreateBlockCalled func(round uint64, haveTime func() bool) (data.BodyHandler, error) + RestoreBlockIntoPoolsCalled func(header data.HeaderHandler, body data.BodyHandler) error + CreateBlockHeaderCalled func(body data.BodyHandler, round uint64, haveTime func() bool) (data.HeaderHandler, error) + MarshalizedDataToBroadcastCalled func(header data.HeaderHandler, body data.BodyHandler) (map[uint32][]byte, map[string][][]byte, error) + DecodeBlockBodyCalled func(dta []byte) data.BodyHandler + DecodeBlockHeaderCalled func(dta []byte) data.HeaderHandler + AddLastNotarizedHdrCalled func(shardId uint32, processedHdr data.HeaderHandler) + SetConsensusRewardAddressesCalled func([]string) } // ProcessBlock mocks pocessing a block func (blProcMock *BlockProcessorMock) ProcessBlock(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler, haveTime func() time.Duration) error { - return blProcMock.ProcessBlockCalled(blockChain, header, body, haveTime) + return blProcMock.ProcessBlockCalled(blockChain, header, body, haveTime) } // CommitBlock mocks the commit of a block func (blProcMock *BlockProcessorMock) CommitBlock(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler) error { - return blProcMock.CommitBlockCalled(blockChain, header, body) + return blProcMock.CommitBlockCalled(blockChain, header, 
body) } // RevertAccountState mocks revert of the accounts state func (blProcMock *BlockProcessorMock) RevertAccountState() { - blProcMock.RevertAccountStateCalled() + blProcMock.RevertAccountStateCalled() } // CreateTxBlockBody mocks the creation of a transaction block body func (blProcMock *BlockProcessorMock) CreateBlockBody(round uint64, haveTime func() bool) (data.BodyHandler, error) { - return blProcMock.CreateBlockCalled(round, haveTime) + return blProcMock.CreateBlockCalled(round, haveTime) } func (blProcMock *BlockProcessorMock) RestoreBlockIntoPools(header data.HeaderHandler, body data.BodyHandler) error { - return blProcMock.RestoreBlockIntoPoolsCalled(header, body) + return blProcMock.RestoreBlockIntoPoolsCalled(header, body) } func (blProcMock BlockProcessorMock) CreateBlockHeader(body data.BodyHandler, round uint64, haveTime func() bool) (data.HeaderHandler, error) { - return blProcMock.CreateBlockHeaderCalled(body, round, haveTime) + return blProcMock.CreateBlockHeaderCalled(body, round, haveTime) } func (blProcMock BlockProcessorMock) MarshalizedDataToBroadcast(header data.HeaderHandler, body data.BodyHandler) (map[uint32][]byte, map[string][][]byte, error) { - return blProcMock.MarshalizedDataToBroadcastCalled(header, body) + return blProcMock.MarshalizedDataToBroadcastCalled(header, body) } // DecodeBlockBody method decodes block body from a given byte array func (blProcMock BlockProcessorMock) DecodeBlockBody(dta []byte) data.BodyHandler { - if dta == nil { - return nil - } + if dta == nil { + return nil + } - var body block.Body + var body block.Body - err := blProcMock.Marshalizer.Unmarshal(&body, dta) - if err != nil { - return nil - } + err := blProcMock.Marshalizer.Unmarshal(&body, dta) + if err != nil { + return nil + } - return body + return body } // DecodeBlockHeader method decodes block header from a given byte array func (blProcMock BlockProcessorMock) DecodeBlockHeader(dta []byte) data.HeaderHandler { - if dta == nil { - return nil - } + if dta == nil { + return nil + } - var header block.Header + var header block.Header - err := blProcMock.Marshalizer.Unmarshal(&header, dta) - if err != nil { - return nil - } + err := blProcMock.Marshalizer.Unmarshal(&header, dta) + if err != nil { + return nil + } - return &header + return &header } func (blProcMock BlockProcessorMock) AddLastNotarizedHdr(shardId uint32, processedHdr data.HeaderHandler) { - blProcMock.AddLastNotarizedHdrCalled(shardId, processedHdr) + blProcMock.AddLastNotarizedHdrCalled(shardId, processedHdr) } func (blProcMock BlockProcessorMock) SetConsensusRewardAddresses(consensusRewardAddresses []string, round uint64) { - if blProcMock.SetConsensusRewardAddressesCalled != nil { - blProcMock.SetConsensusRewardAddressesCalled(consensusRewardAddresses) - } + if blProcMock.SetConsensusRewardAddressesCalled != nil { + blProcMock.SetConsensusRewardAddressesCalled(consensusRewardAddresses) + } } // IsInterfaceNil returns true if there is no value under the interface func (blProcMock *BlockProcessorMock) IsInterfaceNil() bool { - if blProcMock == nil { - return true - } - return false + if blProcMock == nil { + return true + } + return false } diff --git a/integrationTests/mock/specialAddressHandlerMock.go b/integrationTests/mock/specialAddressHandlerMock.go index f366ba37582..b00c01ca711 100644 --- a/integrationTests/mock/specialAddressHandlerMock.go +++ b/integrationTests/mock/specialAddressHandlerMock.go @@ -1,80 +1,79 @@ package mock import ( - "github.com/ElrondNetwork/elrond-go/data/state" - 
"github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/sharding" ) type SpecialAddressHandlerMock struct { - ElrondCommunityAddressCalled func() []byte - LeaderAddressCalled func() []byte - BurnAddressCalled func() []byte - ShardIdForAddressCalled func([]byte) (uint32, error) - AdrConv state.AddressConverter - ShardCoordinator sharding.Coordinator - - - addresses []string - epoch uint32 - round uint64 + ElrondCommunityAddressCalled func() []byte + LeaderAddressCalled func() []byte + BurnAddressCalled func() []byte + ShardIdForAddressCalled func([]byte) (uint32, error) + AdrConv state.AddressConverter + ShardCoordinator sharding.Coordinator + + addresses []string + epoch uint32 + round uint64 } func (sh *SpecialAddressHandlerMock) SetElrondCommunityAddress(elrond []byte) { } func (sh *SpecialAddressHandlerMock) SetConsensusData(consensusRewardAddresses []string, round uint64, epoch uint32) { - sh.addresses = consensusRewardAddresses + sh.addresses = consensusRewardAddresses } func (sh *SpecialAddressHandlerMock) ConsensusRewardAddresses() []string { - return sh.addresses + return sh.addresses } func (sh *SpecialAddressHandlerMock) BurnAddress() []byte { - if sh.BurnAddressCalled == nil { - return []byte("burn0000000000000000000000000000") - } + if sh.BurnAddressCalled == nil { + return []byte("burn0000000000000000000000000000") + } - return sh.BurnAddressCalled() + return sh.BurnAddressCalled() } func (sh *SpecialAddressHandlerMock) ElrondCommunityAddress() []byte { - if sh.ElrondCommunityAddressCalled == nil { - return []byte("elrond00000000000000000000000000") - } + if sh.ElrondCommunityAddressCalled == nil { + return []byte("elrond00000000000000000000000000") + } - return sh.ElrondCommunityAddressCalled() + return sh.ElrondCommunityAddressCalled() } func (sh *SpecialAddressHandlerMock) LeaderAddress() []byte { - if sh.LeaderAddressCalled == nil { - return []byte("leader0000000000000000000000000000") - } + if sh.LeaderAddressCalled == nil { + return []byte("leader0000000000000000000000000000") + } - return sh.LeaderAddressCalled() + return sh.LeaderAddressCalled() } func (sh *SpecialAddressHandlerMock) Round() uint64 { - return sh.round + return sh.round } func (sh *SpecialAddressHandlerMock) Epoch() uint32 { - return sh.epoch + return sh.epoch } func (sh *SpecialAddressHandlerMock) ShardIdForAddress(addr []byte) (uint32, error) { - convAdr, err := sh.AdrConv.CreateAddressFromPublicKeyBytes(addr) - if err != nil { - return 0, err - } + convAdr, err := sh.AdrConv.CreateAddressFromPublicKeyBytes(addr) + if err != nil { + return 0, err + } - return sh.ShardCoordinator.ComputeId(convAdr), nil + return sh.ShardCoordinator.ComputeId(convAdr), nil } // IsInterfaceNil returns true if there is no value under the interface func (sh *SpecialAddressHandlerMock) IsInterfaceNil() bool { - if sh == nil { - return true - } - return false -} \ No newline at end of file + if sh == nil { + return true + } + return false +} diff --git a/integrationTests/multiShard/block/interceptedHeadersSigVerification_test.go b/integrationTests/multiShard/block/interceptedHeadersSigVerification_test.go index 651638218fe..8a607f4dfff 100644 --- a/integrationTests/multiShard/block/interceptedHeadersSigVerification_test.go +++ b/integrationTests/multiShard/block/interceptedHeadersSigVerification_test.go @@ -1,158 +1,158 @@ package block import ( - "context" - "fmt" - "testing" - "time" - - "github.com/ElrondNetwork/elrond-go/integrationTests" - 
"github.com/ElrondNetwork/elrond-go/sharding" - "github.com/stretchr/testify/assert" + "context" + "fmt" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go/integrationTests" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/stretchr/testify/assert" ) const broadcastDelay = 2 * time.Second func TestInterceptedShardBlockHeaderVerifiedWithCorrectConsensusGroup(t *testing.T) { - if testing.Short() { - t.Skip("this is not a short test") - } - - nodesPerShard := 4 - nbMetaNodes := 4 - nbShards := 1 - consensusGroupSize := 3 - - advertiser := integrationTests.CreateMessengerWithKadDht(context.Background(), "") - _ = advertiser.Bootstrap() - - seedAddress := integrationTests.GetConnectableAddress(advertiser) - - // create map of shard - testNodeProcessors for metachain and shard chain - nodesMap := integrationTests.CreateNodesWithNodesCoordinator( - nodesPerShard, - nbMetaNodes, - nbShards, - consensusGroupSize, - consensusGroupSize, - seedAddress, - ) - - for _, nodes := range nodesMap { - integrationTests.DisplayAndStartNodes(nodes) - } - - defer func() { - _ = advertiser.Close() - for _, nodes := range nodesMap { - for _, n := range nodes { - _ = n.Node.Stop() - } - } - }() - - fmt.Println("Shard node generating header and block body...") - - // one testNodeProcessor from shard proposes block signed by all other nodes in shard consensus - randomness := []byte("random seed") - round := uint64(1) - nonce := uint64(1) - - body, header, _, _ := integrationTests.ProposeBlockWithConsensusSignature(0, nodesMap, round, nonce, randomness) - - nodesMap[0][0].BroadcastBlock(body, header) - - time.Sleep(broadcastDelay) - - headerBytes, _ := integrationTests.TestMarshalizer.Marshal(header) - headerHash := integrationTests.TestHasher.Compute(string(headerBytes)) - - // all nodes in metachain have the block header in pool as interceptor validates it - for _, metaNode := range nodesMap[sharding.MetachainShardId] { - v, ok := metaNode.MetaDataPool.ShardHeaders().Get(headerHash) - assert.True(t, ok) - assert.Equal(t, header, v) - } - - // all nodes in shard have the block in pool as interceptor validates it - for _, shardNode := range nodesMap[0] { - v, ok := shardNode.ShardDataPool.Headers().Get(headerHash) - assert.True(t, ok) - assert.Equal(t, header, v) - } + if testing.Short() { + t.Skip("this is not a short test") + } + + nodesPerShard := 4 + nbMetaNodes := 4 + nbShards := 1 + consensusGroupSize := 3 + + advertiser := integrationTests.CreateMessengerWithKadDht(context.Background(), "") + _ = advertiser.Bootstrap() + + seedAddress := integrationTests.GetConnectableAddress(advertiser) + + // create map of shard - testNodeProcessors for metachain and shard chain + nodesMap := integrationTests.CreateNodesWithNodesCoordinator( + nodesPerShard, + nbMetaNodes, + nbShards, + consensusGroupSize, + consensusGroupSize, + seedAddress, + ) + + for _, nodes := range nodesMap { + integrationTests.DisplayAndStartNodes(nodes) + } + + defer func() { + _ = advertiser.Close() + for _, nodes := range nodesMap { + for _, n := range nodes { + _ = n.Node.Stop() + } + } + }() + + fmt.Println("Shard node generating header and block body...") + + // one testNodeProcessor from shard proposes block signed by all other nodes in shard consensus + randomness := []byte("random seed") + round := uint64(1) + nonce := uint64(1) + + body, header, _, _ := integrationTests.ProposeBlockWithConsensusSignature(0, nodesMap, round, nonce, randomness) + + nodesMap[0][0].BroadcastBlock(body, header) + + time.Sleep(broadcastDelay) 
+ + headerBytes, _ := integrationTests.TestMarshalizer.Marshal(header) + headerHash := integrationTests.TestHasher.Compute(string(headerBytes)) + + // all nodes in metachain have the block header in pool as interceptor validates it + for _, metaNode := range nodesMap[sharding.MetachainShardId] { + v, ok := metaNode.MetaDataPool.ShardHeaders().Get(headerHash) + assert.True(t, ok) + assert.Equal(t, header, v) + } + + // all nodes in shard have the block in pool as interceptor validates it + for _, shardNode := range nodesMap[0] { + v, ok := shardNode.ShardDataPool.Headers().Get(headerHash) + assert.True(t, ok) + assert.Equal(t, header, v) + } } func TestInterceptedMetaBlockVerifiedWithCorrectConsensusGroup(t *testing.T) { - if testing.Short() { - t.Skip("this is not a short test") - } - - nodesPerShard := 4 - nbMetaNodes := 4 - nbShards := 1 - consensusGroupSize := 3 - - advertiser := integrationTests.CreateMessengerWithKadDht(context.Background(), "") - _ = advertiser.Bootstrap() - - seedAddress := integrationTests.GetConnectableAddress(advertiser) - - // create map of shard - testNodeProcessors for metachain and shard chain - nodesMap := integrationTests.CreateNodesWithNodesCoordinator( - nodesPerShard, - nbMetaNodes, - nbShards, - consensusGroupSize, - consensusGroupSize, - seedAddress, - ) - - for _, nodes := range nodesMap { - integrationTests.DisplayAndStartNodes(nodes) - } - - defer func() { - _ = advertiser.Close() - for _, nodes := range nodesMap { - for _, n := range nodes { - _ = n.Node.Stop() - } - } - }() - - fmt.Println("Metachain node Generating header and block body...") - - // one testNodeProcessor from shard proposes block signed by all other nodes in shard consensus - randomness := []byte("random seed") - round := uint64(1) - nonce := uint64(1) - - body, header, _, _ := integrationTests.ProposeBlockWithConsensusSignature( - sharding.MetachainShardId, - nodesMap, - round, - nonce, - randomness, - ) - - nodesMap[sharding.MetachainShardId][0].BroadcastBlock(body, header) - - time.Sleep(broadcastDelay) - - headerBytes, _ := integrationTests.TestMarshalizer.Marshal(header) - headerHash := integrationTests.TestHasher.Compute(string(headerBytes)) - - // all nodes in metachain do not have the block in pool as interceptor does not validate it with a wrong consensus - for _, metaNode := range nodesMap[sharding.MetachainShardId] { - v, ok := metaNode.MetaDataPool.MetaChainBlocks().Get(headerHash) - assert.True(t, ok) - assert.Equal(t, header, v) - } - - // all nodes in shard do not have the block in pool as interceptor does not validate it with a wrong consensus - for _, shardNode := range nodesMap[0] { - v, ok := shardNode.ShardDataPool.MetaBlocks().Get(headerHash) - assert.True(t, ok) - assert.Equal(t, header, v) - } + if testing.Short() { + t.Skip("this is not a short test") + } + + nodesPerShard := 4 + nbMetaNodes := 4 + nbShards := 1 + consensusGroupSize := 3 + + advertiser := integrationTests.CreateMessengerWithKadDht(context.Background(), "") + _ = advertiser.Bootstrap() + + seedAddress := integrationTests.GetConnectableAddress(advertiser) + + // create map of shard - testNodeProcessors for metachain and shard chain + nodesMap := integrationTests.CreateNodesWithNodesCoordinator( + nodesPerShard, + nbMetaNodes, + nbShards, + consensusGroupSize, + consensusGroupSize, + seedAddress, + ) + + for _, nodes := range nodesMap { + integrationTests.DisplayAndStartNodes(nodes) + } + + defer func() { + _ = advertiser.Close() + for _, nodes := range nodesMap { + for _, n := range nodes { + 
_ = n.Node.Stop()
+ }
+ }
+ }()
+
+ fmt.Println("Metachain node Generating header and block body...")
+
+ // one testNodeProcessor from the metachain proposes a block signed by all other nodes in the metachain consensus
+ randomness := []byte("random seed")
+ round := uint64(1)
+ nonce := uint64(1)
+
+ body, header, _, _ := integrationTests.ProposeBlockWithConsensusSignature(
+ sharding.MetachainShardId,
+ nodesMap,
+ round,
+ nonce,
+ randomness,
+ )
+
+ nodesMap[sharding.MetachainShardId][0].BroadcastBlock(body, header)
+
+ time.Sleep(broadcastDelay)
+
+ headerBytes, _ := integrationTests.TestMarshalizer.Marshal(header)
+ headerHash := integrationTests.TestHasher.Compute(string(headerBytes))
+
+ // all nodes in metachain have the meta block in pool as the interceptor validates it with the correct consensus group
+ for _, metaNode := range nodesMap[sharding.MetachainShardId] {
+ v, ok := metaNode.MetaDataPool.MetaChainBlocks().Get(headerHash)
+ assert.True(t, ok)
+ assert.Equal(t, header, v)
+ }
+
+ // all nodes in shard have the meta block in pool as the interceptor validates it with the correct consensus group
+ for _, shardNode := range nodesMap[0] {
+ v, ok := shardNode.ShardDataPool.MetaBlocks().Get(headerHash)
+ assert.True(t, ok)
+ assert.Equal(t, header, v)
+ }
}
diff --git a/integrationTests/multiShard/smartContract/testInitilalizer.go b/integrationTests/multiShard/smartContract/testInitilalizer.go
index 8b973ba8060..3f7d9fb26cd 100644
--- a/integrationTests/multiShard/smartContract/testInitilalizer.go
+++ b/integrationTests/multiShard/smartContract/testInitilalizer.go
@@ -1,64 +1,64 @@
package smartContract

import (
- "context"
- "crypto/ecdsa"
- "encoding/base64"
- "encoding/hex"
- "fmt"
- "math/big"
- "math/rand"
- "strings"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/ElrondNetwork/elrond-go/consensus"
- "github.com/ElrondNetwork/elrond-go/consensus/spos/sposFactory"
- "github.com/ElrondNetwork/elrond-go/core/partitioning"
- "github.com/ElrondNetwork/elrond-go/crypto"
- "github.com/ElrondNetwork/elrond-go/crypto/signing"
- "github.com/ElrondNetwork/elrond-go/crypto/signing/kyber"
- "github.com/ElrondNetwork/elrond-go/crypto/signing/kyber/singlesig"
- "github.com/ElrondNetwork/elrond-go/data"
- dataBlock "github.com/ElrondNetwork/elrond-go/data/block"
- "github.com/ElrondNetwork/elrond-go/data/blockchain"
- "github.com/ElrondNetwork/elrond-go/data/state"
- "github.com/ElrondNetwork/elrond-go/data/state/addressConverters"
- "github.com/ElrondNetwork/elrond-go/data/trie"
- "github.com/ElrondNetwork/elrond-go/data/typeConverters/uint64ByteSlice"
- "github.com/ElrondNetwork/elrond-go/dataRetriever"
- "github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool"
- "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/containers"
- metafactoryDataRetriever "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/metachain"
- factoryDataRetriever "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/shard"
- "github.com/ElrondNetwork/elrond-go/dataRetriever/requestHandlers"
- "github.com/ElrondNetwork/elrond-go/dataRetriever/shardedData"
- "github.com/ElrondNetwork/elrond-go/hashing/sha256"
- "github.com/ElrondNetwork/elrond-go/integrationTests/mock"
- "github.com/ElrondNetwork/elrond-go/marshal"
- "github.com/ElrondNetwork/elrond-go/node"
- "github.com/ElrondNetwork/elrond-go/p2p"
- "github.com/ElrondNetwork/elrond-go/p2p/libp2p"
- "github.com/ElrondNetwork/elrond-go/p2p/libp2p/discovery"
- "github.com/ElrondNetwork/elrond-go/p2p/loadBalancer"
- "github.com/ElrondNetwork/elrond-go/process"
- 
"github.com/ElrondNetwork/elrond-go/process/block" - "github.com/ElrondNetwork/elrond-go/process/coordinator" - "github.com/ElrondNetwork/elrond-go/process/factory" - metaProcess "github.com/ElrondNetwork/elrond-go/process/factory/metachain" - "github.com/ElrondNetwork/elrond-go/process/factory/shard" - "github.com/ElrondNetwork/elrond-go/process/rewardTransaction" - "github.com/ElrondNetwork/elrond-go/process/smartContract" - "github.com/ElrondNetwork/elrond-go/process/smartContract/hooks" - "github.com/ElrondNetwork/elrond-go/process/transaction" - "github.com/ElrondNetwork/elrond-go/sharding" - "github.com/ElrondNetwork/elrond-go/storage" - "github.com/ElrondNetwork/elrond-go/storage/memorydb" - "github.com/ElrondNetwork/elrond-go/storage/storageUnit" - vmcommon "github.com/ElrondNetwork/elrond-vm-common" - "github.com/btcsuite/btcd/btcec" - libp2pCrypto "github.com/libp2p/go-libp2p-core/crypto" + "context" + "crypto/ecdsa" + "encoding/base64" + "encoding/hex" + "fmt" + "math/big" + "math/rand" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/ElrondNetwork/elrond-go/consensus" + "github.com/ElrondNetwork/elrond-go/consensus/spos/sposFactory" + "github.com/ElrondNetwork/elrond-go/core/partitioning" + "github.com/ElrondNetwork/elrond-go/crypto" + "github.com/ElrondNetwork/elrond-go/crypto/signing" + "github.com/ElrondNetwork/elrond-go/crypto/signing/kyber" + "github.com/ElrondNetwork/elrond-go/crypto/signing/kyber/singlesig" + "github.com/ElrondNetwork/elrond-go/data" + dataBlock "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/blockchain" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/data/state/addressConverters" + "github.com/ElrondNetwork/elrond-go/data/trie" + "github.com/ElrondNetwork/elrond-go/data/typeConverters/uint64ByteSlice" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool" + "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/containers" + metafactoryDataRetriever "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/metachain" + factoryDataRetriever "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/shard" + "github.com/ElrondNetwork/elrond-go/dataRetriever/requestHandlers" + "github.com/ElrondNetwork/elrond-go/dataRetriever/shardedData" + "github.com/ElrondNetwork/elrond-go/hashing/sha256" + "github.com/ElrondNetwork/elrond-go/integrationTests/mock" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/node" + "github.com/ElrondNetwork/elrond-go/p2p" + "github.com/ElrondNetwork/elrond-go/p2p/libp2p" + "github.com/ElrondNetwork/elrond-go/p2p/libp2p/discovery" + "github.com/ElrondNetwork/elrond-go/p2p/loadBalancer" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/block" + "github.com/ElrondNetwork/elrond-go/process/coordinator" + "github.com/ElrondNetwork/elrond-go/process/factory" + metaProcess "github.com/ElrondNetwork/elrond-go/process/factory/metachain" + "github.com/ElrondNetwork/elrond-go/process/factory/shard" + "github.com/ElrondNetwork/elrond-go/process/rewardTransaction" + "github.com/ElrondNetwork/elrond-go/process/smartContract" + "github.com/ElrondNetwork/elrond-go/process/smartContract/hooks" + "github.com/ElrondNetwork/elrond-go/process/transaction" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/storage" + "github.com/ElrondNetwork/elrond-go/storage/memorydb" + 
"github.com/ElrondNetwork/elrond-go/storage/storageUnit" + vmcommon "github.com/ElrondNetwork/elrond-vm-common" + "github.com/btcsuite/btcd/btcec" + libp2pCrypto "github.com/libp2p/go-libp2p-core/crypto" ) var r *rand.Rand @@ -72,835 +72,835 @@ var addrConv, _ = addressConverters.NewPlainAddressConverter(32, "0x") var opGas = int64(1) func init() { - r = rand.New(rand.NewSource(time.Now().UnixNano())) + r = rand.New(rand.NewSource(time.Now().UnixNano())) } type testNode struct { - node *node.Node - messenger p2p.Messenger - shardId uint32 - accntState state.AccountsAdapter - blkc data.ChainHandler - store dataRetriever.StorageService - blkProcessor process.BlockProcessor - txProcessor process.TransactionProcessor - txCoordinator process.TransactionCoordinator - scrForwarder process.IntermediateTransactionHandler - broadcastMessenger consensus.BroadcastMessenger - sk crypto.PrivateKey - pk crypto.PublicKey - dPool dataRetriever.PoolsHolder - resFinder dataRetriever.ResolversFinder - headersRecv int32 - miniblocksRecv int32 - mutHeaders sync.Mutex - headersHashes [][]byte - headers []data.HeaderHandler - mutMiniblocks sync.Mutex - miniblocksHashes [][]byte - miniblocks []*dataBlock.MiniBlock - metachainHdrRecv int32 - txsRecv int32 + node *node.Node + messenger p2p.Messenger + shardId uint32 + accntState state.AccountsAdapter + blkc data.ChainHandler + store dataRetriever.StorageService + blkProcessor process.BlockProcessor + txProcessor process.TransactionProcessor + txCoordinator process.TransactionCoordinator + scrForwarder process.IntermediateTransactionHandler + broadcastMessenger consensus.BroadcastMessenger + sk crypto.PrivateKey + pk crypto.PublicKey + dPool dataRetriever.PoolsHolder + resFinder dataRetriever.ResolversFinder + headersRecv int32 + miniblocksRecv int32 + mutHeaders sync.Mutex + headersHashes [][]byte + headers []data.HeaderHandler + mutMiniblocks sync.Mutex + miniblocksHashes [][]byte + miniblocks []*dataBlock.MiniBlock + metachainHdrRecv int32 + txsRecv int32 } type keyPair struct { - sk crypto.PrivateKey - pk crypto.PublicKey + sk crypto.PrivateKey + pk crypto.PublicKey } type cryptoParams struct { - keyGen crypto.KeyGenerator - keys map[uint32][]*keyPair - singleSigner crypto.SingleSigner + keyGen crypto.KeyGenerator + keys map[uint32][]*keyPair + singleSigner crypto.SingleSigner } func genValidatorsFromPubKeys(pubKeysMap map[uint32][]string) map[uint32][]sharding.Validator { - validatorsMap := make(map[uint32][]sharding.Validator) - - for shardId, shardNodesPks := range pubKeysMap { - shardValidators := make([]sharding.Validator, 0) - for i := 0; i < len(shardNodesPks); i++ { - v, _ := sharding.NewValidator(big.NewInt(0), 1, []byte(shardNodesPks[i]), []byte(shardNodesPks[i])) - shardValidators = append(shardValidators, v) - } - validatorsMap[shardId] = shardValidators - } - - return validatorsMap + validatorsMap := make(map[uint32][]sharding.Validator) + + for shardId, shardNodesPks := range pubKeysMap { + shardValidators := make([]sharding.Validator, 0) + for i := 0; i < len(shardNodesPks); i++ { + v, _ := sharding.NewValidator(big.NewInt(0), 1, []byte(shardNodesPks[i]), []byte(shardNodesPks[i])) + shardValidators = append(shardValidators, v) + } + validatorsMap[shardId] = shardValidators + } + + return validatorsMap } func createCryptoParams(nodesPerShard int, nbMetaNodes int, nbShards int) *cryptoParams { - suite := kyber.NewBlakeSHA256Ed25519() - singleSigner := &singlesig.SchnorrSigner{} - keyGen := signing.NewKeyGenerator(suite) - - keysMap := 
make(map[uint32][]*keyPair) - keyPairs := make([]*keyPair, nodesPerShard) - for shardId := 0; shardId < nbShards; shardId++ { - for n := 0; n < nodesPerShard; n++ { - kp := &keyPair{} - kp.sk, kp.pk = keyGen.GeneratePair() - keyPairs[n] = kp - } - keysMap[uint32(shardId)] = keyPairs - } - - keyPairs = make([]*keyPair, nbMetaNodes) - for n := 0; n < nbMetaNodes; n++ { - kp := &keyPair{} - kp.sk, kp.pk = keyGen.GeneratePair() - keyPairs[n] = kp - } - keysMap[sharding.MetachainShardId] = keyPairs - - params := &cryptoParams{ - keys: keysMap, - keyGen: keyGen, - singleSigner: singleSigner, - } - - return params + suite := kyber.NewBlakeSHA256Ed25519() + singleSigner := &singlesig.SchnorrSigner{} + keyGen := signing.NewKeyGenerator(suite) + + keysMap := make(map[uint32][]*keyPair) + keyPairs := make([]*keyPair, nodesPerShard) + for shardId := 0; shardId < nbShards; shardId++ { + for n := 0; n < nodesPerShard; n++ { + kp := &keyPair{} + kp.sk, kp.pk = keyGen.GeneratePair() + keyPairs[n] = kp + } + keysMap[uint32(shardId)] = keyPairs + } + + keyPairs = make([]*keyPair, nbMetaNodes) + for n := 0; n < nbMetaNodes; n++ { + kp := &keyPair{} + kp.sk, kp.pk = keyGen.GeneratePair() + keyPairs[n] = kp + } + keysMap[sharding.MetachainShardId] = keyPairs + + params := &cryptoParams{ + keys: keysMap, + keyGen: keyGen, + singleSigner: singleSigner, + } + + return params } func pubKeysMapFromKeysMap(keyPairMap map[uint32][]*keyPair) map[uint32][]string { - keysMap := make(map[uint32][]string, 0) - - for shardId, pairList := range keyPairMap { - shardKeys := make([]string, len(pairList)) - for i, pair := range pairList { - bytes, _ := pair.pk.ToByteArray() - shardKeys[i] = string(bytes) - } - keysMap[shardId] = shardKeys - } - - return keysMap + keysMap := make(map[uint32][]string, 0) + + for shardId, pairList := range keyPairMap { + shardKeys := make([]string, len(pairList)) + for i, pair := range pairList { + bytes, _ := pair.pk.ToByteArray() + shardKeys[i] = string(bytes) + } + keysMap[shardId] = shardKeys + } + + return keysMap } func createTestShardChain() *blockchain.BlockChain { - cfgCache := storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache} - badBlockCache, _ := storageUnit.NewCache(cfgCache.Type, cfgCache.Size, cfgCache.Shards) - blockChain, _ := blockchain.NewBlockChain( - badBlockCache, - ) - blockChain.GenesisHeader = &dataBlock.Header{} - genesisHeaderM, _ := testMarshalizer.Marshal(blockChain.GenesisHeader) + cfgCache := storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache} + badBlockCache, _ := storageUnit.NewCache(cfgCache.Type, cfgCache.Size, cfgCache.Shards) + blockChain, _ := blockchain.NewBlockChain( + badBlockCache, + ) + blockChain.GenesisHeader = &dataBlock.Header{} + genesisHeaderM, _ := testMarshalizer.Marshal(blockChain.GenesisHeader) - blockChain.SetGenesisHeaderHash(testHasher.Compute(string(genesisHeaderM))) + blockChain.SetGenesisHeaderHash(testHasher.Compute(string(genesisHeaderM))) - return blockChain + return blockChain } func createMemUnit() storage.Storer { - cache, _ := storageUnit.NewCache(storageUnit.LRUCache, 10, 1) - persist, _ := memorydb.New() + cache, _ := storageUnit.NewCache(storageUnit.LRUCache, 10, 1) + persist, _ := memorydb.New() - unit, _ := storageUnit.NewStorageUnit(cache, persist) - return unit + unit, _ := storageUnit.NewStorageUnit(cache, persist) + return unit } func createTestShardStore(numOfShards uint32) dataRetriever.StorageService { - store := dataRetriever.NewChainStorer() - store.AddStorer(dataRetriever.TransactionUnit, 
createMemUnit()) - store.AddStorer(dataRetriever.MiniBlockUnit, createMemUnit()) - store.AddStorer(dataRetriever.MetaBlockUnit, createMemUnit()) - store.AddStorer(dataRetriever.PeerChangesUnit, createMemUnit()) - store.AddStorer(dataRetriever.BlockHeaderUnit, createMemUnit()) - store.AddStorer(dataRetriever.UnsignedTransactionUnit, createMemUnit()) - store.AddStorer(dataRetriever.RewardTransactionUnit, createMemUnit()) - store.AddStorer(dataRetriever.MetaHdrNonceHashDataUnit, createMemUnit()) - - for i := uint32(0); i < numOfShards; i++ { - hdrNonceHashDataUnit := dataRetriever.ShardHdrNonceHashDataUnit + dataRetriever.UnitType(i) - store.AddStorer(hdrNonceHashDataUnit, createMemUnit()) - } - - return store + store := dataRetriever.NewChainStorer() + store.AddStorer(dataRetriever.TransactionUnit, createMemUnit()) + store.AddStorer(dataRetriever.MiniBlockUnit, createMemUnit()) + store.AddStorer(dataRetriever.MetaBlockUnit, createMemUnit()) + store.AddStorer(dataRetriever.PeerChangesUnit, createMemUnit()) + store.AddStorer(dataRetriever.BlockHeaderUnit, createMemUnit()) + store.AddStorer(dataRetriever.UnsignedTransactionUnit, createMemUnit()) + store.AddStorer(dataRetriever.RewardTransactionUnit, createMemUnit()) + store.AddStorer(dataRetriever.MetaHdrNonceHashDataUnit, createMemUnit()) + + for i := uint32(0); i < numOfShards; i++ { + hdrNonceHashDataUnit := dataRetriever.ShardHdrNonceHashDataUnit + dataRetriever.UnitType(i) + store.AddStorer(hdrNonceHashDataUnit, createMemUnit()) + } + + return store } func createTestShardDataPool() dataRetriever.PoolsHolder { - txPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache}) - uTxPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache}) - rewardsTxPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache}) - cacherCfg := storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache} - hdrPool, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - - cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache} - hdrNoncesCacher, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - hdrNonces, _ := dataPool.NewNonceSyncMapCacher(hdrNoncesCacher, uint64ByteSlice.NewBigEndianConverter()) - - cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache} - txBlockBody, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - - cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache} - peerChangeBlockBody, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - - cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache} - metaBlocks, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - - dPool, _ := dataPool.NewShardedDataPool( - txPool, - uTxPool, - rewardsTxPool, - hdrPool, - hdrNonces, - txBlockBody, - peerChangeBlockBody, - metaBlocks, - ) - - return dPool + txPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache}) + uTxPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache}) + rewardsTxPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache}) + cacherCfg := storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache} + hdrPool, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, 
cacherCfg.Shards) + + cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache} + hdrNoncesCacher, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) + hdrNonces, _ := dataPool.NewNonceSyncMapCacher(hdrNoncesCacher, uint64ByteSlice.NewBigEndianConverter()) + + cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache} + txBlockBody, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) + + cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache} + peerChangeBlockBody, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) + + cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache} + metaBlocks, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) + + dPool, _ := dataPool.NewShardedDataPool( + txPool, + uTxPool, + rewardsTxPool, + hdrPool, + hdrNonces, + txBlockBody, + peerChangeBlockBody, + metaBlocks, + ) + + return dPool } func createAccountsDB() *state.AccountsDB { - hasher := sha256.Sha256{} - store := createMemUnit() - - tr, _ := trie.NewTrie(store, testMarshalizer, hasher) - adb, _ := state.NewAccountsDB(tr, sha256.Sha256{}, testMarshalizer, &mock.AccountsFactoryStub{ - CreateAccountCalled: func(address state.AddressContainer, tracker state.AccountTracker) (wrapper state.AccountHandler, e error) { - return state.NewAccount(address, tracker) - }, - }) - return adb + hasher := sha256.Sha256{} + store := createMemUnit() + + tr, _ := trie.NewTrie(store, testMarshalizer, hasher) + adb, _ := state.NewAccountsDB(tr, sha256.Sha256{}, testMarshalizer, &mock.AccountsFactoryStub{ + CreateAccountCalled: func(address state.AddressContainer, tracker state.AccountTracker) (wrapper state.AccountHandler, e error) { + return state.NewAccount(address, tracker) + }, + }) + return adb } func createNetNode( - dPool dataRetriever.PoolsHolder, - accntAdapter state.AccountsAdapter, - shardCoordinator sharding.Coordinator, - nodesCoordinator sharding.NodesCoordinator, - targetShardId uint32, - initialAddr string, - params *cryptoParams, - keysIndex int, + dPool dataRetriever.PoolsHolder, + accntAdapter state.AccountsAdapter, + shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, + targetShardId uint32, + initialAddr string, + params *cryptoParams, + keysIndex int, ) ( - *node.Node, - p2p.Messenger, - dataRetriever.ResolversFinder, - process.BlockProcessor, - process.TransactionProcessor, - process.TransactionCoordinator, - process.IntermediateTransactionHandler, - data.ChainHandler, - dataRetriever.StorageService) { - - messenger := createMessengerWithKadDht(context.Background(), initialAddr) - keyPair := params.keys[targetShardId][keysIndex] - pkBuff, _ := keyPair.pk.ToByteArray() - fmt.Printf("pk: %s\n", hex.EncodeToString(pkBuff)) - - blkc := createTestShardChain() - store := createTestShardStore(shardCoordinator.NumberOfShards()) - uint64Converter := uint64ByteSlice.NewBigEndianConverter() - dataPacker, _ := partitioning.NewSizeDataPacker(testMarshalizer) - - interceptorContainerFactory, _ := shard.NewInterceptorsContainerFactory( - shardCoordinator, - nodesCoordinator, - messenger, - store, - testMarshalizer, - testHasher, - params.keyGen, - params.singleSigner, - testMultiSig, - dPool, - testAddressConverter, - ) - interceptorsContainer, err := interceptorContainerFactory.Create() - if err != nil { - fmt.Println(err.Error()) - } - - resolversContainerFactory, _ := 
factoryDataRetriever.NewResolversContainerFactory( - shardCoordinator, - messenger, - store, - testMarshalizer, - dPool, - uint64Converter, - dataPacker, - ) - resolversContainer, _ := resolversContainerFactory.Create() - resolversFinder, _ := containers.NewResolversFinder(resolversContainer, shardCoordinator) - requestHandler, _ := requestHandlers.NewShardResolverRequestHandler( - resolversFinder, - factory.TransactionTopic, - factory.UnsignedTransactionTopic, - factory.RewardsTransactionTopic, - factory.MiniBlocksTopic, - factory.MetachainBlocksTopic, - 100, - ) - - interimProcFactory, _ := shard.NewIntermediateProcessorsContainerFactory( - shardCoordinator, - testMarshalizer, - testHasher, - testAddressConverter, - &mock.SpecialAddressHandlerMock{ - ShardCoordinator: shardCoordinator, - AdrConv: testAddressConverter, - }, - store, - dPool, - ) - interimProcContainer, _ := interimProcFactory.Create() - scForwarder, _ := interimProcContainer.Get(dataBlock.SmartContractResultBlock) - rewardsInter, _ := interimProcContainer.Get(dataBlock.RewardsBlock) - rewardsHandler, _ := rewardsInter.(process.TransactionFeeHandler) - rewardProcessor, _ := rewardTransaction.NewRewardTxProcessor( - accntAdapter, - addrConv, - shardCoordinator, - rewardsInter, - ) - vm, blockChainHook := createVMAndBlockchainHook(accntAdapter) - vmContainer := &mock.VMContainerMock{ - GetCalled: func(key []byte) (handler vmcommon.VMExecutionHandler, e error) { - return vm, nil - }} - argsParser, _ := smartContract.NewAtArgumentParser() - scProcessor, _ := smartContract.NewSmartContractProcessor( - vmContainer, - argsParser, - testHasher, - testMarshalizer, - accntAdapter, - blockChainHook, - addrConv, - shardCoordinator, - scForwarder, - rewardsHandler, - ) - - txTypeHandler, _ := coordinator.NewTxTypeHandler(addrConv, shardCoordinator, accntAdapter) - - txProcessor, _ := transaction.NewTxProcessor( - accntAdapter, - testHasher, - testAddressConverter, - testMarshalizer, - shardCoordinator, - scProcessor, - rewardsHandler, - txTypeHandler, - ) - - fact, _ := shard.NewPreProcessorsContainerFactory( - shardCoordinator, - store, - testMarshalizer, - testHasher, - dPool, - testAddressConverter, - accntAdapter, - requestHandler, - txProcessor, - scProcessor, - scProcessor, - rewardProcessor, - ) - container, _ := fact.Create() - - tc, _ := coordinator.NewTransactionCoordinator( - shardCoordinator, - accntAdapter, - dPool, - requestHandler, - container, - interimProcContainer, - ) - - genesisBlocks := createGenesisBlocks(shardCoordinator) - blockProcessor, _ := block.NewShardProcessor( - &mock.ServiceContainerMock{}, - dPool, - store, - testHasher, - testMarshalizer, - accntAdapter, - shardCoordinator, - nodesCoordinator, - &mock.SpecialAddressHandlerMock{ - ShardCoordinator: shardCoordinator, - AdrConv: testAddressConverter, - }, - &mock.ForkDetectorMock{ - AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeader data.HeaderHandler, finalHeaderHash []byte) error { - return nil - }, - GetHighestFinalBlockNonceCalled: func() uint64 { - return 0 - }, - ProbableHighestNonceCalled: func() uint64 { - return 0 - }, - }, - &mock.BlocksTrackerMock{ - AddBlockCalled: func(headerHandler data.HeaderHandler) { - }, - RemoveNotarisedBlocksCalled: func(headerHandler data.HeaderHandler) error { - return nil - }, - UnnotarisedBlocksCalled: func() []data.HeaderHandler { - return make([]data.HeaderHandler, 0) - }, - }, - genesisBlocks, - requestHandler, - tc, - uint64Converter, - ) - - _ = 
blkc.SetGenesisHeader(genesisBlocks[shardCoordinator.SelfId()]) - - n, err := node.NewNode( - node.WithMessenger(messenger), - node.WithMarshalizer(testMarshalizer), - node.WithHasher(testHasher), - node.WithDataPool(dPool), - node.WithAddressConverter(testAddressConverter), - node.WithAccountsAdapter(accntAdapter), - node.WithKeyGen(params.keyGen), - node.WithShardCoordinator(shardCoordinator), - node.WithBlockChain(blkc), - node.WithUint64ByteSliceConverter(uint64Converter), - node.WithMultiSigner(testMultiSig), - node.WithSingleSigner(params.singleSigner), - node.WithTxSignPrivKey(keyPair.sk), - node.WithTxSignPubKey(keyPair.pk), - node.WithInterceptorsContainer(interceptorsContainer), - node.WithResolversFinder(resolversFinder), - node.WithBlockProcessor(blockProcessor), - node.WithDataStore(store), - node.WithSyncer(&mock.SyncTimerMock{}), - ) - - if err != nil { - fmt.Println(err.Error()) - } - - return n, messenger, resolversFinder, blockProcessor, txProcessor, tc, scForwarder, blkc, store + *node.Node, + p2p.Messenger, + dataRetriever.ResolversFinder, + process.BlockProcessor, + process.TransactionProcessor, + process.TransactionCoordinator, + process.IntermediateTransactionHandler, + data.ChainHandler, + dataRetriever.StorageService) { + + messenger := createMessengerWithKadDht(context.Background(), initialAddr) + keyPair := params.keys[targetShardId][keysIndex] + pkBuff, _ := keyPair.pk.ToByteArray() + fmt.Printf("pk: %s\n", hex.EncodeToString(pkBuff)) + + blkc := createTestShardChain() + store := createTestShardStore(shardCoordinator.NumberOfShards()) + uint64Converter := uint64ByteSlice.NewBigEndianConverter() + dataPacker, _ := partitioning.NewSizeDataPacker(testMarshalizer) + + interceptorContainerFactory, _ := shard.NewInterceptorsContainerFactory( + shardCoordinator, + nodesCoordinator, + messenger, + store, + testMarshalizer, + testHasher, + params.keyGen, + params.singleSigner, + testMultiSig, + dPool, + testAddressConverter, + ) + interceptorsContainer, err := interceptorContainerFactory.Create() + if err != nil { + fmt.Println(err.Error()) + } + + resolversContainerFactory, _ := factoryDataRetriever.NewResolversContainerFactory( + shardCoordinator, + messenger, + store, + testMarshalizer, + dPool, + uint64Converter, + dataPacker, + ) + resolversContainer, _ := resolversContainerFactory.Create() + resolversFinder, _ := containers.NewResolversFinder(resolversContainer, shardCoordinator) + requestHandler, _ := requestHandlers.NewShardResolverRequestHandler( + resolversFinder, + factory.TransactionTopic, + factory.UnsignedTransactionTopic, + factory.RewardsTransactionTopic, + factory.MiniBlocksTopic, + factory.MetachainBlocksTopic, + 100, + ) + + interimProcFactory, _ := shard.NewIntermediateProcessorsContainerFactory( + shardCoordinator, + testMarshalizer, + testHasher, + testAddressConverter, + &mock.SpecialAddressHandlerMock{ + ShardCoordinator: shardCoordinator, + AdrConv: testAddressConverter, + }, + store, + dPool, + ) + interimProcContainer, _ := interimProcFactory.Create() + scForwarder, _ := interimProcContainer.Get(dataBlock.SmartContractResultBlock) + rewardsInter, _ := interimProcContainer.Get(dataBlock.RewardsBlock) + rewardsHandler, _ := rewardsInter.(process.TransactionFeeHandler) + rewardProcessor, _ := rewardTransaction.NewRewardTxProcessor( + accntAdapter, + addrConv, + shardCoordinator, + rewardsInter, + ) + vm, blockChainHook := createVMAndBlockchainHook(accntAdapter) + vmContainer := &mock.VMContainerMock{ + GetCalled: func(key []byte) (handler 
vmcommon.VMExecutionHandler, e error) { + return vm, nil + }} + argsParser, _ := smartContract.NewAtArgumentParser() + scProcessor, _ := smartContract.NewSmartContractProcessor( + vmContainer, + argsParser, + testHasher, + testMarshalizer, + accntAdapter, + blockChainHook, + addrConv, + shardCoordinator, + scForwarder, + rewardsHandler, + ) + + txTypeHandler, _ := coordinator.NewTxTypeHandler(addrConv, shardCoordinator, accntAdapter) + + txProcessor, _ := transaction.NewTxProcessor( + accntAdapter, + testHasher, + testAddressConverter, + testMarshalizer, + shardCoordinator, + scProcessor, + rewardsHandler, + txTypeHandler, + ) + + fact, _ := shard.NewPreProcessorsContainerFactory( + shardCoordinator, + store, + testMarshalizer, + testHasher, + dPool, + testAddressConverter, + accntAdapter, + requestHandler, + txProcessor, + scProcessor, + scProcessor, + rewardProcessor, + ) + container, _ := fact.Create() + + tc, _ := coordinator.NewTransactionCoordinator( + shardCoordinator, + accntAdapter, + dPool, + requestHandler, + container, + interimProcContainer, + ) + + genesisBlocks := createGenesisBlocks(shardCoordinator) + blockProcessor, _ := block.NewShardProcessor( + &mock.ServiceContainerMock{}, + dPool, + store, + testHasher, + testMarshalizer, + accntAdapter, + shardCoordinator, + nodesCoordinator, + &mock.SpecialAddressHandlerMock{ + ShardCoordinator: shardCoordinator, + AdrConv: testAddressConverter, + }, + &mock.ForkDetectorMock{ + AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeader data.HeaderHandler, finalHeaderHash []byte) error { + return nil + }, + GetHighestFinalBlockNonceCalled: func() uint64 { + return 0 + }, + ProbableHighestNonceCalled: func() uint64 { + return 0 + }, + }, + &mock.BlocksTrackerMock{ + AddBlockCalled: func(headerHandler data.HeaderHandler) { + }, + RemoveNotarisedBlocksCalled: func(headerHandler data.HeaderHandler) error { + return nil + }, + UnnotarisedBlocksCalled: func() []data.HeaderHandler { + return make([]data.HeaderHandler, 0) + }, + }, + genesisBlocks, + requestHandler, + tc, + uint64Converter, + ) + + _ = blkc.SetGenesisHeader(genesisBlocks[shardCoordinator.SelfId()]) + + n, err := node.NewNode( + node.WithMessenger(messenger), + node.WithMarshalizer(testMarshalizer), + node.WithHasher(testHasher), + node.WithDataPool(dPool), + node.WithAddressConverter(testAddressConverter), + node.WithAccountsAdapter(accntAdapter), + node.WithKeyGen(params.keyGen), + node.WithShardCoordinator(shardCoordinator), + node.WithBlockChain(blkc), + node.WithUint64ByteSliceConverter(uint64Converter), + node.WithMultiSigner(testMultiSig), + node.WithSingleSigner(params.singleSigner), + node.WithTxSignPrivKey(keyPair.sk), + node.WithTxSignPubKey(keyPair.pk), + node.WithInterceptorsContainer(interceptorsContainer), + node.WithResolversFinder(resolversFinder), + node.WithBlockProcessor(blockProcessor), + node.WithDataStore(store), + node.WithSyncer(&mock.SyncTimerMock{}), + ) + + if err != nil { + fmt.Println(err.Error()) + } + + return n, messenger, resolversFinder, blockProcessor, txProcessor, tc, scForwarder, blkc, store } func createMessengerWithKadDht(ctx context.Context, initialAddr string) p2p.Messenger { - prvKey, _ := ecdsa.GenerateKey(btcec.S256(), r) - sk := (*libp2pCrypto.Secp256k1PrivateKey)(prvKey) - - libP2PMes, err := libp2p.NewNetworkMessengerOnFreePort( - ctx, - sk, - nil, - loadBalancer.NewOutgoingChannelLoadBalancer(), - discovery.NewKadDhtPeerDiscoverer(time.Second, "test", []string{initialAddr}), - ) - 
if err != nil { - fmt.Println(err.Error()) - } - - return libP2PMes + prvKey, _ := ecdsa.GenerateKey(btcec.S256(), r) + sk := (*libp2pCrypto.Secp256k1PrivateKey)(prvKey) + + libP2PMes, err := libp2p.NewNetworkMessengerOnFreePort( + ctx, + sk, + nil, + loadBalancer.NewOutgoingChannelLoadBalancer(), + discovery.NewKadDhtPeerDiscoverer(time.Second, "test", []string{initialAddr}), + ) + if err != nil { + fmt.Println(err.Error()) + } + + return libP2PMes } func getConnectableAddress(mes p2p.Messenger) string { - for _, addr := range mes.Addresses() { - if strings.Contains(addr, "circuit") || strings.Contains(addr, "169.254") { - continue - } - return addr - } - return "" + for _, addr := range mes.Addresses() { + if strings.Contains(addr, "circuit") || strings.Contains(addr, "169.254") { + continue + } + return addr + } + return "" } func displayAndStartNodes(nodes map[uint32][]*testNode) { - for _, nodeList := range nodes { - for _, n := range nodeList { - skBuff, _ := n.sk.ToByteArray() - pkBuff, _ := n.pk.ToByteArray() - - fmt.Printf("Shard ID: %v, sk: %s, pk: %s\n", - n.shardId, - hex.EncodeToString(skBuff), - hex.EncodeToString(pkBuff), - ) - _ = n.node.Start() - _ = n.node.P2PBootstrap() - } - } + for _, nodeList := range nodes { + for _, n := range nodeList { + skBuff, _ := n.sk.ToByteArray() + pkBuff, _ := n.pk.ToByteArray() + + fmt.Printf("Shard ID: %v, sk: %s, pk: %s\n", + n.shardId, + hex.EncodeToString(skBuff), + hex.EncodeToString(pkBuff), + ) + _ = n.node.Start() + _ = n.node.P2PBootstrap() + } + } } func createNodes( - numOfShards int, - nodesPerShard int, - serviceID string, + numOfShards int, + nodesPerShard int, + serviceID string, ) map[uint32][]*testNode { - //first node generated will have is pk belonging to firstSkShardId - numMetaChainNodes := 1 - nodes := make(map[uint32][]*testNode) - cp := createCryptoParams(nodesPerShard, numMetaChainNodes, numOfShards) - keysMap := pubKeysMapFromKeysMap(cp.keys) - validatorsMap := genValidatorsFromPubKeys(keysMap) - - for shardId := 0; shardId < numOfShards; shardId++ { - shardNodes := make([]*testNode, nodesPerShard) - - for j := 0; j < nodesPerShard; j++ { - testNode := &testNode{ - dPool: createTestShardDataPool(), - shardId: uint32(shardId), - } - - shardCoordinator, _ := sharding.NewMultiShardCoordinator(uint32(numOfShards), uint32(shardId)) - nodesCoordinator, _ := sharding.NewIndexHashedNodesCoordinator( - 1, - 1, - testHasher, - uint32(shardId), - uint32(numOfShards), - validatorsMap, - ) - - accntAdapter := createAccountsDB() - n, mes, resFinder, blkProcessor, txProcessor, transactionCoordinator, scrForwarder, blkc, store := createNetNode( - testNode.dPool, - accntAdapter, - shardCoordinator, - nodesCoordinator, - testNode.shardId, - serviceID, - cp, - j, - ) - _ = n.CreateShardedStores() - - KeyPair := cp.keys[uint32(shardId)][j] - testNode.node = n - testNode.sk = KeyPair.sk - testNode.messenger = mes - testNode.pk = KeyPair.pk - testNode.resFinder = resFinder - testNode.accntState = accntAdapter - testNode.blkProcessor = blkProcessor - testNode.txProcessor = txProcessor - testNode.scrForwarder = scrForwarder - testNode.blkc = blkc - testNode.store = store - testNode.txCoordinator = transactionCoordinator - testNode.dPool.Headers().RegisterHandler(func(key []byte) { - atomic.AddInt32(&testNode.headersRecv, 1) - testNode.mutHeaders.Lock() - testNode.headersHashes = append(testNode.headersHashes, key) - header, _ := testNode.dPool.Headers().Peek(key) - testNode.headers = append(testNode.headers, header.(data.HeaderHandler)) 
- testNode.mutHeaders.Unlock() - }) - testNode.dPool.MiniBlocks().RegisterHandler(func(key []byte) { - atomic.AddInt32(&testNode.miniblocksRecv, 1) - testNode.mutMiniblocks.Lock() - testNode.miniblocksHashes = append(testNode.miniblocksHashes, key) - miniblock, _ := testNode.dPool.MiniBlocks().Peek(key) - testNode.miniblocks = append(testNode.miniblocks, miniblock.(*dataBlock.MiniBlock)) - testNode.mutMiniblocks.Unlock() - }) - testNode.dPool.MetaBlocks().RegisterHandler(func(key []byte) { - fmt.Printf("Got metachain header: %v\n", base64.StdEncoding.EncodeToString(key)) - atomic.AddInt32(&testNode.metachainHdrRecv, 1) - }) - testNode.dPool.Transactions().RegisterHandler(func(key []byte) { - atomic.AddInt32(&testNode.txsRecv, 1) - }) - testNode.broadcastMessenger, _ = sposFactory.GetBroadcastMessenger( - testMarshalizer, - mes, - shardCoordinator, - KeyPair.sk, - &singlesig.SchnorrSigner{}, - ) - - shardNodes[j] = testNode - } - - nodes[uint32(shardId)] = shardNodes - } - - metaNodes := make([]*testNode, numMetaChainNodes) - for i := 0; i < numMetaChainNodes; i++ { - shardCoordinatorMeta, _ := sharding.NewMultiShardCoordinator(uint32(numOfShards), sharding.MetachainShardId) - nodesCoordinator, _ := sharding.NewIndexHashedNodesCoordinator( - 1, - 1, - testHasher, - sharding.MetachainShardId, - uint32(numOfShards), - validatorsMap, - ) - - metaNodes[i] = createMetaNetNode( - createTestMetaDataPool(), - createAccountsDB(), - shardCoordinatorMeta, - nodesCoordinator, - serviceID, - cp, - i, - ) - } - - nodes[sharding.MetachainShardId] = metaNodes - - return nodes + //first node generated will have is pk belonging to firstSkShardId + numMetaChainNodes := 1 + nodes := make(map[uint32][]*testNode) + cp := createCryptoParams(nodesPerShard, numMetaChainNodes, numOfShards) + keysMap := pubKeysMapFromKeysMap(cp.keys) + validatorsMap := genValidatorsFromPubKeys(keysMap) + + for shardId := 0; shardId < numOfShards; shardId++ { + shardNodes := make([]*testNode, nodesPerShard) + + for j := 0; j < nodesPerShard; j++ { + testNode := &testNode{ + dPool: createTestShardDataPool(), + shardId: uint32(shardId), + } + + shardCoordinator, _ := sharding.NewMultiShardCoordinator(uint32(numOfShards), uint32(shardId)) + nodesCoordinator, _ := sharding.NewIndexHashedNodesCoordinator( + 1, + 1, + testHasher, + uint32(shardId), + uint32(numOfShards), + validatorsMap, + ) + + accntAdapter := createAccountsDB() + n, mes, resFinder, blkProcessor, txProcessor, transactionCoordinator, scrForwarder, blkc, store := createNetNode( + testNode.dPool, + accntAdapter, + shardCoordinator, + nodesCoordinator, + testNode.shardId, + serviceID, + cp, + j, + ) + _ = n.CreateShardedStores() + + KeyPair := cp.keys[uint32(shardId)][j] + testNode.node = n + testNode.sk = KeyPair.sk + testNode.messenger = mes + testNode.pk = KeyPair.pk + testNode.resFinder = resFinder + testNode.accntState = accntAdapter + testNode.blkProcessor = blkProcessor + testNode.txProcessor = txProcessor + testNode.scrForwarder = scrForwarder + testNode.blkc = blkc + testNode.store = store + testNode.txCoordinator = transactionCoordinator + testNode.dPool.Headers().RegisterHandler(func(key []byte) { + atomic.AddInt32(&testNode.headersRecv, 1) + testNode.mutHeaders.Lock() + testNode.headersHashes = append(testNode.headersHashes, key) + header, _ := testNode.dPool.Headers().Peek(key) + testNode.headers = append(testNode.headers, header.(data.HeaderHandler)) + testNode.mutHeaders.Unlock() + }) + testNode.dPool.MiniBlocks().RegisterHandler(func(key []byte) { + 
atomic.AddInt32(&testNode.miniblocksRecv, 1) + testNode.mutMiniblocks.Lock() + testNode.miniblocksHashes = append(testNode.miniblocksHashes, key) + miniblock, _ := testNode.dPool.MiniBlocks().Peek(key) + testNode.miniblocks = append(testNode.miniblocks, miniblock.(*dataBlock.MiniBlock)) + testNode.mutMiniblocks.Unlock() + }) + testNode.dPool.MetaBlocks().RegisterHandler(func(key []byte) { + fmt.Printf("Got metachain header: %v\n", base64.StdEncoding.EncodeToString(key)) + atomic.AddInt32(&testNode.metachainHdrRecv, 1) + }) + testNode.dPool.Transactions().RegisterHandler(func(key []byte) { + atomic.AddInt32(&testNode.txsRecv, 1) + }) + testNode.broadcastMessenger, _ = sposFactory.GetBroadcastMessenger( + testMarshalizer, + mes, + shardCoordinator, + KeyPair.sk, + &singlesig.SchnorrSigner{}, + ) + + shardNodes[j] = testNode + } + + nodes[uint32(shardId)] = shardNodes + } + + metaNodes := make([]*testNode, numMetaChainNodes) + for i := 0; i < numMetaChainNodes; i++ { + shardCoordinatorMeta, _ := sharding.NewMultiShardCoordinator(uint32(numOfShards), sharding.MetachainShardId) + nodesCoordinator, _ := sharding.NewIndexHashedNodesCoordinator( + 1, + 1, + testHasher, + sharding.MetachainShardId, + uint32(numOfShards), + validatorsMap, + ) + + metaNodes[i] = createMetaNetNode( + createTestMetaDataPool(), + createAccountsDB(), + shardCoordinatorMeta, + nodesCoordinator, + serviceID, + cp, + i, + ) + } + + nodes[sharding.MetachainShardId] = metaNodes + + return nodes } func createTestMetaChain() data.ChainHandler { - cfgCache := storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache} - badBlockCache, _ := storageUnit.NewCache(cfgCache.Type, cfgCache.Size, cfgCache.Shards) - metaChain, _ := blockchain.NewMetaChain( - badBlockCache, - ) - metaChain.GenesisBlock = &dataBlock.MetaBlock{} - - return metaChain + cfgCache := storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache} + badBlockCache, _ := storageUnit.NewCache(cfgCache.Type, cfgCache.Size, cfgCache.Shards) + metaChain, _ := blockchain.NewMetaChain( + badBlockCache, + ) + metaChain.GenesisBlock = &dataBlock.MetaBlock{} + + return metaChain } func createTestMetaStore(coordinator sharding.Coordinator) dataRetriever.StorageService { - store := dataRetriever.NewChainStorer() - store.AddStorer(dataRetriever.MetaBlockUnit, createMemUnit()) - store.AddStorer(dataRetriever.MetaHdrNonceHashDataUnit, createMemUnit()) - store.AddStorer(dataRetriever.BlockHeaderUnit, createMemUnit()) - for i := uint32(0); i < coordinator.NumberOfShards(); i++ { - store.AddStorer(dataRetriever.ShardHdrNonceHashDataUnit+dataRetriever.UnitType(i), createMemUnit()) - } - - return store + store := dataRetriever.NewChainStorer() + store.AddStorer(dataRetriever.MetaBlockUnit, createMemUnit()) + store.AddStorer(dataRetriever.MetaHdrNonceHashDataUnit, createMemUnit()) + store.AddStorer(dataRetriever.BlockHeaderUnit, createMemUnit()) + for i := uint32(0); i < coordinator.NumberOfShards(); i++ { + store.AddStorer(dataRetriever.ShardHdrNonceHashDataUnit+dataRetriever.UnitType(i), createMemUnit()) + } + + return store } func createTestMetaDataPool() dataRetriever.MetaPoolsHolder { - cacherCfg := storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache} - metaBlocks, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) + cacherCfg := storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache} + metaBlocks, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - cacherCfg = storageUnit.CacheConfig{Size: 10000, Type: 
storageUnit.LRUCache} - miniblockHashes, _ := shardedData.NewShardedData(cacherCfg) + cacherCfg = storageUnit.CacheConfig{Size: 10000, Type: storageUnit.LRUCache} + miniblockHashes, _ := shardedData.NewShardedData(cacherCfg) - cacherCfg = storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache} - shardHeaders, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) + cacherCfg = storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache} + shardHeaders, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - headersNoncesCacher, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - headersNonces, _ := dataPool.NewNonceSyncMapCacher(headersNoncesCacher, uint64ByteSlice.NewBigEndianConverter()) + headersNoncesCacher, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) + headersNonces, _ := dataPool.NewNonceSyncMapCacher(headersNoncesCacher, uint64ByteSlice.NewBigEndianConverter()) - dPool, _ := dataPool.NewMetaDataPool( - metaBlocks, - miniblockHashes, - shardHeaders, - headersNonces, - ) + dPool, _ := dataPool.NewMetaDataPool( + metaBlocks, + miniblockHashes, + shardHeaders, + headersNonces, + ) - return dPool + return dPool } func createMetaNetNode( - dPool dataRetriever.MetaPoolsHolder, - accntAdapter state.AccountsAdapter, - shardCoordinator sharding.Coordinator, - nodesCoordinator sharding.NodesCoordinator, - initialAddr string, - params *cryptoParams, - keysIndex int, + dPool dataRetriever.MetaPoolsHolder, + accntAdapter state.AccountsAdapter, + shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, + initialAddr string, + params *cryptoParams, + keysIndex int, ) *testNode { - tn := testNode{} - - tn.messenger = createMessengerWithKadDht(context.Background(), initialAddr) - keyPair := params.keys[sharding.MetachainShardId][keysIndex] - pkBuff, _ := keyPair.pk.ToByteArray() - fmt.Printf("Found pk: %s\n", hex.EncodeToString(pkBuff)) - - tn.blkc = createTestMetaChain() - store := createTestMetaStore(shardCoordinator) - uint64Converter := uint64ByteSlice.NewBigEndianConverter() - - interceptorContainerFactory, _ := metaProcess.NewInterceptorsContainerFactory( - shardCoordinator, - nodesCoordinator, - tn.messenger, - store, - testMarshalizer, - testHasher, - testMultiSig, - dPool, - ) - interceptorsContainer, err := interceptorContainerFactory.Create() - if err != nil { - fmt.Println(err.Error()) - } - - resolversContainerFactory, _ := metafactoryDataRetriever.NewResolversContainerFactory( - shardCoordinator, - tn.messenger, - store, - testMarshalizer, - dPool, - uint64Converter, - ) - resolversContainer, _ := resolversContainerFactory.Create() - resolvers, _ := containers.NewResolversFinder(resolversContainer, shardCoordinator) - - requestHandler, _ := requestHandlers.NewMetaResolverRequestHandler(resolvers, factory.ShardHeadersForMetachainTopic) - - genesisBlocks := createGenesisBlocks(shardCoordinator) - blkProc, _ := block.NewMetaProcessor( - &mock.ServiceContainerMock{}, - accntAdapter, - dPool, - &mock.ForkDetectorMock{ - AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeader data.HeaderHandler, finalHeaderHash []byte) error { - return nil - }, - GetHighestFinalBlockNonceCalled: func() uint64 { - return 0 - }, - ProbableHighestNonceCalled: func() uint64 { - return 0 - }, - }, - shardCoordinator, - nodesCoordinator, - &mock.SpecialAddressHandlerMock{ - ShardCoordinator: shardCoordinator, - AdrConv: 
testAddressConverter, - }, - testHasher, - testMarshalizer, - store, - genesisBlocks, - requestHandler, - uint64Converter, - ) - - _ = tn.blkc.SetGenesisHeader(genesisBlocks[sharding.MetachainShardId]) - - tn.blkProcessor = blkProc - - tn.broadcastMessenger, _ = sposFactory.GetBroadcastMessenger( - testMarshalizer, - tn.messenger, - shardCoordinator, - keyPair.sk, - params.singleSigner, - ) - - n, err := node.NewNode( - node.WithMessenger(tn.messenger), - node.WithMarshalizer(testMarshalizer), - node.WithHasher(testHasher), - node.WithMetaDataPool(dPool), - node.WithAddressConverter(testAddressConverter), - node.WithAccountsAdapter(accntAdapter), - node.WithKeyGen(params.keyGen), - node.WithShardCoordinator(shardCoordinator), - node.WithBlockChain(tn.blkc), - node.WithUint64ByteSliceConverter(uint64Converter), - node.WithMultiSigner(testMultiSig), - node.WithSingleSigner(params.singleSigner), - node.WithPrivKey(keyPair.sk), - node.WithPubKey(keyPair.pk), - node.WithInterceptorsContainer(interceptorsContainer), - node.WithResolversFinder(resolvers), - node.WithBlockProcessor(tn.blkProcessor), - node.WithDataStore(store), - node.WithSyncer(&mock.SyncTimerMock{}), - ) - if err != nil { - fmt.Println(err.Error()) - return nil - } - - tn.node = n - tn.sk = keyPair.sk - tn.pk = keyPair.pk - tn.accntState = accntAdapter - tn.shardId = sharding.MetachainShardId - - dPool.MetaChainBlocks().RegisterHandler(func(key []byte) { - atomic.AddInt32(&tn.metachainHdrRecv, 1) - }) - dPool.ShardHeaders().RegisterHandler(func(key []byte) { - atomic.AddInt32(&tn.headersRecv, 1) - tn.mutHeaders.Lock() - metaHeader, _ := dPool.ShardHeaders().Peek(key) - tn.headers = append(tn.headers, metaHeader.(data.HeaderHandler)) - tn.mutHeaders.Unlock() - }) - - return &tn + tn := testNode{} + + tn.messenger = createMessengerWithKadDht(context.Background(), initialAddr) + keyPair := params.keys[sharding.MetachainShardId][keysIndex] + pkBuff, _ := keyPair.pk.ToByteArray() + fmt.Printf("Found pk: %s\n", hex.EncodeToString(pkBuff)) + + tn.blkc = createTestMetaChain() + store := createTestMetaStore(shardCoordinator) + uint64Converter := uint64ByteSlice.NewBigEndianConverter() + + interceptorContainerFactory, _ := metaProcess.NewInterceptorsContainerFactory( + shardCoordinator, + nodesCoordinator, + tn.messenger, + store, + testMarshalizer, + testHasher, + testMultiSig, + dPool, + ) + interceptorsContainer, err := interceptorContainerFactory.Create() + if err != nil { + fmt.Println(err.Error()) + } + + resolversContainerFactory, _ := metafactoryDataRetriever.NewResolversContainerFactory( + shardCoordinator, + tn.messenger, + store, + testMarshalizer, + dPool, + uint64Converter, + ) + resolversContainer, _ := resolversContainerFactory.Create() + resolvers, _ := containers.NewResolversFinder(resolversContainer, shardCoordinator) + + requestHandler, _ := requestHandlers.NewMetaResolverRequestHandler(resolvers, factory.ShardHeadersForMetachainTopic) + + genesisBlocks := createGenesisBlocks(shardCoordinator) + blkProc, _ := block.NewMetaProcessor( + &mock.ServiceContainerMock{}, + accntAdapter, + dPool, + &mock.ForkDetectorMock{ + AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeader data.HeaderHandler, finalHeaderHash []byte) error { + return nil + }, + GetHighestFinalBlockNonceCalled: func() uint64 { + return 0 + }, + ProbableHighestNonceCalled: func() uint64 { + return 0 + }, + }, + shardCoordinator, + nodesCoordinator, + &mock.SpecialAddressHandlerMock{ + ShardCoordinator: 
shardCoordinator, + AdrConv: testAddressConverter, + }, + testHasher, + testMarshalizer, + store, + genesisBlocks, + requestHandler, + uint64Converter, + ) + + _ = tn.blkc.SetGenesisHeader(genesisBlocks[sharding.MetachainShardId]) + + tn.blkProcessor = blkProc + + tn.broadcastMessenger, _ = sposFactory.GetBroadcastMessenger( + testMarshalizer, + tn.messenger, + shardCoordinator, + keyPair.sk, + params.singleSigner, + ) + + n, err := node.NewNode( + node.WithMessenger(tn.messenger), + node.WithMarshalizer(testMarshalizer), + node.WithHasher(testHasher), + node.WithMetaDataPool(dPool), + node.WithAddressConverter(testAddressConverter), + node.WithAccountsAdapter(accntAdapter), + node.WithKeyGen(params.keyGen), + node.WithShardCoordinator(shardCoordinator), + node.WithBlockChain(tn.blkc), + node.WithUint64ByteSliceConverter(uint64Converter), + node.WithMultiSigner(testMultiSig), + node.WithSingleSigner(params.singleSigner), + node.WithPrivKey(keyPair.sk), + node.WithPubKey(keyPair.pk), + node.WithInterceptorsContainer(interceptorsContainer), + node.WithResolversFinder(resolvers), + node.WithBlockProcessor(tn.blkProcessor), + node.WithDataStore(store), + node.WithSyncer(&mock.SyncTimerMock{}), + ) + if err != nil { + fmt.Println(err.Error()) + return nil + } + + tn.node = n + tn.sk = keyPair.sk + tn.pk = keyPair.pk + tn.accntState = accntAdapter + tn.shardId = sharding.MetachainShardId + + dPool.MetaChainBlocks().RegisterHandler(func(key []byte) { + atomic.AddInt32(&tn.metachainHdrRecv, 1) + }) + dPool.ShardHeaders().RegisterHandler(func(key []byte) { + atomic.AddInt32(&tn.headersRecv, 1) + tn.mutHeaders.Lock() + metaHeader, _ := dPool.ShardHeaders().Peek(key) + tn.headers = append(tn.headers, metaHeader.(data.HeaderHandler)) + tn.mutHeaders.Unlock() + }) + + return &tn } func createGenesisBlocks(shardCoordinator sharding.Coordinator) map[uint32]data.HeaderHandler { - genesisBlocks := make(map[uint32]data.HeaderHandler) - for shardId := uint32(0); shardId < shardCoordinator.NumberOfShards(); shardId++ { - genesisBlocks[shardId] = createGenesisBlock(shardId) - } + genesisBlocks := make(map[uint32]data.HeaderHandler) + for shardId := uint32(0); shardId < shardCoordinator.NumberOfShards(); shardId++ { + genesisBlocks[shardId] = createGenesisBlock(shardId) + } - genesisBlocks[sharding.MetachainShardId] = createGenesisMetaBlock() + genesisBlocks[sharding.MetachainShardId] = createGenesisMetaBlock() - return genesisBlocks + return genesisBlocks } func createGenesisBlock(shardId uint32) *dataBlock.Header { - return &dataBlock.Header{ - Nonce: 0, - Round: 0, - Signature: rootHash, - RandSeed: rootHash, - PrevRandSeed: rootHash, - ShardId: shardId, - PubKeysBitmap: rootHash, - RootHash: rootHash, - PrevHash: rootHash, - } + return &dataBlock.Header{ + Nonce: 0, + Round: 0, + Signature: rootHash, + RandSeed: rootHash, + PrevRandSeed: rootHash, + ShardId: shardId, + PubKeysBitmap: rootHash, + RootHash: rootHash, + PrevHash: rootHash, + } } func createGenesisMetaBlock() *dataBlock.MetaBlock { - return &dataBlock.MetaBlock{ - Nonce: 0, - Round: 0, - Signature: rootHash, - RandSeed: rootHash, - PrevRandSeed: rootHash, - PubKeysBitmap: rootHash, - RootHash: rootHash, - PrevHash: rootHash, - } + return &dataBlock.MetaBlock{ + Nonce: 0, + Round: 0, + Signature: rootHash, + RandSeed: rootHash, + PrevRandSeed: rootHash, + PubKeysBitmap: rootHash, + RootHash: rootHash, + PrevHash: rootHash, + } } func createMintingForSenders( - nodes []*testNode, - senderShard uint32, - sendersPublicKeys [][]byte, - value 
*big.Int, + nodes []*testNode, + senderShard uint32, + sendersPublicKeys [][]byte, + value *big.Int, ) { - for _, n := range nodes { - //only sender shard nodes will be minted - if n.shardId != senderShard { - continue - } + for _, n := range nodes { + //only sender shard nodes will be minted + if n.shardId != senderShard { + continue + } - for _, pk := range sendersPublicKeys { - adr, _ := testAddressConverter.CreateAddressFromPublicKeyBytes(pk) - account, _ := n.accntState.GetAccountWithJournal(adr) - _ = account.(*state.Account).SetBalanceWithJournal(value) - } + for _, pk := range sendersPublicKeys { + adr, _ := testAddressConverter.CreateAddressFromPublicKeyBytes(pk) + account, _ := n.accntState.GetAccountWithJournal(adr) + _ = account.(*state.Account).SetBalanceWithJournal(value) + } - _, _ = n.accntState.Commit() - } + _, _ = n.accntState.Commit() + } } func createVMAndBlockchainHook(accnts state.AccountsAdapter) (vmcommon.VMExecutionHandler, *hooks.VMAccountsDB) { - blockChainHook, _ := hooks.NewVMAccountsDB(accnts, addrConv) - vm, _ := mock.NewOneSCExecutorMockVM(blockChainHook, testHasher) - vm.GasForOperation = uint64(opGas) + blockChainHook, _ := hooks.NewVMAccountsDB(accnts, addrConv) + vm, _ := mock.NewOneSCExecutorMockVM(blockChainHook, testHasher) + vm.GasForOperation = uint64(opGas) - return vm, blockChainHook + return vm, blockChainHook } diff --git a/integrationTests/multiShard/transaction/interceptedResolvedBulkTx_test.go b/integrationTests/multiShard/transaction/interceptedResolvedBulkTx_test.go index 7e2b1cbacf2..ec3afcd6772 100644 --- a/integrationTests/multiShard/transaction/interceptedResolvedBulkTx_test.go +++ b/integrationTests/multiShard/transaction/interceptedResolvedBulkTx_test.go @@ -1,21 +1,21 @@ package transaction import ( - "context" - "encoding/hex" - "fmt" - "math/big" - "sync" - "sync/atomic" - "testing" - "time" - - "github.com/ElrondNetwork/elrond-go/dataRetriever" - "github.com/ElrondNetwork/elrond-go/dataRetriever/resolvers" - "github.com/ElrondNetwork/elrond-go/integrationTests" - "github.com/ElrondNetwork/elrond-go/process/factory" - "github.com/ElrondNetwork/elrond-go/sharding" - "github.com/stretchr/testify/assert" + "context" + "encoding/hex" + "fmt" + "math/big" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/dataRetriever/resolvers" + "github.com/ElrondNetwork/elrond-go/integrationTests" + "github.com/ElrondNetwork/elrond-go/process/factory" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/stretchr/testify/assert" ) // TestNode_InterceptorBulkTxsSentFromSameShardShouldRemainInSenderShard tests what happens when @@ -23,66 +23,66 @@ import ( // Node 0 is part of the shard 0 and its public key is mapped also in shard 0. // Transactions should spread only in shard 0. 
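
As a rough intuition for the "its public key is mapped also in shard 0" statement above, here is a minimal, self-contained sketch of deterministic shard assignment. This is only an illustration under an assumed toy rule (last byte of the key modulo the shard count); it is not the sharding.Coordinator implementation used by the tests below.

package main

import "fmt"

// toyShardID maps a public key to a shard by taking its last byte modulo the
// number of shards. This simplified rule is NOT the production coordinator's
// algorithm; it only conveys that a key deterministically belongs to exactly
// one shard, which is what the test's assertions rely on.
func toyShardID(pubKey []byte, numShards uint32) uint32 {
	if len(pubKey) == 0 || numShards == 0 {
		return 0
	}
	return uint32(pubKey[len(pubKey)-1]) % numShards
}

func main() {
	pk := []byte{0x12, 0x34, 0x56, 0x00} // last byte 0x00 -> shard 0 for any shard count
	fmt.Println(toyShardID(pk, 6))       // prints 0, matching the 6-shard setup used below
}

In the test that follows, the bulk transactions are sent from node 0, whose signing key belongs to shard 0, so only the three shard-0 nodes are expected to intercept them.
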
func TestNode_InterceptorBulkTxsSentFromSameShardShouldRemainInSenderShard(t *testing.T) { - if testing.Short() { - t.Skip("this is not a short test") - } - - numOfShards := 6 - nodesPerShard := 3 - numMetachainNodes := 0 - - advertiser := integrationTests.CreateMessengerWithKadDht(context.Background(), "") - _ = advertiser.Bootstrap() - - nodes := integrationTests.CreateNodes( - numOfShards, - nodesPerShard, - numMetachainNodes, - integrationTests.GetConnectableAddress(advertiser), - ) - integrationTests.CreateAccountForNodes(nodes) - integrationTests.DisplayAndStartNodes(nodes) - - defer func() { - _ = advertiser.Close() - for _, n := range nodes { - _ = n.Node.Stop() - } - }() - - txToSend := 100 - - generateCoordinator, _ := sharding.NewMultiShardCoordinator(uint32(numOfShards), 5) - - fmt.Println("Generating and broadcasting transactions...") - _, pkInShardFive, _ := integrationTests.GenerateSkAndPkInShard(generateCoordinator, 5) - pkBytes, _ := pkInShardFive.ToByteArray() - addrInShardFive := hex.EncodeToString(pkBytes) - _ = nodes[0].Node.GenerateAndSendBulkTransactions(addrInShardFive, big.NewInt(1), uint64(txToSend)) - time.Sleep(time.Second * 10) - - //since there is a slight chance that some transactions get lost (peer to slow, queue full, validators throttling...) - //we should get the max transactions received - maxTxReceived := int32(0) - for _, n := range nodes { - txRecv := atomic.LoadInt32(&n.CounterTxRecv) - - if txRecv > maxTxReceived { - maxTxReceived = txRecv - } - } - - assert.True(t, maxTxReceived > 0) - - //only sender shard (all 3 nodes from shard 0) have the transactions - for _, n := range nodes { - if n.ShardCoordinator.SelfId() == 0 { - assert.Equal(t, maxTxReceived, atomic.LoadInt32(&n.CounterTxRecv)) - continue - } - - assert.Equal(t, int32(0), atomic.LoadInt32(&n.CounterTxRecv)) - } + if testing.Short() { + t.Skip("this is not a short test") + } + + numOfShards := 6 + nodesPerShard := 3 + numMetachainNodes := 0 + + advertiser := integrationTests.CreateMessengerWithKadDht(context.Background(), "") + _ = advertiser.Bootstrap() + + nodes := integrationTests.CreateNodes( + numOfShards, + nodesPerShard, + numMetachainNodes, + integrationTests.GetConnectableAddress(advertiser), + ) + integrationTests.CreateAccountForNodes(nodes) + integrationTests.DisplayAndStartNodes(nodes) + + defer func() { + _ = advertiser.Close() + for _, n := range nodes { + _ = n.Node.Stop() + } + }() + + txToSend := 100 + + generateCoordinator, _ := sharding.NewMultiShardCoordinator(uint32(numOfShards), 5) + + fmt.Println("Generating and broadcasting transactions...") + _, pkInShardFive, _ := integrationTests.GenerateSkAndPkInShard(generateCoordinator, 5) + pkBytes, _ := pkInShardFive.ToByteArray() + addrInShardFive := hex.EncodeToString(pkBytes) + _ = nodes[0].Node.GenerateAndSendBulkTransactions(addrInShardFive, big.NewInt(1), uint64(txToSend)) + time.Sleep(time.Second * 10) + + //since there is a slight chance that some transactions get lost (peer to slow, queue full, validators throttling...) 
+ //we should get the max transactions received + maxTxReceived := int32(0) + for _, n := range nodes { + txRecv := atomic.LoadInt32(&n.CounterTxRecv) + + if txRecv > maxTxReceived { + maxTxReceived = txRecv + } + } + + assert.True(t, maxTxReceived > 0) + + //only sender shard (all 3 nodes from shard 0) have the transactions + for _, n := range nodes { + if n.ShardCoordinator.SelfId() == 0 { + assert.Equal(t, maxTxReceived, atomic.LoadInt32(&n.CounterTxRecv)) + continue + } + + assert.Equal(t, int32(0), atomic.LoadInt32(&n.CounterTxRecv)) + } } // TestNode_InterceptorBulkTxsSentFromOtherShardShouldBeRoutedInSenderShard tests what happens when @@ -90,74 +90,74 @@ func TestNode_InterceptorBulkTxsSentFromSameShardShouldRemainInSenderShard(t *te // Node 0 is part of the shard 0 and its public key is mapped in shard 4. // Transactions should spread only in shard 4. func TestNode_InterceptorBulkTxsSentFromOtherShardShouldBeRoutedInSenderShard(t *testing.T) { - if testing.Short() { - t.Skip("this is not a short test") - } - - numOfShards := 6 - nodesPerShard := 3 - numMetachainNodes := 0 - firstSkInShard := uint32(4) - - advertiser := integrationTests.CreateMessengerWithKadDht(context.Background(), "") - _ = advertiser.Bootstrap() - - nodes := integrationTests.CreateNodes( - numOfShards, - nodesPerShard, - numMetachainNodes, - integrationTests.GetConnectableAddress(advertiser), - ) - nodes[0] = integrationTests.NewTestProcessorNode(uint32(numOfShards), 0, firstSkInShard, integrationTests.GetConnectableAddress(advertiser)) - integrationTests.CreateAccountForNodes(nodes) - integrationTests.DisplayAndStartNodes(nodes) - - defer func() { - _ = advertiser.Close() - for _, n := range nodes { - _ = n.Node.Stop() - } - }() - - txToSend := 100 - - generateCoordinator, _ := sharding.NewMultiShardCoordinator(uint32(numOfShards), 5) - - _, pkInShardFive, _ := integrationTests.GenerateSkAndPkInShard(generateCoordinator, 5) - pkBytes, _ := pkInShardFive.ToByteArray() - addrInShardFive := hex.EncodeToString(pkBytes) - - _ = nodes[0].Node.GenerateAndSendBulkTransactions(addrInShardFive, big.NewInt(1), uint64(txToSend)) - - //display, can be removed - for i := 0; i < 10; i++ { - time.Sleep(time.Second) - - fmt.Println(integrationTests.MakeDisplayTable(nodes)) - } - - //since there is a slight chance that some transactions get lost (peer to slow, queue full...) 
- //we should get the max transactions received - maxTxReceived := int32(0) - for _, n := range nodes { - txRecv := atomic.LoadInt32(&n.CounterTxRecv) - - if txRecv > maxTxReceived { - maxTxReceived = txRecv - } - } - - assert.True(t, maxTxReceived > 0) - - //only sender shard (all 3 nodes from shard firstSkInShard) has the transactions - for _, n := range nodes { - if n.ShardCoordinator.SelfId() == firstSkInShard { - assert.Equal(t, atomic.LoadInt32(&n.CounterTxRecv), maxTxReceived) - continue - } - - assert.Equal(t, atomic.LoadInt32(&n.CounterTxRecv), int32(0)) - } + if testing.Short() { + t.Skip("this is not a short test") + } + + numOfShards := 6 + nodesPerShard := 3 + numMetachainNodes := 0 + firstSkInShard := uint32(4) + + advertiser := integrationTests.CreateMessengerWithKadDht(context.Background(), "") + _ = advertiser.Bootstrap() + + nodes := integrationTests.CreateNodes( + numOfShards, + nodesPerShard, + numMetachainNodes, + integrationTests.GetConnectableAddress(advertiser), + ) + nodes[0] = integrationTests.NewTestProcessorNode(uint32(numOfShards), 0, firstSkInShard, integrationTests.GetConnectableAddress(advertiser)) + integrationTests.CreateAccountForNodes(nodes) + integrationTests.DisplayAndStartNodes(nodes) + + defer func() { + _ = advertiser.Close() + for _, n := range nodes { + _ = n.Node.Stop() + } + }() + + txToSend := 100 + + generateCoordinator, _ := sharding.NewMultiShardCoordinator(uint32(numOfShards), 5) + + _, pkInShardFive, _ := integrationTests.GenerateSkAndPkInShard(generateCoordinator, 5) + pkBytes, _ := pkInShardFive.ToByteArray() + addrInShardFive := hex.EncodeToString(pkBytes) + + _ = nodes[0].Node.GenerateAndSendBulkTransactions(addrInShardFive, big.NewInt(1), uint64(txToSend)) + + //display, can be removed + for i := 0; i < 10; i++ { + time.Sleep(time.Second) + + fmt.Println(integrationTests.MakeDisplayTable(nodes)) + } + + //since there is a slight chance that some transactions get lost (peer to slow, queue full...) 
+ //we should get the max transactions received + maxTxReceived := int32(0) + for _, n := range nodes { + txRecv := atomic.LoadInt32(&n.CounterTxRecv) + + if txRecv > maxTxReceived { + maxTxReceived = txRecv + } + } + + assert.True(t, maxTxReceived > 0) + + //only sender shard (all 3 nodes from shard firstSkInShard) has the transactions + for _, n := range nodes { + if n.ShardCoordinator.SelfId() == firstSkInShard { + assert.Equal(t, atomic.LoadInt32(&n.CounterTxRecv), maxTxReceived) + continue + } + + assert.Equal(t, atomic.LoadInt32(&n.CounterTxRecv), int32(0)) + } } // TestNode_InterceptorBulkTxsSentFromOtherShardShouldBeRoutedInSenderShardAndRequestShouldWork tests what happens when @@ -169,183 +169,183 @@ func TestNode_InterceptorBulkTxsSentFromOtherShardShouldBeRoutedInSenderShard(t // Transactions requested by another shard (2 for example) will not store the received transactions // (interceptors will filter them out) func TestNode_InterceptorBulkTxsSentFromOtherShardShouldBeRoutedInSenderShardAndRequestShouldWork(t *testing.T) { - if testing.Short() { - t.Skip("this is not a short test") - } - - numOfShards := 6 - nodesPerShard := 3 - numMetachainNodes := 0 - firstSkInShard := uint32(4) - - advertiser := integrationTests.CreateMessengerWithKadDht(context.Background(), "") - _ = advertiser.Bootstrap() - - nodes := integrationTests.CreateNodes( - numOfShards, - nodesPerShard, - numMetachainNodes, - integrationTests.GetConnectableAddress(advertiser), - ) - nodes[0] = integrationTests.NewTestProcessorNode(uint32(numOfShards), 0, firstSkInShard, integrationTests.GetConnectableAddress(advertiser)) - integrationTests.CreateAccountForNodes(nodes) - integrationTests.DisplayAndStartNodes(nodes) - - defer func() { - _ = advertiser.Close() - for _, n := range nodes { - _ = n.Node.Stop() - } - }() - - txToSend := 100 - - shardRequester := uint32(5) - randomShard := uint32(2) - - generateCoordinator, _ := sharding.NewMultiShardCoordinator(uint32(numOfShards), shardRequester) - - _, pkInShardFive, _ := integrationTests.GenerateSkAndPkInShard(generateCoordinator, 5) - pkBytes, _ := pkInShardFive.ToByteArray() - addrInShardFive := hex.EncodeToString(pkBytes) - - mutGeneratedTxHashes := sync.Mutex{} - generatedTxHashes := make([][]byte, 0) - //wire a new hook for generated txs on a node in sender shard to populate tx hashes generated - for _, n := range nodes { - if n.ShardCoordinator.SelfId() == firstSkInShard { - n.ShardDataPool.Transactions().RegisterHandler(func(key []byte) { - mutGeneratedTxHashes.Lock() - generatedTxHashes = append(generatedTxHashes, key) - mutGeneratedTxHashes.Unlock() - }) - } - } - - _ = nodes[0].Node.GenerateAndSendBulkTransactions(addrInShardFive, big.NewInt(1), uint64(txToSend)) - - fmt.Println("Waiting for senders to fetch generated transactions...") - time.Sleep(time.Second * 10) - - fmt.Println("Request transactions by destination shard nodes...") - //periodically compute and request missing transactions - for i := 0; i < 10; i++ { - integrationTests.ComputeAndRequestMissingTransactions(nodes, generatedTxHashes, firstSkInShard, shardRequester, randomShard) - time.Sleep(time.Second) - - fmt.Println(integrationTests.MakeDisplayTable(nodes)) - } - - //since there is a slight chance that some transactions get lost (peer to slow, queue full...) 
- //we should get the max transactions received - maxTxReceived := int32(0) - for _, n := range nodes { - txRecv := atomic.LoadInt32(&n.CounterTxRecv) - - if txRecv > maxTxReceived { - maxTxReceived = txRecv - } - } - - assert.True(t, maxTxReceived > 0) - - //only sender and destination shards have the transactions - for _, n := range nodes { - isSenderOrDestinationShard := n.ShardCoordinator.SelfId() == firstSkInShard || n.ShardCoordinator.SelfId() == shardRequester - - if isSenderOrDestinationShard { - assert.Equal(t, atomic.LoadInt32(&n.CounterTxRecv), maxTxReceived) - continue - } - - assert.Equal(t, atomic.LoadInt32(&n.CounterTxRecv), int32(0)) - } + if testing.Short() { + t.Skip("this is not a short test") + } + + numOfShards := 6 + nodesPerShard := 3 + numMetachainNodes := 0 + firstSkInShard := uint32(4) + + advertiser := integrationTests.CreateMessengerWithKadDht(context.Background(), "") + _ = advertiser.Bootstrap() + + nodes := integrationTests.CreateNodes( + numOfShards, + nodesPerShard, + numMetachainNodes, + integrationTests.GetConnectableAddress(advertiser), + ) + nodes[0] = integrationTests.NewTestProcessorNode(uint32(numOfShards), 0, firstSkInShard, integrationTests.GetConnectableAddress(advertiser)) + integrationTests.CreateAccountForNodes(nodes) + integrationTests.DisplayAndStartNodes(nodes) + + defer func() { + _ = advertiser.Close() + for _, n := range nodes { + _ = n.Node.Stop() + } + }() + + txToSend := 100 + + shardRequester := uint32(5) + randomShard := uint32(2) + + generateCoordinator, _ := sharding.NewMultiShardCoordinator(uint32(numOfShards), shardRequester) + + _, pkInShardFive, _ := integrationTests.GenerateSkAndPkInShard(generateCoordinator, 5) + pkBytes, _ := pkInShardFive.ToByteArray() + addrInShardFive := hex.EncodeToString(pkBytes) + + mutGeneratedTxHashes := sync.Mutex{} + generatedTxHashes := make([][]byte, 0) + //wire a new hook for generated txs on a node in sender shard to populate tx hashes generated + for _, n := range nodes { + if n.ShardCoordinator.SelfId() == firstSkInShard { + n.ShardDataPool.Transactions().RegisterHandler(func(key []byte) { + mutGeneratedTxHashes.Lock() + generatedTxHashes = append(generatedTxHashes, key) + mutGeneratedTxHashes.Unlock() + }) + } + } + + _ = nodes[0].Node.GenerateAndSendBulkTransactions(addrInShardFive, big.NewInt(1), uint64(txToSend)) + + fmt.Println("Waiting for senders to fetch generated transactions...") + time.Sleep(time.Second * 10) + + fmt.Println("Request transactions by destination shard nodes...") + //periodically compute and request missing transactions + for i := 0; i < 10; i++ { + integrationTests.ComputeAndRequestMissingTransactions(nodes, generatedTxHashes, firstSkInShard, shardRequester, randomShard) + time.Sleep(time.Second) + + fmt.Println(integrationTests.MakeDisplayTable(nodes)) + } + + //since there is a slight chance that some transactions get lost (peer to slow, queue full...) 
+ //we should get the max transactions received + maxTxReceived := int32(0) + for _, n := range nodes { + txRecv := atomic.LoadInt32(&n.CounterTxRecv) + + if txRecv > maxTxReceived { + maxTxReceived = txRecv + } + } + + assert.True(t, maxTxReceived > 0) + + //only sender and destination shards have the transactions + for _, n := range nodes { + isSenderOrDestinationShard := n.ShardCoordinator.SelfId() == firstSkInShard || n.ShardCoordinator.SelfId() == shardRequester + + if isSenderOrDestinationShard { + assert.Equal(t, atomic.LoadInt32(&n.CounterTxRecv), maxTxReceived) + continue + } + + assert.Equal(t, atomic.LoadInt32(&n.CounterTxRecv), int32(0)) + } } func TestNode_InMultiShardEnvRequestTxsShouldRequireOnlyFromTheOtherShard(t *testing.T) { - if testing.Short() { - t.Skip("this is not a short test") - } - - advertiser := integrationTests.CreateMessengerWithKadDht(context.Background(), "") - _ = advertiser.Bootstrap() - - nodes := make([]*integrationTests.TestProcessorNode, 0) - maxShards := 2 - nodesPerShard := 2 - txGenerated := 10 - - defer func() { - _ = advertiser.Close() - for _, n := range nodes { - _ = n.Node.Stop() - } - }() - - //shard 0, requesters - recvTxs := make(map[int]map[string]struct{}) - mutRecvTxs := sync.Mutex{} - for i := 0; i < nodesPerShard; i++ { - dPool := integrationTests.CreateRequesterDataPool(t, recvTxs, &mutRecvTxs, i, uint32(maxShards)) - - tn := integrationTests.NewTestProcessorNodeWithCustomDataPool( - uint32(maxShards), - 0, - 0, - integrationTests.GetConnectableAddress(advertiser), - dPool, - ) - - nodes = append(nodes, tn) - } - - var txHashesGenerated [][]byte - var dPool dataRetriever.PoolsHolder - shardCoordinator, _ := sharding.NewMultiShardCoordinator(uint32(maxShards), 0) - dPool, txHashesGenerated = integrationTests.CreateResolversDataPool(t, txGenerated, 0, 1, shardCoordinator) - //shard 1, resolvers, same data pool, does not matter - for i := 0; i < nodesPerShard; i++ { - tn := integrationTests.NewTestProcessorNodeWithCustomDataPool( - uint32(maxShards), - 1, - 1, - integrationTests.GetConnectableAddress(advertiser), - dPool, - ) - - atomic.StoreInt32(&tn.CounterTxRecv, int32(txGenerated)) - - nodes = append(nodes, tn) - } - - integrationTests.DisplayAndStartNodes(nodes) - fmt.Println("Delaying for node bootstrap and topic announcement...") - time.Sleep(time.Second * 5) - - fmt.Println(integrationTests.MakeDisplayTable(nodes)) - - fmt.Println("Request nodes start asking the data...") - reqShardCoordinator, _ := sharding.NewMultiShardCoordinator(uint32(maxShards), 0) - for i := 0; i < nodesPerShard; i++ { - resolver, _ := nodes[i].ResolverFinder.Get(factory.TransactionTopic + reqShardCoordinator.CommunicationIdentifier(1)) - txResolver, ok := resolver.(*resolvers.TxResolver) - assert.True(t, ok) - - _ = txResolver.RequestDataFromHashArray(txHashesGenerated) - } - - time.Sleep(time.Second * 5) - mutRecvTxs.Lock() - defer mutRecvTxs.Unlock() - for i := 0; i < nodesPerShard; i++ { - mapTx := recvTxs[i] - assert.NotNil(t, mapTx) - - txsReceived := len(recvTxs[i]) - assert.Equal(t, txGenerated, txsReceived) - - atomic.StoreInt32(&nodes[i].CounterTxRecv, int32(txsReceived)) - } - - fmt.Println(integrationTests.MakeDisplayTable(nodes)) + if testing.Short() { + t.Skip("this is not a short test") + } + + advertiser := integrationTests.CreateMessengerWithKadDht(context.Background(), "") + _ = advertiser.Bootstrap() + + nodes := make([]*integrationTests.TestProcessorNode, 0) + maxShards := 2 + nodesPerShard := 2 + txGenerated := 10 + + defer func() { + 
_ = advertiser.Close() + for _, n := range nodes { + _ = n.Node.Stop() + } + }() + + //shard 0, requesters + recvTxs := make(map[int]map[string]struct{}) + mutRecvTxs := sync.Mutex{} + for i := 0; i < nodesPerShard; i++ { + dPool := integrationTests.CreateRequesterDataPool(t, recvTxs, &mutRecvTxs, i, uint32(maxShards)) + + tn := integrationTests.NewTestProcessorNodeWithCustomDataPool( + uint32(maxShards), + 0, + 0, + integrationTests.GetConnectableAddress(advertiser), + dPool, + ) + + nodes = append(nodes, tn) + } + + var txHashesGenerated [][]byte + var dPool dataRetriever.PoolsHolder + shardCoordinator, _ := sharding.NewMultiShardCoordinator(uint32(maxShards), 0) + dPool, txHashesGenerated = integrationTests.CreateResolversDataPool(t, txGenerated, 0, 1, shardCoordinator) + //shard 1, resolvers, same data pool, does not matter + for i := 0; i < nodesPerShard; i++ { + tn := integrationTests.NewTestProcessorNodeWithCustomDataPool( + uint32(maxShards), + 1, + 1, + integrationTests.GetConnectableAddress(advertiser), + dPool, + ) + + atomic.StoreInt32(&tn.CounterTxRecv, int32(txGenerated)) + + nodes = append(nodes, tn) + } + + integrationTests.DisplayAndStartNodes(nodes) + fmt.Println("Delaying for node bootstrap and topic announcement...") + time.Sleep(time.Second * 5) + + fmt.Println(integrationTests.MakeDisplayTable(nodes)) + + fmt.Println("Request nodes start asking the data...") + reqShardCoordinator, _ := sharding.NewMultiShardCoordinator(uint32(maxShards), 0) + for i := 0; i < nodesPerShard; i++ { + resolver, _ := nodes[i].ResolverFinder.Get(factory.TransactionTopic + reqShardCoordinator.CommunicationIdentifier(1)) + txResolver, ok := resolver.(*resolvers.TxResolver) + assert.True(t, ok) + + _ = txResolver.RequestDataFromHashArray(txHashesGenerated) + } + + time.Sleep(time.Second * 5) + mutRecvTxs.Lock() + defer mutRecvTxs.Unlock() + for i := 0; i < nodesPerShard; i++ { + mapTx := recvTxs[i] + assert.NotNil(t, mapTx) + + txsReceived := len(recvTxs[i]) + assert.Equal(t, txGenerated, txsReceived) + + atomic.StoreInt32(&nodes[i].CounterTxRecv, int32(txsReceived)) + } + + fmt.Println(integrationTests.MakeDisplayTable(nodes)) } diff --git a/integrationTests/singleShard/transaction/interceptedResolvedTx_test.go b/integrationTests/singleShard/transaction/interceptedResolvedTx_test.go index 3e527b4351d..962589ef327 100644 --- a/integrationTests/singleShard/transaction/interceptedResolvedTx_test.go +++ b/integrationTests/singleShard/transaction/interceptedResolvedTx_test.go @@ -1,182 +1,182 @@ package transaction import ( - "fmt" - "github.com/ElrondNetwork/elrond-go/data/rewardTx" - "math/big" - "reflect" - "testing" - "time" - - "github.com/ElrondNetwork/elrond-go/crypto/signing/kyber/singlesig" - "github.com/ElrondNetwork/elrond-go/data/transaction" - "github.com/ElrondNetwork/elrond-go/integrationTests" - "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/process/factory" - "github.com/stretchr/testify/assert" + "fmt" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" + "math/big" + "reflect" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go/crypto/signing/kyber/singlesig" + "github.com/ElrondNetwork/elrond-go/data/transaction" + "github.com/ElrondNetwork/elrond-go/integrationTests" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/factory" + "github.com/stretchr/testify/assert" ) func TestNode_RequestInterceptTransactionWithMessenger(t *testing.T) { - if testing.Short() { - t.Skip("this is not a short 
test") - } - - var nrOfShards uint32 = 1 - var shardID uint32 = 0 - var txSignPrivKeyShardId uint32 = 0 - requesterNodeAddr := "0" - resolverNodeAddr := "1" - - fmt.Println("Requester: ") - nRequester := integrationTests.NewTestProcessorNode(nrOfShards, shardID, txSignPrivKeyShardId, requesterNodeAddr) - - fmt.Println("Resolver:") - nResolver := integrationTests.NewTestProcessorNode(nrOfShards, shardID, txSignPrivKeyShardId, resolverNodeAddr) - _ = nRequester.Node.Start() - _ = nResolver.Node.Start() - defer func() { - _ = nRequester.Node.Stop() - _ = nResolver.Node.Stop() - }() - - //connect messengers together - time.Sleep(time.Second) - err := nRequester.Messenger.ConnectToPeer(integrationTests.GetConnectableAddress(nResolver.Messenger)) - assert.Nil(t, err) - - time.Sleep(time.Second) - - buffPk1, _ := nRequester.OwnAccount.SkTxSign.GeneratePublic().ToByteArray() - - //Step 1. Generate a signed transaction - tx := transaction.Transaction{ - Nonce: 0, - Value: big.NewInt(0), - RcvAddr: integrationTests.TestHasher.Compute("receiver"), - SndAddr: buffPk1, - Data: "tx notarized data", - } - - txBuff, _ := integrationTests.TestMarshalizer.Marshal(&tx) - signer := &singlesig.SchnorrSigner{} - tx.Signature, _ = signer.Sign(nRequester.OwnAccount.SkTxSign, txBuff) - signedTxBuff, _ := integrationTests.TestMarshalizer.Marshal(&tx) - - fmt.Printf("Transaction: %v\n%v\n", tx, string(signedTxBuff)) - - chanDone := make(chan bool) - - txHash := integrationTests.TestHasher.Compute(string(signedTxBuff)) - - //step 2. wire up a received handler for requester - nRequester.ShardDataPool.Transactions().RegisterHandler(func(key []byte) { - txStored, _ := nRequester.ShardDataPool.Transactions().ShardDataStore( - process.ShardCacherIdentifier(nRequester.ShardCoordinator.SelfId(), nRequester.ShardCoordinator.SelfId()), - ).Get(key) - - if reflect.DeepEqual(txStored, &tx) && tx.Signature != nil { - chanDone <- true - } - - assert.Equal(t, txStored, &tx) - assert.Equal(t, txHash, key) - }) - - //Step 3. add the transaction in resolver pool - nResolver.ShardDataPool.Transactions().AddData( - txHash, - &tx, - process.ShardCacherIdentifier(nRequester.ShardCoordinator.SelfId(), nRequester.ShardCoordinator.SelfId()), - ) - - //Step 4. request tx - txResolver, _ := nRequester.ResolverFinder.IntraShardResolver(factory.TransactionTopic) - err = txResolver.RequestDataFromHash(txHash) - assert.Nil(t, err) - - select { - case <-chanDone: - case <-time.After(time.Second * 3): - assert.Fail(t, "timeout") - } + if testing.Short() { + t.Skip("this is not a short test") + } + + var nrOfShards uint32 = 1 + var shardID uint32 = 0 + var txSignPrivKeyShardId uint32 = 0 + requesterNodeAddr := "0" + resolverNodeAddr := "1" + + fmt.Println("Requester: ") + nRequester := integrationTests.NewTestProcessorNode(nrOfShards, shardID, txSignPrivKeyShardId, requesterNodeAddr) + + fmt.Println("Resolver:") + nResolver := integrationTests.NewTestProcessorNode(nrOfShards, shardID, txSignPrivKeyShardId, resolverNodeAddr) + _ = nRequester.Node.Start() + _ = nResolver.Node.Start() + defer func() { + _ = nRequester.Node.Stop() + _ = nResolver.Node.Stop() + }() + + //connect messengers together + time.Sleep(time.Second) + err := nRequester.Messenger.ConnectToPeer(integrationTests.GetConnectableAddress(nResolver.Messenger)) + assert.Nil(t, err) + + time.Sleep(time.Second) + + buffPk1, _ := nRequester.OwnAccount.SkTxSign.GeneratePublic().ToByteArray() + + //Step 1. 
Generate a signed transaction + tx := transaction.Transaction{ + Nonce: 0, + Value: big.NewInt(0), + RcvAddr: integrationTests.TestHasher.Compute("receiver"), + SndAddr: buffPk1, + Data: "tx notarized data", + } + + txBuff, _ := integrationTests.TestMarshalizer.Marshal(&tx) + signer := &singlesig.SchnorrSigner{} + tx.Signature, _ = signer.Sign(nRequester.OwnAccount.SkTxSign, txBuff) + signedTxBuff, _ := integrationTests.TestMarshalizer.Marshal(&tx) + + fmt.Printf("Transaction: %v\n%v\n", tx, string(signedTxBuff)) + + chanDone := make(chan bool) + + txHash := integrationTests.TestHasher.Compute(string(signedTxBuff)) + + //step 2. wire up a received handler for requester + nRequester.ShardDataPool.Transactions().RegisterHandler(func(key []byte) { + txStored, _ := nRequester.ShardDataPool.Transactions().ShardDataStore( + process.ShardCacherIdentifier(nRequester.ShardCoordinator.SelfId(), nRequester.ShardCoordinator.SelfId()), + ).Get(key) + + if reflect.DeepEqual(txStored, &tx) && tx.Signature != nil { + chanDone <- true + } + + assert.Equal(t, txStored, &tx) + assert.Equal(t, txHash, key) + }) + + //Step 3. add the transaction in resolver pool + nResolver.ShardDataPool.Transactions().AddData( + txHash, + &tx, + process.ShardCacherIdentifier(nRequester.ShardCoordinator.SelfId(), nRequester.ShardCoordinator.SelfId()), + ) + + //Step 4. request tx + txResolver, _ := nRequester.ResolverFinder.IntraShardResolver(factory.TransactionTopic) + err = txResolver.RequestDataFromHash(txHash) + assert.Nil(t, err) + + select { + case <-chanDone: + case <-time.After(time.Second * 3): + assert.Fail(t, "timeout") + } } func TestNode_RequestInterceptRewardTransactionWithMessenger(t *testing.T) { - if testing.Short() { - t.Skip("this is not a short test") - } - - var nrOfShards uint32 = 1 - var shardID uint32 = 0 - var txSignPrivKeyShardId uint32 = 0 - requesterNodeAddr := "0" - resolverNodeAddr := "1" - - fmt.Println("Requester: ") - nRequester := integrationTests.NewTestProcessorNode(nrOfShards, shardID, txSignPrivKeyShardId, requesterNodeAddr) - - fmt.Println("Resolver:") - nResolver := integrationTests.NewTestProcessorNode(nrOfShards, shardID, txSignPrivKeyShardId, resolverNodeAddr) - _ = nRequester.Node.Start() - _ = nResolver.Node.Start() - defer func() { - _ = nRequester.Node.Stop() - _ = nResolver.Node.Stop() - }() - - //connect messengers together - time.Sleep(time.Second) - err := nRequester.Messenger.ConnectToPeer(integrationTests.GetConnectableAddress(nResolver.Messenger)) - assert.Nil(t, err) - - time.Sleep(time.Second) - - //Step 1. Generate a signed transaction - tx := rewardTx.RewardTx{ - Value: big.NewInt(0), - RcvAddr: integrationTests.TestHasher.Compute("receiver"), - Round: 0, - Epoch: 0, - ShardId: 0, - } - - marshaledTxBuff, _ := integrationTests.TestMarshalizer.Marshal(&tx) - - fmt.Printf("Transaction: %v\n%v\n", tx, string(marshaledTxBuff)) - - chanDone := make(chan bool) - - txHash := integrationTests.TestHasher.Compute(string(marshaledTxBuff)) - - //step 2. wire up a received handler for requester - nRequester.ShardDataPool.RewardTransactions().RegisterHandler(func(key []byte) { - rewardTxStored, _ := nRequester.ShardDataPool.RewardTransactions().ShardDataStore( - process.ShardCacherIdentifier(nRequester.ShardCoordinator.SelfId(), nRequester.ShardCoordinator.SelfId()), - ).Get(key) - - if reflect.DeepEqual(rewardTxStored, &tx){ - chanDone <- true - } - - assert.Equal(t, rewardTxStored, &tx) - assert.Equal(t, txHash, key) - }) - - //Step 3. 
add the transaction in resolver pool - nResolver.ShardDataPool.RewardTransactions().AddData( - txHash, - &tx, - process.ShardCacherIdentifier(nRequester.ShardCoordinator.SelfId(), nRequester.ShardCoordinator.SelfId()), - ) - - //Step 4. request tx - rewardTxResolver, _ := nRequester.ResolverFinder.IntraShardResolver(factory.RewardsTransactionTopic) - err = rewardTxResolver.RequestDataFromHash(txHash) - assert.Nil(t, err) - - select { - case <-chanDone: - case <-time.After(time.Second * 3): - assert.Fail(t, "timeout") - } + if testing.Short() { + t.Skip("this is not a short test") + } + + var nrOfShards uint32 = 1 + var shardID uint32 = 0 + var txSignPrivKeyShardId uint32 = 0 + requesterNodeAddr := "0" + resolverNodeAddr := "1" + + fmt.Println("Requester: ") + nRequester := integrationTests.NewTestProcessorNode(nrOfShards, shardID, txSignPrivKeyShardId, requesterNodeAddr) + + fmt.Println("Resolver:") + nResolver := integrationTests.NewTestProcessorNode(nrOfShards, shardID, txSignPrivKeyShardId, resolverNodeAddr) + _ = nRequester.Node.Start() + _ = nResolver.Node.Start() + defer func() { + _ = nRequester.Node.Stop() + _ = nResolver.Node.Stop() + }() + + //connect messengers together + time.Sleep(time.Second) + err := nRequester.Messenger.ConnectToPeer(integrationTests.GetConnectableAddress(nResolver.Messenger)) + assert.Nil(t, err) + + time.Sleep(time.Second) + + //Step 1. Generate a signed transaction + tx := rewardTx.RewardTx{ + Value: big.NewInt(0), + RcvAddr: integrationTests.TestHasher.Compute("receiver"), + Round: 0, + Epoch: 0, + ShardId: 0, + } + + marshaledTxBuff, _ := integrationTests.TestMarshalizer.Marshal(&tx) + + fmt.Printf("Transaction: %v\n%v\n", tx, string(marshaledTxBuff)) + + chanDone := make(chan bool) + + txHash := integrationTests.TestHasher.Compute(string(marshaledTxBuff)) + + //step 2. wire up a received handler for requester + nRequester.ShardDataPool.RewardTransactions().RegisterHandler(func(key []byte) { + rewardTxStored, _ := nRequester.ShardDataPool.RewardTransactions().ShardDataStore( + process.ShardCacherIdentifier(nRequester.ShardCoordinator.SelfId(), nRequester.ShardCoordinator.SelfId()), + ).Get(key) + + if reflect.DeepEqual(rewardTxStored, &tx) { + chanDone <- true + } + + assert.Equal(t, rewardTxStored, &tx) + assert.Equal(t, txHash, key) + }) + + //Step 3. add the transaction in resolver pool + nResolver.ShardDataPool.RewardTransactions().AddData( + txHash, + &tx, + process.ShardCacherIdentifier(nRequester.ShardCoordinator.SelfId(), nRequester.ShardCoordinator.SelfId()), + ) + + //Step 4. 
request tx + rewardTxResolver, _ := nRequester.ResolverFinder.IntraShardResolver(factory.RewardsTransactionTopic) + err = rewardTxResolver.RequestDataFromHash(txHash) + assert.Nil(t, err) + + select { + case <-chanDone: + case <-time.After(time.Second * 3): + assert.Fail(t, "timeout") + } } diff --git a/integrationTests/testInitializer.go b/integrationTests/testInitializer.go index a9da172b806..1ff9e0c5b1b 100644 --- a/integrationTests/testInitializer.go +++ b/integrationTests/testInitializer.go @@ -1,57 +1,57 @@ package integrationTests import ( - "context" - "crypto/ecdsa" - "crypto/rand" - "encoding/base64" - "encoding/binary" - "encoding/hex" - "fmt" - "math/big" - "strings" - "sync" - "sync/atomic" - "testing" - "time" - - "github.com/ElrondNetwork/elrond-go/crypto" - "github.com/ElrondNetwork/elrond-go/crypto/signing" - "github.com/ElrondNetwork/elrond-go/crypto/signing/kyber" - "github.com/ElrondNetwork/elrond-go/crypto/signing/kyber/singlesig" - "github.com/ElrondNetwork/elrond-go/data" - dataBlock "github.com/ElrondNetwork/elrond-go/data/block" - "github.com/ElrondNetwork/elrond-go/data/blockchain" - "github.com/ElrondNetwork/elrond-go/data/state" - "github.com/ElrondNetwork/elrond-go/data/state/factory" - "github.com/ElrondNetwork/elrond-go/data/transaction" - "github.com/ElrondNetwork/elrond-go/data/trie" - "github.com/ElrondNetwork/elrond-go/data/typeConverters/uint64ByteSlice" - "github.com/ElrondNetwork/elrond-go/dataRetriever" - "github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool" - "github.com/ElrondNetwork/elrond-go/dataRetriever/shardedData" - "github.com/ElrondNetwork/elrond-go/display" - "github.com/ElrondNetwork/elrond-go/hashing/sha256" - "github.com/ElrondNetwork/elrond-go/integrationTests/mock" - "github.com/ElrondNetwork/elrond-go/node" - "github.com/ElrondNetwork/elrond-go/p2p" - "github.com/ElrondNetwork/elrond-go/p2p/libp2p" - "github.com/ElrondNetwork/elrond-go/p2p/libp2p/discovery" - "github.com/ElrondNetwork/elrond-go/p2p/loadBalancer" - "github.com/ElrondNetwork/elrond-go/process" - procFactory "github.com/ElrondNetwork/elrond-go/process/factory" - "github.com/ElrondNetwork/elrond-go/process/smartContract/hooks" - txProc "github.com/ElrondNetwork/elrond-go/process/transaction" - "github.com/ElrondNetwork/elrond-go/sharding" - "github.com/ElrondNetwork/elrond-go/storage" - "github.com/ElrondNetwork/elrond-go/storage/memorydb" - "github.com/ElrondNetwork/elrond-go/storage/storageUnit" - vmcommon "github.com/ElrondNetwork/elrond-vm-common" - "github.com/ElrondNetwork/elrond-vm/iele/elrond/node/endpoint" - "github.com/btcsuite/btcd/btcec" - libp2pCrypto "github.com/libp2p/go-libp2p-core/crypto" - "github.com/pkg/errors" - "github.com/stretchr/testify/assert" + "context" + "crypto/ecdsa" + "crypto/rand" + "encoding/base64" + "encoding/binary" + "encoding/hex" + "fmt" + "math/big" + "strings" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go/crypto" + "github.com/ElrondNetwork/elrond-go/crypto/signing" + "github.com/ElrondNetwork/elrond-go/crypto/signing/kyber" + "github.com/ElrondNetwork/elrond-go/crypto/signing/kyber/singlesig" + "github.com/ElrondNetwork/elrond-go/data" + dataBlock "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/blockchain" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/data/state/factory" + "github.com/ElrondNetwork/elrond-go/data/transaction" + "github.com/ElrondNetwork/elrond-go/data/trie" + 
"github.com/ElrondNetwork/elrond-go/data/typeConverters/uint64ByteSlice" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool" + "github.com/ElrondNetwork/elrond-go/dataRetriever/shardedData" + "github.com/ElrondNetwork/elrond-go/display" + "github.com/ElrondNetwork/elrond-go/hashing/sha256" + "github.com/ElrondNetwork/elrond-go/integrationTests/mock" + "github.com/ElrondNetwork/elrond-go/node" + "github.com/ElrondNetwork/elrond-go/p2p" + "github.com/ElrondNetwork/elrond-go/p2p/libp2p" + "github.com/ElrondNetwork/elrond-go/p2p/libp2p/discovery" + "github.com/ElrondNetwork/elrond-go/p2p/loadBalancer" + "github.com/ElrondNetwork/elrond-go/process" + procFactory "github.com/ElrondNetwork/elrond-go/process/factory" + "github.com/ElrondNetwork/elrond-go/process/smartContract/hooks" + txProc "github.com/ElrondNetwork/elrond-go/process/transaction" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/storage" + "github.com/ElrondNetwork/elrond-go/storage/memorydb" + "github.com/ElrondNetwork/elrond-go/storage/storageUnit" + vmcommon "github.com/ElrondNetwork/elrond-vm-common" + "github.com/ElrondNetwork/elrond-vm/iele/elrond/node/endpoint" + "github.com/btcsuite/btcd/btcec" + libp2pCrypto "github.com/libp2p/go-libp2p-core/crypto" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" ) var stepDelay = time.Second @@ -59,1165 +59,1165 @@ var p2pBootstrapStepDelay = 5 * time.Second // GetConnectableAddress returns a non circuit, non windows default connectable address for provided messenger func GetConnectableAddress(mes p2p.Messenger) string { - for _, addr := range mes.Addresses() { - if strings.Contains(addr, "circuit") || strings.Contains(addr, "169.254") { - continue - } - return addr - } - return "" + for _, addr := range mes.Addresses() { + if strings.Contains(addr, "circuit") || strings.Contains(addr, "169.254") { + continue + } + return addr + } + return "" } // CreateMessengerWithKadDht creates a new libp2p messenger with kad-dht peer discovery func CreateMessengerWithKadDht(ctx context.Context, initialAddr string) p2p.Messenger { - prvKey, _ := ecdsa.GenerateKey(btcec.S256(), rand.Reader) - sk := (*libp2pCrypto.Secp256k1PrivateKey)(prvKey) + prvKey, _ := ecdsa.GenerateKey(btcec.S256(), rand.Reader) + sk := (*libp2pCrypto.Secp256k1PrivateKey)(prvKey) - libP2PMes, err := libp2p.NewNetworkMessengerOnFreePort( - ctx, - sk, - nil, - loadBalancer.NewOutgoingChannelLoadBalancer(), - discovery.NewKadDhtPeerDiscoverer(stepDelay, "test", []string{initialAddr}), - ) - if err != nil { - fmt.Println(err.Error()) - } + libP2PMes, err := libp2p.NewNetworkMessengerOnFreePort( + ctx, + sk, + nil, + loadBalancer.NewOutgoingChannelLoadBalancer(), + discovery.NewKadDhtPeerDiscoverer(stepDelay, "test", []string{initialAddr}), + ) + if err != nil { + fmt.Println(err.Error()) + } - return libP2PMes + return libP2PMes } // CreateTestShardDataPool creates a test data pool for shard nodes func CreateTestShardDataPool(txPool dataRetriever.ShardedDataCacherNotifier, nbShards uint32) dataRetriever.PoolsHolder { - if txPool == nil { - txPool, _ = shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: nbShards}) - } + if txPool == nil { + txPool, _ = shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: nbShards}) + } - uTxPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, 
Shards: nbShards}) - rewardsTxPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 300, Type: storageUnit.LRUCache, Shards: nbShards}) - cacherCfg := storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache, Shards: nbShards} - hdrPool, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) + uTxPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: nbShards}) + rewardsTxPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 300, Type: storageUnit.LRUCache, Shards: nbShards}) + cacherCfg := storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache, Shards: nbShards} + hdrPool, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: nbShards} - hdrNoncesCacher, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - hdrNonces, _ := dataPool.NewNonceSyncMapCacher(hdrNoncesCacher, uint64ByteSlice.NewBigEndianConverter()) + cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: nbShards} + hdrNoncesCacher, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) + hdrNonces, _ := dataPool.NewNonceSyncMapCacher(hdrNoncesCacher, uint64ByteSlice.NewBigEndianConverter()) - cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: nbShards} - txBlockBody, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) + cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: nbShards} + txBlockBody, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: nbShards} - peerChangeBlockBody, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) + cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: nbShards} + peerChangeBlockBody, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: nbShards} - metaBlocks, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) + cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: nbShards} + metaBlocks, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - dPool, _ := dataPool.NewShardedDataPool( - txPool, - uTxPool, - rewardsTxPool, - hdrPool, - hdrNonces, - txBlockBody, - peerChangeBlockBody, - metaBlocks, - ) + dPool, _ := dataPool.NewShardedDataPool( + txPool, + uTxPool, + rewardsTxPool, + hdrPool, + hdrNonces, + txBlockBody, + peerChangeBlockBody, + metaBlocks, + ) - return dPool + return dPool } // CreateTestMetaDataPool creates a test data pool for meta nodes func CreateTestMetaDataPool() dataRetriever.MetaPoolsHolder { - cacherCfg := storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache} - metaBlocks, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) + cacherCfg := storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache} + metaBlocks, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - cacherCfg = storageUnit.CacheConfig{Size: 10000, Type: storageUnit.LRUCache} - miniblockHashes, _ := shardedData.NewShardedData(cacherCfg) + cacherCfg = storageUnit.CacheConfig{Size: 10000, Type: 
storageUnit.LRUCache} + miniblockHashes, _ := shardedData.NewShardedData(cacherCfg) - cacherCfg = storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache} - shardHeaders, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) + cacherCfg = storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache} + shardHeaders, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - shardHeadersNoncesCacher, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - shardHeadersNonces, _ := dataPool.NewNonceSyncMapCacher(shardHeadersNoncesCacher, uint64ByteSlice.NewBigEndianConverter()) + shardHeadersNoncesCacher, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) + shardHeadersNonces, _ := dataPool.NewNonceSyncMapCacher(shardHeadersNoncesCacher, uint64ByteSlice.NewBigEndianConverter()) - dPool, _ := dataPool.NewMetaDataPool( - metaBlocks, - miniblockHashes, - shardHeaders, - shardHeadersNonces, - ) + dPool, _ := dataPool.NewMetaDataPool( + metaBlocks, + miniblockHashes, + shardHeaders, + shardHeadersNonces, + ) - return dPool + return dPool } // CreateMemUnit returns an in-memory storer implementation (the vast majority of tests do not require effective // disk I/O) func CreateMemUnit() storage.Storer { - cache, _ := storageUnit.NewCache(storageUnit.LRUCache, 10, 1) - persist, _ := memorydb.NewlruDB(100000) - unit, _ := storageUnit.NewStorageUnit(cache, persist) + cache, _ := storageUnit.NewCache(storageUnit.LRUCache, 10, 1) + persist, _ := memorydb.NewlruDB(100000) + unit, _ := storageUnit.NewStorageUnit(cache, persist) - return unit + return unit } // CreateShardStore creates a storage service for shard nodes func CreateShardStore(numOfShards uint32) dataRetriever.StorageService { - store := dataRetriever.NewChainStorer() - store.AddStorer(dataRetriever.TransactionUnit, CreateMemUnit()) - store.AddStorer(dataRetriever.MiniBlockUnit, CreateMemUnit()) - store.AddStorer(dataRetriever.MetaBlockUnit, CreateMemUnit()) - store.AddStorer(dataRetriever.PeerChangesUnit, CreateMemUnit()) - store.AddStorer(dataRetriever.BlockHeaderUnit, CreateMemUnit()) - store.AddStorer(dataRetriever.UnsignedTransactionUnit, CreateMemUnit()) - store.AddStorer(dataRetriever.RewardTransactionUnit, CreateMemUnit()) - store.AddStorer(dataRetriever.MetaHdrNonceHashDataUnit, CreateMemUnit()) + store := dataRetriever.NewChainStorer() + store.AddStorer(dataRetriever.TransactionUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.MiniBlockUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.MetaBlockUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.PeerChangesUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.BlockHeaderUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.UnsignedTransactionUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.RewardTransactionUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.MetaHdrNonceHashDataUnit, CreateMemUnit()) - for i := uint32(0); i < numOfShards; i++ { - hdrNonceHashDataUnit := dataRetriever.ShardHdrNonceHashDataUnit + dataRetriever.UnitType(i) - store.AddStorer(hdrNonceHashDataUnit, CreateMemUnit()) - } + for i := uint32(0); i < numOfShards; i++ { + hdrNonceHashDataUnit := dataRetriever.ShardHdrNonceHashDataUnit + dataRetriever.UnitType(i) + store.AddStorer(hdrNonceHashDataUnit, CreateMemUnit()) + } - return store + return store } // CreateMetaStore creates a storage service for meta nodes func CreateMetaStore(coordinator sharding.Coordinator) dataRetriever.StorageService { - 
store := dataRetriever.NewChainStorer() - store.AddStorer(dataRetriever.MetaBlockUnit, CreateMemUnit()) - store.AddStorer(dataRetriever.MetaHdrNonceHashDataUnit, CreateMemUnit()) - store.AddStorer(dataRetriever.BlockHeaderUnit, CreateMemUnit()) - for i := uint32(0); i < coordinator.NumberOfShards(); i++ { - store.AddStorer(dataRetriever.ShardHdrNonceHashDataUnit+dataRetriever.UnitType(i), CreateMemUnit()) - } + store := dataRetriever.NewChainStorer() + store.AddStorer(dataRetriever.MetaBlockUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.MetaHdrNonceHashDataUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.BlockHeaderUnit, CreateMemUnit()) + for i := uint32(0); i < coordinator.NumberOfShards(); i++ { + store.AddStorer(dataRetriever.ShardHdrNonceHashDataUnit+dataRetriever.UnitType(i), CreateMemUnit()) + } - return store + return store } // CreateAccountsDB creates an account state with a valid trie implementation but with a memory storage func CreateAccountsDB(accountType factory.Type) (*state.AccountsDB, data.Trie, storage.Storer) { - hasher := sha256.Sha256{} - store := CreateMemUnit() + hasher := sha256.Sha256{} + store := CreateMemUnit() - tr, _ := trie.NewTrie(store, TestMarshalizer, hasher) - accountFactory, _ := factory.NewAccountFactoryCreator(accountType) - adb, _ := state.NewAccountsDB(tr, sha256.Sha256{}, TestMarshalizer, accountFactory) + tr, _ := trie.NewTrie(store, TestMarshalizer, hasher) + accountFactory, _ := factory.NewAccountFactoryCreator(accountType) + adb, _ := state.NewAccountsDB(tr, sha256.Sha256{}, TestMarshalizer, accountFactory) - return adb, tr, store + return adb, tr, store } // CreateShardChain creates a blockchain implementation used by the shard nodes func CreateShardChain() *blockchain.BlockChain { - cfgCache := storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache} - badBlockCache, _ := storageUnit.NewCache(cfgCache.Type, cfgCache.Size, cfgCache.Shards) - blockChain, _ := blockchain.NewBlockChain( - badBlockCache, - ) - blockChain.GenesisHeader = &dataBlock.Header{} - genesisHeaderM, _ := TestMarshalizer.Marshal(blockChain.GenesisHeader) + cfgCache := storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache} + badBlockCache, _ := storageUnit.NewCache(cfgCache.Type, cfgCache.Size, cfgCache.Shards) + blockChain, _ := blockchain.NewBlockChain( + badBlockCache, + ) + blockChain.GenesisHeader = &dataBlock.Header{} + genesisHeaderM, _ := TestMarshalizer.Marshal(blockChain.GenesisHeader) - blockChain.SetGenesisHeaderHash(TestHasher.Compute(string(genesisHeaderM))) + blockChain.SetGenesisHeaderHash(TestHasher.Compute(string(genesisHeaderM))) - return blockChain + return blockChain } // CreateMetaChain creates a blockchain implementation used by the meta nodes func CreateMetaChain() data.ChainHandler { - cfgCache := storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache} - badBlockCache, _ := storageUnit.NewCache(cfgCache.Type, cfgCache.Size, cfgCache.Shards) - metaChain, _ := blockchain.NewMetaChain( - badBlockCache, - ) - metaChain.GenesisBlock = &dataBlock.MetaBlock{} + cfgCache := storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache} + badBlockCache, _ := storageUnit.NewCache(cfgCache.Type, cfgCache.Size, cfgCache.Shards) + metaChain, _ := blockchain.NewMetaChain( + badBlockCache, + ) + metaChain.GenesisBlock = &dataBlock.MetaBlock{} - return metaChain + return metaChain } // CreateGenesisBlocks creates empty genesis blocks for all known shards, including metachain func CreateGenesisBlocks(shardCoordinator 
sharding.Coordinator) map[uint32]data.HeaderHandler { - genesisBlocks := make(map[uint32]data.HeaderHandler) - for shardId := uint32(0); shardId < shardCoordinator.NumberOfShards(); shardId++ { - genesisBlocks[shardId] = CreateGenesisBlock(shardId) - } + genesisBlocks := make(map[uint32]data.HeaderHandler) + for shardId := uint32(0); shardId < shardCoordinator.NumberOfShards(); shardId++ { + genesisBlocks[shardId] = CreateGenesisBlock(shardId) + } - genesisBlocks[sharding.MetachainShardId] = CreateGenesisMetaBlock() + genesisBlocks[sharding.MetachainShardId] = CreateGenesisMetaBlock() - return genesisBlocks + return genesisBlocks } // CreateGenesisBlock creates a new mock shard genesis block func CreateGenesisBlock(shardId uint32) *dataBlock.Header { - rootHash := []byte("root hash") + rootHash := []byte("root hash") - return &dataBlock.Header{ - Nonce: 0, - Round: 0, - Signature: rootHash, - RandSeed: rootHash, - PrevRandSeed: rootHash, - ShardId: shardId, - PubKeysBitmap: rootHash, - RootHash: rootHash, - PrevHash: rootHash, - } + return &dataBlock.Header{ + Nonce: 0, + Round: 0, + Signature: rootHash, + RandSeed: rootHash, + PrevRandSeed: rootHash, + ShardId: shardId, + PubKeysBitmap: rootHash, + RootHash: rootHash, + PrevHash: rootHash, + } } // CreateGenesisMetaBlock creates a new mock meta genesis block func CreateGenesisMetaBlock() *dataBlock.MetaBlock { - rootHash := []byte("root hash") + rootHash := []byte("root hash") - return &dataBlock.MetaBlock{ - Nonce: 0, - Round: 0, - Signature: rootHash, - RandSeed: rootHash, - PrevRandSeed: rootHash, - PubKeysBitmap: rootHash, - RootHash: rootHash, - PrevHash: rootHash, - } + return &dataBlock.MetaBlock{ + Nonce: 0, + Round: 0, + Signature: rootHash, + RandSeed: rootHash, + PrevRandSeed: rootHash, + PubKeysBitmap: rootHash, + RootHash: rootHash, + PrevHash: rootHash, + } } // CreateIeleVMAndBlockchainHook creates a new instance of a iele VM func CreateIeleVMAndBlockchainHook(accnts state.AccountsAdapter) (vmcommon.VMExecutionHandler, *hooks.VMAccountsDB) { - blockChainHook, _ := hooks.NewVMAccountsDB(accnts, TestAddressConverter) - cryptoHook := hooks.NewVMCryptoHook() - vm := endpoint.NewElrondIeleVM(blockChainHook, cryptoHook, endpoint.ElrondTestnet) + blockChainHook, _ := hooks.NewVMAccountsDB(accnts, TestAddressConverter) + cryptoHook := hooks.NewVMCryptoHook() + vm := endpoint.NewElrondIeleVM(blockChainHook, cryptoHook, endpoint.ElrondTestnet) - return vm, blockChainHook + return vm, blockChainHook } // CreateAddressFromAddrBytes creates an address container object from address bytes provided func CreateAddressFromAddrBytes(addressBytes []byte) state.AddressContainer { - addr, _ := TestAddressConverter.CreateAddressFromPublicKeyBytes(addressBytes) - return addr + addr, _ := TestAddressConverter.CreateAddressFromPublicKeyBytes(addressBytes) + return addr } // CreateRandomAddress creates a random byte array with fixed size func CreateRandomAddress() state.AddressContainer { - addr, _ := TestAddressConverter.CreateAddressFromHex(CreateRandomHexString(64)) - return addr + addr, _ := TestAddressConverter.CreateAddressFromHex(CreateRandomHexString(64)) + return addr } // MintAddress will create an account (if it does not exists), update the balance with required value, // save the account and commit the trie. 
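A minimal usage sketch combining the genesis and accounts helpers defined around this point, written as if it lived in a separate test file importing the integrationTests package; the test name, shard count and balance below are illustrative assumptions, while the helper calls and their signatures are the ones introduced in this file:

package example

import (
	"math/big"
	"testing"

	"github.com/ElrondNetwork/elrond-go/data/state/factory"
	"github.com/ElrondNetwork/elrond-go/integrationTests"
	"github.com/ElrondNetwork/elrond-go/integrationTests/mock"
	"github.com/ElrondNetwork/elrond-go/sharding"
)

func TestGenesisAndAccountsHelpersSketch(t *testing.T) {
	// One empty genesis header per shard, plus one for the metachain.
	shardCoordinator := mock.NewMultiShardsCoordinatorMock(2)
	genesisBlocks := integrationTests.CreateGenesisBlocks(shardCoordinator)
	if genesisBlocks[sharding.MetachainShardId] == nil {
		t.Fatal("expected a metachain genesis header")
	}

	// In-memory accounts trie with one pre-funded account, the same starting point
	// the minting helpers below rely on.
	adb, _, _ := integrationTests.CreateAccountsDB(factory.UserAccount)
	addr := integrationTests.CreateAccount(adb, 0, big.NewInt(1000000))
	if addr == nil {
		t.Fatal("expected a newly created account address")
	}
}
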
func MintAddress(accnts state.AccountsAdapter, addressBytes []byte, value *big.Int) { - accnt, _ := accnts.GetAccountWithJournal(CreateAddressFromAddrBytes(addressBytes)) - _ = accnt.(*state.Account).SetBalanceWithJournal(value) - _, _ = accnts.Commit() + accnt, _ := accnts.GetAccountWithJournal(CreateAddressFromAddrBytes(addressBytes)) + _ = accnt.(*state.Account).SetBalanceWithJournal(value) + _, _ = accnts.Commit() } // CreateAccount creates a new account and returns the address func CreateAccount(accnts state.AccountsAdapter, nonce uint64, balance *big.Int) state.AddressContainer { - address, _ := TestAddressConverter.CreateAddressFromHex(CreateRandomHexString(64)) - account, _ := accnts.GetAccountWithJournal(address) - _ = account.(*state.Account).SetNonceWithJournal(nonce) - _ = account.(*state.Account).SetBalanceWithJournal(balance) + address, _ := TestAddressConverter.CreateAddressFromHex(CreateRandomHexString(64)) + account, _ := accnts.GetAccountWithJournal(address) + _ = account.(*state.Account).SetNonceWithJournal(nonce) + _ = account.(*state.Account).SetBalanceWithJournal(balance) - return address + return address } // MakeDisplayTable will output a string containing counters for received transactions, headers, miniblocks and // meta headers for all provided test nodes func MakeDisplayTable(nodes []*TestProcessorNode) string { - header := []string{"pk", "shard ID", "txs", "miniblocks", "headers", "metachain headers"} - dataLines := make([]*display.LineData, len(nodes)) - - for idx, n := range nodes { - dataLines[idx] = display.NewLineData( - false, - []string{ - hex.EncodeToString(n.OwnAccount.PkTxSignBytes), - fmt.Sprintf("%d", n.ShardCoordinator.SelfId()), - fmt.Sprintf("%d", atomic.LoadInt32(&n.CounterTxRecv)), - fmt.Sprintf("%d", atomic.LoadInt32(&n.CounterMbRecv)), - fmt.Sprintf("%d", atomic.LoadInt32(&n.CounterHdrRecv)), - fmt.Sprintf("%d", atomic.LoadInt32(&n.CounterMetaRcv)), - }, - ) - } - table, _ := display.CreateTableString(header, dataLines) - - return table + header := []string{"pk", "shard ID", "txs", "miniblocks", "headers", "metachain headers"} + dataLines := make([]*display.LineData, len(nodes)) + + for idx, n := range nodes { + dataLines[idx] = display.NewLineData( + false, + []string{ + hex.EncodeToString(n.OwnAccount.PkTxSignBytes), + fmt.Sprintf("%d", n.ShardCoordinator.SelfId()), + fmt.Sprintf("%d", atomic.LoadInt32(&n.CounterTxRecv)), + fmt.Sprintf("%d", atomic.LoadInt32(&n.CounterMbRecv)), + fmt.Sprintf("%d", atomic.LoadInt32(&n.CounterHdrRecv)), + fmt.Sprintf("%d", atomic.LoadInt32(&n.CounterMetaRcv)), + }, + ) + } + table, _ := display.CreateTableString(header, dataLines) + + return table } // PrintShardAccount outputs on console a shard account data contained func PrintShardAccount(accnt *state.Account, tag string) { - str := fmt.Sprintf("%s Address: %s\n", tag, base64.StdEncoding.EncodeToString(accnt.AddressContainer().Bytes())) - str += fmt.Sprintf(" Nonce: %d\n", accnt.Nonce) - str += fmt.Sprintf(" Balance: %d\n", accnt.Balance.Uint64()) - str += fmt.Sprintf(" Code hash: %s\n", base64.StdEncoding.EncodeToString(accnt.CodeHash)) - str += fmt.Sprintf(" Root hash: %s\n", base64.StdEncoding.EncodeToString(accnt.RootHash)) + str := fmt.Sprintf("%s Address: %s\n", tag, base64.StdEncoding.EncodeToString(accnt.AddressContainer().Bytes())) + str += fmt.Sprintf(" Nonce: %d\n", accnt.Nonce) + str += fmt.Sprintf(" Balance: %d\n", accnt.Balance.Uint64()) + str += fmt.Sprintf(" Code hash: %s\n", base64.StdEncoding.EncodeToString(accnt.CodeHash)) + str += 
fmt.Sprintf(" Root hash: %s\n", base64.StdEncoding.EncodeToString(accnt.RootHash)) - fmt.Println(str) + fmt.Println(str) } // CreateRandomHexString returns a string encoded in hex with the given size func CreateRandomHexString(chars int) string { - if chars < 1 { - return "" - } + if chars < 1 { + return "" + } - buff := make([]byte, chars/2) - _, _ = rand.Reader.Read(buff) + buff := make([]byte, chars/2) + _, _ = rand.Reader.Read(buff) - return hex.EncodeToString(buff) + return hex.EncodeToString(buff) } // GenerateAddressJournalAccountAccountsDB returns an account, the accounts address, and the accounts database func GenerateAddressJournalAccountAccountsDB() (state.AddressContainer, state.AccountHandler, *state.AccountsDB) { - adr := CreateRandomAddress() - adb, _, _ := CreateAccountsDB(factory.UserAccount) - account, _ := state.NewAccount(adr, adb) + adr := CreateRandomAddress() + adb, _, _ := CreateAccountsDB(factory.UserAccount) + account, _ := state.NewAccount(adr, adb) - return adr, account, adb + return adr, account, adb } // AdbEmulateBalanceTxSafeExecution emulates a tx execution by altering the accounts // balance and nonce, and printing any encountered error func AdbEmulateBalanceTxSafeExecution(acntSrc, acntDest *state.Account, accounts state.AccountsAdapter, value *big.Int) { - snapshot := accounts.JournalLen() - err := AdbEmulateBalanceTxExecution(acntSrc, acntDest, value) + snapshot := accounts.JournalLen() + err := AdbEmulateBalanceTxExecution(acntSrc, acntDest, value) - if err != nil { - fmt.Printf("Error executing tx (value: %v), reverting...\n", value) - err = accounts.RevertToSnapshot(snapshot) + if err != nil { + fmt.Printf("Error executing tx (value: %v), reverting...\n", value) + err = accounts.RevertToSnapshot(snapshot) - if err != nil { - panic(err) - } - } + if err != nil { + panic(err) + } + } } // AdbEmulateBalanceTxExecution emulates a tx execution by altering the accounts // balance and nonce, and printing any encountered error func AdbEmulateBalanceTxExecution(acntSrc, acntDest *state.Account, value *big.Int) error { - srcVal := acntSrc.Balance - destVal := acntDest.Balance + srcVal := acntSrc.Balance + destVal := acntDest.Balance - if srcVal.Cmp(value) < 0 { - return errors.New("not enough funds") - } + if srcVal.Cmp(value) < 0 { + return errors.New("not enough funds") + } - err := acntSrc.SetBalanceWithJournal(srcVal.Sub(srcVal, value)) - if err != nil { - return err - } + err := acntSrc.SetBalanceWithJournal(srcVal.Sub(srcVal, value)) + if err != nil { + return err + } - err = acntDest.SetBalanceWithJournal(destVal.Add(destVal, value)) - if err != nil { - return err - } + err = acntDest.SetBalanceWithJournal(destVal.Add(destVal, value)) + if err != nil { + return err + } - err = acntSrc.SetNonceWithJournal(acntSrc.Nonce + 1) - if err != nil { - return err - } + err = acntSrc.SetNonceWithJournal(acntSrc.Nonce + 1) + if err != nil { + return err + } - return nil + return nil } // CreateSimpleTxProcessor returns a transaction processor func CreateSimpleTxProcessor(accnts state.AccountsAdapter) process.TransactionProcessor { - shardCoordinator := mock.NewMultiShardsCoordinatorMock(1) - txProcessor, _ := txProc.NewTxProcessor( - accnts, - TestHasher, - TestAddressConverter, - TestMarshalizer, - shardCoordinator, - &mock.SCProcessorMock{}, - &mock.UnsignedTxHandlerMock{}, - &mock.TxTypeHandlerMock{}, - ) + shardCoordinator := mock.NewMultiShardsCoordinatorMock(1) + txProcessor, _ := txProc.NewTxProcessor( + accnts, + TestHasher, + TestAddressConverter, + 
TestMarshalizer, + shardCoordinator, + &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, + ) - return txProcessor + return txProcessor } // CreateNewDefaultTrie returns a new trie with test hasher and marsahalizer func CreateNewDefaultTrie() data.Trie { - tr, _ := trie.NewTrie(CreateMemUnit(), TestMarshalizer, TestHasher) - return tr + tr, _ := trie.NewTrie(CreateMemUnit(), TestMarshalizer, TestHasher) + return tr } // GenerateRandomSlice returns a random byte slice with the given size func GenerateRandomSlice(size int) []byte { - buff := make([]byte, size) - _, _ = rand.Reader.Read(buff) + buff := make([]byte, size) + _, _ = rand.Reader.Read(buff) - return buff + return buff } // MintAllNodes will take each shard node (n) and will mint all nodes that have their pk managed by the iterating node n func MintAllNodes(nodes []*TestProcessorNode, value *big.Int) { - for idx, n := range nodes { - if n.ShardCoordinator.SelfId() == sharding.MetachainShardId { - continue - } + for idx, n := range nodes { + if n.ShardCoordinator.SelfId() == sharding.MetachainShardId { + continue + } - mintAddressesFromSameShard(nodes, idx, value) - } + mintAddressesFromSameShard(nodes, idx, value) + } } func mintAddressesFromSameShard(nodes []*TestProcessorNode, targetNodeIdx int, value *big.Int) { - targetNode := nodes[targetNodeIdx] + targetNode := nodes[targetNodeIdx] - for _, n := range nodes { - shardId := targetNode.ShardCoordinator.ComputeId(n.OwnAccount.Address) - if shardId != targetNode.ShardCoordinator.SelfId() { - continue - } + for _, n := range nodes { + shardId := targetNode.ShardCoordinator.ComputeId(n.OwnAccount.Address) + if shardId != targetNode.ShardCoordinator.SelfId() { + continue + } - n.OwnAccount.Balance = big.NewInt(0).Set(value) - MintAddress(targetNode.AccntState, n.OwnAccount.PkTxSignBytes, value) - } + n.OwnAccount.Balance = big.NewInt(0).Set(value) + MintAddress(targetNode.AccntState, n.OwnAccount.PkTxSignBytes, value) + } } // MintAllPlayers mints addresses for all players func MintAllPlayers(nodes []*TestProcessorNode, players []*TestWalletAccount, value *big.Int) { - shardCoordinator := nodes[0].ShardCoordinator + shardCoordinator := nodes[0].ShardCoordinator - for _, player := range players { - pShardId := shardCoordinator.ComputeId(player.Address) + for _, player := range players { + pShardId := shardCoordinator.ComputeId(player.Address) - for _, n := range nodes { - if pShardId != n.ShardCoordinator.SelfId() { - continue - } + for _, n := range nodes { + if pShardId != n.ShardCoordinator.SelfId() { + continue + } - MintAddress(n.AccntState, player.Address.Bytes(), value) - player.Balance = big.NewInt(0).Set(value) - } - } + MintAddress(n.AccntState, player.Address.Bytes(), value) + player.Balance = big.NewInt(0).Set(value) + } + } } // IncrementAndPrintRound increments the given variable, and prints the message for the beginning of the round func IncrementAndPrintRound(round uint64) uint64 { - round++ - fmt.Printf("#################################### ROUND %d BEGINS ####################################\n\n", round) + round++ + fmt.Printf("#################################### ROUND %d BEGINS ####################################\n\n", round) - return round + return round } // ProposeBlock proposes a block with SC txs for every shard func ProposeBlock(nodes []*TestProcessorNode, idxProposers []int, round uint64, nonce uint64) { - fmt.Println("All shards propose blocks...") - for idx, n := range nodes { - if !IsIntInSlice(idx, idxProposers) { - 
continue - } + fmt.Println("All shards propose blocks...") + for idx, n := range nodes { + if !IsIntInSlice(idx, idxProposers) { + continue + } - body, header, _ := n.ProposeBlock(round, nonce) - n.BroadcastBlock(body, header) - n.CommitBlock(body, header) - } + body, header, _ := n.ProposeBlock(round, nonce) + n.BroadcastBlock(body, header) + n.CommitBlock(body, header) + } - fmt.Println("Delaying for disseminating headers and miniblocks...") - time.Sleep(stepDelay) - fmt.Println(MakeDisplayTable(nodes)) + fmt.Println("Delaying for disseminating headers and miniblocks...") + time.Sleep(stepDelay) + fmt.Println(MakeDisplayTable(nodes)) } // SyncBlock synchronizes the proposed block in all the other shard nodes func SyncBlock( - t *testing.T, - nodes []*TestProcessorNode, - idxProposers []int, - round uint64, + t *testing.T, + nodes []*TestProcessorNode, + idxProposers []int, + round uint64, ) { - fmt.Println("All other shard nodes sync the proposed block...") - for idx, n := range nodes { - if IsIntInSlice(idx, idxProposers) { - continue - } + fmt.Println("All other shard nodes sync the proposed block...") + for idx, n := range nodes { + if IsIntInSlice(idx, idxProposers) { + continue + } - err := n.SyncNode(round) - if err != nil { - assert.Fail(t, err.Error()) - return - } - } + err := n.SyncNode(round) + if err != nil { + assert.Fail(t, err.Error()) + return + } + } - time.Sleep(stepDelay) - fmt.Println(MakeDisplayTable(nodes)) + time.Sleep(stepDelay) + fmt.Println(MakeDisplayTable(nodes)) } // IsIntInSlice returns true if idx is found on any position in the provided slice func IsIntInSlice(idx int, slice []int) bool { - for _, value := range slice { - if value == idx { - return true - } - } + for _, value := range slice { + if value == idx { + return true + } + } - return false + return false } // Uint32InSlice checks if a uint32 value is in a slice func Uint32InSlice(searched uint32, list []uint32) bool { - for _, val := range list { - if val == searched { - return true - } - } - return false + for _, val := range list { + if val == searched { + return true + } + } + return false } // CheckRootHashes checks the root hash of the proposer in every shard func CheckRootHashes(t *testing.T, nodes []*TestProcessorNode, idxProposers []int) { - for _, idx := range idxProposers { - checkRootHashInShard(t, nodes, idx) - } + for _, idx := range idxProposers { + checkRootHashInShard(t, nodes, idx) + } } func checkRootHashInShard(t *testing.T, nodes []*TestProcessorNode, idxProposer int) { - proposerNode := nodes[idxProposer] - proposerRootHash, _ := proposerNode.AccntState.RootHash() + proposerNode := nodes[idxProposer] + proposerRootHash, _ := proposerNode.AccntState.RootHash() - for i := 0; i < len(nodes); i++ { - n := nodes[i] + for i := 0; i < len(nodes); i++ { + n := nodes[i] - if n.ShardCoordinator.SelfId() != proposerNode.ShardCoordinator.SelfId() { - continue - } + if n.ShardCoordinator.SelfId() != proposerNode.ShardCoordinator.SelfId() { + continue + } - fmt.Printf("Testing roothash for node index %d, shard ID %d...\n", i, n.ShardCoordinator.SelfId()) - nodeRootHash, _ := n.AccntState.RootHash() - assert.Equal(t, proposerRootHash, nodeRootHash) - } + fmt.Printf("Testing roothash for node index %d, shard ID %d...\n", i, n.ShardCoordinator.SelfId()) + nodeRootHash, _ := n.AccntState.RootHash() + assert.Equal(t, proposerRootHash, nodeRootHash) + } } // CheckTxPresentAndRightNonce verifies that the nonce was updated correctly after the exec of bulk txs func CheckTxPresentAndRightNonce( - t 
*testing.T, - startingNonce uint64, - noOfTxs int, - txHashes [][]byte, - txs []data.TransactionHandler, - cache dataRetriever.ShardedDataCacherNotifier, - shardCoordinator sharding.Coordinator, + t *testing.T, + startingNonce uint64, + noOfTxs int, + txHashes [][]byte, + txs []data.TransactionHandler, + cache dataRetriever.ShardedDataCacherNotifier, + shardCoordinator sharding.Coordinator, ) { - if noOfTxs != len(txHashes) { - for i := startingNonce; i < startingNonce+uint64(noOfTxs); i++ { - found := false - - for _, txHandler := range txs { - nonce := extractUint64ValueFromTxHandler(txHandler) - if nonce == i { - found = true - break - } - } - - if !found { - fmt.Printf("unsigned tx with nonce %d is missing\n", i) - } - } - assert.Fail(t, fmt.Sprintf("should have been %d, got %d", noOfTxs, len(txHashes))) - - return - } - - bitmap := make([]bool, noOfTxs+int(startingNonce)) - //set for each nonce from found tx a true flag in bitmap - for i := 0; i < noOfTxs; i++ { - selfId := shardCoordinator.SelfId() - shardDataStore := cache.ShardDataStore(process.ShardCacherIdentifier(selfId, selfId)) - val, _ := shardDataStore.Get(txHashes[i]) - if val == nil { - continue - } - - nonce := extractUint64ValueFromTxHandler(val.(data.TransactionHandler)) - bitmap[nonce] = true - } - - //for the first startingNonce values, the bitmap should be false - //for the rest, true - for i := 0; i < noOfTxs+int(startingNonce); i++ { - if i < int(startingNonce) { - assert.False(t, bitmap[i]) - continue - } - - assert.True(t, bitmap[i]) - } + if noOfTxs != len(txHashes) { + for i := startingNonce; i < startingNonce+uint64(noOfTxs); i++ { + found := false + + for _, txHandler := range txs { + nonce := extractUint64ValueFromTxHandler(txHandler) + if nonce == i { + found = true + break + } + } + + if !found { + fmt.Printf("unsigned tx with nonce %d is missing\n", i) + } + } + assert.Fail(t, fmt.Sprintf("should have been %d, got %d", noOfTxs, len(txHashes))) + + return + } + + bitmap := make([]bool, noOfTxs+int(startingNonce)) + //set for each nonce from found tx a true flag in bitmap + for i := 0; i < noOfTxs; i++ { + selfId := shardCoordinator.SelfId() + shardDataStore := cache.ShardDataStore(process.ShardCacherIdentifier(selfId, selfId)) + val, _ := shardDataStore.Get(txHashes[i]) + if val == nil { + continue + } + + nonce := extractUint64ValueFromTxHandler(val.(data.TransactionHandler)) + bitmap[nonce] = true + } + + //for the first startingNonce values, the bitmap should be false + //for the rest, true + for i := 0; i < noOfTxs+int(startingNonce); i++ { + if i < int(startingNonce) { + assert.False(t, bitmap[i]) + continue + } + + assert.True(t, bitmap[i]) + } } func extractUint64ValueFromTxHandler(txHandler data.TransactionHandler) uint64 { - tx, ok := txHandler.(*transaction.Transaction) - if ok { - return tx.Nonce - } + tx, ok := txHandler.(*transaction.Transaction) + if ok { + return tx.Nonce + } - buff, _ := hex.DecodeString(txHandler.GetData()) - return binary.BigEndian.Uint64(buff) + buff, _ := hex.DecodeString(txHandler.GetData()) + return binary.BigEndian.Uint64(buff) } // CreateNodes creates multiple nodes in different shards func CreateNodes( - numOfShards int, - nodesPerShard int, - numMetaChainNodes int, - serviceID string, + numOfShards int, + nodesPerShard int, + numMetaChainNodes int, + serviceID string, ) []*TestProcessorNode { - //first node generated will have is pk belonging to firstSkShardId - nodes := make([]*TestProcessorNode, numOfShards*nodesPerShard+numMetaChainNodes) + //first node 
generated will have is pk belonging to firstSkShardId + nodes := make([]*TestProcessorNode, numOfShards*nodesPerShard+numMetaChainNodes) - idx := 0 - for shardId := uint32(0); shardId < uint32(numOfShards); shardId++ { - for j := 0; j < nodesPerShard; j++ { - n := NewTestProcessorNode(uint32(numOfShards), shardId, shardId, serviceID) + idx := 0 + for shardId := uint32(0); shardId < uint32(numOfShards); shardId++ { + for j := 0; j < nodesPerShard; j++ { + n := NewTestProcessorNode(uint32(numOfShards), shardId, shardId, serviceID) - nodes[idx] = n - idx++ - } - } + nodes[idx] = n + idx++ + } + } - for i := 0; i < numMetaChainNodes; i++ { - metaNode := NewTestProcessorNode(uint32(numOfShards), sharding.MetachainShardId, 0, serviceID) - idx = i + numOfShards*nodesPerShard - nodes[idx] = metaNode - } + for i := 0; i < numMetaChainNodes; i++ { + metaNode := NewTestProcessorNode(uint32(numOfShards), sharding.MetachainShardId, 0, serviceID) + idx = i + numOfShards*nodesPerShard + nodes[idx] = metaNode + } - return nodes + return nodes } // DisplayAndStartNodes prints each nodes shard ID, sk and pk, and then starts the node func DisplayAndStartNodes(nodes []*TestProcessorNode) { - for _, n := range nodes { - skBuff, _ := n.OwnAccount.SkTxSign.ToByteArray() - pkBuff, _ := n.OwnAccount.PkTxSign.ToByteArray() + for _, n := range nodes { + skBuff, _ := n.OwnAccount.SkTxSign.ToByteArray() + pkBuff, _ := n.OwnAccount.PkTxSign.ToByteArray() - fmt.Printf("Shard ID: %v, sk: %s, pk: %s\n", - n.ShardCoordinator.SelfId(), - hex.EncodeToString(skBuff), - hex.EncodeToString(pkBuff), - ) - _ = n.Node.Start() - _ = n.Node.P2PBootstrap() - } + fmt.Printf("Shard ID: %v, sk: %s, pk: %s\n", + n.ShardCoordinator.SelfId(), + hex.EncodeToString(skBuff), + hex.EncodeToString(pkBuff), + ) + _ = n.Node.Start() + _ = n.Node.P2PBootstrap() + } - fmt.Println("Delaying for node bootstrap and topic announcement...") - time.Sleep(p2pBootstrapStepDelay) + fmt.Println("Delaying for node bootstrap and topic announcement...") + time.Sleep(p2pBootstrapStepDelay) } // GenerateAndDisseminateTxs generates and sends multiple txs func GenerateAndDisseminateTxs( - n *TestProcessorNode, - senders []crypto.PrivateKey, - receiversPrivateKeys map[uint32][]crypto.PrivateKey, - valToTransfer *big.Int, - gasPrice uint64, - gasLimit uint64, + n *TestProcessorNode, + senders []crypto.PrivateKey, + receiversPrivateKeys map[uint32][]crypto.PrivateKey, + valToTransfer *big.Int, + gasPrice uint64, + gasLimit uint64, ) { - for i := 0; i < len(senders); i++ { - senderKey := senders[i] - incrementalNonce := make([]uint64, len(senders)) - for _, recvPrivateKeys := range receiversPrivateKeys { - receiverKey := recvPrivateKeys[i] - tx := generateTransferTx(incrementalNonce[i], senderKey, receiverKey, valToTransfer, gasPrice, gasLimit) - _, _ = n.SendTransaction(tx) - incrementalNonce[i]++ - } - } + for i := 0; i < len(senders); i++ { + senderKey := senders[i] + incrementalNonce := make([]uint64, len(senders)) + for _, recvPrivateKeys := range receiversPrivateKeys { + receiverKey := recvPrivateKeys[i] + tx := generateTransferTx(incrementalNonce[i], senderKey, receiverKey, valToTransfer, gasPrice, gasLimit) + _, _ = n.SendTransaction(tx) + incrementalNonce[i]++ + } + } } type txArgs struct { - nonce uint64 - value *big.Int - rcvAddr []byte - sndAddr []byte - data string - gasPrice int - gasLimit int + nonce uint64 + value *big.Int + rcvAddr []byte + sndAddr []byte + data string + gasPrice int + gasLimit int } func generateTransferTx( - nonce uint64, - sender 
crypto.PrivateKey, - receiver crypto.PrivateKey, - valToTransfer *big.Int, - gasPrice uint64, - gasLimit uint64, + nonce uint64, + sender crypto.PrivateKey, + receiver crypto.PrivateKey, + valToTransfer *big.Int, + gasPrice uint64, + gasLimit uint64, ) *transaction.Transaction { - tx := transaction.Transaction{ - Nonce: nonce, - Value: valToTransfer, - RcvAddr: skToPk(receiver), - SndAddr: skToPk(sender), - Data: "", - GasLimit: gasLimit, - GasPrice: gasPrice, - } - txBuff, _ := TestMarshalizer.Marshal(&tx) - signer := &singlesig.SchnorrSigner{} - tx.Signature, _ = signer.Sign(sender, txBuff) + tx := transaction.Transaction{ + Nonce: nonce, + Value: valToTransfer, + RcvAddr: skToPk(receiver), + SndAddr: skToPk(sender), + Data: "", + GasLimit: gasLimit, + GasPrice: gasPrice, + } + txBuff, _ := TestMarshalizer.Marshal(&tx) + signer := &singlesig.SchnorrSigner{} + tx.Signature, _ = signer.Sign(sender, txBuff) - return &tx + return &tx } func generateTx( - skSign crypto.PrivateKey, - signer crypto.SingleSigner, - args *txArgs, + skSign crypto.PrivateKey, + signer crypto.SingleSigner, + args *txArgs, ) *transaction.Transaction { - tx := &transaction.Transaction{ - Nonce: args.nonce, - Value: args.value, - RcvAddr: args.rcvAddr, - SndAddr: args.sndAddr, - GasPrice: uint64(args.gasPrice), - GasLimit: uint64(args.gasLimit), - Data: args.data, - } - txBuff, _ := TestMarshalizer.Marshal(tx) - tx.Signature, _ = signer.Sign(skSign, txBuff) + tx := &transaction.Transaction{ + Nonce: args.nonce, + Value: args.value, + RcvAddr: args.rcvAddr, + SndAddr: args.sndAddr, + GasPrice: uint64(args.gasPrice), + GasLimit: uint64(args.gasLimit), + Data: args.data, + } + txBuff, _ := TestMarshalizer.Marshal(tx) + tx.Signature, _ = signer.Sign(skSign, txBuff) - return tx + return tx } func skToPk(sk crypto.PrivateKey) []byte { - pkBuff, _ := sk.GeneratePublic().ToByteArray() - return pkBuff + pkBuff, _ := sk.GeneratePublic().ToByteArray() + return pkBuff } // TestPrivateKeyHasBalance checks if the private key has the expected balance func TestPrivateKeyHasBalance(t *testing.T, n *TestProcessorNode, sk crypto.PrivateKey, expectedBalance *big.Int) { - pkBuff, _ := sk.GeneratePublic().ToByteArray() - addr, _ := TestAddressConverter.CreateAddressFromPublicKeyBytes(pkBuff) - account, _ := n.AccntState.GetExistingAccount(addr) - assert.Equal(t, expectedBalance, account.(*state.Account).Balance) + pkBuff, _ := sk.GeneratePublic().ToByteArray() + addr, _ := TestAddressConverter.CreateAddressFromPublicKeyBytes(pkBuff) + account, _ := n.AccntState.GetExistingAccount(addr) + assert.Equal(t, expectedBalance, account.(*state.Account).Balance) } // GetMiniBlocksHashesFromShardIds returns miniblock hashes from body func GetMiniBlocksHashesFromShardIds(body dataBlock.Body, shardIds ...uint32) [][]byte { - hashes := make([][]byte, 0) + hashes := make([][]byte, 0) - for _, miniblock := range body { - for _, shardId := range shardIds { - if miniblock.ReceiverShardID == shardId { - buff, _ := TestMarshalizer.Marshal(miniblock) - hashes = append(hashes, TestHasher.Compute(string(buff))) - } - } - } + for _, miniblock := range body { + for _, shardId := range shardIds { + if miniblock.ReceiverShardID == shardId { + buff, _ := TestMarshalizer.Marshal(miniblock) + hashes = append(hashes, TestHasher.Compute(string(buff))) + } + } + } - return hashes + return hashes } // GenerateSkAndPkInShard generates and returns a private and a public key that reside in a given shard. 
// It also returns the key generator func GenerateSkAndPkInShard( - coordinator sharding.Coordinator, - shardId uint32, + coordinator sharding.Coordinator, + shardId uint32, ) (crypto.PrivateKey, crypto.PublicKey, crypto.KeyGenerator) { - suite := kyber.NewBlakeSHA256Ed25519() - keyGen := signing.NewKeyGenerator(suite) - sk, pk := keyGen.GeneratePair() + suite := kyber.NewBlakeSHA256Ed25519() + keyGen := signing.NewKeyGenerator(suite) + sk, pk := keyGen.GeneratePair() - for { - pkBytes, _ := pk.ToByteArray() - addr, _ := TestAddressConverter.CreateAddressFromPublicKeyBytes(pkBytes) - if coordinator.ComputeId(addr) == shardId { - break - } - sk, pk = keyGen.GeneratePair() - } + for { + pkBytes, _ := pk.ToByteArray() + addr, _ := TestAddressConverter.CreateAddressFromPublicKeyBytes(pkBytes) + if coordinator.ComputeId(addr) == shardId { + break + } + sk, pk = keyGen.GeneratePair() + } - return sk, pk, keyGen + return sk, pk, keyGen } // CreateMintingForSenders creates account with balances for every node in a given shard func CreateMintingForSenders( - nodes []*TestProcessorNode, - senderShard uint32, - sendersPrivateKeys []crypto.PrivateKey, - value *big.Int, + nodes []*TestProcessorNode, + senderShard uint32, + sendersPrivateKeys []crypto.PrivateKey, + value *big.Int, ) { - for _, n := range nodes { - //only sender shard nodes will be minted - if n.ShardCoordinator.SelfId() != senderShard { - continue - } + for _, n := range nodes { + //only sender shard nodes will be minted + if n.ShardCoordinator.SelfId() != senderShard { + continue + } - for _, sk := range sendersPrivateKeys { - pkBuff, _ := sk.GeneratePublic().ToByteArray() - adr, _ := TestAddressConverter.CreateAddressFromPublicKeyBytes(pkBuff) - account, _ := n.AccntState.GetAccountWithJournal(adr) - _ = account.(*state.Account).SetBalanceWithJournal(value) - } + for _, sk := range sendersPrivateKeys { + pkBuff, _ := sk.GeneratePublic().ToByteArray() + adr, _ := TestAddressConverter.CreateAddressFromPublicKeyBytes(pkBuff) + account, _ := n.AccntState.GetAccountWithJournal(adr) + _ = account.(*state.Account).SetBalanceWithJournal(value) + } - _, _ = n.AccntState.Commit() - } + _, _ = n.AccntState.Commit() + } } // ProposeBlockSignalsEmptyBlock proposes and broadcasts a block func ProposeBlockSignalsEmptyBlock( - node *TestProcessorNode, - round uint64, - nonce uint64, + node *TestProcessorNode, + round uint64, + nonce uint64, ) (data.HeaderHandler, data.BodyHandler, bool) { - fmt.Println("Proposing block without commit...") + fmt.Println("Proposing block without commit...") - body, header, txHashes := node.ProposeBlock(round, nonce) - node.BroadcastBlock(body, header) - isEmptyBlock := len(txHashes) == 0 + body, header, txHashes := node.ProposeBlock(round, nonce) + node.BroadcastBlock(body, header) + isEmptyBlock := len(txHashes) == 0 - fmt.Println("Delaying for disseminating headers and miniblocks...") - time.Sleep(stepDelay) + fmt.Println("Delaying for disseminating headers and miniblocks...") + time.Sleep(stepDelay) - return header, body, isEmptyBlock + return header, body, isEmptyBlock } // CreateAccountForNodes creates accounts for each node and commits the accounts state func CreateAccountForNodes(nodes []*TestProcessorNode) { - for i := 0; i < len(nodes); i++ { - CreateAccountForNode(nodes[i]) - } + for i := 0; i < len(nodes); i++ { + CreateAccountForNode(nodes[i]) + } } // CreateAccountForNode creates an account for the given node func CreateAccountForNode(node *TestProcessorNode) { - addr, _ := 
TestAddressConverter.CreateAddressFromPublicKeyBytes(node.OwnAccount.PkTxSignBytes) - _, _ = node.AccntState.GetAccountWithJournal(addr) - _, _ = node.AccntState.Commit() + addr, _ := TestAddressConverter.CreateAddressFromPublicKeyBytes(node.OwnAccount.PkTxSignBytes) + _, _ = node.AccntState.GetAccountWithJournal(addr) + _, _ = node.AccntState.Commit() } // ComputeAndRequestMissingTransactions computes missing transactions for each node, and requests them func ComputeAndRequestMissingTransactions( - nodes []*TestProcessorNode, - generatedTxHashes [][]byte, - shardResolver uint32, - shardRequesters ...uint32, + nodes []*TestProcessorNode, + generatedTxHashes [][]byte, + shardResolver uint32, + shardRequesters ...uint32, ) { - for _, n := range nodes { - if !Uint32InSlice(n.ShardCoordinator.SelfId(), shardRequesters) { - continue - } + for _, n := range nodes { + if !Uint32InSlice(n.ShardCoordinator.SelfId(), shardRequesters) { + continue + } - neededTxs := getMissingTxsForNode(n, generatedTxHashes) - requestMissingTransactions(n, shardResolver, neededTxs) - } + neededTxs := getMissingTxsForNode(n, generatedTxHashes) + requestMissingTransactions(n, shardResolver, neededTxs) + } } func ComputeAndRequestMissingRewardTxs( - nodes []*TestProcessorNode, - generatedDataHashes [][]byte, - shardResolver uint32, - shardRequesters ...uint32, + nodes []*TestProcessorNode, + generatedDataHashes [][]byte, + shardResolver uint32, + shardRequesters ...uint32, ) { - for _, n := range nodes { - if !Uint32InSlice(n.ShardCoordinator.SelfId(), shardRequesters) { - continue - } + for _, n := range nodes { + if !Uint32InSlice(n.ShardCoordinator.SelfId(), shardRequesters) { + continue + } - neededData := getMissingRewardTxsForNode(n, generatedDataHashes) - requestMissingRewardTxs(n, shardResolver, neededData) - } + neededData := getMissingRewardTxsForNode(n, generatedDataHashes) + requestMissingRewardTxs(n, shardResolver, neededData) + } } func getMissingTxsForNode(n *TestProcessorNode, generatedTxHashes [][]byte) [][]byte { - neededTxs := make([][]byte, 0) + neededTxs := make([][]byte, 0) - for i := 0; i < len(generatedTxHashes); i++ { - _, ok := n.ShardDataPool.Transactions().SearchFirstData(generatedTxHashes[i]) - if !ok { - //tx is still missing - neededTxs = append(neededTxs, generatedTxHashes[i]) - } - } + for i := 0; i < len(generatedTxHashes); i++ { + _, ok := n.ShardDataPool.Transactions().SearchFirstData(generatedTxHashes[i]) + if !ok { + //tx is still missing + neededTxs = append(neededTxs, generatedTxHashes[i]) + } + } - return neededTxs + return neededTxs } func getMissingRewardTxsForNode(n *TestProcessorNode, generatedTxHashes [][]byte) [][]byte { - neededTxs := make([][]byte, 0) + neededTxs := make([][]byte, 0) - for i := 0; i < len(generatedTxHashes); i++ { - _, ok := n.ShardDataPool.RewardTransactions().SearchFirstData(generatedTxHashes[i]) - if !ok { - //tx is still missing - neededTxs = append(neededTxs, generatedTxHashes[i]) - } - } + for i := 0; i < len(generatedTxHashes); i++ { + _, ok := n.ShardDataPool.RewardTransactions().SearchFirstData(generatedTxHashes[i]) + if !ok { + //tx is still missing + neededTxs = append(neededTxs, generatedTxHashes[i]) + } + } - return neededTxs + return neededTxs } func requestMissingTransactions(n *TestProcessorNode, shardResolver uint32, neededTxs [][]byte) { - txResolver, _ := n.ResolverFinder.CrossShardResolver(procFactory.TransactionTopic, shardResolver) + txResolver, _ := n.ResolverFinder.CrossShardResolver(procFactory.TransactionTopic, shardResolver) - 
for i := 0; i < len(neededTxs); i++ { - _ = txResolver.RequestDataFromHash(neededTxs[i]) - } + for i := 0; i < len(neededTxs); i++ { + _ = txResolver.RequestDataFromHash(neededTxs[i]) + } } func requestMissingRewardTxs(n *TestProcessorNode, shardResolver uint32, neededData [][]byte) { - dataResolver, _ := n.ResolverFinder.CrossShardResolver(procFactory.RewardsTransactionTopic, shardResolver) + dataResolver, _ := n.ResolverFinder.CrossShardResolver(procFactory.RewardsTransactionTopic, shardResolver) - for i := 0; i < len(neededData); i++ { - _ = dataResolver.RequestDataFromHash(neededData[i]) - } + for i := 0; i < len(neededData); i++ { + _ = dataResolver.RequestDataFromHash(neededData[i]) + } } // CreateRequesterDataPool creates a datapool with a mock txPool func CreateRequesterDataPool( - t *testing.T, - recvTxs map[int]map[string]struct{}, - mutRecvTxs *sync.Mutex, - nodeIndex int, - nbShards uint32, + t *testing.T, + recvTxs map[int]map[string]struct{}, + mutRecvTxs *sync.Mutex, + nodeIndex int, + nbShards uint32, ) dataRetriever.PoolsHolder { - //not allowed to request data from the same shard - return CreateTestShardDataPool( - &mock.ShardedDataStub{ - SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { - assert.Fail(t, "same-shard requesters should not be queried") - return nil, false - }, - ShardDataStoreCalled: func(cacheId string) (c storage.Cacher) { - assert.Fail(t, "same-shard requesters should not be queried") - return nil - }, - AddDataCalled: func(key []byte, data interface{}, cacheId string) { - mutRecvTxs.Lock() - defer mutRecvTxs.Unlock() - - txMap := recvTxs[nodeIndex] - if txMap == nil { - txMap = make(map[string]struct{}) - recvTxs[nodeIndex] = txMap - } - - txMap[string(key)] = struct{}{} - }, - RegisterHandlerCalled: func(i func(key []byte)) { - }, - }, - nbShards, - ) + //not allowed to request data from the same shard + return CreateTestShardDataPool( + &mock.ShardedDataStub{ + SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { + assert.Fail(t, "same-shard requesters should not be queried") + return nil, false + }, + ShardDataStoreCalled: func(cacheId string) (c storage.Cacher) { + assert.Fail(t, "same-shard requesters should not be queried") + return nil + }, + AddDataCalled: func(key []byte, data interface{}, cacheId string) { + mutRecvTxs.Lock() + defer mutRecvTxs.Unlock() + + txMap := recvTxs[nodeIndex] + if txMap == nil { + txMap = make(map[string]struct{}) + recvTxs[nodeIndex] = txMap + } + + txMap[string(key)] = struct{}{} + }, + RegisterHandlerCalled: func(i func(key []byte)) { + }, + }, + nbShards, + ) } // CreateResolversDataPool creates a datapool containing a given number of transactions func CreateResolversDataPool( - t *testing.T, - maxTxs int, - senderShardID uint32, - recvShardId uint32, - shardCoordinator sharding.Coordinator, + t *testing.T, + maxTxs int, + senderShardID uint32, + recvShardId uint32, + shardCoordinator sharding.Coordinator, ) (dataRetriever.PoolsHolder, [][]byte) { - txHashes := make([][]byte, maxTxs) + txHashes := make([][]byte, maxTxs) - txPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache}) + txPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache}) - for i := 0; i < maxTxs; i++ { - tx, txHash := generateValidTx(t, shardCoordinator, senderShardID, recvShardId) - cacherIdentifier := process.ShardCacherIdentifier(1, 0) - txPool.AddData(txHash, tx, cacherIdentifier) - txHashes[i] = txHash - } + for 
i := 0; i < maxTxs; i++ { + tx, txHash := generateValidTx(t, shardCoordinator, senderShardID, recvShardId) + cacherIdentifier := process.ShardCacherIdentifier(1, 0) + txPool.AddData(txHash, tx, cacherIdentifier) + txHashes[i] = txHash + } - return CreateTestShardDataPool(txPool, shardCoordinator.NumberOfShards()), txHashes + return CreateTestShardDataPool(txPool, shardCoordinator.NumberOfShards()), txHashes } func generateValidTx( - t *testing.T, - shardCoordinator sharding.Coordinator, - senderShardId uint32, - receiverShardId uint32, + t *testing.T, + shardCoordinator sharding.Coordinator, + senderShardId uint32, + receiverShardId uint32, ) (*transaction.Transaction, []byte) { - skSender, pkSender, _ := GenerateSkAndPkInShard(shardCoordinator, senderShardId) - pkSenderBuff, _ := pkSender.ToByteArray() + skSender, pkSender, _ := GenerateSkAndPkInShard(shardCoordinator, senderShardId) + pkSenderBuff, _ := pkSender.ToByteArray() - _, pkRecv, _ := GenerateSkAndPkInShard(shardCoordinator, receiverShardId) - pkRecvBuff, _ := pkRecv.ToByteArray() + _, pkRecv, _ := GenerateSkAndPkInShard(shardCoordinator, receiverShardId) + pkRecvBuff, _ := pkRecv.ToByteArray() - accnts, _, _ := CreateAccountsDB(factory.UserAccount) - addrSender, _ := TestAddressConverter.CreateAddressFromPublicKeyBytes(pkSenderBuff) - _, _ = accnts.GetAccountWithJournal(addrSender) - _, _ = accnts.Commit() + accnts, _, _ := CreateAccountsDB(factory.UserAccount) + addrSender, _ := TestAddressConverter.CreateAddressFromPublicKeyBytes(pkSenderBuff) + _, _ = accnts.GetAccountWithJournal(addrSender) + _, _ = accnts.Commit() - mockNode, _ := node.NewNode( - node.WithMarshalizer(TestMarshalizer), - node.WithHasher(TestHasher), - node.WithAddressConverter(TestAddressConverter), - node.WithKeyGen(signing.NewKeyGenerator(kyber.NewBlakeSHA256Ed25519())), - node.WithTxSingleSigner(&singlesig.SchnorrSigner{}), - node.WithTxSignPrivKey(skSender), - node.WithTxSignPubKey(pkSender), - node.WithAccountsAdapter(accnts), - ) + mockNode, _ := node.NewNode( + node.WithMarshalizer(TestMarshalizer), + node.WithHasher(TestHasher), + node.WithAddressConverter(TestAddressConverter), + node.WithKeyGen(signing.NewKeyGenerator(kyber.NewBlakeSHA256Ed25519())), + node.WithTxSingleSigner(&singlesig.SchnorrSigner{}), + node.WithTxSignPrivKey(skSender), + node.WithTxSignPubKey(pkSender), + node.WithAccountsAdapter(accnts), + ) - tx, err := mockNode.GenerateTransaction( - hex.EncodeToString(pkSenderBuff), - hex.EncodeToString(pkRecvBuff), - big.NewInt(1), - "", - ) - assert.Nil(t, err) + tx, err := mockNode.GenerateTransaction( + hex.EncodeToString(pkSenderBuff), + hex.EncodeToString(pkRecvBuff), + big.NewInt(1), + "", + ) + assert.Nil(t, err) - txBuff, _ := TestMarshalizer.Marshal(tx) - txHash := TestHasher.Compute(string(txBuff)) + txBuff, _ := TestMarshalizer.Marshal(tx) + txHash := TestHasher.Compute(string(txBuff)) - return tx, txHash + return tx, txHash } // GetNumTxsWithDst returns the total number of transactions that have a certain destination shard func GetNumTxsWithDst(dstShardId uint32, dataPool dataRetriever.PoolsHolder, nrShards uint32) int { - txPool := dataPool.Transactions() - if txPool == nil { - return 0 - } + txPool := dataPool.Transactions() + if txPool == nil { + return 0 + } - sumTxs := 0 + sumTxs := 0 - for i := uint32(0); i < nrShards; i++ { - strCache := process.ShardCacherIdentifier(i, dstShardId) - txStore := txPool.ShardDataStore(strCache) - if txStore == nil { - continue - } - sumTxs += txStore.Len() - } + for i := uint32(0); i < 
nrShards; i++ { + strCache := process.ShardCacherIdentifier(i, dstShardId) + txStore := txPool.ShardDataStore(strCache) + if txStore == nil { + continue + } + sumTxs += txStore.Len() + } - return sumTxs + return sumTxs } // ProposeAndSyncBlocks proposes and syncs blocks until all transaction pools are empty func ProposeAndSyncBlocks( - t *testing.T, - nodes []*TestProcessorNode, - idxProposers []int, - round uint64, - nonce uint64, + t *testing.T, + nodes []*TestProcessorNode, + idxProposers []int, + round uint64, + nonce uint64, ) (uint64, uint64) { - // if there are many transactions, they might not fit into the block body in only one round - for { - numTxsInPool := 0 - round, nonce = ProposeAndSyncOneBlock(t, nodes, idxProposers, round, nonce) + // if there are many transactions, they might not fit into the block body in only one round + for { + numTxsInPool := 0 + round, nonce = ProposeAndSyncOneBlock(t, nodes, idxProposers, round, nonce) - for _, idProposer := range idxProposers { - proposerNode := nodes[idProposer] - numTxsInPool = GetNumTxsWithDst( - proposerNode.ShardCoordinator.SelfId(), - proposerNode.ShardDataPool, - proposerNode.ShardCoordinator.NumberOfShards(), - ) + for _, idProposer := range idxProposers { + proposerNode := nodes[idProposer] + numTxsInPool = GetNumTxsWithDst( + proposerNode.ShardCoordinator.SelfId(), + proposerNode.ShardDataPool, + proposerNode.ShardCoordinator.NumberOfShards(), + ) - if numTxsInPool > 0 { - break - } - } + if numTxsInPool > 0 { + break + } + } - if numTxsInPool == 0 { - break - } - } + if numTxsInPool == 0 { + break + } + } - if nodes[0].ShardCoordinator.NumberOfShards() == 1 { - return round, nonce - } + if nodes[0].ShardCoordinator.NumberOfShards() == 1 { + return round, nonce + } - // cross shard smart contract call is first processed at sender shard, notarized by metachain, processed at - // shard with smart contract, smart contract result is notarized by metachain, then finally processed at the - // sender shard - numberToPropagateToEveryShard := 5 - for i := 0; i < numberToPropagateToEveryShard; i++ { - round, nonce = ProposeAndSyncOneBlock(t, nodes, idxProposers, round, nonce) - } + // cross shard smart contract call is first processed at sender shard, notarized by metachain, processed at + // shard with smart contract, smart contract result is notarized by metachain, then finally processed at the + // sender shard + numberToPropagateToEveryShard := 5 + for i := 0; i < numberToPropagateToEveryShard; i++ { + round, nonce = ProposeAndSyncOneBlock(t, nodes, idxProposers, round, nonce) + } - return round, nonce + return round, nonce } // ProposeAndSyncOneBlock proposes a block, syncs the block and then increments the round func ProposeAndSyncOneBlock( - t *testing.T, - nodes []*TestProcessorNode, - idxProposers []int, - round uint64, - nonce uint64, + t *testing.T, + nodes []*TestProcessorNode, + idxProposers []int, + round uint64, + nonce uint64, ) (uint64, uint64) { - ProposeBlock(nodes, idxProposers, round, nonce) - SyncBlock(t, nodes, idxProposers, round) - round = IncrementAndPrintRound(round) - nonce++ + ProposeBlock(nodes, idxProposers, round, nonce) + SyncBlock(t, nodes, idxProposers, round) + round = IncrementAndPrintRound(round) + nonce++ - return round, nonce + return round, nonce } // PubKeysMapFromKeysMap returns a map of public keys per shard from the key pairs per shard map. 
func PubKeysMapFromKeysMap(keyPairMap map[uint32][]*TestKeyPair) map[uint32][]string { - keysMap := make(map[uint32][]string, 0) + keysMap := make(map[uint32][]string, 0) - for shardId, pairList := range keyPairMap { - shardKeys := make([]string, len(pairList)) - for i, pair := range pairList { - b, _ := pair.Pk.ToByteArray() - shardKeys[i] = string(b) - } - keysMap[shardId] = shardKeys - } + for shardId, pairList := range keyPairMap { + shardKeys := make([]string, len(pairList)) + for i, pair := range pairList { + b, _ := pair.Pk.ToByteArray() + shardKeys[i] = string(b) + } + keysMap[shardId] = shardKeys + } - return keysMap + return keysMap } // GenValidatorsFromPubKeys generates a map of validators per shard out of public keys map func GenValidatorsFromPubKeys(pubKeysMap map[uint32][]string) map[uint32][]sharding.Validator { - validatorsMap := make(map[uint32][]sharding.Validator) + validatorsMap := make(map[uint32][]sharding.Validator) - for shardId, shardNodesPks := range pubKeysMap { - shardValidators := make([]sharding.Validator, 0) - for i := 0; i < len(shardNodesPks); i++ { - address := []byte(shardNodesPks[i][:32]) - v, _ := sharding.NewValidator(big.NewInt(0), 1, []byte(shardNodesPks[i]), address) - shardValidators = append(shardValidators, v) - } - validatorsMap[shardId] = shardValidators - } + for shardId, shardNodesPks := range pubKeysMap { + shardValidators := make([]sharding.Validator, 0) + for i := 0; i < len(shardNodesPks); i++ { + address := []byte(shardNodesPks[i][:32]) + v, _ := sharding.NewValidator(big.NewInt(0), 1, []byte(shardNodesPks[i]), address) + shardValidators = append(shardValidators, v) + } + validatorsMap[shardId] = shardValidators + } - return validatorsMap + return validatorsMap } // CreateCryptoParams generates the crypto parameters (key pairs, key generator and suite) for multiple nodes func CreateCryptoParams(nodesPerShard int, nbMetaNodes int, nbShards uint32) *CryptoParams { - suite := kyber.NewSuitePairingBn256() - singleSigner := &singlesig.SchnorrSigner{} - keyGen := signing.NewKeyGenerator(suite) - - keysMap := make(map[uint32][]*TestKeyPair) - keyPairs := make([]*TestKeyPair, nodesPerShard) - for shardId := uint32(0); shardId < nbShards; shardId++ { - for n := 0; n < nodesPerShard; n++ { - kp := &TestKeyPair{} - kp.Sk, kp.Pk = keyGen.GeneratePair() - keyPairs[n] = kp - } - keysMap[shardId] = keyPairs - } - - keyPairs = make([]*TestKeyPair, nbMetaNodes) - for n := 0; n < nbMetaNodes; n++ { - kp := &TestKeyPair{} - kp.Sk, kp.Pk = keyGen.GeneratePair() - keyPairs[n] = kp - } - keysMap[sharding.MetachainShardId] = keyPairs - - params := &CryptoParams{ - Keys: keysMap, - KeyGen: keyGen, - SingleSigner: singleSigner, - } - - return params + suite := kyber.NewSuitePairingBn256() + singleSigner := &singlesig.SchnorrSigner{} + keyGen := signing.NewKeyGenerator(suite) + + keysMap := make(map[uint32][]*TestKeyPair) + keyPairs := make([]*TestKeyPair, nodesPerShard) + for shardId := uint32(0); shardId < nbShards; shardId++ { + for n := 0; n < nodesPerShard; n++ { + kp := &TestKeyPair{} + kp.Sk, kp.Pk = keyGen.GeneratePair() + keyPairs[n] = kp + } + keysMap[shardId] = keyPairs + } + + keyPairs = make([]*TestKeyPair, nbMetaNodes) + for n := 0; n < nbMetaNodes; n++ { + kp := &TestKeyPair{} + kp.Sk, kp.Pk = keyGen.GeneratePair() + keyPairs[n] = kp + } + keysMap[sharding.MetachainShardId] = keyPairs + + params := &CryptoParams{ + Keys: keysMap, + KeyGen: keyGen, + SingleSigner: singleSigner, + } + + return params } diff --git 
a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index a336b187a14..f742a7e7950 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -1,47 +1,47 @@ package integrationTests import ( - "context" - "encoding/hex" - "fmt" - "sync/atomic" - "time" - - "github.com/ElrondNetwork/elrond-go/consensus" - "github.com/ElrondNetwork/elrond-go/consensus/spos/sposFactory" - "github.com/ElrondNetwork/elrond-go/core" - "github.com/ElrondNetwork/elrond-go/core/partitioning" - "github.com/ElrondNetwork/elrond-go/crypto" - "github.com/ElrondNetwork/elrond-go/data" - dataBlock "github.com/ElrondNetwork/elrond-go/data/block" - "github.com/ElrondNetwork/elrond-go/data/state" - "github.com/ElrondNetwork/elrond-go/data/state/addressConverters" - dataTransaction "github.com/ElrondNetwork/elrond-go/data/transaction" - "github.com/ElrondNetwork/elrond-go/data/typeConverters/uint64ByteSlice" - "github.com/ElrondNetwork/elrond-go/dataRetriever" - "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/containers" - metafactoryDataRetriever "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/metachain" - factoryDataRetriever "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/shard" - "github.com/ElrondNetwork/elrond-go/dataRetriever/requestHandlers" - "github.com/ElrondNetwork/elrond-go/hashing/sha256" - "github.com/ElrondNetwork/elrond-go/integrationTests/mock" - "github.com/ElrondNetwork/elrond-go/marshal" - "github.com/ElrondNetwork/elrond-go/node" - "github.com/ElrondNetwork/elrond-go/node/external" - "github.com/ElrondNetwork/elrond-go/p2p" - "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/process/block" - "github.com/ElrondNetwork/elrond-go/process/coordinator" - "github.com/ElrondNetwork/elrond-go/process/factory" - metaProcess "github.com/ElrondNetwork/elrond-go/process/factory/metachain" - "github.com/ElrondNetwork/elrond-go/process/factory/shard" - "github.com/ElrondNetwork/elrond-go/process/rewardTransaction" - "github.com/ElrondNetwork/elrond-go/process/smartContract" - "github.com/ElrondNetwork/elrond-go/process/smartContract/hooks" - "github.com/ElrondNetwork/elrond-go/process/transaction" - "github.com/ElrondNetwork/elrond-go/sharding" - vmcommon "github.com/ElrondNetwork/elrond-vm-common" - "github.com/pkg/errors" + "context" + "encoding/hex" + "fmt" + "sync/atomic" + "time" + + "github.com/ElrondNetwork/elrond-go/consensus" + "github.com/ElrondNetwork/elrond-go/consensus/spos/sposFactory" + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/core/partitioning" + "github.com/ElrondNetwork/elrond-go/crypto" + "github.com/ElrondNetwork/elrond-go/data" + dataBlock "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/data/state/addressConverters" + dataTransaction "github.com/ElrondNetwork/elrond-go/data/transaction" + "github.com/ElrondNetwork/elrond-go/data/typeConverters/uint64ByteSlice" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/containers" + metafactoryDataRetriever "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/metachain" + factoryDataRetriever "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/shard" + "github.com/ElrondNetwork/elrond-go/dataRetriever/requestHandlers" + "github.com/ElrondNetwork/elrond-go/hashing/sha256" + "github.com/ElrondNetwork/elrond-go/integrationTests/mock" 
+ "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/node" + "github.com/ElrondNetwork/elrond-go/node/external" + "github.com/ElrondNetwork/elrond-go/p2p" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/block" + "github.com/ElrondNetwork/elrond-go/process/coordinator" + "github.com/ElrondNetwork/elrond-go/process/factory" + metaProcess "github.com/ElrondNetwork/elrond-go/process/factory/metachain" + "github.com/ElrondNetwork/elrond-go/process/factory/shard" + "github.com/ElrondNetwork/elrond-go/process/rewardTransaction" + "github.com/ElrondNetwork/elrond-go/process/smartContract" + "github.com/ElrondNetwork/elrond-go/process/smartContract/hooks" + "github.com/ElrondNetwork/elrond-go/process/transaction" + "github.com/ElrondNetwork/elrond-go/sharding" + vmcommon "github.com/ElrondNetwork/elrond-vm-common" + "github.com/pkg/errors" ) // TestHasher represents a Sha256 hasher @@ -61,769 +61,769 @@ var TestUint64Converter = uint64ByteSlice.NewBigEndianConverter() // TestKeyPair holds a pair of private/public Keys type TestKeyPair struct { - Sk crypto.PrivateKey - Pk crypto.PublicKey + Sk crypto.PrivateKey + Pk crypto.PublicKey } //CryptoParams holds crypto parametres type CryptoParams struct { - KeyGen crypto.KeyGenerator - Keys map[uint32][]*TestKeyPair - SingleSigner crypto.SingleSigner + KeyGen crypto.KeyGenerator + Keys map[uint32][]*TestKeyPair + SingleSigner crypto.SingleSigner } // TestProcessorNode represents a container type of class used in integration tests // with all its fields exported type TestProcessorNode struct { - ShardCoordinator sharding.Coordinator - NodesCoordinator sharding.NodesCoordinator - SpecialAddressHandler process.SpecialAddressHandler - Messenger p2p.Messenger - - OwnAccount *TestWalletAccount - NodeKeys *TestKeyPair - - ShardDataPool dataRetriever.PoolsHolder - MetaDataPool dataRetriever.MetaPoolsHolder - Storage dataRetriever.StorageService - AccntState state.AccountsAdapter - BlockChain data.ChainHandler - GenesisBlocks map[uint32]data.HeaderHandler - - InterceptorsContainer process.InterceptorsContainer - ResolversContainer dataRetriever.ResolversContainer - ResolverFinder dataRetriever.ResolversFinder - RequestHandler process.RequestHandler - - InterimProcContainer process.IntermediateProcessorContainer - TxProcessor process.TransactionProcessor - TxCoordinator process.TransactionCoordinator - ScrForwarder process.IntermediateTransactionHandler - VmProcessor vmcommon.VMExecutionHandler - VmDataGetter vmcommon.VMExecutionHandler - BlockchainHook vmcommon.BlockchainHook - ArgsParser process.ArgumentsParser - ScProcessor process.SmartContractProcessor - RewardsProcessor process.RewardTransactionProcessor - PreProcessorsContainer process.PreProcessorsContainer - - ForkDetector process.ForkDetector - BlockTracker process.BlocksTracker - BlockProcessor process.BlockProcessor - BroadcastMessenger consensus.BroadcastMessenger - - MultiSigner crypto.MultiSigner - - //Node is used to call the functionality already implemented in it - Node *node.Node - ScDataGetter external.ScDataGetter - - CounterHdrRecv int32 - CounterMbRecv int32 - CounterTxRecv int32 - CounterMetaRcv int32 + ShardCoordinator sharding.Coordinator + NodesCoordinator sharding.NodesCoordinator + SpecialAddressHandler process.SpecialAddressHandler + Messenger p2p.Messenger + + OwnAccount *TestWalletAccount + NodeKeys *TestKeyPair + + ShardDataPool dataRetriever.PoolsHolder + MetaDataPool dataRetriever.MetaPoolsHolder + 
Storage dataRetriever.StorageService + AccntState state.AccountsAdapter + BlockChain data.ChainHandler + GenesisBlocks map[uint32]data.HeaderHandler + + InterceptorsContainer process.InterceptorsContainer + ResolversContainer dataRetriever.ResolversContainer + ResolverFinder dataRetriever.ResolversFinder + RequestHandler process.RequestHandler + + InterimProcContainer process.IntermediateProcessorContainer + TxProcessor process.TransactionProcessor + TxCoordinator process.TransactionCoordinator + ScrForwarder process.IntermediateTransactionHandler + VmProcessor vmcommon.VMExecutionHandler + VmDataGetter vmcommon.VMExecutionHandler + BlockchainHook vmcommon.BlockchainHook + ArgsParser process.ArgumentsParser + ScProcessor process.SmartContractProcessor + RewardsProcessor process.RewardTransactionProcessor + PreProcessorsContainer process.PreProcessorsContainer + + ForkDetector process.ForkDetector + BlockTracker process.BlocksTracker + BlockProcessor process.BlockProcessor + BroadcastMessenger consensus.BroadcastMessenger + + MultiSigner crypto.MultiSigner + + //Node is used to call the functionality already implemented in it + Node *node.Node + ScDataGetter external.ScDataGetter + + CounterHdrRecv int32 + CounterMbRecv int32 + CounterTxRecv int32 + CounterMetaRcv int32 } // NewTestProcessorNode returns a new TestProcessorNode instance func NewTestProcessorNode( - maxShards uint32, - nodeShardId uint32, - txSignPrivKeyShardId uint32, - initialNodeAddr string, + maxShards uint32, + nodeShardId uint32, + txSignPrivKeyShardId uint32, + initialNodeAddr string, ) *TestProcessorNode { - shardCoordinator, _ := sharding.NewMultiShardCoordinator(maxShards, nodeShardId) - nodesCoordinator := &mock.NodesCoordinatorMock{} - kg := &mock.KeyGenMock{} - sk, pk := kg.GeneratePair() - - messenger := CreateMessengerWithKadDht(context.Background(), initialNodeAddr) - tpn := &TestProcessorNode{ - ShardCoordinator: shardCoordinator, - Messenger: messenger, - NodesCoordinator: nodesCoordinator, - } - - tpn.NodeKeys = &TestKeyPair{ - Sk: sk, - Pk: pk, - } - tpn.MultiSigner = TestMultiSig - tpn.OwnAccount = CreateTestWalletAccount(shardCoordinator, txSignPrivKeyShardId) - tpn.initDataPools() - tpn.initTestNode() - - return tpn + shardCoordinator, _ := sharding.NewMultiShardCoordinator(maxShards, nodeShardId) + nodesCoordinator := &mock.NodesCoordinatorMock{} + kg := &mock.KeyGenMock{} + sk, pk := kg.GeneratePair() + + messenger := CreateMessengerWithKadDht(context.Background(), initialNodeAddr) + tpn := &TestProcessorNode{ + ShardCoordinator: shardCoordinator, + Messenger: messenger, + NodesCoordinator: nodesCoordinator, + } + + tpn.NodeKeys = &TestKeyPair{ + Sk: sk, + Pk: pk, + } + tpn.MultiSigner = TestMultiSig + tpn.OwnAccount = CreateTestWalletAccount(shardCoordinator, txSignPrivKeyShardId) + tpn.initDataPools() + tpn.initTestNode() + + return tpn } // NewTestProcessorNodeWithCustomDataPool returns a new TestProcessorNode instance with the given data pool func NewTestProcessorNodeWithCustomDataPool(maxShards uint32, nodeShardId uint32, txSignPrivKeyShardId uint32, initialNodeAddr string, dPool dataRetriever.PoolsHolder) *TestProcessorNode { - shardCoordinator, _ := sharding.NewMultiShardCoordinator(maxShards, nodeShardId) - - messenger := CreateMessengerWithKadDht(context.Background(), initialNodeAddr) - nodesCoordinator := &mock.NodesCoordinatorMock{} - kg := &mock.KeyGenMock{} - sk, pk := kg.GeneratePair() - - tpn := &TestProcessorNode{ - ShardCoordinator: shardCoordinator, - Messenger: messenger, - 
NodesCoordinator: nodesCoordinator, - } - - tpn.NodeKeys = &TestKeyPair{ - Sk: sk, - Pk: pk, - } - tpn.MultiSigner = TestMultiSig - tpn.OwnAccount = CreateTestWalletAccount(shardCoordinator, txSignPrivKeyShardId) - if tpn.ShardCoordinator.SelfId() != sharding.MetachainShardId { - tpn.ShardDataPool = dPool - } else { - tpn.initDataPools() - } - tpn.initTestNode() - - return tpn + shardCoordinator, _ := sharding.NewMultiShardCoordinator(maxShards, nodeShardId) + + messenger := CreateMessengerWithKadDht(context.Background(), initialNodeAddr) + nodesCoordinator := &mock.NodesCoordinatorMock{} + kg := &mock.KeyGenMock{} + sk, pk := kg.GeneratePair() + + tpn := &TestProcessorNode{ + ShardCoordinator: shardCoordinator, + Messenger: messenger, + NodesCoordinator: nodesCoordinator, + } + + tpn.NodeKeys = &TestKeyPair{ + Sk: sk, + Pk: pk, + } + tpn.MultiSigner = TestMultiSig + tpn.OwnAccount = CreateTestWalletAccount(shardCoordinator, txSignPrivKeyShardId) + if tpn.ShardCoordinator.SelfId() != sharding.MetachainShardId { + tpn.ShardDataPool = dPool + } else { + tpn.initDataPools() + } + tpn.initTestNode() + + return tpn } func (tpn *TestProcessorNode) initTestNode() { - tpn.SpecialAddressHandler = &mock.SpecialAddressHandlerMock{ - ShardCoordinator:tpn.ShardCoordinator, - AdrConv: TestAddressConverter, - } - tpn.initStorage() - tpn.AccntState, _, _ = CreateAccountsDB(0) - tpn.initChainHandler() - tpn.GenesisBlocks = CreateGenesisBlocks(tpn.ShardCoordinator) - tpn.initInterceptors() - tpn.initResolvers() - tpn.initInnerProcessors() - tpn.initBlockProcessor() - tpn.BroadcastMessenger, _ = sposFactory.GetBroadcastMessenger( - TestMarshalizer, - tpn.Messenger, - tpn.ShardCoordinator, - tpn.OwnAccount.SkTxSign, - tpn.OwnAccount.SingleSigner, - ) - tpn.setGenesisBlock() - tpn.initNode() - tpn.ScDataGetter, _ = smartContract.NewSCDataGetter(tpn.VmDataGetter) - tpn.addHandlersForCounters() + tpn.SpecialAddressHandler = &mock.SpecialAddressHandlerMock{ + ShardCoordinator: tpn.ShardCoordinator, + AdrConv: TestAddressConverter, + } + tpn.initStorage() + tpn.AccntState, _, _ = CreateAccountsDB(0) + tpn.initChainHandler() + tpn.GenesisBlocks = CreateGenesisBlocks(tpn.ShardCoordinator) + tpn.initInterceptors() + tpn.initResolvers() + tpn.initInnerProcessors() + tpn.initBlockProcessor() + tpn.BroadcastMessenger, _ = sposFactory.GetBroadcastMessenger( + TestMarshalizer, + tpn.Messenger, + tpn.ShardCoordinator, + tpn.OwnAccount.SkTxSign, + tpn.OwnAccount.SingleSigner, + ) + tpn.setGenesisBlock() + tpn.initNode() + tpn.ScDataGetter, _ = smartContract.NewSCDataGetter(tpn.VmDataGetter) + tpn.addHandlersForCounters() } func (tpn *TestProcessorNode) initDataPools() { - if tpn.ShardCoordinator.SelfId() == sharding.MetachainShardId { - tpn.MetaDataPool = CreateTestMetaDataPool() - } else { - tpn.ShardDataPool = CreateTestShardDataPool(nil, tpn.ShardCoordinator.NumberOfShards()) - } + if tpn.ShardCoordinator.SelfId() == sharding.MetachainShardId { + tpn.MetaDataPool = CreateTestMetaDataPool() + } else { + tpn.ShardDataPool = CreateTestShardDataPool(nil, tpn.ShardCoordinator.NumberOfShards()) + } } func (tpn *TestProcessorNode) initStorage() { - if tpn.ShardCoordinator.SelfId() == sharding.MetachainShardId { - tpn.Storage = CreateMetaStore(tpn.ShardCoordinator) - } else { - tpn.Storage = CreateShardStore(tpn.ShardCoordinator.NumberOfShards()) - } + if tpn.ShardCoordinator.SelfId() == sharding.MetachainShardId { + tpn.Storage = CreateMetaStore(tpn.ShardCoordinator) + } else { + tpn.Storage = 
CreateShardStore(tpn.ShardCoordinator.NumberOfShards()) + } } func (tpn *TestProcessorNode) initChainHandler() { - if tpn.ShardCoordinator.SelfId() == sharding.MetachainShardId { - tpn.BlockChain = CreateMetaChain() - } else { - tpn.BlockChain = CreateShardChain() - } + if tpn.ShardCoordinator.SelfId() == sharding.MetachainShardId { + tpn.BlockChain = CreateMetaChain() + } else { + tpn.BlockChain = CreateShardChain() + } } func (tpn *TestProcessorNode) initInterceptors() { - var err error - if tpn.ShardCoordinator.SelfId() == sharding.MetachainShardId { - interceptorContainerFactory, _ := metaProcess.NewInterceptorsContainerFactory( - tpn.ShardCoordinator, - tpn.NodesCoordinator, - tpn.Messenger, - tpn.Storage, - TestMarshalizer, - TestHasher, - TestMultiSig, - tpn.MetaDataPool, - ) - - tpn.InterceptorsContainer, err = interceptorContainerFactory.Create() - if err != nil { - fmt.Println(err.Error()) - } - } else { - interceptorContainerFactory, _ := shard.NewInterceptorsContainerFactory( - tpn.ShardCoordinator, - tpn.NodesCoordinator, - tpn.Messenger, - tpn.Storage, - TestMarshalizer, - TestHasher, - tpn.OwnAccount.KeygenTxSign, - tpn.OwnAccount.SingleSigner, - TestMultiSig, - tpn.ShardDataPool, - TestAddressConverter, - ) - - tpn.InterceptorsContainer, err = interceptorContainerFactory.Create() - if err != nil { - fmt.Println(err.Error()) - } - } + var err error + if tpn.ShardCoordinator.SelfId() == sharding.MetachainShardId { + interceptorContainerFactory, _ := metaProcess.NewInterceptorsContainerFactory( + tpn.ShardCoordinator, + tpn.NodesCoordinator, + tpn.Messenger, + tpn.Storage, + TestMarshalizer, + TestHasher, + TestMultiSig, + tpn.MetaDataPool, + ) + + tpn.InterceptorsContainer, err = interceptorContainerFactory.Create() + if err != nil { + fmt.Println(err.Error()) + } + } else { + interceptorContainerFactory, _ := shard.NewInterceptorsContainerFactory( + tpn.ShardCoordinator, + tpn.NodesCoordinator, + tpn.Messenger, + tpn.Storage, + TestMarshalizer, + TestHasher, + tpn.OwnAccount.KeygenTxSign, + tpn.OwnAccount.SingleSigner, + TestMultiSig, + tpn.ShardDataPool, + TestAddressConverter, + ) + + tpn.InterceptorsContainer, err = interceptorContainerFactory.Create() + if err != nil { + fmt.Println(err.Error()) + } + } } func (tpn *TestProcessorNode) initResolvers() { - dataPacker, _ := partitioning.NewSizeDataPacker(TestMarshalizer) - - if tpn.ShardCoordinator.SelfId() == sharding.MetachainShardId { - resolversContainerFactory, _ := metafactoryDataRetriever.NewResolversContainerFactory( - tpn.ShardCoordinator, - tpn.Messenger, - tpn.Storage, - TestMarshalizer, - tpn.MetaDataPool, - TestUint64Converter, - ) - - tpn.ResolversContainer, _ = resolversContainerFactory.Create() - tpn.ResolverFinder, _ = containers.NewResolversFinder(tpn.ResolversContainer, tpn.ShardCoordinator) - tpn.RequestHandler, _ = requestHandlers.NewMetaResolverRequestHandler( - tpn.ResolverFinder, - factory.ShardHeadersForMetachainTopic, - ) - } else { - resolversContainerFactory, _ := factoryDataRetriever.NewResolversContainerFactory( - tpn.ShardCoordinator, - tpn.Messenger, - tpn.Storage, - TestMarshalizer, - tpn.ShardDataPool, - TestUint64Converter, - dataPacker, - ) - - tpn.ResolversContainer, _ = resolversContainerFactory.Create() - tpn.ResolverFinder, _ = containers.NewResolversFinder(tpn.ResolversContainer, tpn.ShardCoordinator) - tpn.RequestHandler, _ = requestHandlers.NewShardResolverRequestHandler( - tpn.ResolverFinder, - factory.TransactionTopic, - factory.UnsignedTransactionTopic, - 
factory.RewardsTransactionTopic, - factory.MiniBlocksTopic, - factory.MetachainBlocksTopic, - 100, - ) - } + dataPacker, _ := partitioning.NewSizeDataPacker(TestMarshalizer) + + if tpn.ShardCoordinator.SelfId() == sharding.MetachainShardId { + resolversContainerFactory, _ := metafactoryDataRetriever.NewResolversContainerFactory( + tpn.ShardCoordinator, + tpn.Messenger, + tpn.Storage, + TestMarshalizer, + tpn.MetaDataPool, + TestUint64Converter, + ) + + tpn.ResolversContainer, _ = resolversContainerFactory.Create() + tpn.ResolverFinder, _ = containers.NewResolversFinder(tpn.ResolversContainer, tpn.ShardCoordinator) + tpn.RequestHandler, _ = requestHandlers.NewMetaResolverRequestHandler( + tpn.ResolverFinder, + factory.ShardHeadersForMetachainTopic, + ) + } else { + resolversContainerFactory, _ := factoryDataRetriever.NewResolversContainerFactory( + tpn.ShardCoordinator, + tpn.Messenger, + tpn.Storage, + TestMarshalizer, + tpn.ShardDataPool, + TestUint64Converter, + dataPacker, + ) + + tpn.ResolversContainer, _ = resolversContainerFactory.Create() + tpn.ResolverFinder, _ = containers.NewResolversFinder(tpn.ResolversContainer, tpn.ShardCoordinator) + tpn.RequestHandler, _ = requestHandlers.NewShardResolverRequestHandler( + tpn.ResolverFinder, + factory.TransactionTopic, + factory.UnsignedTransactionTopic, + factory.RewardsTransactionTopic, + factory.MiniBlocksTopic, + factory.MetachainBlocksTopic, + 100, + ) + } } func (tpn *TestProcessorNode) initInnerProcessors() { - if tpn.ShardCoordinator.SelfId() == sharding.MetachainShardId { - return - } - - interimProcFactory, _ := shard.NewIntermediateProcessorsContainerFactory( - tpn.ShardCoordinator, - TestMarshalizer, - TestHasher, - TestAddressConverter, - tpn.SpecialAddressHandler, - tpn.Storage, - tpn.ShardDataPool, - ) - - tpn.InterimProcContainer, _ = interimProcFactory.Create() - tpn.ScrForwarder, _ = tpn.InterimProcContainer.Get(dataBlock.SmartContractResultBlock) - rewardsInter, _ := tpn.InterimProcContainer.Get(dataBlock.RewardsBlock) - rewardsHandler, _ := rewardsInter.(process.TransactionFeeHandler) - - tpn.RewardsProcessor, _ = rewardTransaction.NewRewardTxProcessor( - tpn.AccntState, - TestAddressConverter, - tpn.ShardCoordinator, - rewardsInter, - ) - - tpn.VmProcessor, tpn.BlockchainHook = CreateIeleVMAndBlockchainHook(tpn.AccntState) - tpn.VmDataGetter, _ = CreateIeleVMAndBlockchainHook(tpn.AccntState) - - vmContainer := &mock.VMContainerMock{ - GetCalled: func(key []byte) (handler vmcommon.VMExecutionHandler, e error) { - return tpn.VmProcessor, nil - }} - - tpn.ArgsParser, _ = smartContract.NewAtArgumentParser() - tpn.ScProcessor, _ = smartContract.NewSmartContractProcessor( - vmContainer, - tpn.ArgsParser, - TestHasher, - TestMarshalizer, - tpn.AccntState, - tpn.BlockchainHook.(*hooks.VMAccountsDB), - TestAddressConverter, - tpn.ShardCoordinator, - tpn.ScrForwarder, - rewardsHandler, - ) - - txTypeHandler, _ := coordinator.NewTxTypeHandler(TestAddressConverter, tpn.ShardCoordinator, tpn.AccntState) - - tpn.TxProcessor, _ = transaction.NewTxProcessor( - tpn.AccntState, - TestHasher, - TestAddressConverter, - TestMarshalizer, - tpn.ShardCoordinator, - tpn.ScProcessor, - rewardsHandler, - txTypeHandler, - ) - - fact, _ := shard.NewPreProcessorsContainerFactory( - tpn.ShardCoordinator, - tpn.Storage, - TestMarshalizer, - TestHasher, - tpn.ShardDataPool, - TestAddressConverter, - tpn.AccntState, - tpn.RequestHandler, - tpn.TxProcessor, - tpn.ScProcessor, - tpn.ScProcessor.(process.SmartContractResultProcessor), - 
tpn.RewardsProcessor, - ) - tpn.PreProcessorsContainer, _ = fact.Create() - - tpn.TxCoordinator, _ = coordinator.NewTransactionCoordinator( - tpn.ShardCoordinator, - tpn.AccntState, - tpn.ShardDataPool, - tpn.RequestHandler, - tpn.PreProcessorsContainer, - tpn.InterimProcContainer, - ) + if tpn.ShardCoordinator.SelfId() == sharding.MetachainShardId { + return + } + + interimProcFactory, _ := shard.NewIntermediateProcessorsContainerFactory( + tpn.ShardCoordinator, + TestMarshalizer, + TestHasher, + TestAddressConverter, + tpn.SpecialAddressHandler, + tpn.Storage, + tpn.ShardDataPool, + ) + + tpn.InterimProcContainer, _ = interimProcFactory.Create() + tpn.ScrForwarder, _ = tpn.InterimProcContainer.Get(dataBlock.SmartContractResultBlock) + rewardsInter, _ := tpn.InterimProcContainer.Get(dataBlock.RewardsBlock) + rewardsHandler, _ := rewardsInter.(process.TransactionFeeHandler) + + tpn.RewardsProcessor, _ = rewardTransaction.NewRewardTxProcessor( + tpn.AccntState, + TestAddressConverter, + tpn.ShardCoordinator, + rewardsInter, + ) + + tpn.VmProcessor, tpn.BlockchainHook = CreateIeleVMAndBlockchainHook(tpn.AccntState) + tpn.VmDataGetter, _ = CreateIeleVMAndBlockchainHook(tpn.AccntState) + + vmContainer := &mock.VMContainerMock{ + GetCalled: func(key []byte) (handler vmcommon.VMExecutionHandler, e error) { + return tpn.VmProcessor, nil + }} + + tpn.ArgsParser, _ = smartContract.NewAtArgumentParser() + tpn.ScProcessor, _ = smartContract.NewSmartContractProcessor( + vmContainer, + tpn.ArgsParser, + TestHasher, + TestMarshalizer, + tpn.AccntState, + tpn.BlockchainHook.(*hooks.VMAccountsDB), + TestAddressConverter, + tpn.ShardCoordinator, + tpn.ScrForwarder, + rewardsHandler, + ) + + txTypeHandler, _ := coordinator.NewTxTypeHandler(TestAddressConverter, tpn.ShardCoordinator, tpn.AccntState) + + tpn.TxProcessor, _ = transaction.NewTxProcessor( + tpn.AccntState, + TestHasher, + TestAddressConverter, + TestMarshalizer, + tpn.ShardCoordinator, + tpn.ScProcessor, + rewardsHandler, + txTypeHandler, + ) + + fact, _ := shard.NewPreProcessorsContainerFactory( + tpn.ShardCoordinator, + tpn.Storage, + TestMarshalizer, + TestHasher, + tpn.ShardDataPool, + TestAddressConverter, + tpn.AccntState, + tpn.RequestHandler, + tpn.TxProcessor, + tpn.ScProcessor, + tpn.ScProcessor.(process.SmartContractResultProcessor), + tpn.RewardsProcessor, + ) + tpn.PreProcessorsContainer, _ = fact.Create() + + tpn.TxCoordinator, _ = coordinator.NewTransactionCoordinator( + tpn.ShardCoordinator, + tpn.AccntState, + tpn.ShardDataPool, + tpn.RequestHandler, + tpn.PreProcessorsContainer, + tpn.InterimProcContainer, + ) } func (tpn *TestProcessorNode) initBlockProcessor() { - var err error - - tpn.ForkDetector = &mock.ForkDetectorMock{ - AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeader data.HeaderHandler, finalHeaderHash []byte) error { - return nil - }, - GetHighestFinalBlockNonceCalled: func() uint64 { - return 0 - }, - ProbableHighestNonceCalled: func() uint64 { - return 0 - }, - } - - tpn.BlockTracker = &mock.BlocksTrackerMock{ - AddBlockCalled: func(headerHandler data.HeaderHandler) { - }, - RemoveNotarisedBlocksCalled: func(headerHandler data.HeaderHandler) error { - return nil - }, - UnnotarisedBlocksCalled: func() []data.HeaderHandler { - return make([]data.HeaderHandler, 0) - }, - } - - if tpn.ShardCoordinator.SelfId() == sharding.MetachainShardId { - tpn.BlockProcessor, err = block.NewMetaProcessor( - &mock.ServiceContainerMock{}, - tpn.AccntState, - tpn.MetaDataPool, - 
tpn.ForkDetector, - tpn.ShardCoordinator, - tpn.NodesCoordinator, - tpn.SpecialAddressHandler, - TestHasher, - TestMarshalizer, - tpn.Storage, - tpn.GenesisBlocks, - tpn.RequestHandler, - TestUint64Converter, - ) - } else { - tpn.BlockProcessor, err = block.NewShardProcessor( - nil, - tpn.ShardDataPool, - tpn.Storage, - TestHasher, - TestMarshalizer, - tpn.AccntState, - tpn.ShardCoordinator, - tpn.NodesCoordinator, - tpn.SpecialAddressHandler, - tpn.ForkDetector, - tpn.BlockTracker, - tpn.GenesisBlocks, - tpn.RequestHandler, - tpn.TxCoordinator, - TestUint64Converter, - ) - } - - if err != nil { - fmt.Printf("Error creating blockprocessor: %s\n", err.Error()) - } + var err error + + tpn.ForkDetector = &mock.ForkDetectorMock{ + AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeader data.HeaderHandler, finalHeaderHash []byte) error { + return nil + }, + GetHighestFinalBlockNonceCalled: func() uint64 { + return 0 + }, + ProbableHighestNonceCalled: func() uint64 { + return 0 + }, + } + + tpn.BlockTracker = &mock.BlocksTrackerMock{ + AddBlockCalled: func(headerHandler data.HeaderHandler) { + }, + RemoveNotarisedBlocksCalled: func(headerHandler data.HeaderHandler) error { + return nil + }, + UnnotarisedBlocksCalled: func() []data.HeaderHandler { + return make([]data.HeaderHandler, 0) + }, + } + + if tpn.ShardCoordinator.SelfId() == sharding.MetachainShardId { + tpn.BlockProcessor, err = block.NewMetaProcessor( + &mock.ServiceContainerMock{}, + tpn.AccntState, + tpn.MetaDataPool, + tpn.ForkDetector, + tpn.ShardCoordinator, + tpn.NodesCoordinator, + tpn.SpecialAddressHandler, + TestHasher, + TestMarshalizer, + tpn.Storage, + tpn.GenesisBlocks, + tpn.RequestHandler, + TestUint64Converter, + ) + } else { + tpn.BlockProcessor, err = block.NewShardProcessor( + nil, + tpn.ShardDataPool, + tpn.Storage, + TestHasher, + TestMarshalizer, + tpn.AccntState, + tpn.ShardCoordinator, + tpn.NodesCoordinator, + tpn.SpecialAddressHandler, + tpn.ForkDetector, + tpn.BlockTracker, + tpn.GenesisBlocks, + tpn.RequestHandler, + tpn.TxCoordinator, + TestUint64Converter, + ) + } + + if err != nil { + fmt.Printf("Error creating blockprocessor: %s\n", err.Error()) + } } func (tpn *TestProcessorNode) setGenesisBlock() { - genesisBlock := tpn.GenesisBlocks[tpn.ShardCoordinator.SelfId()] - _ = tpn.BlockChain.SetGenesisHeader(genesisBlock) - hash, _ := core.CalculateHash(TestMarshalizer, TestHasher, genesisBlock) - tpn.BlockChain.SetGenesisHeaderHash(hash) + genesisBlock := tpn.GenesisBlocks[tpn.ShardCoordinator.SelfId()] + _ = tpn.BlockChain.SetGenesisHeader(genesisBlock) + hash, _ := core.CalculateHash(TestMarshalizer, TestHasher, genesisBlock) + tpn.BlockChain.SetGenesisHeaderHash(hash) } func (tpn *TestProcessorNode) initNode() { - var err error - - tpn.Node, err = node.NewNode( - node.WithMessenger(tpn.Messenger), - node.WithMarshalizer(TestMarshalizer), - node.WithHasher(TestHasher), - node.WithHasher(TestHasher), - node.WithAddressConverter(TestAddressConverter), - node.WithAccountsAdapter(tpn.AccntState), - node.WithKeyGen(tpn.OwnAccount.KeygenTxSign), - node.WithShardCoordinator(tpn.ShardCoordinator), - node.WithNodesCoordinator(tpn.NodesCoordinator), - node.WithBlockChain(tpn.BlockChain), - node.WithUint64ByteSliceConverter(TestUint64Converter), - node.WithMultiSigner(tpn.MultiSigner), - node.WithSingleSigner(tpn.OwnAccount.SingleSigner), - node.WithTxSignPrivKey(tpn.OwnAccount.SkTxSign), - node.WithTxSignPubKey(tpn.OwnAccount.PkTxSign), - 
node.WithPrivKey(tpn.NodeKeys.Sk), - node.WithPubKey(tpn.NodeKeys.Pk), - node.WithInterceptorsContainer(tpn.InterceptorsContainer), - node.WithResolversFinder(tpn.ResolverFinder), - node.WithBlockProcessor(tpn.BlockProcessor), - node.WithTxSingleSigner(tpn.OwnAccount.SingleSigner), - node.WithDataStore(tpn.Storage), - node.WithSyncer(&mock.SyncTimerMock{}), - ) - if err != nil { - fmt.Printf("Error creating node: %s\n", err.Error()) - } - - if tpn.ShardCoordinator.SelfId() == sharding.MetachainShardId { - err = tpn.Node.ApplyOptions( - node.WithMetaDataPool(tpn.MetaDataPool), - ) - } else { - err = tpn.Node.ApplyOptions( - node.WithDataPool(tpn.ShardDataPool), - ) - } - - if err != nil { - fmt.Printf("Error creating node: %s\n", err.Error()) - } + var err error + + tpn.Node, err = node.NewNode( + node.WithMessenger(tpn.Messenger), + node.WithMarshalizer(TestMarshalizer), + node.WithHasher(TestHasher), + node.WithHasher(TestHasher), + node.WithAddressConverter(TestAddressConverter), + node.WithAccountsAdapter(tpn.AccntState), + node.WithKeyGen(tpn.OwnAccount.KeygenTxSign), + node.WithShardCoordinator(tpn.ShardCoordinator), + node.WithNodesCoordinator(tpn.NodesCoordinator), + node.WithBlockChain(tpn.BlockChain), + node.WithUint64ByteSliceConverter(TestUint64Converter), + node.WithMultiSigner(tpn.MultiSigner), + node.WithSingleSigner(tpn.OwnAccount.SingleSigner), + node.WithTxSignPrivKey(tpn.OwnAccount.SkTxSign), + node.WithTxSignPubKey(tpn.OwnAccount.PkTxSign), + node.WithPrivKey(tpn.NodeKeys.Sk), + node.WithPubKey(tpn.NodeKeys.Pk), + node.WithInterceptorsContainer(tpn.InterceptorsContainer), + node.WithResolversFinder(tpn.ResolverFinder), + node.WithBlockProcessor(tpn.BlockProcessor), + node.WithTxSingleSigner(tpn.OwnAccount.SingleSigner), + node.WithDataStore(tpn.Storage), + node.WithSyncer(&mock.SyncTimerMock{}), + ) + if err != nil { + fmt.Printf("Error creating node: %s\n", err.Error()) + } + + if tpn.ShardCoordinator.SelfId() == sharding.MetachainShardId { + err = tpn.Node.ApplyOptions( + node.WithMetaDataPool(tpn.MetaDataPool), + ) + } else { + err = tpn.Node.ApplyOptions( + node.WithDataPool(tpn.ShardDataPool), + ) + } + + if err != nil { + fmt.Printf("Error creating node: %s\n", err.Error()) + } } // SendTransaction can send a transaction (it does the dispatching) func (tpn *TestProcessorNode) SendTransaction(tx *dataTransaction.Transaction) (string, error) { - txHash, err := tpn.Node.SendTransaction( - tx.Nonce, - hex.EncodeToString(tx.SndAddr), - hex.EncodeToString(tx.RcvAddr), - tx.Value, - tx.GasPrice, - tx.GasLimit, - tx.Data, - tx.Signature, - ) - return txHash, err + txHash, err := tpn.Node.SendTransaction( + tx.Nonce, + hex.EncodeToString(tx.SndAddr), + hex.EncodeToString(tx.RcvAddr), + tx.Value, + tx.GasPrice, + tx.GasLimit, + tx.Data, + tx.Signature, + ) + return txHash, err } func (tpn *TestProcessorNode) addHandlersForCounters() { - metaHandlers := func(key []byte) { - atomic.AddInt32(&tpn.CounterMetaRcv, 1) - } - hdrHandlers := func(key []byte) { - atomic.AddInt32(&tpn.CounterHdrRecv, 1) - } - - if tpn.ShardCoordinator.SelfId() == sharding.MetachainShardId { - tpn.MetaDataPool.ShardHeaders().RegisterHandler(hdrHandlers) - tpn.MetaDataPool.MetaChainBlocks().RegisterHandler(metaHandlers) - } else { - txHandler := func(key []byte) { - atomic.AddInt32(&tpn.CounterTxRecv, 1) - } - mbHandlers := func(key []byte) { - atomic.AddInt32(&tpn.CounterMbRecv, 1) - } - - tpn.ShardDataPool.UnsignedTransactions().RegisterHandler(txHandler) - 
tpn.ShardDataPool.Transactions().RegisterHandler(txHandler) - tpn.ShardDataPool.RewardTransactions().RegisterHandler(txHandler) - tpn.ShardDataPool.Headers().RegisterHandler(hdrHandlers) - tpn.ShardDataPool.MetaBlocks().RegisterHandler(metaHandlers) - tpn.ShardDataPool.MiniBlocks().RegisterHandler(mbHandlers) - } + metaHandlers := func(key []byte) { + atomic.AddInt32(&tpn.CounterMetaRcv, 1) + } + hdrHandlers := func(key []byte) { + atomic.AddInt32(&tpn.CounterHdrRecv, 1) + } + + if tpn.ShardCoordinator.SelfId() == sharding.MetachainShardId { + tpn.MetaDataPool.ShardHeaders().RegisterHandler(hdrHandlers) + tpn.MetaDataPool.MetaChainBlocks().RegisterHandler(metaHandlers) + } else { + txHandler := func(key []byte) { + atomic.AddInt32(&tpn.CounterTxRecv, 1) + } + mbHandlers := func(key []byte) { + atomic.AddInt32(&tpn.CounterMbRecv, 1) + } + + tpn.ShardDataPool.UnsignedTransactions().RegisterHandler(txHandler) + tpn.ShardDataPool.Transactions().RegisterHandler(txHandler) + tpn.ShardDataPool.RewardTransactions().RegisterHandler(txHandler) + tpn.ShardDataPool.Headers().RegisterHandler(hdrHandlers) + tpn.ShardDataPool.MetaBlocks().RegisterHandler(metaHandlers) + tpn.ShardDataPool.MiniBlocks().RegisterHandler(mbHandlers) + } } // LoadTxSignSkBytes alters the already generated sk/pk pair func (tpn *TestProcessorNode) LoadTxSignSkBytes(skBytes []byte) { - tpn.OwnAccount.LoadTxSignSkBytes(skBytes) + tpn.OwnAccount.LoadTxSignSkBytes(skBytes) } // ProposeBlock proposes a new block func (tpn *TestProcessorNode) ProposeBlock(round uint64, nonce uint64) (data.BodyHandler, data.HeaderHandler, [][]byte) { - startTime := time.Now() - maxTime := time.Second - - haveTime := func() bool { - elapsedTime := time.Since(startTime) - remainingTime := maxTime - elapsedTime - return remainingTime > 0 - } - - blockBody, err := tpn.BlockProcessor.CreateBlockBody(round, haveTime) - if err != nil { - fmt.Println(err.Error()) - return nil, nil, nil - } - blockHeader, err := tpn.BlockProcessor.CreateBlockHeader(blockBody, round, haveTime) - if err != nil { - fmt.Println(err.Error()) - return nil, nil, nil - } - - blockHeader.SetRound(round) - blockHeader.SetNonce(nonce) - blockHeader.SetPubKeysBitmap([]byte{1}) - sig, _ := TestMultiSig.AggregateSigs(nil) - blockHeader.SetSignature(sig) - currHdr := tpn.BlockChain.GetCurrentBlockHeader() - if currHdr == nil { - currHdr = tpn.BlockChain.GetGenesisHeader() - } - - buff, _ := TestMarshalizer.Marshal(currHdr) - blockHeader.SetPrevHash(TestHasher.Compute(string(buff))) - blockHeader.SetPrevRandSeed(currHdr.GetRandSeed()) - blockHeader.SetRandSeed(sig) - - shardBlockBody, ok := blockBody.(dataBlock.Body) - txHashes := make([][]byte, 0) - if !ok { - return blockBody, blockHeader, txHashes - } - - for _, mb := range shardBlockBody { - for _, hash := range mb.TxHashes { - copiedHash := make([]byte, len(hash)) - copy(copiedHash, hash) - txHashes = append(txHashes, copiedHash) - } - } - - return blockBody, blockHeader, txHashes + startTime := time.Now() + maxTime := time.Second + + haveTime := func() bool { + elapsedTime := time.Since(startTime) + remainingTime := maxTime - elapsedTime + return remainingTime > 0 + } + + blockBody, err := tpn.BlockProcessor.CreateBlockBody(round, haveTime) + if err != nil { + fmt.Println(err.Error()) + return nil, nil, nil + } + blockHeader, err := tpn.BlockProcessor.CreateBlockHeader(blockBody, round, haveTime) + if err != nil { + fmt.Println(err.Error()) + return nil, nil, nil + } + + blockHeader.SetRound(round) + blockHeader.SetNonce(nonce) + 
blockHeader.SetPubKeysBitmap([]byte{1}) + sig, _ := TestMultiSig.AggregateSigs(nil) + blockHeader.SetSignature(sig) + currHdr := tpn.BlockChain.GetCurrentBlockHeader() + if currHdr == nil { + currHdr = tpn.BlockChain.GetGenesisHeader() + } + + buff, _ := TestMarshalizer.Marshal(currHdr) + blockHeader.SetPrevHash(TestHasher.Compute(string(buff))) + blockHeader.SetPrevRandSeed(currHdr.GetRandSeed()) + blockHeader.SetRandSeed(sig) + + shardBlockBody, ok := blockBody.(dataBlock.Body) + txHashes := make([][]byte, 0) + if !ok { + return blockBody, blockHeader, txHashes + } + + for _, mb := range shardBlockBody { + for _, hash := range mb.TxHashes { + copiedHash := make([]byte, len(hash)) + copy(copiedHash, hash) + txHashes = append(txHashes, copiedHash) + } + } + + return blockBody, blockHeader, txHashes } // BroadcastBlock broadcasts the block and body to the connected peers func (tpn *TestProcessorNode) BroadcastBlock(body data.BodyHandler, header data.HeaderHandler) { - _ = tpn.BroadcastMessenger.BroadcastBlock(body, header) - _ = tpn.BroadcastMessenger.BroadcastHeader(header) - miniBlocks, transactions, _ := tpn.BlockProcessor.MarshalizedDataToBroadcast(header, body) - _ = tpn.BroadcastMessenger.BroadcastMiniBlocks(miniBlocks) - _ = tpn.BroadcastMessenger.BroadcastTransactions(transactions) + _ = tpn.BroadcastMessenger.BroadcastBlock(body, header) + _ = tpn.BroadcastMessenger.BroadcastHeader(header) + miniBlocks, transactions, _ := tpn.BlockProcessor.MarshalizedDataToBroadcast(header, body) + _ = tpn.BroadcastMessenger.BroadcastMiniBlocks(miniBlocks) + _ = tpn.BroadcastMessenger.BroadcastTransactions(transactions) } // CommitBlock commits the block and body func (tpn *TestProcessorNode) CommitBlock(body data.BodyHandler, header data.HeaderHandler) { - _ = tpn.BlockProcessor.CommitBlock(tpn.BlockChain, header, body) + _ = tpn.BlockProcessor.CommitBlock(tpn.BlockChain, header, body) } // GetShardHeader returns the first *dataBlock.Header stored in datapools having the nonce provided as parameter func (tpn *TestProcessorNode) GetShardHeader(nonce uint64) (*dataBlock.Header, error) { - invalidCachers := tpn.ShardDataPool == nil || tpn.ShardDataPool.Headers() == nil || tpn.ShardDataPool.HeadersNonces() == nil - if invalidCachers { - return nil, errors.New("invalid data pool") - } - - syncMapHashNonce, ok := tpn.ShardDataPool.HeadersNonces().Get(nonce) - if !ok { - return nil, errors.New(fmt.Sprintf("no hash-nonce link in HeadersNonces for nonce %d", nonce)) - } - - headerHash, ok := syncMapHashNonce.Load(tpn.ShardCoordinator.SelfId()) - if !ok { - return nil, errors.New(fmt.Sprintf("no hash-nonce hash in HeadersNonces for nonce %d", nonce)) - } - - headerObject, ok := tpn.ShardDataPool.Headers().Get(headerHash) - if !ok { - return nil, errors.New(fmt.Sprintf("no header found for hash %s", hex.EncodeToString(headerHash))) - } - - header, ok := headerObject.(*dataBlock.Header) - if !ok { - return nil, errors.New(fmt.Sprintf("not a *dataBlock.Header stored in headers found for hash %s", hex.EncodeToString(headerHash))) - } - - return header, nil + invalidCachers := tpn.ShardDataPool == nil || tpn.ShardDataPool.Headers() == nil || tpn.ShardDataPool.HeadersNonces() == nil + if invalidCachers { + return nil, errors.New("invalid data pool") + } + + syncMapHashNonce, ok := tpn.ShardDataPool.HeadersNonces().Get(nonce) + if !ok { + return nil, errors.New(fmt.Sprintf("no hash-nonce link in HeadersNonces for nonce %d", nonce)) + } + + headerHash, ok := syncMapHashNonce.Load(tpn.ShardCoordinator.SelfId()) + 
if !ok { + return nil, errors.New(fmt.Sprintf("no hash-nonce hash in HeadersNonces for nonce %d", nonce)) + } + + headerObject, ok := tpn.ShardDataPool.Headers().Get(headerHash) + if !ok { + return nil, errors.New(fmt.Sprintf("no header found for hash %s", hex.EncodeToString(headerHash))) + } + + header, ok := headerObject.(*dataBlock.Header) + if !ok { + return nil, errors.New(fmt.Sprintf("not a *dataBlock.Header stored in headers found for hash %s", hex.EncodeToString(headerHash))) + } + + return header, nil } // GetBlockBody returns the body for provided header parameter func (tpn *TestProcessorNode) GetBlockBody(header *dataBlock.Header) (dataBlock.Body, error) { - invalidCachers := tpn.ShardDataPool == nil || tpn.ShardDataPool.MiniBlocks() == nil - if invalidCachers { - return nil, errors.New("invalid data pool") - } + invalidCachers := tpn.ShardDataPool == nil || tpn.ShardDataPool.MiniBlocks() == nil + if invalidCachers { + return nil, errors.New("invalid data pool") + } - body := dataBlock.Body{} - for _, miniBlockHeader := range header.MiniBlockHeaders { - miniBlockHash := miniBlockHeader.Hash + body := dataBlock.Body{} + for _, miniBlockHeader := range header.MiniBlockHeaders { + miniBlockHash := miniBlockHeader.Hash - mbObject, ok := tpn.ShardDataPool.MiniBlocks().Get(miniBlockHash) - if !ok { - return nil, errors.New(fmt.Sprintf("no miniblock found for hash %s", hex.EncodeToString(miniBlockHash))) - } + mbObject, ok := tpn.ShardDataPool.MiniBlocks().Get(miniBlockHash) + if !ok { + return nil, errors.New(fmt.Sprintf("no miniblock found for hash %s", hex.EncodeToString(miniBlockHash))) + } - mb, ok := mbObject.(*dataBlock.MiniBlock) - if !ok { - return nil, errors.New(fmt.Sprintf("not a *dataBlock.MiniBlock stored in miniblocks found for hash %s", hex.EncodeToString(miniBlockHash))) - } + mb, ok := mbObject.(*dataBlock.MiniBlock) + if !ok { + return nil, errors.New(fmt.Sprintf("not a *dataBlock.MiniBlock stored in miniblocks found for hash %s", hex.EncodeToString(miniBlockHash))) + } - body = append(body, mb) - } + body = append(body, mb) + } - return body, nil + return body, nil } // GetMetaHeader returns the first *dataBlock.MetaBlock stored in datapools having the nonce provided as parameter func (tpn *TestProcessorNode) GetMetaHeader(nonce uint64) (*dataBlock.MetaBlock, error) { - invalidCachers := tpn.MetaDataPool == nil || tpn.MetaDataPool.MetaChainBlocks() == nil || tpn.MetaDataPool.HeadersNonces() == nil - if invalidCachers { - return nil, errors.New("invalid data pool") - } - - syncMapHashNonce, ok := tpn.MetaDataPool.HeadersNonces().Get(nonce) - if !ok { - return nil, errors.New(fmt.Sprintf("no hash-nonce link in HeadersNonces for nonce %d", nonce)) - } - - headerHash, ok := syncMapHashNonce.Load(tpn.ShardCoordinator.SelfId()) - if !ok { - return nil, errors.New(fmt.Sprintf("no hash-nonce hash in HeadersNonces for nonce %d", nonce)) - } - - headerObject, ok := tpn.MetaDataPool.MetaChainBlocks().Get(headerHash) - if !ok { - return nil, errors.New(fmt.Sprintf("no header found for hash %s", hex.EncodeToString(headerHash))) - } - - header, ok := headerObject.(*dataBlock.MetaBlock) - if !ok { - return nil, errors.New(fmt.Sprintf("not a *dataBlock.MetaBlock stored in headers found for hash %s", hex.EncodeToString(headerHash))) - } - - return header, nil + invalidCachers := tpn.MetaDataPool == nil || tpn.MetaDataPool.MetaChainBlocks() == nil || tpn.MetaDataPool.HeadersNonces() == nil + if invalidCachers { + return nil, errors.New("invalid data pool") + } + + syncMapHashNonce, 
ok := tpn.MetaDataPool.HeadersNonces().Get(nonce) + if !ok { + return nil, errors.New(fmt.Sprintf("no hash-nonce link in HeadersNonces for nonce %d", nonce)) + } + + headerHash, ok := syncMapHashNonce.Load(tpn.ShardCoordinator.SelfId()) + if !ok { + return nil, errors.New(fmt.Sprintf("no hash-nonce hash in HeadersNonces for nonce %d", nonce)) + } + + headerObject, ok := tpn.MetaDataPool.MetaChainBlocks().Get(headerHash) + if !ok { + return nil, errors.New(fmt.Sprintf("no header found for hash %s", hex.EncodeToString(headerHash))) + } + + header, ok := headerObject.(*dataBlock.MetaBlock) + if !ok { + return nil, errors.New(fmt.Sprintf("not a *dataBlock.MetaBlock stored in headers found for hash %s", hex.EncodeToString(headerHash))) + } + + return header, nil } // SyncNode tries to process and commit a block already stored in data pool with provided nonce func (tpn *TestProcessorNode) SyncNode(nonce uint64) error { - if tpn.ShardCoordinator.SelfId() == sharding.MetachainShardId { - return tpn.syncMetaNode(nonce) - } else { - return tpn.syncShardNode(nonce) - } + if tpn.ShardCoordinator.SelfId() == sharding.MetachainShardId { + return tpn.syncMetaNode(nonce) + } else { + return tpn.syncShardNode(nonce) + } } func (tpn *TestProcessorNode) syncShardNode(nonce uint64) error { - header, err := tpn.GetShardHeader(nonce) - if err != nil { - return err - } - - body, err := tpn.GetBlockBody(header) - if err != nil { - return err - } - - err = tpn.BlockProcessor.ProcessBlock( - tpn.BlockChain, - header, - body, - func() time.Duration { - return time.Second * 2 - }, - ) - if err != nil { - return err - } - - err = tpn.BlockProcessor.CommitBlock(tpn.BlockChain, header, body) - if err != nil { - return err - } - - return nil + header, err := tpn.GetShardHeader(nonce) + if err != nil { + return err + } + + body, err := tpn.GetBlockBody(header) + if err != nil { + return err + } + + err = tpn.BlockProcessor.ProcessBlock( + tpn.BlockChain, + header, + body, + func() time.Duration { + return time.Second * 2 + }, + ) + if err != nil { + return err + } + + err = tpn.BlockProcessor.CommitBlock(tpn.BlockChain, header, body) + if err != nil { + return err + } + + return nil } func (tpn *TestProcessorNode) syncMetaNode(nonce uint64) error { - header, err := tpn.GetMetaHeader(nonce) - if err != nil { - return err - } - - err = tpn.BlockProcessor.ProcessBlock( - tpn.BlockChain, - header, - &dataBlock.MetaBlockBody{}, - func() time.Duration { - return time.Second * 2 - }, - ) - if err != nil { - return err - } - - err = tpn.BlockProcessor.CommitBlock(tpn.BlockChain, header, &dataBlock.MetaBlockBody{}) - if err != nil { - return err - } - - return nil + header, err := tpn.GetMetaHeader(nonce) + if err != nil { + return err + } + + err = tpn.BlockProcessor.ProcessBlock( + tpn.BlockChain, + header, + &dataBlock.MetaBlockBody{}, + func() time.Duration { + return time.Second * 2 + }, + ) + if err != nil { + return err + } + + err = tpn.BlockProcessor.CommitBlock(tpn.BlockChain, header, &dataBlock.MetaBlockBody{}) + if err != nil { + return err + } + + return nil } // SetAccountNonce sets the account nonce with journal func (tpn *TestProcessorNode) SetAccountNonce(nonce uint64) error { - nodeAccount, _ := tpn.AccntState.GetAccountWithJournal(tpn.OwnAccount.Address) - err := nodeAccount.(*state.Account).SetNonceWithJournal(nonce) - if err != nil { - return err - } - - _, err = tpn.AccntState.Commit() - if err != nil { - return err - } - - return nil + nodeAccount, _ := 
tpn.AccntState.GetAccountWithJournal(tpn.OwnAccount.Address) + err := nodeAccount.(*state.Account).SetNonceWithJournal(nonce) + if err != nil { + return err + } + + _, err = tpn.AccntState.Commit() + if err != nil { + return err + } + + return nil } // MiniBlocksPresent checks if the all the miniblocks are present in the pool func (tpn *TestProcessorNode) MiniBlocksPresent(hashes [][]byte) bool { - mbCacher := tpn.ShardDataPool.MiniBlocks() - for i := 0; i < len(hashes); i++ { - ok := mbCacher.Has(hashes[i]) - if !ok { - return false - } - } - - return true + mbCacher := tpn.ShardDataPool.MiniBlocks() + for i := 0; i < len(hashes); i++ { + ok := mbCacher.Has(hashes[i]) + if !ok { + return false + } + } + + return true } diff --git a/integrationTests/testProcessorNodeWithMultisigner.go b/integrationTests/testProcessorNodeWithMultisigner.go index b2853b81148..e348a75ffd7 100644 --- a/integrationTests/testProcessorNodeWithMultisigner.go +++ b/integrationTests/testProcessorNodeWithMultisigner.go @@ -1,196 +1,196 @@ package integrationTests import ( - "bytes" - "context" - "fmt" - - "github.com/ElrondNetwork/elrond-go/cmd/node/factory" - "github.com/ElrondNetwork/elrond-go/crypto" - kmultisig "github.com/ElrondNetwork/elrond-go/crypto/signing/kyber/multisig" - "github.com/ElrondNetwork/elrond-go/crypto/signing/multisig" - "github.com/ElrondNetwork/elrond-go/data" - "github.com/ElrondNetwork/elrond-go/hashing/blake2b" - "github.com/ElrondNetwork/elrond-go/sharding" + "bytes" + "context" + "fmt" + + "github.com/ElrondNetwork/elrond-go/cmd/node/factory" + "github.com/ElrondNetwork/elrond-go/crypto" + kmultisig "github.com/ElrondNetwork/elrond-go/crypto/signing/kyber/multisig" + "github.com/ElrondNetwork/elrond-go/crypto/signing/multisig" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/hashing/blake2b" + "github.com/ElrondNetwork/elrond-go/sharding" ) // NewTestProcessorNodeWithCustomNodesCoordinator returns a new TestProcessorNode instance with custom NodesCoordinator func NewTestProcessorNodeWithCustomNodesCoordinator( - maxShards uint32, - nodeShardId uint32, - initialNodeAddr string, - nodesCoordinator sharding.NodesCoordinator, - cp *CryptoParams, - keyIndex int, + maxShards uint32, + nodeShardId uint32, + initialNodeAddr string, + nodesCoordinator sharding.NodesCoordinator, + cp *CryptoParams, + keyIndex int, ) *TestProcessorNode { - shardCoordinator, _ := sharding.NewMultiShardCoordinator(maxShards, nodeShardId) - - messenger := CreateMessengerWithKadDht(context.Background(), initialNodeAddr) - tpn := &TestProcessorNode{ - ShardCoordinator: shardCoordinator, - Messenger: messenger, - NodesCoordinator: nodesCoordinator, - } - tpn.NodeKeys = cp.Keys[nodeShardId][keyIndex] - - llsig := &kmultisig.KyberMultiSignerBLS{} - blsHasher := blake2b.Blake2b{HashSize: factory.BlsHashSize} - - pubKeysMap := PubKeysMapFromKeysMap(cp.Keys) - - tpn.MultiSigner, _ = multisig.NewBLSMultisig( - llsig, - blsHasher, - pubKeysMap[nodeShardId], - tpn.NodeKeys.Sk, - cp.KeyGen, - 0, - ) - if tpn.MultiSigner == nil { - fmt.Println("Error generating multisigner") - } - accountShardId := nodeShardId - if nodeShardId == sharding.MetachainShardId { - accountShardId = 0 - } - - tpn.OwnAccount = CreateTestWalletAccount(shardCoordinator, accountShardId) - tpn.initDataPools() - tpn.initTestNode() - - return tpn + shardCoordinator, _ := sharding.NewMultiShardCoordinator(maxShards, nodeShardId) + + messenger := CreateMessengerWithKadDht(context.Background(), initialNodeAddr) + tpn := 
&TestProcessorNode{ + ShardCoordinator: shardCoordinator, + Messenger: messenger, + NodesCoordinator: nodesCoordinator, + } + tpn.NodeKeys = cp.Keys[nodeShardId][keyIndex] + + llsig := &kmultisig.KyberMultiSignerBLS{} + blsHasher := blake2b.Blake2b{HashSize: factory.BlsHashSize} + + pubKeysMap := PubKeysMapFromKeysMap(cp.Keys) + + tpn.MultiSigner, _ = multisig.NewBLSMultisig( + llsig, + blsHasher, + pubKeysMap[nodeShardId], + tpn.NodeKeys.Sk, + cp.KeyGen, + 0, + ) + if tpn.MultiSigner == nil { + fmt.Println("Error generating multisigner") + } + accountShardId := nodeShardId + if nodeShardId == sharding.MetachainShardId { + accountShardId = 0 + } + + tpn.OwnAccount = CreateTestWalletAccount(shardCoordinator, accountShardId) + tpn.initDataPools() + tpn.initTestNode() + + return tpn } // CreateNodesWithNodesCoordinator returns a map with nodes per shard each using a real nodes coordinator func CreateNodesWithNodesCoordinator( - nodesPerShard int, - nbMetaNodes int, - nbShards int, - shardConsensusGroupSize int, - metaConsensusGroupSize int, - seedAddress string, + nodesPerShard int, + nbMetaNodes int, + nbShards int, + shardConsensusGroupSize int, + metaConsensusGroupSize int, + seedAddress string, ) map[uint32][]*TestProcessorNode { - cp := CreateCryptoParams(nodesPerShard, nbMetaNodes, uint32(nbShards)) - pubKeys := PubKeysMapFromKeysMap(cp.Keys) - validatorsMap := GenValidatorsFromPubKeys(pubKeys) - nodesMap := make(map[uint32][]*TestProcessorNode) - for shardId, validatorList := range validatorsMap { - nodesCoordinator, err := sharding.NewIndexHashedNodesCoordinator( - shardConsensusGroupSize, - metaConsensusGroupSize, - TestHasher, - shardId, - uint32(nbShards), - validatorsMap, - ) - - if err != nil { - fmt.Println("Error creating node coordinator") - } - - nodesList := make([]*TestProcessorNode, len(validatorList)) - for i := range validatorList { - nodesList[i] = NewTestProcessorNodeWithCustomNodesCoordinator( - uint32(nbShards), - shardId, - seedAddress, - nodesCoordinator, - cp, - i, - ) - } - nodesMap[shardId] = nodesList - } - - return nodesMap + cp := CreateCryptoParams(nodesPerShard, nbMetaNodes, uint32(nbShards)) + pubKeys := PubKeysMapFromKeysMap(cp.Keys) + validatorsMap := GenValidatorsFromPubKeys(pubKeys) + nodesMap := make(map[uint32][]*TestProcessorNode) + for shardId, validatorList := range validatorsMap { + nodesCoordinator, err := sharding.NewIndexHashedNodesCoordinator( + shardConsensusGroupSize, + metaConsensusGroupSize, + TestHasher, + shardId, + uint32(nbShards), + validatorsMap, + ) + + if err != nil { + fmt.Println("Error creating node coordinator") + } + + nodesList := make([]*TestProcessorNode, len(validatorList)) + for i := range validatorList { + nodesList[i] = NewTestProcessorNodeWithCustomNodesCoordinator( + uint32(nbShards), + shardId, + seedAddress, + nodesCoordinator, + cp, + i, + ) + } + nodesMap[shardId] = nodesList + } + + return nodesMap } // ProposeBlockWithConsensusSignature proposes func ProposeBlockWithConsensusSignature( - shardId uint32, - nodesMap map[uint32][]*TestProcessorNode, - round uint64, - nonce uint64, - randomness []byte, + shardId uint32, + nodesMap map[uint32][]*TestProcessorNode, + round uint64, + nonce uint64, + randomness []byte, ) (data.BodyHandler, data.HeaderHandler, [][]byte, []*TestProcessorNode) { - nodesCoordinator := nodesMap[shardId][0].NodesCoordinator - pubKeys, err := nodesCoordinator.GetValidatorsPublicKeys(randomness, round, shardId) - if err != nil { - fmt.Println("Error getting the validators public keys: ", err) 
- } + nodesCoordinator := nodesMap[shardId][0].NodesCoordinator + pubKeys, err := nodesCoordinator.GetValidatorsPublicKeys(randomness, round, shardId) + if err != nil { + fmt.Println("Error getting the validators public keys: ", err) + } - adddresses, err := nodesCoordinator.GetValidatorsRewardsAddresses(randomness, round, shardId) + adddresses, err := nodesCoordinator.GetValidatorsRewardsAddresses(randomness, round, shardId) - // set the consensus reward addresses - for _, node := range nodesMap[shardId] { - node.BlockProcessor.SetConsensusRewardAddresses(adddresses, round) - } + // set the consensus reward addresses + for _, node := range nodesMap[shardId] { + node.BlockProcessor.SetConsensusRewardAddresses(adddresses, round) + } - consensusNodes := selectTestNodesForPubKeys(nodesMap[shardId], pubKeys) - // first node is block proposer - body, header, txHashes := consensusNodes[0].ProposeBlock(round, nonce) - header.SetPrevRandSeed(randomness) - header = DoConsensusSigningOnBlock(header, consensusNodes, pubKeys) + consensusNodes := selectTestNodesForPubKeys(nodesMap[shardId], pubKeys) + // first node is block proposer + body, header, txHashes := consensusNodes[0].ProposeBlock(round, nonce) + header.SetPrevRandSeed(randomness) + header = DoConsensusSigningOnBlock(header, consensusNodes, pubKeys) - return body, header, txHashes, consensusNodes + return body, header, txHashes, consensusNodes } func selectTestNodesForPubKeys(nodes []*TestProcessorNode, pubKeys []string) []*TestProcessorNode { - selectedNodes := make([]*TestProcessorNode, len(pubKeys)) - cntNodes := 0 - - for i, pk := range pubKeys { - for _, node := range nodes { - pubKeyBytes, _ := node.NodeKeys.Pk.ToByteArray() - if bytes.Equal(pubKeyBytes, []byte(pk)) { - selectedNodes[i] = node - cntNodes++ - } - } - } - - if cntNodes != len(pubKeys) { - fmt.Println("Error selecting nodes from public keys") - } - - return selectedNodes + selectedNodes := make([]*TestProcessorNode, len(pubKeys)) + cntNodes := 0 + + for i, pk := range pubKeys { + for _, node := range nodes { + pubKeyBytes, _ := node.NodeKeys.Pk.ToByteArray() + if bytes.Equal(pubKeyBytes, []byte(pk)) { + selectedNodes[i] = node + cntNodes++ + } + } + } + + if cntNodes != len(pubKeys) { + fmt.Println("Error selecting nodes from public keys") + } + + return selectedNodes } // DoConsensusSigningOnBlock simulates a consensus aggregated signature on the provided block func DoConsensusSigningOnBlock( - blockHeader data.HeaderHandler, - consensusNodes []*TestProcessorNode, - pubKeys []string, + blockHeader data.HeaderHandler, + consensusNodes []*TestProcessorNode, + pubKeys []string, ) data.HeaderHandler { - // set bitmap for all consensus nodes signing - bitmap := make([]byte, len(consensusNodes)/8+1) - for i := range bitmap { - bitmap[i] = 0xFF - } - - bitmap[len(consensusNodes)/8] >>= uint8(8 - (len(consensusNodes) % 8)) - blockHeader.SetPubKeysBitmap(bitmap) - // clear signature, as we need to compute it below - blockHeader.SetSignature(nil) - blockHeader.SetPubKeysBitmap(nil) - blockHeaderBytes, _ := TestMarshalizer.Marshal(blockHeader) - blockHeaderHash := TestHasher.Compute(string(blockHeaderBytes)) - - var msig crypto.MultiSigner - msigProposer, _ := consensusNodes[0].MultiSigner.Create(pubKeys, 0) - _, _ = msigProposer.CreateSignatureShare(blockHeaderHash, bitmap) - - for i := 1; i < len(consensusNodes); i++ { - msig, _ = consensusNodes[i].MultiSigner.Create(pubKeys, uint16(i)) - sigShare, _ := msig.CreateSignatureShare(blockHeaderHash, bitmap) - _ = 
msigProposer.StoreSignatureShare(uint16(i), sigShare) - } - - sig, _ := msigProposer.AggregateSigs(bitmap) - blockHeader.SetSignature(sig) - blockHeader.SetPubKeysBitmap(bitmap) - - return blockHeader + // set bitmap for all consensus nodes signing + bitmap := make([]byte, len(consensusNodes)/8+1) + for i := range bitmap { + bitmap[i] = 0xFF + } + + bitmap[len(consensusNodes)/8] >>= uint8(8 - (len(consensusNodes) % 8)) + blockHeader.SetPubKeysBitmap(bitmap) + // clear signature, as we need to compute it below + blockHeader.SetSignature(nil) + blockHeader.SetPubKeysBitmap(nil) + blockHeaderBytes, _ := TestMarshalizer.Marshal(blockHeader) + blockHeaderHash := TestHasher.Compute(string(blockHeaderBytes)) + + var msig crypto.MultiSigner + msigProposer, _ := consensusNodes[0].MultiSigner.Create(pubKeys, 0) + _, _ = msigProposer.CreateSignatureShare(blockHeaderHash, bitmap) + + for i := 1; i < len(consensusNodes); i++ { + msig, _ = consensusNodes[i].MultiSigner.Create(pubKeys, uint16(i)) + sigShare, _ := msig.CreateSignatureShare(blockHeaderHash, bitmap) + _ = msigProposer.StoreSignatureShare(uint16(i), sigShare) + } + + sig, _ := msigProposer.AggregateSigs(bitmap) + blockHeader.SetSignature(sig) + blockHeader.SetPubKeysBitmap(bitmap) + + return blockHeader } diff --git a/node/mock/blockProcessorStub.go b/node/mock/blockProcessorStub.go index 64560dbc1e7..7edf82f9c0e 100644 --- a/node/mock/blockProcessorStub.go +++ b/node/mock/blockProcessorStub.go @@ -1,85 +1,85 @@ package mock import ( - "math/big" - "time" + "math/big" + "time" - "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data" ) // BlockProcessorStub mocks the implementation for a blockProcessor type BlockProcessorStub struct { - ProcessBlockCalled func(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler, haveTime func() time.Duration) error - CommitBlockCalled func(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler) error - RevertAccountStateCalled func() - CreateGenesisBlockCalled func(balances map[string]*big.Int) (data.HeaderHandler, error) - CreateBlockBodyCalled func(round uint64, haveTime func() bool) (data.BodyHandler, error) - RestoreBlockIntoPoolsCalled func(header data.HeaderHandler, body data.BodyHandler) error - SetOnRequestTransactionCalled func(f func(destShardID uint32, txHash []byte)) - CreateBlockHeaderCalled func(body data.BodyHandler, round uint64, haveTime func() bool) (data.HeaderHandler, error) - MarshalizedDataToBroadcastCalled func(header data.HeaderHandler, body data.BodyHandler) (map[uint32][]byte, map[string][][]byte, error) - DecodeBlockBodyCalled func(dta []byte) data.BodyHandler - DecodeBlockHeaderCalled func(dta []byte) data.HeaderHandler - AddLastNotarizedHdrCalled func(shardId uint32, processedHdr data.HeaderHandler) + ProcessBlockCalled func(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler, haveTime func() time.Duration) error + CommitBlockCalled func(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler) error + RevertAccountStateCalled func() + CreateGenesisBlockCalled func(balances map[string]*big.Int) (data.HeaderHandler, error) + CreateBlockBodyCalled func(round uint64, haveTime func() bool) (data.BodyHandler, error) + RestoreBlockIntoPoolsCalled func(header data.HeaderHandler, body data.BodyHandler) error + SetOnRequestTransactionCalled func(f func(destShardID uint32, txHash []byte)) + CreateBlockHeaderCalled func(body data.BodyHandler, round uint64, 
haveTime func() bool) (data.HeaderHandler, error) + MarshalizedDataToBroadcastCalled func(header data.HeaderHandler, body data.BodyHandler) (map[uint32][]byte, map[string][][]byte, error) + DecodeBlockBodyCalled func(dta []byte) data.BodyHandler + DecodeBlockHeaderCalled func(dta []byte) data.HeaderHandler + AddLastNotarizedHdrCalled func(shardId uint32, processedHdr data.HeaderHandler) } // ProcessBlock mocks pocessing a block func (blProcMock *BlockProcessorStub) ProcessBlock(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler, haveTime func() time.Duration) error { - return blProcMock.ProcessBlockCalled(blockChain, header, body, haveTime) + return blProcMock.ProcessBlockCalled(blockChain, header, body, haveTime) } // CommitBlock mocks the commit of a block func (blProcMock *BlockProcessorStub) CommitBlock(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler) error { - return blProcMock.CommitBlockCalled(blockChain, header, body) + return blProcMock.CommitBlockCalled(blockChain, header, body) } // RevertAccountState mocks revert of the accounts state func (blProcMock *BlockProcessorStub) RevertAccountState() { - blProcMock.RevertAccountStateCalled() + blProcMock.RevertAccountStateCalled() } // CreateGenesisBlock mocks the creation of a genesis block body func (blProcMock *BlockProcessorStub) CreateGenesisBlock(balances map[string]*big.Int) (data.HeaderHandler, error) { - return blProcMock.CreateGenesisBlockCalled(balances) + return blProcMock.CreateGenesisBlockCalled(balances) } // CreateTxBlockBody mocks the creation of a transaction block body func (blProcMock *BlockProcessorStub) CreateBlockBody(round uint64, haveTime func() bool) (data.BodyHandler, error) { - return blProcMock.CreateBlockBodyCalled(round, haveTime) + return blProcMock.CreateBlockBodyCalled(round, haveTime) } func (blProcMock *BlockProcessorStub) RestoreBlockIntoPools(header data.HeaderHandler, body data.BodyHandler) error { - return blProcMock.RestoreBlockIntoPoolsCalled(header, body) + return blProcMock.RestoreBlockIntoPoolsCalled(header, body) } func (blProcMock BlockProcessorStub) CreateBlockHeader(body data.BodyHandler, round uint64, haveTime func() bool) (data.HeaderHandler, error) { - return blProcMock.CreateBlockHeaderCalled(body, round, haveTime) + return blProcMock.CreateBlockHeaderCalled(body, round, haveTime) } func (blProcMock BlockProcessorStub) MarshalizedDataToBroadcast(header data.HeaderHandler, body data.BodyHandler) (map[uint32][]byte, map[string][][]byte, error) { - return blProcMock.MarshalizedDataToBroadcastCalled(header, body) + return blProcMock.MarshalizedDataToBroadcastCalled(header, body) } func (blProcMock BlockProcessorStub) DecodeBlockBody(dta []byte) data.BodyHandler { - return blProcMock.DecodeBlockBodyCalled(dta) + return blProcMock.DecodeBlockBodyCalled(dta) } func (blProcMock BlockProcessorStub) DecodeBlockHeader(dta []byte) data.HeaderHandler { - return blProcMock.DecodeBlockHeaderCalled(dta) + return blProcMock.DecodeBlockHeaderCalled(dta) } func (blProcMock BlockProcessorStub) AddLastNotarizedHdr(shardId uint32, processedHdr data.HeaderHandler) { - blProcMock.AddLastNotarizedHdrCalled(shardId, processedHdr) + blProcMock.AddLastNotarizedHdrCalled(shardId, processedHdr) } func (blProcMock BlockProcessorStub) SetConsensusRewardAddresses([]string, uint64) { - panic("implement me") + panic("implement me") } // IsInterfaceNil returns true if there is no value under the interface func (blProcMock *BlockProcessorStub) IsInterfaceNil() 
bool { - if blProcMock == nil { - return true - } - return false + if blProcMock == nil { + return true + } + return false } diff --git a/process/block/baseProcess.go b/process/block/baseProcess.go index b07e72079a8..3659d098bd2 100644 --- a/process/block/baseProcess.go +++ b/process/block/baseProcess.go @@ -1,528 +1,528 @@ package block import ( - "bytes" - "fmt" - "sort" - "sync" - - "github.com/ElrondNetwork/elrond-go/core" - "github.com/ElrondNetwork/elrond-go/core/logger" - "github.com/ElrondNetwork/elrond-go/data" - "github.com/ElrondNetwork/elrond-go/data/block" - "github.com/ElrondNetwork/elrond-go/data/state" - "github.com/ElrondNetwork/elrond-go/data/typeConverters" - "github.com/ElrondNetwork/elrond-go/dataRetriever" - "github.com/ElrondNetwork/elrond-go/display" - "github.com/ElrondNetwork/elrond-go/hashing" - "github.com/ElrondNetwork/elrond-go/marshal" - "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/sharding" + "bytes" + "fmt" + "sort" + "sync" + + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/core/logger" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/data/typeConverters" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/display" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/sharding" ) var log = logger.DefaultLogger() type hashAndHdr struct { - hdr data.HeaderHandler - hash []byte + hdr data.HeaderHandler + hash []byte } type mapShardHeaders map[uint32][]data.HeaderHandler type baseProcessor struct { - shardCoordinator sharding.Coordinator - nodesCoordinator sharding.NodesCoordinator - specialAddressHandler process.SpecialAddressHandler - accounts state.AccountsAdapter - forkDetector process.ForkDetector - hasher hashing.Hasher - marshalizer marshal.Marshalizer - store dataRetriever.StorageService - uint64Converter typeConverters.Uint64ByteSliceConverter - blockSizeThrottler process.BlockSizeThrottler - - mutNotarizedHdrs sync.RWMutex - notarizedHdrs mapShardHeaders - - onRequestHeaderHandlerByNonce func(shardId uint32, nonce uint64) - onRequestHeaderHandler func(shardId uint32, hash []byte) - - appStatusHandler core.AppStatusHandler + shardCoordinator sharding.Coordinator + nodesCoordinator sharding.NodesCoordinator + specialAddressHandler process.SpecialAddressHandler + accounts state.AccountsAdapter + forkDetector process.ForkDetector + hasher hashing.Hasher + marshalizer marshal.Marshalizer + store dataRetriever.StorageService + uint64Converter typeConverters.Uint64ByteSliceConverter + blockSizeThrottler process.BlockSizeThrottler + + mutNotarizedHdrs sync.RWMutex + notarizedHdrs mapShardHeaders + + onRequestHeaderHandlerByNonce func(shardId uint32, nonce uint64) + onRequestHeaderHandler func(shardId uint32, hash []byte) + + appStatusHandler core.AppStatusHandler } func checkForNils( - chainHandler data.ChainHandler, - headerHandler data.HeaderHandler, - bodyHandler data.BodyHandler, + chainHandler data.ChainHandler, + headerHandler data.HeaderHandler, + bodyHandler data.BodyHandler, ) error { - if chainHandler == nil || chainHandler.IsInterfaceNil() { - return process.ErrNilBlockChain - } - if headerHandler == nil || headerHandler.IsInterfaceNil() { - return process.ErrNilBlockHeader - } - if 
bodyHandler == nil || bodyHandler.IsInterfaceNil() { - return process.ErrNilBlockBody - } - return nil + if chainHandler == nil || chainHandler.IsInterfaceNil() { + return process.ErrNilBlockChain + } + if headerHandler == nil || headerHandler.IsInterfaceNil() { + return process.ErrNilBlockHeader + } + if bodyHandler == nil || bodyHandler.IsInterfaceNil() { + return process.ErrNilBlockBody + } + return nil } // SetAppStatusHandler method is used to set appStatusHandler func (bp *baseProcessor) SetAppStatusHandler(ash core.AppStatusHandler) error { - if ash == nil || ash.IsInterfaceNil() { - return process.ErrNilAppStatusHandler - } + if ash == nil || ash.IsInterfaceNil() { + return process.ErrNilAppStatusHandler + } - bp.appStatusHandler = ash - return nil + bp.appStatusHandler = ash + return nil } // RevertAccountState reverts the account state for cleanup failed process func (bp *baseProcessor) RevertAccountState() { - err := bp.accounts.RevertToSnapshot(0) - if err != nil { - log.Error(err.Error()) - } + err := bp.accounts.RevertToSnapshot(0) + if err != nil { + log.Error(err.Error()) + } } // AddLastNotarizedHdr adds the last notarized header func (bp *baseProcessor) AddLastNotarizedHdr(shardId uint32, processedHdr data.HeaderHandler) { - bp.mutNotarizedHdrs.Lock() - bp.notarizedHdrs[shardId] = append(bp.notarizedHdrs[shardId], processedHdr) - bp.mutNotarizedHdrs.Unlock() + bp.mutNotarizedHdrs.Lock() + bp.notarizedHdrs[shardId] = append(bp.notarizedHdrs[shardId], processedHdr) + bp.mutNotarizedHdrs.Unlock() } // checkBlockValidity method checks if the given block is valid func (bp *baseProcessor) checkBlockValidity( - chainHandler data.ChainHandler, - headerHandler data.HeaderHandler, - bodyHandler data.BodyHandler, + chainHandler data.ChainHandler, + headerHandler data.HeaderHandler, + bodyHandler data.BodyHandler, ) error { - err := checkForNils(chainHandler, headerHandler, bodyHandler) - if err != nil { - return err - } + err := checkForNils(chainHandler, headerHandler, bodyHandler) + if err != nil { + return err + } - currentBlockHeader := chainHandler.GetCurrentBlockHeader() + currentBlockHeader := chainHandler.GetCurrentBlockHeader() - if currentBlockHeader == nil { - if headerHandler.GetNonce() == 1 { // first block after genesis - if bytes.Equal(headerHandler.GetPrevHash(), chainHandler.GetGenesisHeaderHash()) { - // TODO: add genesis block verification - return nil - } + if currentBlockHeader == nil { + if headerHandler.GetNonce() == 1 { // first block after genesis + if bytes.Equal(headerHandler.GetPrevHash(), chainHandler.GetGenesisHeaderHash()) { + // TODO: add genesis block verification + return nil + } - log.Info(fmt.Sprintf("hash not match: local block hash is %s and node received block with previous hash %s\n", - core.ToB64(chainHandler.GetGenesisHeaderHash()), - core.ToB64(headerHandler.GetPrevHash()))) + log.Info(fmt.Sprintf("hash not match: local block hash is %s and node received block with previous hash %s\n", + core.ToB64(chainHandler.GetGenesisHeaderHash()), + core.ToB64(headerHandler.GetPrevHash()))) - return process.ErrBlockHashDoesNotMatch - } + return process.ErrBlockHashDoesNotMatch + } - log.Info(fmt.Sprintf("nonce not match: local block nonce is 0 and node received block with nonce %d\n", - headerHandler.GetNonce())) + log.Info(fmt.Sprintf("nonce not match: local block nonce is 0 and node received block with nonce %d\n", + headerHandler.GetNonce())) - return process.ErrWrongNonceInBlock - } + return process.ErrWrongNonceInBlock + } - if 
headerHandler.GetRound() <= currentBlockHeader.GetRound() { - log.Info(fmt.Sprintf("round not match: local block round is %d and node received block with round %d\n", - currentBlockHeader.GetRound(), headerHandler.GetRound())) + if headerHandler.GetRound() <= currentBlockHeader.GetRound() { + log.Info(fmt.Sprintf("round not match: local block round is %d and node received block with round %d\n", + currentBlockHeader.GetRound(), headerHandler.GetRound())) - return process.ErrLowerRoundInBlock - } + return process.ErrLowerRoundInBlock + } - if headerHandler.GetNonce() != currentBlockHeader.GetNonce()+1 { - log.Info(fmt.Sprintf("nonce not match: local block nonce is %d and node received block with nonce %d\n", - currentBlockHeader.GetNonce(), headerHandler.GetNonce())) + if headerHandler.GetNonce() != currentBlockHeader.GetNonce()+1 { + log.Info(fmt.Sprintf("nonce not match: local block nonce is %d and node received block with nonce %d\n", + currentBlockHeader.GetNonce(), headerHandler.GetNonce())) - return process.ErrWrongNonceInBlock - } + return process.ErrWrongNonceInBlock + } - prevHeaderHash, err := core.CalculateHash(bp.marshalizer, bp.hasher, currentBlockHeader) - if err != nil { - return err - } + prevHeaderHash, err := core.CalculateHash(bp.marshalizer, bp.hasher, currentBlockHeader) + if err != nil { + return err + } - if !bytes.Equal(headerHandler.GetPrevRandSeed(), currentBlockHeader.GetRandSeed()) { - log.Info(fmt.Sprintf("random seed not match: local block random seed is %s and node received block with previous random seed %s\n", - core.ToB64(currentBlockHeader.GetRandSeed()), core.ToB64(headerHandler.GetPrevRandSeed()))) + if !bytes.Equal(headerHandler.GetPrevRandSeed(), currentBlockHeader.GetRandSeed()) { + log.Info(fmt.Sprintf("random seed not match: local block random seed is %s and node received block with previous random seed %s\n", + core.ToB64(currentBlockHeader.GetRandSeed()), core.ToB64(headerHandler.GetPrevRandSeed()))) - return process.ErrRandSeedMismatch - } + return process.ErrRandSeedMismatch + } - if !bytes.Equal(headerHandler.GetPrevHash(), prevHeaderHash) { - log.Info(fmt.Sprintf("hash not match: local block hash is %s and node received block with previous hash %s\n", - core.ToB64(prevHeaderHash), core.ToB64(headerHandler.GetPrevHash()))) + if !bytes.Equal(headerHandler.GetPrevHash(), prevHeaderHash) { + log.Info(fmt.Sprintf("hash not match: local block hash is %s and node received block with previous hash %s\n", + core.ToB64(prevHeaderHash), core.ToB64(headerHandler.GetPrevHash()))) - return process.ErrBlockHashDoesNotMatch - } + return process.ErrBlockHashDoesNotMatch + } - if bodyHandler != nil { - // TODO: add bodyHandler verification here - } + if bodyHandler != nil { + // TODO: add bodyHandler verification here + } - // TODO: add signature validation as well, with randomness source and all - return nil + // TODO: add signature validation as well, with randomness source and all + return nil } // verifyStateRoot verifies the state root hash given as parameter against the // Merkle trie root hash stored for accounts and returns if equal or not func (bp *baseProcessor) verifyStateRoot(rootHash []byte) bool { - trieRootHash, err := bp.accounts.RootHash() - if err != nil { - log.Debug(err.Error()) - } + trieRootHash, err := bp.accounts.RootHash() + if err != nil { + log.Debug(err.Error()) + } - return bytes.Equal(trieRootHash, rootHash) + return bytes.Equal(trieRootHash, rootHash) } // getRootHash returns the accounts merkle tree root hash func (bp 
*baseProcessor) getRootHash() []byte { - rootHash, err := bp.accounts.RootHash() - if err != nil { - log.Debug(err.Error()) - } + rootHash, err := bp.accounts.RootHash() + if err != nil { + log.Debug(err.Error()) + } - return rootHash + return rootHash } func (bp *baseProcessor) isHdrConstructionValid(currHdr, prevHdr data.HeaderHandler) error { - if prevHdr == nil || prevHdr.IsInterfaceNil() { - return process.ErrNilBlockHeader - } - if currHdr == nil || currHdr.IsInterfaceNil() { - return process.ErrNilBlockHeader - } - - // special case with genesis nonce - 0 - if currHdr.GetNonce() == 0 { - if prevHdr.GetNonce() != 0 { - return process.ErrWrongNonceInBlock - } - // block with nonce 0 was already saved - if prevHdr.GetRootHash() != nil { - return process.ErrRootStateMissmatch - } - return nil - } - - //TODO: add verification if rand seed was correctly computed add other verification - //TODO: check here if the 2 header blocks were correctly signed and the consensus group was correctly elected - if prevHdr.GetRound() >= currHdr.GetRound() { - return process.ErrLowerRoundInOtherChainBlock - } - - if currHdr.GetNonce() != prevHdr.GetNonce()+1 { - return process.ErrWrongNonceInBlock - } - - prevHeaderHash, err := core.CalculateHash(bp.marshalizer, bp.hasher, prevHdr) - if err != nil { - return err - } - - if !bytes.Equal(currHdr.GetPrevRandSeed(), prevHdr.GetRandSeed()) { - return process.ErrRandSeedMismatch - } - - if !bytes.Equal(currHdr.GetPrevHash(), prevHeaderHash) { - return process.ErrHashDoesNotMatchInOtherChainBlock - } - - return nil + if prevHdr == nil || prevHdr.IsInterfaceNil() { + return process.ErrNilBlockHeader + } + if currHdr == nil || currHdr.IsInterfaceNil() { + return process.ErrNilBlockHeader + } + + // special case with genesis nonce - 0 + if currHdr.GetNonce() == 0 { + if prevHdr.GetNonce() != 0 { + return process.ErrWrongNonceInBlock + } + // block with nonce 0 was already saved + if prevHdr.GetRootHash() != nil { + return process.ErrRootStateMissmatch + } + return nil + } + + //TODO: add verification if rand seed was correctly computed add other verification + //TODO: check here if the 2 header blocks were correctly signed and the consensus group was correctly elected + if prevHdr.GetRound() >= currHdr.GetRound() { + return process.ErrLowerRoundInOtherChainBlock + } + + if currHdr.GetNonce() != prevHdr.GetNonce()+1 { + return process.ErrWrongNonceInBlock + } + + prevHeaderHash, err := core.CalculateHash(bp.marshalizer, bp.hasher, prevHdr) + if err != nil { + return err + } + + if !bytes.Equal(currHdr.GetPrevRandSeed(), prevHdr.GetRandSeed()) { + return process.ErrRandSeedMismatch + } + + if !bytes.Equal(currHdr.GetPrevHash(), prevHeaderHash) { + return process.ErrHashDoesNotMatchInOtherChainBlock + } + + return nil } func (bp *baseProcessor) checkHeaderTypeCorrect(shardId uint32, hdr data.HeaderHandler) error { - if shardId >= bp.shardCoordinator.NumberOfShards() && shardId != sharding.MetachainShardId { - return process.ErrShardIdMissmatch - } - - if shardId < bp.shardCoordinator.NumberOfShards() { - _, ok := hdr.(*block.Header) - if !ok { - return process.ErrWrongTypeAssertion - } - } - - if shardId == sharding.MetachainShardId { - _, ok := hdr.(*block.MetaBlock) - if !ok { - return process.ErrWrongTypeAssertion - } - } - - return nil + if shardId >= bp.shardCoordinator.NumberOfShards() && shardId != sharding.MetachainShardId { + return process.ErrShardIdMissmatch + } + + if shardId < bp.shardCoordinator.NumberOfShards() { + _, ok := hdr.(*block.Header) + if !ok 
{ + return process.ErrWrongTypeAssertion + } + } + + if shardId == sharding.MetachainShardId { + _, ok := hdr.(*block.MetaBlock) + if !ok { + return process.ErrWrongTypeAssertion + } + } + + return nil } func (bp *baseProcessor) removeNotarizedHdrsBehindFinal(hdrsToAttestFinality uint32) { - bp.mutNotarizedHdrs.Lock() - for shardId := range bp.notarizedHdrs { - notarizedHdrsCount := uint32(len(bp.notarizedHdrs[shardId])) - if notarizedHdrsCount > hdrsToAttestFinality { - finalIndex := notarizedHdrsCount - 1 - hdrsToAttestFinality - bp.notarizedHdrs[shardId] = bp.notarizedHdrs[shardId][finalIndex:] - } - } - bp.mutNotarizedHdrs.Unlock() + bp.mutNotarizedHdrs.Lock() + for shardId := range bp.notarizedHdrs { + notarizedHdrsCount := uint32(len(bp.notarizedHdrs[shardId])) + if notarizedHdrsCount > hdrsToAttestFinality { + finalIndex := notarizedHdrsCount - 1 - hdrsToAttestFinality + bp.notarizedHdrs[shardId] = bp.notarizedHdrs[shardId][finalIndex:] + } + } + bp.mutNotarizedHdrs.Unlock() } func (bp *baseProcessor) removeLastNotarized() { - bp.mutNotarizedHdrs.Lock() - for shardId := range bp.notarizedHdrs { - notarizedHdrsCount := len(bp.notarizedHdrs[shardId]) - if notarizedHdrsCount > 0 { - bp.notarizedHdrs[shardId] = bp.notarizedHdrs[shardId][:notarizedHdrsCount-1] - } - } - bp.mutNotarizedHdrs.Unlock() + bp.mutNotarizedHdrs.Lock() + for shardId := range bp.notarizedHdrs { + notarizedHdrsCount := len(bp.notarizedHdrs[shardId]) + if notarizedHdrsCount > 0 { + bp.notarizedHdrs[shardId] = bp.notarizedHdrs[shardId][:notarizedHdrsCount-1] + } + } + bp.mutNotarizedHdrs.Unlock() } func (bp *baseProcessor) lastNotarizedHdrForShard(shardId uint32) data.HeaderHandler { - notarizedHdrsCount := len(bp.notarizedHdrs[shardId]) - if notarizedHdrsCount > 0 { - return bp.notarizedHdrs[shardId][notarizedHdrsCount-1] - } + notarizedHdrsCount := len(bp.notarizedHdrs[shardId]) + if notarizedHdrsCount > 0 { + return bp.notarizedHdrs[shardId][notarizedHdrsCount-1] + } - return nil + return nil } func (bp *baseProcessor) saveLastNotarizedHeader(shardId uint32, processedHdrs []data.HeaderHandler) error { - bp.mutNotarizedHdrs.Lock() - defer bp.mutNotarizedHdrs.Unlock() + bp.mutNotarizedHdrs.Lock() + defer bp.mutNotarizedHdrs.Unlock() - if bp.notarizedHdrs == nil { - return process.ErrNotarizedHdrsSliceIsNil - } + if bp.notarizedHdrs == nil { + return process.ErrNotarizedHdrsSliceIsNil + } - err := bp.checkHeaderTypeCorrect(shardId, bp.lastNotarizedHdrForShard(shardId)) - if err != nil { - return err - } + err := bp.checkHeaderTypeCorrect(shardId, bp.lastNotarizedHdrForShard(shardId)) + if err != nil { + return err + } - sort.Slice(processedHdrs, func(i, j int) bool { - return processedHdrs[i].GetNonce() < processedHdrs[j].GetNonce() - }) + sort.Slice(processedHdrs, func(i, j int) bool { + return processedHdrs[i].GetNonce() < processedHdrs[j].GetNonce() + }) - tmpLastNotarizedHdrForShard := bp.lastNotarizedHdrForShard(shardId) + tmpLastNotarizedHdrForShard := bp.lastNotarizedHdrForShard(shardId) - for i := 0; i < len(processedHdrs); i++ { - err = bp.checkHeaderTypeCorrect(shardId, processedHdrs[i]) - if err != nil { - return err - } + for i := 0; i < len(processedHdrs); i++ { + err = bp.checkHeaderTypeCorrect(shardId, processedHdrs[i]) + if err != nil { + return err + } - err = bp.isHdrConstructionValid(processedHdrs[i], tmpLastNotarizedHdrForShard) - if err != nil { - return err - } + err = bp.isHdrConstructionValid(processedHdrs[i], tmpLastNotarizedHdrForShard) + if err != nil { + return err + } - 
tmpLastNotarizedHdrForShard = processedHdrs[i] - } + tmpLastNotarizedHdrForShard = processedHdrs[i] + } - bp.notarizedHdrs[shardId] = append(bp.notarizedHdrs[shardId], tmpLastNotarizedHdrForShard) - DisplayLastNotarized(bp.marshalizer, bp.hasher, tmpLastNotarizedHdrForShard, shardId) + bp.notarizedHdrs[shardId] = append(bp.notarizedHdrs[shardId], tmpLastNotarizedHdrForShard) + DisplayLastNotarized(bp.marshalizer, bp.hasher, tmpLastNotarizedHdrForShard, shardId) - return nil + return nil } func (bp *baseProcessor) getLastNotarizedHdr(shardId uint32) (data.HeaderHandler, error) { - bp.mutNotarizedHdrs.RLock() - defer bp.mutNotarizedHdrs.RUnlock() + bp.mutNotarizedHdrs.RLock() + defer bp.mutNotarizedHdrs.RUnlock() - if bp.notarizedHdrs == nil { - return nil, process.ErrNotarizedHdrsSliceIsNil - } + if bp.notarizedHdrs == nil { + return nil, process.ErrNotarizedHdrsSliceIsNil + } - hdr := bp.lastNotarizedHdrForShard(shardId) + hdr := bp.lastNotarizedHdrForShard(shardId) - err := bp.checkHeaderTypeCorrect(shardId, hdr) - if err != nil { - return nil, err - } + err := bp.checkHeaderTypeCorrect(shardId, hdr) + if err != nil { + return nil, err + } - return hdr, nil + return hdr, nil } // SetLastNotarizedHeadersSlice sets the headers blocks in notarizedHdrs for every shard // This is done when starting a new epoch so metachain can use it when validating next shard header blocks // and shard can validate the next meta header func (bp *baseProcessor) setLastNotarizedHeadersSlice(startHeaders map[uint32]data.HeaderHandler) error { - //TODO: protect this to be called only once at genesis time - //TODO: do this on constructor as it is a must to for blockprocessor to work - bp.mutNotarizedHdrs.Lock() - defer bp.mutNotarizedHdrs.Unlock() - - if startHeaders == nil { - return process.ErrNotarizedHdrsSliceIsNil - } - - bp.notarizedHdrs = make(mapShardHeaders, bp.shardCoordinator.NumberOfShards()) - for i := uint32(0); i < bp.shardCoordinator.NumberOfShards(); i++ { - hdr, ok := startHeaders[i].(*block.Header) - if !ok { - return process.ErrWrongTypeAssertion - } - bp.notarizedHdrs[i] = append(bp.notarizedHdrs[i], hdr) - } - - hdr, ok := startHeaders[sharding.MetachainShardId].(*block.MetaBlock) - if !ok { - return process.ErrWrongTypeAssertion - } - bp.notarizedHdrs[sharding.MetachainShardId] = append(bp.notarizedHdrs[sharding.MetachainShardId], hdr) - - return nil + //TODO: protect this to be called only once at genesis time + //TODO: do this on constructor as it is a must to for blockprocessor to work + bp.mutNotarizedHdrs.Lock() + defer bp.mutNotarizedHdrs.Unlock() + + if startHeaders == nil { + return process.ErrNotarizedHdrsSliceIsNil + } + + bp.notarizedHdrs = make(mapShardHeaders, bp.shardCoordinator.NumberOfShards()) + for i := uint32(0); i < bp.shardCoordinator.NumberOfShards(); i++ { + hdr, ok := startHeaders[i].(*block.Header) + if !ok { + return process.ErrWrongTypeAssertion + } + bp.notarizedHdrs[i] = append(bp.notarizedHdrs[i], hdr) + } + + hdr, ok := startHeaders[sharding.MetachainShardId].(*block.MetaBlock) + if !ok { + return process.ErrWrongTypeAssertion + } + bp.notarizedHdrs[sharding.MetachainShardId] = append(bp.notarizedHdrs[sharding.MetachainShardId], hdr) + + return nil } func (bp *baseProcessor) requestHeadersIfMissing(sortedHdrs []data.HeaderHandler, shardId uint32, maxRound uint64) error { - prevHdr, err := bp.getLastNotarizedHdr(shardId) - if err != nil { - return err - } - - isLastNotarizedCloseToOurRound := maxRound-prevHdr.GetRound() <= process.MaxHeaderRequestsAllowed - if 
len(sortedHdrs) == 0 && isLastNotarizedCloseToOurRound { - return process.ErrNoSortedHdrsForShard - } - - missingNonces := make([]uint64, 0) - for i := 0; i < len(sortedHdrs); i++ { - currHdr := sortedHdrs[i] - if currHdr == nil { - continue - } - - if i > 0 { - prevHdr = sortedHdrs[i-1] - } - - hdrTooNew := currHdr.GetRound() > maxRound || prevHdr.GetRound() > maxRound - if hdrTooNew { - continue - } - - if currHdr.GetNonce()-prevHdr.GetNonce() > 1 { - for j := prevHdr.GetNonce() + 1; j < currHdr.GetNonce(); j++ { - missingNonces = append(missingNonces, j) - } - } - } - - // ask for headers, if there most probably should be - if len(missingNonces) == 0 && !isLastNotarizedCloseToOurRound { - startNonce := prevHdr.GetNonce() + 1 - for nonce := startNonce; nonce < startNonce+process.MaxHeaderRequestsAllowed; nonce++ { - missingNonces = append(missingNonces, nonce) - } - } - - requested := 0 - for _, nonce := range missingNonces { - // do the request here - if bp.onRequestHeaderHandlerByNonce == nil { - return process.ErrNilRequestHeaderHandlerByNonce - } - - if requested >= process.MaxHeaderRequestsAllowed { - break - } - - requested++ - go bp.onRequestHeaderHandlerByNonce(shardId, nonce) - } - - return nil + prevHdr, err := bp.getLastNotarizedHdr(shardId) + if err != nil { + return err + } + + isLastNotarizedCloseToOurRound := maxRound-prevHdr.GetRound() <= process.MaxHeaderRequestsAllowed + if len(sortedHdrs) == 0 && isLastNotarizedCloseToOurRound { + return process.ErrNoSortedHdrsForShard + } + + missingNonces := make([]uint64, 0) + for i := 0; i < len(sortedHdrs); i++ { + currHdr := sortedHdrs[i] + if currHdr == nil { + continue + } + + if i > 0 { + prevHdr = sortedHdrs[i-1] + } + + hdrTooNew := currHdr.GetRound() > maxRound || prevHdr.GetRound() > maxRound + if hdrTooNew { + continue + } + + if currHdr.GetNonce()-prevHdr.GetNonce() > 1 { + for j := prevHdr.GetNonce() + 1; j < currHdr.GetNonce(); j++ { + missingNonces = append(missingNonces, j) + } + } + } + + // ask for headers, if there most probably should be + if len(missingNonces) == 0 && !isLastNotarizedCloseToOurRound { + startNonce := prevHdr.GetNonce() + 1 + for nonce := startNonce; nonce < startNonce+process.MaxHeaderRequestsAllowed; nonce++ { + missingNonces = append(missingNonces, nonce) + } + } + + requested := 0 + for _, nonce := range missingNonces { + // do the request here + if bp.onRequestHeaderHandlerByNonce == nil { + return process.ErrNilRequestHeaderHandlerByNonce + } + + if requested >= process.MaxHeaderRequestsAllowed { + break + } + + requested++ + go bp.onRequestHeaderHandlerByNonce(shardId, nonce) + } + + return nil } func displayHeader(headerHandler data.HeaderHandler) []*display.LineData { - lines := make([]*display.LineData, 0) - - lines = append(lines, display.NewLineData(false, []string{ - "", - "Epoch", - fmt.Sprintf("%d", headerHandler.GetEpoch())})) - lines = append(lines, display.NewLineData(false, []string{ - "", - "Round", - fmt.Sprintf("%d", headerHandler.GetRound())})) - lines = append(lines, display.NewLineData(false, []string{ - "", - "TimeStamp", - fmt.Sprintf("%d", headerHandler.GetTimeStamp())})) - lines = append(lines, display.NewLineData(false, []string{ - "", - "Nonce", - fmt.Sprintf("%d", headerHandler.GetNonce())})) - lines = append(lines, display.NewLineData(false, []string{ - "", - "Prev hash", - core.ToB64(headerHandler.GetPrevHash())})) - lines = append(lines, display.NewLineData(false, []string{ - "", - "Prev rand seed", - core.ToB64(headerHandler.GetPrevRandSeed())})) - lines = 
append(lines, display.NewLineData(false, []string{ - "", - "Rand seed", - core.ToB64(headerHandler.GetRandSeed())})) - lines = append(lines, display.NewLineData(false, []string{ - "", - "Pub keys bitmap", - core.ToHex(headerHandler.GetPubKeysBitmap())})) - lines = append(lines, display.NewLineData(false, []string{ - "", - "Signature", - core.ToB64(headerHandler.GetSignature())})) - lines = append(lines, display.NewLineData(true, []string{ - "", - "Root hash", - core.ToB64(headerHandler.GetRootHash())})) - return lines + lines := make([]*display.LineData, 0) + + lines = append(lines, display.NewLineData(false, []string{ + "", + "Epoch", + fmt.Sprintf("%d", headerHandler.GetEpoch())})) + lines = append(lines, display.NewLineData(false, []string{ + "", + "Round", + fmt.Sprintf("%d", headerHandler.GetRound())})) + lines = append(lines, display.NewLineData(false, []string{ + "", + "TimeStamp", + fmt.Sprintf("%d", headerHandler.GetTimeStamp())})) + lines = append(lines, display.NewLineData(false, []string{ + "", + "Nonce", + fmt.Sprintf("%d", headerHandler.GetNonce())})) + lines = append(lines, display.NewLineData(false, []string{ + "", + "Prev hash", + core.ToB64(headerHandler.GetPrevHash())})) + lines = append(lines, display.NewLineData(false, []string{ + "", + "Prev rand seed", + core.ToB64(headerHandler.GetPrevRandSeed())})) + lines = append(lines, display.NewLineData(false, []string{ + "", + "Rand seed", + core.ToB64(headerHandler.GetRandSeed())})) + lines = append(lines, display.NewLineData(false, []string{ + "", + "Pub keys bitmap", + core.ToHex(headerHandler.GetPubKeysBitmap())})) + lines = append(lines, display.NewLineData(false, []string{ + "", + "Signature", + core.ToB64(headerHandler.GetSignature())})) + lines = append(lines, display.NewLineData(true, []string{ + "", + "Root hash", + core.ToB64(headerHandler.GetRootHash())})) + return lines } // checkProcessorNilParameters will check the imput parameters for nil values func checkProcessorNilParameters( - accounts state.AccountsAdapter, - forkDetector process.ForkDetector, - hasher hashing.Hasher, - marshalizer marshal.Marshalizer, - store dataRetriever.StorageService, - shardCoordinator sharding.Coordinator, - nodesCoordinator sharding.NodesCoordinator, - specialAddressHandler process.SpecialAddressHandler, - uint64Converter typeConverters.Uint64ByteSliceConverter, + accounts state.AccountsAdapter, + forkDetector process.ForkDetector, + hasher hashing.Hasher, + marshalizer marshal.Marshalizer, + store dataRetriever.StorageService, + shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, + specialAddressHandler process.SpecialAddressHandler, + uint64Converter typeConverters.Uint64ByteSliceConverter, ) error { - if accounts == nil || accounts.IsInterfaceNil() { - return process.ErrNilAccountsAdapter - } - if forkDetector == nil || forkDetector.IsInterfaceNil() { - return process.ErrNilForkDetector - } - if hasher == nil || hasher.IsInterfaceNil() { - return process.ErrNilHasher - } - if marshalizer == nil || marshalizer.IsInterfaceNil() { - return process.ErrNilMarshalizer - } - if store == nil || store.IsInterfaceNil() { - return process.ErrNilStorage - } - if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { - return process.ErrNilShardCoordinator - } - if nodesCoordinator == nil || nodesCoordinator.IsInterfaceNil() { - return process.ErrNilNodesCoordinator - } - if specialAddressHandler == nil || specialAddressHandler.IsInterfaceNil() { - return process.ErrNilSpecialAddressHandler - } - if 
uint64Converter == nil || uint64Converter.IsInterfaceNil() { - return process.ErrNilUint64Converter - } - - return nil + if accounts == nil || accounts.IsInterfaceNil() { + return process.ErrNilAccountsAdapter + } + if forkDetector == nil || forkDetector.IsInterfaceNil() { + return process.ErrNilForkDetector + } + if hasher == nil || hasher.IsInterfaceNil() { + return process.ErrNilHasher + } + if marshalizer == nil || marshalizer.IsInterfaceNil() { + return process.ErrNilMarshalizer + } + if store == nil || store.IsInterfaceNil() { + return process.ErrNilStorage + } + if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { + return process.ErrNilShardCoordinator + } + if nodesCoordinator == nil || nodesCoordinator.IsInterfaceNil() { + return process.ErrNilNodesCoordinator + } + if specialAddressHandler == nil || specialAddressHandler.IsInterfaceNil() { + return process.ErrNilSpecialAddressHandler + } + if uint64Converter == nil || uint64Converter.IsInterfaceNil() { + return process.ErrNilUint64Converter + } + + return nil } diff --git a/process/block/displayBlock.go b/process/block/displayBlock.go index f0d3329c7d6..f48a7155077 100644 --- a/process/block/displayBlock.go +++ b/process/block/displayBlock.go @@ -1,235 +1,235 @@ package block import ( - "fmt" - "sync" - - "github.com/ElrondNetwork/elrond-go/core" - "github.com/ElrondNetwork/elrond-go/data" - "github.com/ElrondNetwork/elrond-go/data/block" - "github.com/ElrondNetwork/elrond-go/dataRetriever" - "github.com/ElrondNetwork/elrond-go/display" - "github.com/ElrondNetwork/elrond-go/hashing" - "github.com/ElrondNetwork/elrond-go/marshal" - "github.com/ElrondNetwork/elrond-go/process" + "fmt" + "sync" + + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/display" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/process" ) type transactionCounter struct { - mutex sync.RWMutex - currentBlockTxs int - totalTxs int + mutex sync.RWMutex + currentBlockTxs int + totalTxs int } // NewTransactionCounter returns a new object that keeps track of how many transactions // were executed in total, and in the current block func NewTransactionCounter() *transactionCounter { - return &transactionCounter{ - mutex: sync.RWMutex{}, - currentBlockTxs: 0, - totalTxs: 0, - } + return &transactionCounter{ + mutex: sync.RWMutex{}, + currentBlockTxs: 0, + totalTxs: 0, + } } // getNumTxsFromPool returns the number of transactions from pool for a given shard func (txc *transactionCounter) getNumTxsFromPool(shardId uint32, dataPool dataRetriever.PoolsHolder, nrShards uint32) int { - txPool := dataPool.Transactions() - if txPool == nil { - return 0 - } - - sumTxs := 0 - - strCache := process.ShardCacherIdentifier(shardId, shardId) - txStore := txPool.ShardDataStore(strCache) - if txStore != nil { - sumTxs += txStore.Len() - } - - for i := uint32(0); i < nrShards; i++ { - if i == shardId { - continue - } - - strCache = process.ShardCacherIdentifier(i, shardId) - txStore = txPool.ShardDataStore(strCache) - if txStore != nil { - sumTxs += txStore.Len() - } - - strCache = process.ShardCacherIdentifier(shardId, i) - txStore = txPool.ShardDataStore(strCache) - if txStore != nil { - sumTxs += txStore.Len() - } - } - - return sumTxs + txPool := dataPool.Transactions() + if txPool == nil { + return 0 + } + + 
sumTxs := 0 + + strCache := process.ShardCacherIdentifier(shardId, shardId) + txStore := txPool.ShardDataStore(strCache) + if txStore != nil { + sumTxs += txStore.Len() + } + + for i := uint32(0); i < nrShards; i++ { + if i == shardId { + continue + } + + strCache = process.ShardCacherIdentifier(i, shardId) + txStore = txPool.ShardDataStore(strCache) + if txStore != nil { + sumTxs += txStore.Len() + } + + strCache = process.ShardCacherIdentifier(shardId, i) + txStore = txPool.ShardDataStore(strCache) + if txStore != nil { + sumTxs += txStore.Len() + } + } + + return sumTxs } // substractRestoredTxs updated the total processed txs in case of restore func (txc *transactionCounter) substractRestoredTxs(txsNr int) { - txc.mutex.Lock() - txc.totalTxs = txc.totalTxs - txsNr - txc.mutex.Unlock() + txc.mutex.Lock() + txc.totalTxs = txc.totalTxs - txsNr + txc.mutex.Unlock() } // displayLogInfo writes to the output information about the block and transactions func (txc *transactionCounter) displayLogInfo( - header *block.Header, - body block.Body, - headerHash []byte, - numShards uint32, - selfId uint32, - dataPool dataRetriever.PoolsHolder, + header *block.Header, + body block.Body, + headerHash []byte, + numShards uint32, + selfId uint32, + dataPool dataRetriever.PoolsHolder, ) { - dispHeader, dispLines := txc.createDisplayableShardHeaderAndBlockBody(header, body) - - tblString, err := display.CreateTableString(dispHeader, dispLines) - if err != nil { - log.Error(err.Error()) - return - } - - txc.mutex.RLock() - tblString = tblString + fmt.Sprintf("\nHeader hash: %s\n\n"+ - "Total txs processed until now: %d. Total txs processed for this block: %d. Total txs remained in pool: %d\n\n"+ - "Total shards: %d. Current shard id: %d\n", - core.ToB64(headerHash), - txc.totalTxs, - txc.currentBlockTxs, - txc.getNumTxsFromPool(selfId, dataPool, numShards), - numShards, - selfId) - txc.mutex.RUnlock() - log.Info(tblString) + dispHeader, dispLines := txc.createDisplayableShardHeaderAndBlockBody(header, body) + + tblString, err := display.CreateTableString(dispHeader, dispLines) + if err != nil { + log.Error(err.Error()) + return + } + + txc.mutex.RLock() + tblString = tblString + fmt.Sprintf("\nHeader hash: %s\n\n"+ + "Total txs processed until now: %d. Total txs processed for this block: %d. Total txs remained in pool: %d\n\n"+ + "Total shards: %d. Current shard id: %d\n", + core.ToB64(headerHash), + txc.totalTxs, + txc.currentBlockTxs, + txc.getNumTxsFromPool(selfId, dataPool, numShards), + numShards, + selfId) + txc.mutex.RUnlock() + log.Info(tblString) } func (txc *transactionCounter) createDisplayableShardHeaderAndBlockBody( - header *block.Header, - body block.Body, + header *block.Header, + body block.Body, ) ([]string, []*display.LineData) { - tableHeader := []string{"Part", "Parameter", "Value"} + tableHeader := []string{"Part", "Parameter", "Value"} - lines := displayHeader(header) + lines := displayHeader(header) - shardLines := make([]*display.LineData, 0) - shardLines = append(shardLines, display.NewLineData(false, []string{ - "Header", - "Block type", - "TxBlock"})) - shardLines = append(shardLines, display.NewLineData(false, []string{ - "", - "Shard", - fmt.Sprintf("%d", header.ShardId)})) - shardLines = append(shardLines, lines...) 
+ shardLines := make([]*display.LineData, 0) + shardLines = append(shardLines, display.NewLineData(false, []string{ + "Header", + "Block type", + "TxBlock"})) + shardLines = append(shardLines, display.NewLineData(false, []string{ + "", + "Shard", + fmt.Sprintf("%d", header.ShardId)})) + shardLines = append(shardLines, lines...) - if header.BlockBodyType == block.TxBlock { - shardLines = txc.displayMetaHashesIncluded(shardLines, header) - shardLines = txc.displayTxBlockBody(shardLines, body) + if header.BlockBodyType == block.TxBlock { + shardLines = txc.displayMetaHashesIncluded(shardLines, header) + shardLines = txc.displayTxBlockBody(shardLines, body) - return tableHeader, shardLines - } + return tableHeader, shardLines + } - // TODO: implement the other block bodies + // TODO: implement the other block bodies - shardLines = append(shardLines, display.NewLineData(false, []string{"Unknown", "", ""})) - return tableHeader, shardLines + shardLines = append(shardLines, display.NewLineData(false, []string{"Unknown", "", ""})) + return tableHeader, shardLines } func (txc *transactionCounter) displayMetaHashesIncluded( - lines []*display.LineData, - header *block.Header, + lines []*display.LineData, + header *block.Header, ) []*display.LineData { - if header.MetaBlockHashes == nil || len(header.MetaBlockHashes) == 0 { - return lines - } + if header.MetaBlockHashes == nil || len(header.MetaBlockHashes) == 0 { + return lines + } - part := fmt.Sprintf("MetaBlockHashes") - for i := 0; i < len(header.MetaBlockHashes); i++ { - if i == 0 || i >= len(header.MetaBlockHashes)-1 { - lines = append(lines, display.NewLineData(false, []string{ - part, - fmt.Sprintf("MetaBlockHash_%d", i+1), - core.ToB64(header.MetaBlockHashes[i])})) + part := fmt.Sprintf("MetaBlockHashes") + for i := 0; i < len(header.MetaBlockHashes); i++ { + if i == 0 || i >= len(header.MetaBlockHashes)-1 { + lines = append(lines, display.NewLineData(false, []string{ + part, + fmt.Sprintf("MetaBlockHash_%d", i+1), + core.ToB64(header.MetaBlockHashes[i])})) - part = "" - } else if i == 1 { - lines = append(lines, display.NewLineData(false, []string{ - part, - fmt.Sprintf("..."), - fmt.Sprintf("...")})) + part = "" + } else if i == 1 { + lines = append(lines, display.NewLineData(false, []string{ + part, + fmt.Sprintf("..."), + fmt.Sprintf("...")})) - part = "" - } - } + part = "" + } + } - lines[len(lines)-1].HorizontalRuleAfter = true + lines[len(lines)-1].HorizontalRuleAfter = true - return lines + return lines } func (txc *transactionCounter) displayTxBlockBody(lines []*display.LineData, body block.Body) []*display.LineData { - currentBlockTxs := 0 + currentBlockTxs := 0 - for i := 0; i < len(body); i++ { - miniBlock := body[i] + for i := 0; i < len(body); i++ { + miniBlock := body[i] - part := fmt.Sprintf("MiniBlock_%d_%d", miniBlock.SenderShardID, miniBlock.ReceiverShardID) + part := fmt.Sprintf("MiniBlock_%d_%d", miniBlock.SenderShardID, miniBlock.ReceiverShardID) - if miniBlock.TxHashes == nil || len(miniBlock.TxHashes) == 0 { - lines = append(lines, display.NewLineData(false, []string{ - part, "", ""})) - } + if miniBlock.TxHashes == nil || len(miniBlock.TxHashes) == 0 { + lines = append(lines, display.NewLineData(false, []string{ + part, "", ""})) + } - currentBlockTxs += len(miniBlock.TxHashes) + currentBlockTxs += len(miniBlock.TxHashes) - for j := 0; j < len(miniBlock.TxHashes); j++ { - if j == 0 || j >= len(miniBlock.TxHashes)-1 { - lines = append(lines, display.NewLineData(false, []string{ - part, - fmt.Sprintf("TxHash_%d", 
j+1), - core.ToB64(miniBlock.TxHashes[j])})) + for j := 0; j < len(miniBlock.TxHashes); j++ { + if j == 0 || j >= len(miniBlock.TxHashes)-1 { + lines = append(lines, display.NewLineData(false, []string{ + part, + fmt.Sprintf("TxHash_%d", j+1), + core.ToB64(miniBlock.TxHashes[j])})) - part = "" - } else if j == 1 { - lines = append(lines, display.NewLineData(false, []string{ - part, - fmt.Sprintf("..."), - fmt.Sprintf("...")})) + part = "" + } else if j == 1 { + lines = append(lines, display.NewLineData(false, []string{ + part, + fmt.Sprintf("..."), + fmt.Sprintf("...")})) - part = "" - } - } + part = "" + } + } - lines[len(lines)-1].HorizontalRuleAfter = true - } + lines[len(lines)-1].HorizontalRuleAfter = true + } - txc.mutex.Lock() - txc.currentBlockTxs = currentBlockTxs - txc.totalTxs += currentBlockTxs - txc.mutex.Unlock() + txc.mutex.Lock() + txc.currentBlockTxs = currentBlockTxs + txc.totalTxs += currentBlockTxs + txc.mutex.Unlock() - return lines + return lines } func DisplayLastNotarized( - marshalizer marshal.Marshalizer, - hasher hashing.Hasher, - lastNotarizedHdrForShard data.HeaderHandler, - shardId uint32) { - lastNotarizedHdrHashForShard, errNotCritical := core.CalculateHash( - marshalizer, - hasher, - lastNotarizedHdrForShard) - if errNotCritical != nil { - log.Debug(errNotCritical.Error()) - } - - log.Info(fmt.Sprintf("last notarized block from shard %d has: round = %d, nonce = %d, hash = %s\n", - shardId, - lastNotarizedHdrForShard.GetRound(), - lastNotarizedHdrForShard.GetNonce(), - core.ToB64(lastNotarizedHdrHashForShard))) + marshalizer marshal.Marshalizer, + hasher hashing.Hasher, + lastNotarizedHdrForShard data.HeaderHandler, + shardId uint32) { + lastNotarizedHdrHashForShard, errNotCritical := core.CalculateHash( + marshalizer, + hasher, + lastNotarizedHdrForShard) + if errNotCritical != nil { + log.Debug(errNotCritical.Error()) + } + + log.Info(fmt.Sprintf("last notarized block from shard %d has: round = %d, nonce = %d, hash = %s\n", + shardId, + lastNotarizedHdrForShard.GetRound(), + lastNotarizedHdrForShard.GetNonce(), + core.ToB64(lastNotarizedHdrHashForShard))) } diff --git a/process/block/preprocess/rewardTxPreProcessor.go b/process/block/preprocess/rewardTxPreProcessor.go index 5f4e9c742df..37700adbdcf 100644 --- a/process/block/preprocess/rewardTxPreProcessor.go +++ b/process/block/preprocess/rewardTxPreProcessor.go @@ -169,12 +169,12 @@ func (rtp *rewardTxPreprocessor) RestoreTxBlockIntoPools( // ProcessBlockTransactions processes all the reward transactions from the block.Body, updates the state func (rtp *rewardTxPreprocessor) ProcessBlockTransactions(body block.Body, round uint64, haveTime func() time.Duration) error { - // basic validation already done in interceptors - for i := 0; i < len(body); i++ { - miniBlock := body[i] - if miniBlock.Type != block.RewardsBlock { - continue - } + // basic validation already done in interceptors + for i := 0; i < len(body); i++ { + miniBlock := body[i] + if miniBlock.Type != block.RewardsBlock { + continue + } for j := 0; j < len(miniBlock.TxHashes); j++ { if haveTime() < 0 { @@ -211,35 +211,35 @@ func (rtp *rewardTxPreprocessor) ProcessBlockTransactions(body block.Body, round func (rtp *rewardTxPreprocessor) AddComputedRewardMiniBlocks(computedRewardMiniblocks block.MiniBlockSlice) { - for _, rewardMb := range computedRewardMiniblocks { - txShardInfo := &txShardInfo{senderShardID: rewardMb.SenderShardID, receiverShardID: rewardMb.ReceiverShardID} - for _, txHash := range rewardMb.TxHashes { - tx, ok := 
rtp.rewardTxPool.SearchFirstData(txHash) - if !ok { - log.Error("reward transaction should be in pool but not found") - continue - } - - rTx, ok := tx.(*rewardTx.RewardTx) - if !ok { - log.Error("wrong type in reward transactions pool") - } - - rtp.rewardTxsForBlock.txHashAndInfo[string(txHash)] = &txInfo{ - tx: rTx, - txShardInfo: txShardInfo, - } - } - } + for _, rewardMb := range computedRewardMiniblocks { + txShardInfo := &txShardInfo{senderShardID: rewardMb.SenderShardID, receiverShardID: rewardMb.ReceiverShardID} + for _, txHash := range rewardMb.TxHashes { + tx, ok := rtp.rewardTxPool.SearchFirstData(txHash) + if !ok { + log.Error("reward transaction should be in pool but not found") + continue + } + + rTx, ok := tx.(*rewardTx.RewardTx) + if !ok { + log.Error("wrong type in reward transactions pool") + } + + rtp.rewardTxsForBlock.txHashAndInfo[string(txHash)] = &txInfo{ + tx: rTx, + txShardInfo: txShardInfo, + } + } + } } // SaveTxBlockToStorage saves the reward transactions from body into storage func (rtp *rewardTxPreprocessor) SaveTxBlockToStorage(body block.Body) error { - for i := 0; i < len(body); i++ { - miniBlock := (body)[i] - if miniBlock.Type != block.RewardsBlock { - continue - } + for i := 0; i < len(body); i++ { + miniBlock := (body)[i] + if miniBlock.Type != block.RewardsBlock { + continue + } err := rtp.saveTxsToStorage( miniBlock.TxHashes, @@ -297,25 +297,25 @@ func (rtp *rewardTxPreprocessor) RequestBlockTransactions(body block.Body) int { // computeMissingAndExistingRewardTxsForShards calculates what reward transactions are available and what are missing // from block.Body func (rtp *rewardTxPreprocessor) computeMissingAndExistingRewardTxsForShards(body block.Body) map[uint32]*txsHashesInfo { - rewardTxs := block.Body{} - for _, mb := range body { - if mb.Type != block.RewardsBlock { - continue - } - if mb.SenderShardID == rtp.shardCoordinator.SelfId() { - continue - } - - rewardTxs = append(rewardTxs, mb) - } - - missingTxsForShard := rtp.computeExistingAndMissing( - rewardTxs, - &rtp.rewardTxsForBlock, - rtp.chReceivedAllRewardTxs, - block.RewardsBlock, - rtp.rewardTxPool, - ) + rewardTxs := block.Body{} + for _, mb := range body { + if mb.Type != block.RewardsBlock { + continue + } + if mb.SenderShardID == rtp.shardCoordinator.SelfId() { + continue + } + + rewardTxs = append(rewardTxs, mb) + } + + missingTxsForShard := rtp.computeExistingAndMissing( + rewardTxs, + &rtp.rewardTxsForBlock, + rtp.chReceivedAllRewardTxs, + block.RewardsBlock, + rtp.rewardTxPool, + ) return missingTxsForShard } @@ -474,8 +474,8 @@ func (rtp *rewardTxPreprocessor) GetAllCurrentUsedTxs() map[string]data.Transact // IsInterfaceNil returns true if there is no value under the interface func (rtp *rewardTxPreprocessor) IsInterfaceNil() bool { - if rtp == nil { - return true - } - return false + if rtp == nil { + return true + } + return false } diff --git a/process/block/preprocess/rewardsHandler.go b/process/block/preprocess/rewardsHandler.go index 517f99304f1..d1ea0b5b8d7 100644 --- a/process/block/preprocess/rewardsHandler.go +++ b/process/block/preprocess/rewardsHandler.go @@ -294,7 +294,7 @@ func (rtxh *rewardsHandler) createLeaderTx() *rewardTx.RewardTx { currTx.Epoch = rtxh.address.Epoch() currTx.Round = rtxh.address.Round() - return currTx + return currTx } func (rtxh *rewardsHandler) createBurnTx() *rewardTx.RewardTx { @@ -306,7 +306,7 @@ func (rtxh *rewardsHandler) createBurnTx() *rewardTx.RewardTx { currTx.Epoch = rtxh.address.Epoch() currTx.Round = rtxh.address.Round() - 
return currTx + return currTx } func (rtxh *rewardsHandler) createCommunityTx() *rewardTx.RewardTx { @@ -318,7 +318,7 @@ func (rtxh *rewardsHandler) createCommunityTx() *rewardTx.RewardTx { currTx.Epoch = rtxh.address.Epoch() currTx.Round = rtxh.address.Round() - return currTx + return currTx } // createRewardFromFees creates the reward transactions from accumulated fees diff --git a/process/block/preprocess/rewardsHandler_test.go b/process/block/preprocess/rewardsHandler_test.go index 8e24576c4d1..37a81979044 100644 --- a/process/block/preprocess/rewardsHandler_test.go +++ b/process/block/preprocess/rewardsHandler_test.go @@ -1,14 +1,14 @@ package preprocess import ( - "math/big" - "testing" + "math/big" + "testing" "github.com/ElrondNetwork/elrond-go/data" - "github.com/ElrondNetwork/elrond-go/data/rewardTx" - "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/process/mock" - "github.com/stretchr/testify/assert" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/mock" + "github.com/stretchr/testify/assert" ) func TestNewRewardTxHandler_NilSpecialAddressShouldErr(t *testing.T) { diff --git a/process/block/preprocess/transactions_test.go b/process/block/preprocess/transactions_test.go index df5b56769de..a7018fb2133 100644 --- a/process/block/preprocess/transactions_test.go +++ b/process/block/preprocess/transactions_test.go @@ -1,711 +1,711 @@ package preprocess import ( - "bytes" - "encoding/hex" - "fmt" - "github.com/ElrondNetwork/elrond-go/data/rewardTx" - "math/big" - "math/rand" - "reflect" - "sync" - "testing" - "time" - - "github.com/ElrondNetwork/elrond-go/core" - "github.com/ElrondNetwork/elrond-go/data/block" - "github.com/ElrondNetwork/elrond-go/data/smartContractResult" - "github.com/ElrondNetwork/elrond-go/data/transaction" - "github.com/ElrondNetwork/elrond-go/dataRetriever" - "github.com/ElrondNetwork/elrond-go/dataRetriever/shardedData" - "github.com/ElrondNetwork/elrond-go/hashing" - "github.com/ElrondNetwork/elrond-go/marshal" - "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/process/mock" - "github.com/ElrondNetwork/elrond-go/storage" - "github.com/ElrondNetwork/elrond-go/storage/storageUnit" - "github.com/stretchr/testify/assert" + "bytes" + "encoding/hex" + "fmt" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" + "math/big" + "math/rand" + "reflect" + "sync" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/smartContractResult" + "github.com/ElrondNetwork/elrond-go/data/transaction" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/dataRetriever/shardedData" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/mock" + "github.com/ElrondNetwork/elrond-go/storage" + "github.com/ElrondNetwork/elrond-go/storage/storageUnit" + "github.com/stretchr/testify/assert" ) func initDataPool() *mock.PoolsHolderStub { - sdp := &mock.PoolsHolderStub{ - TransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{ - RegisterHandlerCalled: func(i func(key []byte)) {}, - ShardDataStoreCalled: func(id string) (c storage.Cacher) { - return &mock.CacherStub{ - PeekCalled: func(key []byte) (value interface{}, 
ok bool) { - if reflect.DeepEqual(key, []byte("tx1_hash")) { - return &transaction.Transaction{Nonce: 10}, true - } - return nil, false - }, - KeysCalled: func() [][]byte { - return [][]byte{[]byte("key1"), []byte("key2")} - }, - LenCalled: func() int { - return 0 - }, - } - }, - AddDataCalled: func(key []byte, data interface{}, cacheId string) {}, - RemoveSetOfDataFromPoolCalled: func(keys [][]byte, id string) {}, - SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { - if reflect.DeepEqual(key, []byte("tx1_hash")) { - return &transaction.Transaction{Nonce: 10}, true - } - return nil, false - }, - } - }, - UnsignedTransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{ - RegisterHandlerCalled: func(i func(key []byte)) {}, - ShardDataStoreCalled: func(id string) (c storage.Cacher) { - return &mock.CacherStub{ - PeekCalled: func(key []byte) (value interface{}, ok bool) { - if reflect.DeepEqual(key, []byte("tx1_hash")) { - return &smartContractResult.SmartContractResult{Nonce: 10}, true - } - return nil, false - }, - KeysCalled: func() [][]byte { - return [][]byte{[]byte("key1"), []byte("key2")} - }, - LenCalled: func() int { - return 0 - }, - } - }, - AddDataCalled: func(key []byte, data interface{}, cacheId string) {}, - RemoveSetOfDataFromPoolCalled: func(keys [][]byte, id string) {}, - SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { - if reflect.DeepEqual(key, []byte("tx1_hash")) { - return &smartContractResult.SmartContractResult{Nonce: 10}, true - } - return nil, false - }, - } - }, - RewardTransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{ - RegisterHandlerCalled: func(i func(key []byte)) {}, - ShardDataStoreCalled: func(id string) (c storage.Cacher) { - return &mock.CacherStub{ - PeekCalled: func(key []byte) (value interface{}, ok bool) { - if reflect.DeepEqual(key, []byte("tx1_hash")) { - return &rewardTx.RewardTx{Value: big.NewInt(100)}, true - } - return nil, false - }, - KeysCalled: func() [][]byte { - return [][]byte{[]byte("key1"), []byte("key2")} - }, - LenCalled: func() int { - return 0 - }, - } - }, - AddDataCalled: func(key []byte, data interface{}, cacheId string) {}, - RemoveSetOfDataFromPoolCalled: func(keys [][]byte, id string) {}, - SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { - if reflect.DeepEqual(key, []byte("tx1_hash")) { - return &rewardTx.RewardTx{Value: big.NewInt(100)}, true - } - return nil, false - }, - } - }, - HeadersNoncesCalled: func() dataRetriever.Uint64SyncMapCacher { - return &mock.Uint64SyncMapCacherStub{} - }, - MetaBlocksCalled: func() storage.Cacher { - return &mock.CacherStub{ - GetCalled: func(key []byte) (value interface{}, ok bool) { - if reflect.DeepEqual(key, []byte("tx1_hash")) { - return &transaction.Transaction{Nonce: 10}, true - } - return nil, false - }, - KeysCalled: func() [][]byte { - return nil - }, - LenCalled: func() int { - return 0 - }, - PeekCalled: func(key []byte) (value interface{}, ok bool) { - if reflect.DeepEqual(key, []byte("tx1_hash")) { - return &transaction.Transaction{Nonce: 10}, true - } - return nil, false - }, - RegisterHandlerCalled: func(i func(key []byte)) {}, - } - }, - MiniBlocksCalled: func() storage.Cacher { - cs := &mock.CacherStub{} - cs.RegisterHandlerCalled = func(i func(key []byte)) { - } - cs.GetCalled = func(key []byte) (value interface{}, ok bool) { - if bytes.Equal([]byte("bbb"), key) { - return make(block.MiniBlockSlice, 0), true - } - - 
return nil, false - } - cs.PeekCalled = func(key []byte) (value interface{}, ok bool) { - if bytes.Equal([]byte("bbb"), key) { - return make(block.MiniBlockSlice, 0), true - } - - return nil, false - } - cs.RegisterHandlerCalled = func(i func(key []byte)) {} - cs.RemoveCalled = func(key []byte) {} - return cs - }, - HeadersCalled: func() storage.Cacher { - cs := &mock.CacherStub{} - cs.RegisterHandlerCalled = func(i func(key []byte)) { - } - return cs - }, - } - return sdp + sdp := &mock.PoolsHolderStub{ + TransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{ + RegisterHandlerCalled: func(i func(key []byte)) {}, + ShardDataStoreCalled: func(id string) (c storage.Cacher) { + return &mock.CacherStub{ + PeekCalled: func(key []byte) (value interface{}, ok bool) { + if reflect.DeepEqual(key, []byte("tx1_hash")) { + return &transaction.Transaction{Nonce: 10}, true + } + return nil, false + }, + KeysCalled: func() [][]byte { + return [][]byte{[]byte("key1"), []byte("key2")} + }, + LenCalled: func() int { + return 0 + }, + } + }, + AddDataCalled: func(key []byte, data interface{}, cacheId string) {}, + RemoveSetOfDataFromPoolCalled: func(keys [][]byte, id string) {}, + SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { + if reflect.DeepEqual(key, []byte("tx1_hash")) { + return &transaction.Transaction{Nonce: 10}, true + } + return nil, false + }, + } + }, + UnsignedTransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{ + RegisterHandlerCalled: func(i func(key []byte)) {}, + ShardDataStoreCalled: func(id string) (c storage.Cacher) { + return &mock.CacherStub{ + PeekCalled: func(key []byte) (value interface{}, ok bool) { + if reflect.DeepEqual(key, []byte("tx1_hash")) { + return &smartContractResult.SmartContractResult{Nonce: 10}, true + } + return nil, false + }, + KeysCalled: func() [][]byte { + return [][]byte{[]byte("key1"), []byte("key2")} + }, + LenCalled: func() int { + return 0 + }, + } + }, + AddDataCalled: func(key []byte, data interface{}, cacheId string) {}, + RemoveSetOfDataFromPoolCalled: func(keys [][]byte, id string) {}, + SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { + if reflect.DeepEqual(key, []byte("tx1_hash")) { + return &smartContractResult.SmartContractResult{Nonce: 10}, true + } + return nil, false + }, + } + }, + RewardTransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{ + RegisterHandlerCalled: func(i func(key []byte)) {}, + ShardDataStoreCalled: func(id string) (c storage.Cacher) { + return &mock.CacherStub{ + PeekCalled: func(key []byte) (value interface{}, ok bool) { + if reflect.DeepEqual(key, []byte("tx1_hash")) { + return &rewardTx.RewardTx{Value: big.NewInt(100)}, true + } + return nil, false + }, + KeysCalled: func() [][]byte { + return [][]byte{[]byte("key1"), []byte("key2")} + }, + LenCalled: func() int { + return 0 + }, + } + }, + AddDataCalled: func(key []byte, data interface{}, cacheId string) {}, + RemoveSetOfDataFromPoolCalled: func(keys [][]byte, id string) {}, + SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { + if reflect.DeepEqual(key, []byte("tx1_hash")) { + return &rewardTx.RewardTx{Value: big.NewInt(100)}, true + } + return nil, false + }, + } + }, + HeadersNoncesCalled: func() dataRetriever.Uint64SyncMapCacher { + return &mock.Uint64SyncMapCacherStub{} + }, + MetaBlocksCalled: func() storage.Cacher { + return &mock.CacherStub{ + GetCalled: 
func(key []byte) (value interface{}, ok bool) { + if reflect.DeepEqual(key, []byte("tx1_hash")) { + return &transaction.Transaction{Nonce: 10}, true + } + return nil, false + }, + KeysCalled: func() [][]byte { + return nil + }, + LenCalled: func() int { + return 0 + }, + PeekCalled: func(key []byte) (value interface{}, ok bool) { + if reflect.DeepEqual(key, []byte("tx1_hash")) { + return &transaction.Transaction{Nonce: 10}, true + } + return nil, false + }, + RegisterHandlerCalled: func(i func(key []byte)) {}, + } + }, + MiniBlocksCalled: func() storage.Cacher { + cs := &mock.CacherStub{} + cs.RegisterHandlerCalled = func(i func(key []byte)) { + } + cs.GetCalled = func(key []byte) (value interface{}, ok bool) { + if bytes.Equal([]byte("bbb"), key) { + return make(block.MiniBlockSlice, 0), true + } + + return nil, false + } + cs.PeekCalled = func(key []byte) (value interface{}, ok bool) { + if bytes.Equal([]byte("bbb"), key) { + return make(block.MiniBlockSlice, 0), true + } + + return nil, false + } + cs.RegisterHandlerCalled = func(i func(key []byte)) {} + cs.RemoveCalled = func(key []byte) {} + return cs + }, + HeadersCalled: func() storage.Cacher { + cs := &mock.CacherStub{} + cs.RegisterHandlerCalled = func(i func(key []byte)) { + } + return cs + }, + } + return sdp } func TestTxsPreprocessor_NewTransactionPreprocessorNilPool(t *testing.T) { - t.Parallel() + t.Parallel() - requestTransaction := func(shardID uint32, txHashes [][]byte) {} - txs, err := NewTransactionPreprocessor( - nil, - &mock.ChainStorerMock{}, - &mock.HasherMock{}, - &mock.MarshalizerMock{}, - &mock.TxProcessorMock{}, - mock.NewMultiShardsCoordinatorMock(3), - &mock.AccountsStub{}, - requestTransaction, - ) + requestTransaction := func(shardID uint32, txHashes [][]byte) {} + txs, err := NewTransactionPreprocessor( + nil, + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.TxProcessorMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + requestTransaction, + ) - assert.Nil(t, txs) - assert.Equal(t, process.ErrNilTransactionPool, err) + assert.Nil(t, txs) + assert.Equal(t, process.ErrNilTransactionPool, err) } func TestTxsPreprocessor_NewTransactionPreprocessorNilStore(t *testing.T) { - t.Parallel() + t.Parallel() - tdp := initDataPool() - requestTransaction := func(shardID uint32, txHashes [][]byte) {} - txs, err := NewTransactionPreprocessor( - tdp.Transactions(), - nil, - &mock.HasherMock{}, - &mock.MarshalizerMock{}, - &mock.TxProcessorMock{}, - mock.NewMultiShardsCoordinatorMock(3), - &mock.AccountsStub{}, - requestTransaction, - ) + tdp := initDataPool() + requestTransaction := func(shardID uint32, txHashes [][]byte) {} + txs, err := NewTransactionPreprocessor( + tdp.Transactions(), + nil, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.TxProcessorMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + requestTransaction, + ) - assert.Nil(t, txs) - assert.Equal(t, process.ErrNilTxStorage, err) + assert.Nil(t, txs) + assert.Equal(t, process.ErrNilTxStorage, err) } func TestTxsPreprocessor_NewTransactionPreprocessorNilHasher(t *testing.T) { - t.Parallel() + t.Parallel() - tdp := initDataPool() - requestTransaction := func(shardID uint32, txHashes [][]byte) {} - txs, err := NewTransactionPreprocessor( - tdp.Transactions(), - &mock.ChainStorerMock{}, - nil, - &mock.MarshalizerMock{}, - &mock.TxProcessorMock{}, - mock.NewMultiShardsCoordinatorMock(3), - &mock.AccountsStub{}, - requestTransaction, - ) + tdp := initDataPool() + 
requestTransaction := func(shardID uint32, txHashes [][]byte) {} + txs, err := NewTransactionPreprocessor( + tdp.Transactions(), + &mock.ChainStorerMock{}, + nil, + &mock.MarshalizerMock{}, + &mock.TxProcessorMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + requestTransaction, + ) - assert.Nil(t, txs) - assert.Equal(t, process.ErrNilHasher, err) + assert.Nil(t, txs) + assert.Equal(t, process.ErrNilHasher, err) } func TestTxsPreprocessor_NewTransactionPreprocessorNilMarsalizer(t *testing.T) { - t.Parallel() + t.Parallel() - tdp := initDataPool() - requestTransaction := func(shardID uint32, txHashes [][]byte) {} - txs, err := NewTransactionPreprocessor( - tdp.Transactions(), - &mock.ChainStorerMock{}, - &mock.HasherMock{}, - nil, - &mock.TxProcessorMock{}, - mock.NewMultiShardsCoordinatorMock(3), - &mock.AccountsStub{}, - requestTransaction, - ) + tdp := initDataPool() + requestTransaction := func(shardID uint32, txHashes [][]byte) {} + txs, err := NewTransactionPreprocessor( + tdp.Transactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + nil, + &mock.TxProcessorMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + requestTransaction, + ) - assert.Nil(t, txs) - assert.Equal(t, process.ErrNilMarshalizer, err) + assert.Nil(t, txs) + assert.Equal(t, process.ErrNilMarshalizer, err) } func TestTxsPreprocessor_NewTransactionPreprocessorNilTxProce(t *testing.T) { - t.Parallel() + t.Parallel() - tdp := initDataPool() - requestTransaction := func(shardID uint32, txHashes [][]byte) {} - txs, err := NewTransactionPreprocessor( - tdp.Transactions(), - &mock.ChainStorerMock{}, - &mock.HasherMock{}, - &mock.MarshalizerMock{}, - nil, - mock.NewMultiShardsCoordinatorMock(3), - &mock.AccountsStub{}, - requestTransaction, - ) + tdp := initDataPool() + requestTransaction := func(shardID uint32, txHashes [][]byte) {} + txs, err := NewTransactionPreprocessor( + tdp.Transactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + nil, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + requestTransaction, + ) - assert.Nil(t, txs) - assert.Equal(t, process.ErrNilTxProcessor, err) + assert.Nil(t, txs) + assert.Equal(t, process.ErrNilTxProcessor, err) } func TestTxsPreprocessor_NewTransactionPreprocessorNilShardCoord(t *testing.T) { - t.Parallel() + t.Parallel() - tdp := initDataPool() - requestTransaction := func(shardID uint32, txHashes [][]byte) {} - txs, err := NewTransactionPreprocessor( - tdp.Transactions(), - &mock.ChainStorerMock{}, - &mock.HasherMock{}, - &mock.MarshalizerMock{}, - &mock.TxProcessorMock{}, - nil, - &mock.AccountsStub{}, - requestTransaction, - ) + tdp := initDataPool() + requestTransaction := func(shardID uint32, txHashes [][]byte) {} + txs, err := NewTransactionPreprocessor( + tdp.Transactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.TxProcessorMock{}, + nil, + &mock.AccountsStub{}, + requestTransaction, + ) - assert.Nil(t, txs) - assert.Equal(t, process.ErrNilShardCoordinator, err) + assert.Nil(t, txs) + assert.Equal(t, process.ErrNilShardCoordinator, err) } func TestTxsPreprocessor_NewTransactionPreprocessorNilAccounts(t *testing.T) { - t.Parallel() + t.Parallel() - tdp := initDataPool() - requestTransaction := func(shardID uint32, txHashes [][]byte) {} - txs, err := NewTransactionPreprocessor( - tdp.Transactions(), - &mock.ChainStorerMock{}, - &mock.HasherMock{}, - &mock.MarshalizerMock{}, - &mock.TxProcessorMock{}, - 
mock.NewMultiShardsCoordinatorMock(3), - nil, - requestTransaction, - ) + tdp := initDataPool() + requestTransaction := func(shardID uint32, txHashes [][]byte) {} + txs, err := NewTransactionPreprocessor( + tdp.Transactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.TxProcessorMock{}, + mock.NewMultiShardsCoordinatorMock(3), + nil, + requestTransaction, + ) - assert.Nil(t, txs) - assert.Equal(t, process.ErrNilAccountsAdapter, err) + assert.Nil(t, txs) + assert.Equal(t, process.ErrNilAccountsAdapter, err) } func TestTxsPreprocessor_NewTransactionPreprocessorNilRequestFunc(t *testing.T) { - t.Parallel() + t.Parallel() - tdp := initDataPool() - txs, err := NewTransactionPreprocessor( - tdp.Transactions(), - &mock.ChainStorerMock{}, - &mock.HasherMock{}, - &mock.MarshalizerMock{}, - &mock.TxProcessorMock{}, - mock.NewMultiShardsCoordinatorMock(3), - &mock.AccountsStub{}, - nil, - ) + tdp := initDataPool() + txs, err := NewTransactionPreprocessor( + tdp.Transactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.TxProcessorMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + nil, + ) - assert.Nil(t, txs) - assert.Equal(t, process.ErrNilRequestHandler, err) + assert.Nil(t, txs) + assert.Equal(t, process.ErrNilRequestHandler, err) } func TestTxsPreProcessor_GetTransactionFromPool(t *testing.T) { - t.Parallel() - tdp := initDataPool() - requestTransaction := func(shardID uint32, txHashes [][]byte) {} - txs, _ := NewTransactionPreprocessor( - tdp.Transactions(), - &mock.ChainStorerMock{}, - &mock.HasherMock{}, - &mock.MarshalizerMock{}, - &mock.TxProcessorMock{}, - mock.NewMultiShardsCoordinatorMock(3), - &mock.AccountsStub{}, - requestTransaction, - ) - txHash := []byte("tx1_hash") - tx, _ := process.GetTransactionHandlerFromPool(1, 1, txHash, tdp.Transactions()) - assert.NotNil(t, txs) - assert.NotNil(t, tx) - assert.Equal(t, uint64(10), tx.(*transaction.Transaction).Nonce) + t.Parallel() + tdp := initDataPool() + requestTransaction := func(shardID uint32, txHashes [][]byte) {} + txs, _ := NewTransactionPreprocessor( + tdp.Transactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.TxProcessorMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + requestTransaction, + ) + txHash := []byte("tx1_hash") + tx, _ := process.GetTransactionHandlerFromPool(1, 1, txHash, tdp.Transactions()) + assert.NotNil(t, txs) + assert.NotNil(t, tx) + assert.Equal(t, uint64(10), tx.(*transaction.Transaction).Nonce) } func TestTransactionPreprocessor_RequestTransactionFromNetwork(t *testing.T) { - t.Parallel() - tdp := initDataPool() - requestTransaction := func(shardID uint32, txHashes [][]byte) {} - txs, _ := NewTransactionPreprocessor( - tdp.Transactions(), - &mock.ChainStorerMock{}, - &mock.HasherMock{}, - &mock.MarshalizerMock{}, - &mock.TxProcessorMock{}, - mock.NewMultiShardsCoordinatorMock(3), - &mock.AccountsStub{}, - requestTransaction, - ) - shardId := uint32(1) - txHash1 := []byte("tx_hash1") - txHash2 := []byte("tx_hash2") - body := make(block.Body, 0) - txHashes := make([][]byte, 0) - txHashes = append(txHashes, txHash1) - txHashes = append(txHashes, txHash2) - mBlk := block.MiniBlock{ReceiverShardID: shardId, TxHashes: txHashes} - body = append(body, &mBlk) - txsRequested := txs.RequestBlockTransactions(body) - assert.Equal(t, 2, txsRequested) + t.Parallel() + tdp := initDataPool() + requestTransaction := func(shardID uint32, txHashes 
[][]byte) {} + txs, _ := NewTransactionPreprocessor( + tdp.Transactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.TxProcessorMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + requestTransaction, + ) + shardId := uint32(1) + txHash1 := []byte("tx_hash1") + txHash2 := []byte("tx_hash2") + body := make(block.Body, 0) + txHashes := make([][]byte, 0) + txHashes = append(txHashes, txHash1) + txHashes = append(txHashes, txHash2) + mBlk := block.MiniBlock{ReceiverShardID: shardId, TxHashes: txHashes} + body = append(body, &mBlk) + txsRequested := txs.RequestBlockTransactions(body) + assert.Equal(t, 2, txsRequested) } func TestTransactionPreprocessor_RequestBlockTransactionFromMiniBlockFromNetwork(t *testing.T) { - t.Parallel() - tdp := initDataPool() - requestTransaction := func(shardID uint32, txHashes [][]byte) {} - txs, _ := NewTransactionPreprocessor( - tdp.Transactions(), - &mock.ChainStorerMock{}, - &mock.HasherMock{}, - &mock.MarshalizerMock{}, - &mock.TxProcessorMock{}, - mock.NewMultiShardsCoordinatorMock(3), - &mock.AccountsStub{}, - requestTransaction, - ) - - shardId := uint32(1) - txHash1 := []byte("tx_hash1") - txHash2 := []byte("tx_hash2") - txHashes := make([][]byte, 0) - txHashes = append(txHashes, txHash1) - txHashes = append(txHashes, txHash2) - mb := block.MiniBlock{ReceiverShardID: shardId, TxHashes: txHashes} - txsRequested := txs.RequestTransactionsForMiniBlock(mb) - assert.Equal(t, 2, txsRequested) + t.Parallel() + tdp := initDataPool() + requestTransaction := func(shardID uint32, txHashes [][]byte) {} + txs, _ := NewTransactionPreprocessor( + tdp.Transactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.TxProcessorMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + requestTransaction, + ) + + shardId := uint32(1) + txHash1 := []byte("tx_hash1") + txHash2 := []byte("tx_hash2") + txHashes := make([][]byte, 0) + txHashes = append(txHashes, txHash1) + txHashes = append(txHashes, txHash2) + mb := block.MiniBlock{ReceiverShardID: shardId, TxHashes: txHashes} + txsRequested := txs.RequestTransactionsForMiniBlock(mb) + assert.Equal(t, 2, txsRequested) } func TestTransactionPreprocessor_ReceivedTransactionShouldEraseRequested(t *testing.T) { - t.Parallel() - - dataPool := mock.NewPoolsHolderMock() - - shardedDataStub := &mock.ShardedDataStub{ - ShardDataStoreCalled: func(cacheId string) (c storage.Cacher) { - return &mock.CacherStub{ - PeekCalled: func(key []byte) (value interface{}, ok bool) { - return &transaction.Transaction{}, true - }, - } - }, - RegisterHandlerCalled: func(i func(key []byte)) { - }, - } - - dataPool.SetTransactions(shardedDataStub) - - requestTransaction := func(shardID uint32, txHashes [][]byte) {} - txs, _ := NewTransactionPreprocessor( - dataPool.Transactions(), - &mock.ChainStorerMock{}, - &mock.HasherMock{}, - &mock.MarshalizerMock{}, - &mock.TxProcessorMock{}, - mock.NewMultiShardsCoordinatorMock(3), - &mock.AccountsStub{}, - requestTransaction, - ) - - //add 3 tx hashes on requested list - txHash1 := []byte("tx hash 1") - txHash2 := []byte("tx hash 2") - txHash3 := []byte("tx hash 3") - - txs.AddTxHashToRequestedList(txHash1) - txs.AddTxHashToRequestedList(txHash2) - txs.AddTxHashToRequestedList(txHash3) - - txs.SetMissingTxs(3) - - //received txHash2 - txs.ReceivedTransaction(txHash2) - - assert.True(t, txs.IsTxHashRequested(txHash1)) - assert.False(t, txs.IsTxHashRequested(txHash2)) - assert.True(t, 
txs.IsTxHashRequested(txHash3)) + t.Parallel() + + dataPool := mock.NewPoolsHolderMock() + + shardedDataStub := &mock.ShardedDataStub{ + ShardDataStoreCalled: func(cacheId string) (c storage.Cacher) { + return &mock.CacherStub{ + PeekCalled: func(key []byte) (value interface{}, ok bool) { + return &transaction.Transaction{}, true + }, + } + }, + RegisterHandlerCalled: func(i func(key []byte)) { + }, + } + + dataPool.SetTransactions(shardedDataStub) + + requestTransaction := func(shardID uint32, txHashes [][]byte) {} + txs, _ := NewTransactionPreprocessor( + dataPool.Transactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.TxProcessorMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + requestTransaction, + ) + + //add 3 tx hashes on requested list + txHash1 := []byte("tx hash 1") + txHash2 := []byte("tx hash 2") + txHash3 := []byte("tx hash 3") + + txs.AddTxHashToRequestedList(txHash1) + txs.AddTxHashToRequestedList(txHash2) + txs.AddTxHashToRequestedList(txHash3) + + txs.SetMissingTxs(3) + + //received txHash2 + txs.ReceivedTransaction(txHash2) + + assert.True(t, txs.IsTxHashRequested(txHash1)) + assert.False(t, txs.IsTxHashRequested(txHash2)) + assert.True(t, txs.IsTxHashRequested(txHash3)) } //------- GetAllTxsFromMiniBlock func computeHash(data interface{}, marshalizer marshal.Marshalizer, hasher hashing.Hasher) []byte { - buff, _ := marshalizer.Marshal(data) - return hasher.Compute(string(buff)) + buff, _ := marshalizer.Marshal(data) + return hasher.Compute(string(buff)) } func TestTransactionPreprocessor_GetAllTxsFromMiniBlockShouldWork(t *testing.T) { - t.Parallel() - - hasher := mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} - dataPool := mock.NewPoolsHolderMock() - senderShardId := uint32(0) - destinationShardId := uint32(1) - - transactions := []*transaction.Transaction{ - {Nonce: 1}, - {Nonce: 2}, - {Nonce: 3}, - } - transactionsHashes := make([][]byte, len(transactions)) - - //add defined transactions to sender-destination cacher - for idx, tx := range transactions { - transactionsHashes[idx] = computeHash(tx, marshalizer, hasher) - - dataPool.Transactions().AddData( - transactionsHashes[idx], - tx, - process.ShardCacherIdentifier(senderShardId, destinationShardId), - ) - } - - //add some random data - txRandom := &transaction.Transaction{Nonce: 4} - dataPool.Transactions().AddData( - computeHash(txRandom, marshalizer, hasher), - txRandom, - process.ShardCacherIdentifier(3, 4), - ) - - requestTransaction := func(shardID uint32, txHashes [][]byte) {} - txs, _ := NewTransactionPreprocessor( - dataPool.Transactions(), - &mock.ChainStorerMock{}, - &mock.HasherMock{}, - &mock.MarshalizerMock{}, - &mock.TxProcessorMock{}, - mock.NewMultiShardsCoordinatorMock(3), - &mock.AccountsStub{}, - requestTransaction, - ) - - mb := &block.MiniBlock{ - SenderShardID: senderShardId, - ReceiverShardID: destinationShardId, - TxHashes: transactionsHashes, - } - - txsRetrieved, txHashesRetrieved, err := txs.getAllTxsFromMiniBlock(mb, func() bool { return true }) - - assert.Nil(t, err) - assert.Equal(t, len(transactions), len(txsRetrieved)) - assert.Equal(t, len(transactions), len(txHashesRetrieved)) - for idx, tx := range transactions { - //txReceived should be all txs in the same order - assert.Equal(t, txsRetrieved[idx], tx) - //verify corresponding transaction hashes - assert.Equal(t, txHashesRetrieved[idx], computeHash(tx, marshalizer, hasher)) - } + t.Parallel() + + hasher := mock.HasherMock{} + marshalizer := 
&mock.MarshalizerMock{} + dataPool := mock.NewPoolsHolderMock() + senderShardId := uint32(0) + destinationShardId := uint32(1) + + transactions := []*transaction.Transaction{ + {Nonce: 1}, + {Nonce: 2}, + {Nonce: 3}, + } + transactionsHashes := make([][]byte, len(transactions)) + + //add defined transactions to sender-destination cacher + for idx, tx := range transactions { + transactionsHashes[idx] = computeHash(tx, marshalizer, hasher) + + dataPool.Transactions().AddData( + transactionsHashes[idx], + tx, + process.ShardCacherIdentifier(senderShardId, destinationShardId), + ) + } + + //add some random data + txRandom := &transaction.Transaction{Nonce: 4} + dataPool.Transactions().AddData( + computeHash(txRandom, marshalizer, hasher), + txRandom, + process.ShardCacherIdentifier(3, 4), + ) + + requestTransaction := func(shardID uint32, txHashes [][]byte) {} + txs, _ := NewTransactionPreprocessor( + dataPool.Transactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.TxProcessorMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + requestTransaction, + ) + + mb := &block.MiniBlock{ + SenderShardID: senderShardId, + ReceiverShardID: destinationShardId, + TxHashes: transactionsHashes, + } + + txsRetrieved, txHashesRetrieved, err := txs.getAllTxsFromMiniBlock(mb, func() bool { return true }) + + assert.Nil(t, err) + assert.Equal(t, len(transactions), len(txsRetrieved)) + assert.Equal(t, len(transactions), len(txHashesRetrieved)) + for idx, tx := range transactions { + //txReceived should be all txs in the same order + assert.Equal(t, txsRetrieved[idx], tx) + //verify corresponding transaction hashes + assert.Equal(t, txHashesRetrieved[idx], computeHash(tx, marshalizer, hasher)) + } } func TestTransactionPreprocessor_RemoveBlockTxsFromPoolNilBlockShouldErr(t *testing.T) { - t.Parallel() - tdp := initDataPool() - requestTransaction := func(shardID uint32, txHashes [][]byte) {} - txs, _ := NewTransactionPreprocessor( - tdp.Transactions(), - &mock.ChainStorerMock{}, - &mock.HasherMock{}, - &mock.MarshalizerMock{}, - &mock.TxProcessorMock{}, - mock.NewMultiShardsCoordinatorMock(3), - &mock.AccountsStub{}, - requestTransaction, - ) - err := txs.RemoveTxBlockFromPools(nil, tdp.MiniBlocks()) - assert.NotNil(t, err) - assert.Equal(t, err, process.ErrNilTxBlockBody) + t.Parallel() + tdp := initDataPool() + requestTransaction := func(shardID uint32, txHashes [][]byte) {} + txs, _ := NewTransactionPreprocessor( + tdp.Transactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.TxProcessorMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + requestTransaction, + ) + err := txs.RemoveTxBlockFromPools(nil, tdp.MiniBlocks()) + assert.NotNil(t, err) + assert.Equal(t, err, process.ErrNilTxBlockBody) } func TestTransactionPreprocessor_RemoveBlockTxsFromPoolOK(t *testing.T) { - t.Parallel() - tdp := initDataPool() - requestTransaction := func(shardID uint32, txHashes [][]byte) {} - txs, _ := NewTransactionPreprocessor( - tdp.Transactions(), - &mock.ChainStorerMock{}, - &mock.HasherMock{}, - &mock.MarshalizerMock{}, - &mock.TxProcessorMock{}, - mock.NewMultiShardsCoordinatorMock(3), - &mock.AccountsStub{}, - requestTransaction, - ) - body := make(block.Body, 0) - txHash := []byte("txHash") - txHashes := make([][]byte, 0) - txHashes = append(txHashes, txHash) - miniblock := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 0, - TxHashes: txHashes, - } - body = append(body, &miniblock) - err 
:= txs.RemoveTxBlockFromPools(body, tdp.MiniBlocks()) - assert.Nil(t, err) + t.Parallel() + tdp := initDataPool() + requestTransaction := func(shardID uint32, txHashes [][]byte) {} + txs, _ := NewTransactionPreprocessor( + tdp.Transactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.TxProcessorMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + requestTransaction, + ) + body := make(block.Body, 0) + txHash := []byte("txHash") + txHashes := make([][]byte, 0) + txHashes = append(txHashes, txHash) + miniblock := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 0, + TxHashes: txHashes, + } + body = append(body, &miniblock) + err := txs.RemoveTxBlockFromPools(body, tdp.MiniBlocks()) + assert.Nil(t, err) } func TestTransactions_CreateAndProcessMiniBlockCrossShardGasLimitAddAll(t *testing.T) { - t.Parallel() + t.Parallel() - txPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache}) - requestTransaction := func(shardID uint32, txHashes [][]byte) {} - hasher := &mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} + txPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache}) + requestTransaction := func(shardID uint32, txHashes [][]byte) {} + hasher := &mock.HasherMock{} + marshalizer := &mock.MarshalizerMock{} - txs, _ := NewTransactionPreprocessor( - txPool, - &mock.ChainStorerMock{}, - hasher, - marshalizer, - &mock.TxProcessorMock{ProcessTransactionCalled: func(transaction *transaction.Transaction, round uint64) error { - return nil - }}, - mock.NewMultiShardsCoordinatorMock(3), - &mock.AccountsStub{}, - requestTransaction, - ) - assert.NotNil(t, txs) + txs, _ := NewTransactionPreprocessor( + txPool, + &mock.ChainStorerMock{}, + hasher, + marshalizer, + &mock.TxProcessorMock{ProcessTransactionCalled: func(transaction *transaction.Transaction, round uint64) error { + return nil + }}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + requestTransaction, + ) + assert.NotNil(t, txs) - sndShardId := uint32(0) - dstShardId := uint32(1) - strCache := process.ShardCacherIdentifier(sndShardId, dstShardId) + sndShardId := uint32(0) + dstShardId := uint32(1) + strCache := process.ShardCacherIdentifier(sndShardId, dstShardId) - addedTxs := make([]*transaction.Transaction, 0) - for i := 0; i < 10; i++ { - newTx := &transaction.Transaction{GasLimit: uint64(i)} + addedTxs := make([]*transaction.Transaction, 0) + for i := 0; i < 10; i++ { + newTx := &transaction.Transaction{GasLimit: uint64(i)} - txHash, _ := core.CalculateHash(marshalizer, hasher, newTx) - txPool.AddData(txHash, newTx, strCache) + txHash, _ := core.CalculateHash(marshalizer, hasher, newTx) + txPool.AddData(txHash, newTx, strCache) - addedTxs = append(addedTxs, newTx) - } + addedTxs = append(addedTxs, newTx) + } - mb, err := txs.CreateAndProcessMiniBlock(sndShardId, dstShardId, process.MaxItemsInBlock, haveTimeTrue, 10) - assert.Nil(t, err) + mb, err := txs.CreateAndProcessMiniBlock(sndShardId, dstShardId, process.MaxItemsInBlock, haveTimeTrue, 10) + assert.Nil(t, err) - assert.Equal(t, len(addedTxs), len(mb.TxHashes)) + assert.Equal(t, len(addedTxs), len(mb.TxHashes)) } func TestTransactions_CreateAndProcessMiniBlockCrossShardGasLimitAddAllAsNoSCCalls(t *testing.T) { - t.Parallel() + t.Parallel() - txPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache}) - requestTransaction := func(shardID uint32, 
txHashes [][]byte) {} - hasher := &mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} + txPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache}) + requestTransaction := func(shardID uint32, txHashes [][]byte) {} + hasher := &mock.HasherMock{} + marshalizer := &mock.MarshalizerMock{} - txs, _ := NewTransactionPreprocessor( - txPool, - &mock.ChainStorerMock{}, - hasher, - marshalizer, - &mock.TxProcessorMock{ProcessTransactionCalled: func(transaction *transaction.Transaction, round uint64) error { - return nil - }}, - mock.NewMultiShardsCoordinatorMock(3), - &mock.AccountsStub{}, - requestTransaction, - ) - assert.NotNil(t, txs) + txs, _ := NewTransactionPreprocessor( + txPool, + &mock.ChainStorerMock{}, + hasher, + marshalizer, + &mock.TxProcessorMock{ProcessTransactionCalled: func(transaction *transaction.Transaction, round uint64) error { + return nil + }}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + requestTransaction, + ) + assert.NotNil(t, txs) - sndShardId := uint32(0) - dstShardId := uint32(1) - strCache := process.ShardCacherIdentifier(sndShardId, dstShardId) + sndShardId := uint32(0) + dstShardId := uint32(1) + strCache := process.ShardCacherIdentifier(sndShardId, dstShardId) - gasLimit := process.MaxGasLimitPerMiniBlock / uint64(5) + gasLimit := process.MaxGasLimitPerMiniBlock / uint64(5) - addedTxs := make([]*transaction.Transaction, 0) - for i := 0; i < 10; i++ { - newTx := &transaction.Transaction{GasLimit: gasLimit, GasPrice: uint64(i), RcvAddr: []byte("012345678910")} + addedTxs := make([]*transaction.Transaction, 0) + for i := 0; i < 10; i++ { + newTx := &transaction.Transaction{GasLimit: gasLimit, GasPrice: uint64(i), RcvAddr: []byte("012345678910")} - txHash, _ := core.CalculateHash(marshalizer, hasher, newTx) - txPool.AddData(txHash, newTx, strCache) + txHash, _ := core.CalculateHash(marshalizer, hasher, newTx) + txPool.AddData(txHash, newTx, strCache) - addedTxs = append(addedTxs, newTx) - } + addedTxs = append(addedTxs, newTx) + } - mb, err := txs.CreateAndProcessMiniBlock(sndShardId, dstShardId, process.MaxItemsInBlock, haveTimeTrue, 10) - assert.Nil(t, err) + mb, err := txs.CreateAndProcessMiniBlock(sndShardId, dstShardId, process.MaxItemsInBlock, haveTimeTrue, 10) + assert.Nil(t, err) - assert.Equal(t, len(addedTxs), len(mb.TxHashes)) + assert.Equal(t, len(addedTxs), len(mb.TxHashes)) } func TestTransactions_CreateAndProcessMiniBlockCrossShardGasLimitAddOnly5asSCCall(t *testing.T) { - t.Parallel() + t.Parallel() - txPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache}) - requestTransaction := func(shardID uint32, txHashes [][]byte) {} - hasher := &mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} + txPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache}) + requestTransaction := func(shardID uint32, txHashes [][]byte) {} + hasher := &mock.HasherMock{} + marshalizer := &mock.MarshalizerMock{} - txs, _ := NewTransactionPreprocessor( - txPool, - &mock.ChainStorerMock{}, - hasher, - marshalizer, - &mock.TxProcessorMock{ProcessTransactionCalled: func(transaction *transaction.Transaction, round uint64) error { - return nil - }}, - mock.NewMultiShardsCoordinatorMock(3), - &mock.AccountsStub{}, - requestTransaction, - ) - assert.NotNil(t, txs) + txs, _ := NewTransactionPreprocessor( + txPool, + &mock.ChainStorerMock{}, + hasher, + marshalizer, + 
&mock.TxProcessorMock{ProcessTransactionCalled: func(transaction *transaction.Transaction, round uint64) error { + return nil + }}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + requestTransaction, + ) + assert.NotNil(t, txs) - sndShardId := uint32(0) - dstShardId := uint32(1) - strCache := process.ShardCacherIdentifier(sndShardId, dstShardId) + sndShardId := uint32(0) + dstShardId := uint32(1) + strCache := process.ShardCacherIdentifier(sndShardId, dstShardId) - numTxsToAdd := 5 - gasLimit := process.MaxGasLimitPerMiniBlock / uint64(numTxsToAdd) + numTxsToAdd := 5 + gasLimit := process.MaxGasLimitPerMiniBlock / uint64(numTxsToAdd) - scAddress, _ := hex.DecodeString("000000000000000000005fed9c659422cd8429ce92f8973bba2a9fb51e0eb3a1") - addedTxs := make([]*transaction.Transaction, 0) - for i := 0; i < 10; i++ { - newTx := &transaction.Transaction{GasLimit: gasLimit, GasPrice: uint64(i), RcvAddr: scAddress} + scAddress, _ := hex.DecodeString("000000000000000000005fed9c659422cd8429ce92f8973bba2a9fb51e0eb3a1") + addedTxs := make([]*transaction.Transaction, 0) + for i := 0; i < 10; i++ { + newTx := &transaction.Transaction{GasLimit: gasLimit, GasPrice: uint64(i), RcvAddr: scAddress} - txHash, _ := core.CalculateHash(marshalizer, hasher, newTx) - txPool.AddData(txHash, newTx, strCache) + txHash, _ := core.CalculateHash(marshalizer, hasher, newTx) + txPool.AddData(txHash, newTx, strCache) - addedTxs = append(addedTxs, newTx) - } + addedTxs = append(addedTxs, newTx) + } - mb, err := txs.CreateAndProcessMiniBlock(sndShardId, dstShardId, process.MaxItemsInBlock, haveTimeTrue, 10) - assert.Nil(t, err) + mb, err := txs.CreateAndProcessMiniBlock(sndShardId, dstShardId, process.MaxItemsInBlock, haveTimeTrue, 10) + assert.Nil(t, err) - assert.Equal(t, numTxsToAdd, len(mb.TxHashes)) + assert.Equal(t, numTxsToAdd, len(mb.TxHashes)) } //------- SortTxByNonce @@ -714,167 +714,167 @@ var r *rand.Rand var mutex sync.Mutex func init() { - r = rand.New(rand.NewSource(time.Now().UnixNano())) + r = rand.New(rand.NewSource(time.Now().UnixNano())) } func TestSortTxByNonce_NilCacherShouldErr(t *testing.T) { - t.Parallel() - transactions, txHashes, err := SortTxByNonce(nil) - assert.Nil(t, transactions) - assert.Nil(t, txHashes) - assert.Equal(t, process.ErrNilCacher, err) + t.Parallel() + transactions, txHashes, err := SortTxByNonce(nil) + assert.Nil(t, transactions) + assert.Nil(t, txHashes) + assert.Equal(t, process.ErrNilCacher, err) } func TestSortTxByNonce_EmptyCacherShouldReturnEmpty(t *testing.T) { - t.Parallel() - cacher, _ := storageUnit.NewCache(storageUnit.LRUCache, 100, 1) - transactions, txHashes, err := SortTxByNonce(cacher) - assert.Equal(t, 0, len(transactions)) - assert.Equal(t, 0, len(txHashes)) - assert.Nil(t, err) + t.Parallel() + cacher, _ := storageUnit.NewCache(storageUnit.LRUCache, 100, 1) + transactions, txHashes, err := SortTxByNonce(cacher) + assert.Equal(t, 0, len(transactions)) + assert.Equal(t, 0, len(txHashes)) + assert.Nil(t, err) } func TestSortTxByNonce_OneTxShouldWork(t *testing.T) { - t.Parallel() - cacher, _ := storageUnit.NewCache(storageUnit.LRUCache, 100, 1) - hash, tx := createRandTx(r) - cacher.HasOrAdd(hash, tx) - transactions, txHashes, err := SortTxByNonce(cacher) - assert.Equal(t, 1, len(transactions)) - assert.Equal(t, 1, len(txHashes)) - assert.Nil(t, err) - assert.True(t, hashInSlice(hash, txHashes)) - assert.True(t, txInSlice(tx, transactions)) + t.Parallel() + cacher, _ := storageUnit.NewCache(storageUnit.LRUCache, 100, 1) + hash, tx := 
createRandTx(r) + cacher.HasOrAdd(hash, tx) + transactions, txHashes, err := SortTxByNonce(cacher) + assert.Equal(t, 1, len(transactions)) + assert.Equal(t, 1, len(txHashes)) + assert.Nil(t, err) + assert.True(t, hashInSlice(hash, txHashes)) + assert.True(t, txInSlice(tx, transactions)) } func createRandTx(rand *rand.Rand) ([]byte, *transaction.Transaction) { - mutex.Lock() - nonce := rand.Uint64() - mutex.Unlock() - tx := &transaction.Transaction{ - Nonce: nonce, - } - marshalizer := &mock.MarshalizerMock{} - buffTx, _ := marshalizer.Marshal(tx) - hash := mock.HasherMock{}.Compute(string(buffTx)) - return hash, tx + mutex.Lock() + nonce := rand.Uint64() + mutex.Unlock() + tx := &transaction.Transaction{ + Nonce: nonce, + } + marshalizer := &mock.MarshalizerMock{} + buffTx, _ := marshalizer.Marshal(tx) + hash := mock.HasherMock{}.Compute(string(buffTx)) + return hash, tx } func hashInSlice(hash []byte, hashes [][]byte) bool { - for _, h := range hashes { - if bytes.Equal(h, hash) { - return true - } - } - return false + for _, h := range hashes { + if bytes.Equal(h, hash) { + return true + } + } + return false } func txInSlice(tx *transaction.Transaction, transactions []*transaction.Transaction) bool { - for _, t := range transactions { - if reflect.DeepEqual(tx, t) { - return true - } - } - return false + for _, t := range transactions { + if reflect.DeepEqual(tx, t) { + return true + } + } + return false } func TestSortTxByNonce_MoreTransactionsShouldNotErr(t *testing.T) { - t.Parallel() - cache, _, _ := genCacherTransactionsHashes(100) - _, _, err := SortTxByNonce(cache) - assert.Nil(t, err) + t.Parallel() + cache, _, _ := genCacherTransactionsHashes(100) + _, _, err := SortTxByNonce(cache) + assert.Nil(t, err) } func TestSortTxByNonce_MoreTransactionsShouldRetSameSize(t *testing.T) { - t.Parallel() - cache, genTransactions, _ := genCacherTransactionsHashes(100) - transactions, txHashes, _ := SortTxByNonce(cache) - assert.Equal(t, len(genTransactions), len(transactions)) - assert.Equal(t, len(genTransactions), len(txHashes)) + t.Parallel() + cache, genTransactions, _ := genCacherTransactionsHashes(100) + transactions, txHashes, _ := SortTxByNonce(cache) + assert.Equal(t, len(genTransactions), len(transactions)) + assert.Equal(t, len(genTransactions), len(txHashes)) } func TestSortTxByNonce_MoreTransactionsShouldContainSameElements(t *testing.T) { - t.Parallel() - cache, genTransactions, genHashes := genCacherTransactionsHashes(100) - transactions, txHashes, _ := SortTxByNonce(cache) - for i := 0; i < len(genTransactions); i++ { - assert.True(t, hashInSlice(genHashes[i], txHashes)) - assert.True(t, txInSlice(genTransactions[i], transactions)) - } + t.Parallel() + cache, genTransactions, genHashes := genCacherTransactionsHashes(100) + transactions, txHashes, _ := SortTxByNonce(cache) + for i := 0; i < len(genTransactions); i++ { + assert.True(t, hashInSlice(genHashes[i], txHashes)) + assert.True(t, txInSlice(genTransactions[i], transactions)) + } } func TestSortTxByNonce_MoreTransactionsShouldContainSortedElements(t *testing.T) { - t.Parallel() - cache, _, _ := genCacherTransactionsHashes(100) - transactions, _, _ := SortTxByNonce(cache) - lastNonce := uint64(0) - for i := 0; i < len(transactions); i++ { - tx := transactions[i] - assert.True(t, lastNonce <= tx.Nonce) - fmt.Println(tx.Nonce) - lastNonce = tx.Nonce - } + t.Parallel() + cache, _, _ := genCacherTransactionsHashes(100) + transactions, _, _ := SortTxByNonce(cache) + lastNonce := uint64(0) + for i := 0; i < len(transactions); 
i++ { + tx := transactions[i] + assert.True(t, lastNonce <= tx.Nonce) + fmt.Println(tx.Nonce) + lastNonce = tx.Nonce + } } func TestSortTxByNonce_TransactionsWithSameNonceShouldGetSorted(t *testing.T) { - t.Parallel() - transactions := []*transaction.Transaction{ - {Nonce: 1, Signature: []byte("sig1")}, - {Nonce: 2, Signature: []byte("sig2")}, - {Nonce: 1, Signature: []byte("sig3")}, - {Nonce: 2, Signature: []byte("sig4")}, - {Nonce: 3, Signature: []byte("sig5")}, - } - cache, _ := storageUnit.NewCache(storageUnit.LRUCache, uint32(len(transactions)), 1) - for _, tx := range transactions { - marshalizer := &mock.MarshalizerMock{} - buffTx, _ := marshalizer.Marshal(tx) - hash := mock.HasherMock{}.Compute(string(buffTx)) - - cache.Put(hash, tx) - } - sortedTxs, _, _ := SortTxByNonce(cache) - lastNonce := uint64(0) - for i := 0; i < len(sortedTxs); i++ { - tx := sortedTxs[i] - assert.True(t, lastNonce <= tx.Nonce) - fmt.Printf("tx.Nonce: %d, tx.Sig: %s\n", tx.Nonce, tx.Signature) - lastNonce = tx.Nonce - } - assert.Equal(t, len(sortedTxs), len(transactions)) - //test if one transaction from transactions might not be in sortedTx - for _, tx := range transactions { - found := false - for _, stx := range sortedTxs { - if reflect.DeepEqual(tx, stx) { - found = true - break - } - } - if !found { - assert.Fail(t, "Not found tx in sorted slice for sig: "+string(tx.Signature)) - } - } + t.Parallel() + transactions := []*transaction.Transaction{ + {Nonce: 1, Signature: []byte("sig1")}, + {Nonce: 2, Signature: []byte("sig2")}, + {Nonce: 1, Signature: []byte("sig3")}, + {Nonce: 2, Signature: []byte("sig4")}, + {Nonce: 3, Signature: []byte("sig5")}, + } + cache, _ := storageUnit.NewCache(storageUnit.LRUCache, uint32(len(transactions)), 1) + for _, tx := range transactions { + marshalizer := &mock.MarshalizerMock{} + buffTx, _ := marshalizer.Marshal(tx) + hash := mock.HasherMock{}.Compute(string(buffTx)) + + cache.Put(hash, tx) + } + sortedTxs, _, _ := SortTxByNonce(cache) + lastNonce := uint64(0) + for i := 0; i < len(sortedTxs); i++ { + tx := sortedTxs[i] + assert.True(t, lastNonce <= tx.Nonce) + fmt.Printf("tx.Nonce: %d, tx.Sig: %s\n", tx.Nonce, tx.Signature) + lastNonce = tx.Nonce + } + assert.Equal(t, len(sortedTxs), len(transactions)) + //test if one transaction from transactions might not be in sortedTx + for _, tx := range transactions { + found := false + for _, stx := range sortedTxs { + if reflect.DeepEqual(tx, stx) { + found = true + break + } + } + if !found { + assert.Fail(t, "Not found tx in sorted slice for sig: "+string(tx.Signature)) + } + } } func genCacherTransactionsHashes(noOfTx int) (storage.Cacher, []*transaction.Transaction, [][]byte) { - cacher, _ := storageUnit.NewCache(storageUnit.LRUCache, uint32(noOfTx), 1) - genHashes := make([][]byte, 0) - genTransactions := make([]*transaction.Transaction, 0) - for i := 0; i < noOfTx; i++ { - hash, tx := createRandTx(r) - cacher.HasOrAdd(hash, tx) + cacher, _ := storageUnit.NewCache(storageUnit.LRUCache, uint32(noOfTx), 1) + genHashes := make([][]byte, 0) + genTransactions := make([]*transaction.Transaction, 0) + for i := 0; i < noOfTx; i++ { + hash, tx := createRandTx(r) + cacher.HasOrAdd(hash, tx) - genHashes = append(genHashes, hash) - genTransactions = append(genTransactions, tx) - } - return cacher, genTransactions, genHashes + genHashes = append(genHashes, hash) + genTransactions = append(genTransactions, tx) + } + return cacher, genTransactions, genHashes } func BenchmarkSortTxByNonce1(b *testing.B) { - cache, _, _ := 
genCacherTransactionsHashes(10000) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, _, _ = SortTxByNonce(cache) - } + cache, _, _ := genCacherTransactionsHashes(10000) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _, _ = SortTxByNonce(cache) + } } diff --git a/process/block/shardblock.go b/process/block/shardblock.go index a81f9d39263..55ea7c27d19 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -1,1573 +1,1572 @@ package block import ( - "fmt" - "sort" - "sync" - "time" - - "github.com/ElrondNetwork/elrond-go/core" - "github.com/ElrondNetwork/elrond-go/core/serviceContainer" - "github.com/ElrondNetwork/elrond-go/data" - "github.com/ElrondNetwork/elrond-go/data/block" - "github.com/ElrondNetwork/elrond-go/data/state" - "github.com/ElrondNetwork/elrond-go/data/typeConverters" - "github.com/ElrondNetwork/elrond-go/dataRetriever" - "github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool" - "github.com/ElrondNetwork/elrond-go/hashing" - "github.com/ElrondNetwork/elrond-go/marshal" - "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/process/throttle" - "github.com/ElrondNetwork/elrond-go/sharding" - "github.com/ElrondNetwork/elrond-go/statusHandler" + "fmt" + "sort" + "sync" + "time" + + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/core/serviceContainer" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/data/typeConverters" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/throttle" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/statusHandler" ) // shardProcessor implements shardProcessor interface and actually it tries to execute block type shardProcessor struct { - *baseProcessor - dataPool dataRetriever.PoolsHolder - blocksTracker process.BlocksTracker - metaBlockFinality int - - chRcvAllMetaHdrs chan bool - mutUsedMetaHdrsHashes sync.Mutex - usedMetaHdrsHashes map[uint64][][]byte - - mutRequestedMetaHdrsHashes sync.RWMutex - requestedMetaHdrsHashes map[string]bool - currHighestMetaHdrNonce uint64 - allNeededMetaHdrsFound bool - - core serviceContainer.Core - txCoordinator process.TransactionCoordinator - txCounter *transactionCounter + *baseProcessor + dataPool dataRetriever.PoolsHolder + blocksTracker process.BlocksTracker + metaBlockFinality int + + chRcvAllMetaHdrs chan bool + mutUsedMetaHdrsHashes sync.Mutex + usedMetaHdrsHashes map[uint64][][]byte + + mutRequestedMetaHdrsHashes sync.RWMutex + requestedMetaHdrsHashes map[string]bool + currHighestMetaHdrNonce uint64 + allNeededMetaHdrsFound bool + + core serviceContainer.Core + txCoordinator process.TransactionCoordinator + txCounter *transactionCounter } // NewShardProcessor creates a new shardProcessor object func NewShardProcessor( - core serviceContainer.Core, - dataPool dataRetriever.PoolsHolder, - store dataRetriever.StorageService, - hasher hashing.Hasher, - marshalizer marshal.Marshalizer, - accounts state.AccountsAdapter, - shardCoordinator sharding.Coordinator, - nodesCoordinator sharding.NodesCoordinator, - specialAddressHandler process.SpecialAddressHandler, - forkDetector process.ForkDetector, - 
blocksTracker process.BlocksTracker, - startHeaders map[uint32]data.HeaderHandler, - requestHandler process.RequestHandler, - txCoordinator process.TransactionCoordinator, - uint64Converter typeConverters.Uint64ByteSliceConverter, + core serviceContainer.Core, + dataPool dataRetriever.PoolsHolder, + store dataRetriever.StorageService, + hasher hashing.Hasher, + marshalizer marshal.Marshalizer, + accounts state.AccountsAdapter, + shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, + specialAddressHandler process.SpecialAddressHandler, + forkDetector process.ForkDetector, + blocksTracker process.BlocksTracker, + startHeaders map[uint32]data.HeaderHandler, + requestHandler process.RequestHandler, + txCoordinator process.TransactionCoordinator, + uint64Converter typeConverters.Uint64ByteSliceConverter, ) (*shardProcessor, error) { - err := checkProcessorNilParameters( - accounts, - forkDetector, - hasher, - marshalizer, - store, - shardCoordinator, - nodesCoordinator, - specialAddressHandler, - uint64Converter) - if err != nil { - return nil, err - } - - if dataPool == nil || dataPool.IsInterfaceNil() { - return nil, process.ErrNilDataPoolHolder - } - if blocksTracker == nil || blocksTracker.IsInterfaceNil() { - return nil, process.ErrNilBlocksTracker - } - if requestHandler == nil || requestHandler.IsInterfaceNil() { - return nil, process.ErrNilRequestHandler - } - if txCoordinator == nil || txCoordinator.IsInterfaceNil() { - return nil, process.ErrNilTransactionCoordinator - } - - blockSizeThrottler, err := throttle.NewBlockSizeThrottle() - if err != nil { - return nil, err - } - - base := &baseProcessor{ - accounts: accounts, - blockSizeThrottler: blockSizeThrottler, - forkDetector: forkDetector, - hasher: hasher, - marshalizer: marshalizer, - store: store, - shardCoordinator: shardCoordinator, - nodesCoordinator: nodesCoordinator, - specialAddressHandler: specialAddressHandler, - uint64Converter: uint64Converter, - onRequestHeaderHandlerByNonce: requestHandler.RequestHeaderByNonce, - appStatusHandler: statusHandler.NewNilStatusHandler(), - } - err = base.setLastNotarizedHeadersSlice(startHeaders) - if err != nil { - return nil, err - } - - sp := shardProcessor{ - core: core, - baseProcessor: base, - dataPool: dataPool, - blocksTracker: blocksTracker, - txCoordinator: txCoordinator, - txCounter: NewTransactionCounter(), - } - - sp.chRcvAllMetaHdrs = make(chan bool) - - transactionPool := sp.dataPool.Transactions() - if transactionPool == nil { - return nil, process.ErrNilTransactionPool - } - - sp.requestedMetaHdrsHashes = make(map[string]bool) - sp.usedMetaHdrsHashes = make(map[uint64][][]byte) - - metaBlockPool := sp.dataPool.MetaBlocks() - if metaBlockPool == nil { - return nil, process.ErrNilMetaBlockPool - } - metaBlockPool.RegisterHandler(sp.receivedMetaBlock) - sp.onRequestHeaderHandler = requestHandler.RequestHeader - - sp.metaBlockFinality = process.MetaBlockFinality - sp.allNeededMetaHdrsFound = true - - return &sp, nil + err := checkProcessorNilParameters( + accounts, + forkDetector, + hasher, + marshalizer, + store, + shardCoordinator, + nodesCoordinator, + specialAddressHandler, + uint64Converter) + if err != nil { + return nil, err + } + + if dataPool == nil || dataPool.IsInterfaceNil() { + return nil, process.ErrNilDataPoolHolder + } + if blocksTracker == nil || blocksTracker.IsInterfaceNil() { + return nil, process.ErrNilBlocksTracker + } + if requestHandler == nil || requestHandler.IsInterfaceNil() { + return nil, 
process.ErrNilRequestHandler + } + if txCoordinator == nil || txCoordinator.IsInterfaceNil() { + return nil, process.ErrNilTransactionCoordinator + } + + blockSizeThrottler, err := throttle.NewBlockSizeThrottle() + if err != nil { + return nil, err + } + + base := &baseProcessor{ + accounts: accounts, + blockSizeThrottler: blockSizeThrottler, + forkDetector: forkDetector, + hasher: hasher, + marshalizer: marshalizer, + store: store, + shardCoordinator: shardCoordinator, + nodesCoordinator: nodesCoordinator, + specialAddressHandler: specialAddressHandler, + uint64Converter: uint64Converter, + onRequestHeaderHandlerByNonce: requestHandler.RequestHeaderByNonce, + appStatusHandler: statusHandler.NewNilStatusHandler(), + } + err = base.setLastNotarizedHeadersSlice(startHeaders) + if err != nil { + return nil, err + } + + sp := shardProcessor{ + core: core, + baseProcessor: base, + dataPool: dataPool, + blocksTracker: blocksTracker, + txCoordinator: txCoordinator, + txCounter: NewTransactionCounter(), + } + + sp.chRcvAllMetaHdrs = make(chan bool) + + transactionPool := sp.dataPool.Transactions() + if transactionPool == nil { + return nil, process.ErrNilTransactionPool + } + + sp.requestedMetaHdrsHashes = make(map[string]bool) + sp.usedMetaHdrsHashes = make(map[uint64][][]byte) + + metaBlockPool := sp.dataPool.MetaBlocks() + if metaBlockPool == nil { + return nil, process.ErrNilMetaBlockPool + } + metaBlockPool.RegisterHandler(sp.receivedMetaBlock) + sp.onRequestHeaderHandler = requestHandler.RequestHeader + + sp.metaBlockFinality = process.MetaBlockFinality + sp.allNeededMetaHdrsFound = true + + return &sp, nil } // ProcessBlock processes a block. It returns nil if all ok or the specific error func (sp *shardProcessor) ProcessBlock( - chainHandler data.ChainHandler, - headerHandler data.HeaderHandler, - bodyHandler data.BodyHandler, - haveTime func() time.Duration, + chainHandler data.ChainHandler, + headerHandler data.HeaderHandler, + bodyHandler data.BodyHandler, + haveTime func() time.Duration, ) error { - if haveTime == nil { - return process.ErrNilHaveTimeHandler - } - - err := sp.checkBlockValidity(chainHandler, headerHandler, bodyHandler) - if err != nil { - return err - } - - log.Debug(fmt.Sprintf("started processing block with round %d and nonce %d\n", - headerHandler.GetRound(), - headerHandler.GetNonce())) - - header, ok := headerHandler.(*block.Header) - if !ok { - return process.ErrWrongTypeAssertion - } - - body, ok := bodyHandler.(block.Body) - if !ok { - return process.ErrWrongTypeAssertion - } - - mbLen := len(body) - totalTxCount := 0 - for i := 0; i < mbLen; i++ { - totalTxCount += len(body[i].TxHashes) - } - sp.appStatusHandler.SetUInt64Value(core.MetricNumTxInBlock, uint64(totalTxCount)) - sp.appStatusHandler.SetUInt64Value(core.MetricNumMiniBlocks, uint64(mbLen)) - - err = sp.checkHeaderBodyCorrelation(header, body) - if err != nil { - return err - } - - numTxWithDst := sp.txCounter.getNumTxsFromPool(header.ShardId, sp.dataPool, sp.shardCoordinator.NumberOfShards()) - - sp.appStatusHandler.SetUInt64Value(core.MetricTxPoolLoad, uint64(numTxWithDst)) - - log.Info(fmt.Sprintf("Total txs in pool: %d\n", numTxWithDst)) - - // give transaction coordinator the consensus group validators addresses where to send the rewards. 
- consensusAddresses, err := sp.nodesCoordinator.GetValidatorsRewardsAddresses( - headerHandler.GetPrevRandSeed(), - headerHandler.GetRound(), - sp.shardCoordinator.SelfId(), - ) - if err != nil { - return err - } - - sp.SetConsensusRewardAddresses(consensusAddresses, headerHandler.GetRound()) - sp.txCoordinator.CreateBlockStarted() - sp.txCoordinator.RequestBlockTransactions(body) - requestedMetaHdrs, requestedFinalMetaHdrs := sp.requestMetaHeaders(header) - - if haveTime() < 0 { - return process.ErrTimeIsOut - } - - err = sp.txCoordinator.IsDataPreparedForProcessing(haveTime) - if err != nil { - return err - } - - if requestedMetaHdrs > 0 || requestedFinalMetaHdrs > 0 { - log.Info(fmt.Sprintf("requested %d missing meta headers and %d final meta headers\n", requestedMetaHdrs, requestedFinalMetaHdrs)) - err = sp.waitForMetaHdrHashes(haveTime()) - sp.mutRequestedMetaHdrsHashes.Lock() - sp.allNeededMetaHdrsFound = true - unreceivedMetaHdrs := len(sp.requestedMetaHdrsHashes) - sp.mutRequestedMetaHdrsHashes.Unlock() - log.Info(fmt.Sprintf("received %d missing meta headers\n", int(requestedMetaHdrs)-unreceivedMetaHdrs)) - if err != nil { - return err - } - } - - if sp.accounts.JournalLen() != 0 { - return process.ErrAccountStateDirty - } - - defer func() { - go sp.checkAndRequestIfMetaHeadersMissing(header.Round) - }() - - err = sp.checkMetaHeadersValidityAndFinality(header) - if err != nil { - return err - } - - err = sp.verifyCrossShardMiniBlockDstMe(header) - if err != nil { - return err - } - - defer func() { - if err != nil { - sp.RevertAccountState() - } - }() - - err = sp.txCoordinator.ProcessBlockTransaction(body, header.Round, haveTime) - if err != nil { - return err - } - - if !sp.verifyStateRoot(header.GetRootHash()) { - err = process.ErrRootStateMissmatch - return err - } - - err = sp.txCoordinator.VerifyCreatedBlockTransactions(body) - if err != nil { - return err - } - - return nil + if haveTime == nil { + return process.ErrNilHaveTimeHandler + } + + err := sp.checkBlockValidity(chainHandler, headerHandler, bodyHandler) + if err != nil { + return err + } + + log.Debug(fmt.Sprintf("started processing block with round %d and nonce %d\n", + headerHandler.GetRound(), + headerHandler.GetNonce())) + + header, ok := headerHandler.(*block.Header) + if !ok { + return process.ErrWrongTypeAssertion + } + + body, ok := bodyHandler.(block.Body) + if !ok { + return process.ErrWrongTypeAssertion + } + + mbLen := len(body) + totalTxCount := 0 + for i := 0; i < mbLen; i++ { + totalTxCount += len(body[i].TxHashes) + } + sp.appStatusHandler.SetUInt64Value(core.MetricNumTxInBlock, uint64(totalTxCount)) + sp.appStatusHandler.SetUInt64Value(core.MetricNumMiniBlocks, uint64(mbLen)) + + err = sp.checkHeaderBodyCorrelation(header, body) + if err != nil { + return err + } + + numTxWithDst := sp.txCounter.getNumTxsFromPool(header.ShardId, sp.dataPool, sp.shardCoordinator.NumberOfShards()) + + sp.appStatusHandler.SetUInt64Value(core.MetricTxPoolLoad, uint64(numTxWithDst)) + + log.Info(fmt.Sprintf("Total txs in pool: %d\n", numTxWithDst)) + + // give transaction coordinator the consensus group validators addresses where to send the rewards. 
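	// The addresses are derived through the nodes coordinator from the previous random seed,
	// the round and the own shard id, so any node can recompute the same consensus group and
	// hence the same reward addresses for this header; the result is handed to the special
	// address handler via SetConsensusRewardAddresses a few lines below.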
+ consensusAddresses, err := sp.nodesCoordinator.GetValidatorsRewardsAddresses( + headerHandler.GetPrevRandSeed(), + headerHandler.GetRound(), + sp.shardCoordinator.SelfId(), + ) + if err != nil { + return err + } + + sp.SetConsensusRewardAddresses(consensusAddresses, headerHandler.GetRound()) + sp.txCoordinator.CreateBlockStarted() + sp.txCoordinator.RequestBlockTransactions(body) + requestedMetaHdrs, requestedFinalMetaHdrs := sp.requestMetaHeaders(header) + + if haveTime() < 0 { + return process.ErrTimeIsOut + } + + err = sp.txCoordinator.IsDataPreparedForProcessing(haveTime) + if err != nil { + return err + } + + if requestedMetaHdrs > 0 || requestedFinalMetaHdrs > 0 { + log.Info(fmt.Sprintf("requested %d missing meta headers and %d final meta headers\n", requestedMetaHdrs, requestedFinalMetaHdrs)) + err = sp.waitForMetaHdrHashes(haveTime()) + sp.mutRequestedMetaHdrsHashes.Lock() + sp.allNeededMetaHdrsFound = true + unreceivedMetaHdrs := len(sp.requestedMetaHdrsHashes) + sp.mutRequestedMetaHdrsHashes.Unlock() + log.Info(fmt.Sprintf("received %d missing meta headers\n", int(requestedMetaHdrs)-unreceivedMetaHdrs)) + if err != nil { + return err + } + } + + if sp.accounts.JournalLen() != 0 { + return process.ErrAccountStateDirty + } + + defer func() { + go sp.checkAndRequestIfMetaHeadersMissing(header.Round) + }() + + err = sp.checkMetaHeadersValidityAndFinality(header) + if err != nil { + return err + } + + err = sp.verifyCrossShardMiniBlockDstMe(header) + if err != nil { + return err + } + + defer func() { + if err != nil { + sp.RevertAccountState() + } + }() + + err = sp.txCoordinator.ProcessBlockTransaction(body, header.Round, haveTime) + if err != nil { + return err + } + + if !sp.verifyStateRoot(header.GetRootHash()) { + err = process.ErrRootStateMissmatch + return err + } + + err = sp.txCoordinator.VerifyCreatedBlockTransactions(body) + if err != nil { + return err + } + + return nil } // SetConsensusRewardAddresses - sets the reward addresses for the current consensus group func (sp *shardProcessor) SetConsensusRewardAddresses(consensusRewardAddresses []string, round uint64) { - sp.specialAddressHandler.SetConsensusData(consensusRewardAddresses, round, 0) + sp.specialAddressHandler.SetConsensusData(consensusRewardAddresses, round, 0) } // checkMetaHeadersValidity - checks if listed metaheaders are valid as construction func (sp *shardProcessor) checkMetaHeadersValidityAndFinality(header *block.Header) error { - metablockCache := sp.dataPool.MetaBlocks() - if metablockCache == nil { - return process.ErrNilMetaBlockPool - } - - tmpNotedHdr, err := sp.getLastNotarizedHdr(sharding.MetachainShardId) - if err != nil { - return err - } - - currAddedMetaHdrs := make([]*block.MetaBlock, 0) - for _, metaHash := range header.MetaBlockHashes { - value, ok := metablockCache.Peek(metaHash) - if !ok { - return process.ErrNilMetaBlockHeader - } - - metaHdr, ok := value.(*block.MetaBlock) - if !ok { - return process.ErrWrongTypeAssertion - } - - currAddedMetaHdrs = append(currAddedMetaHdrs, metaHdr) - } - - if len(currAddedMetaHdrs) == 0 { - return nil - } - - sort.Slice(currAddedMetaHdrs, func(i, j int) bool { - return currAddedMetaHdrs[i].Nonce < currAddedMetaHdrs[j].Nonce - }) - - for _, metaHdr := range currAddedMetaHdrs { - err = sp.isHdrConstructionValid(metaHdr, tmpNotedHdr) - if err != nil { - return err - } - tmpNotedHdr = metaHdr - } - - err = sp.checkMetaHdrFinality(tmpNotedHdr, header.Round) - if err != nil { - return err - } - - return nil + metablockCache := sp.dataPool.MetaBlocks() + 
if metablockCache == nil { + return process.ErrNilMetaBlockPool + } + + tmpNotedHdr, err := sp.getLastNotarizedHdr(sharding.MetachainShardId) + if err != nil { + return err + } + + currAddedMetaHdrs := make([]*block.MetaBlock, 0) + for _, metaHash := range header.MetaBlockHashes { + value, ok := metablockCache.Peek(metaHash) + if !ok { + return process.ErrNilMetaBlockHeader + } + + metaHdr, ok := value.(*block.MetaBlock) + if !ok { + return process.ErrWrongTypeAssertion + } + + currAddedMetaHdrs = append(currAddedMetaHdrs, metaHdr) + } + + if len(currAddedMetaHdrs) == 0 { + return nil + } + + sort.Slice(currAddedMetaHdrs, func(i, j int) bool { + return currAddedMetaHdrs[i].Nonce < currAddedMetaHdrs[j].Nonce + }) + + for _, metaHdr := range currAddedMetaHdrs { + err = sp.isHdrConstructionValid(metaHdr, tmpNotedHdr) + if err != nil { + return err + } + tmpNotedHdr = metaHdr + } + + err = sp.checkMetaHdrFinality(tmpNotedHdr, header.Round) + if err != nil { + return err + } + + return nil } // check if shard headers are final by checking if newer headers were constructed upon them func (sp *shardProcessor) checkMetaHdrFinality(header data.HeaderHandler, round uint64) error { - if header == nil || header.IsInterfaceNil() { - return process.ErrNilBlockHeader - } - - sortedMetaHdrs, err := sp.getOrderedMetaBlocks(round) - if err != nil { - return err - } - - lastVerifiedHdr := header - // verify if there are "K" block after current to make this one final - nextBlocksVerified := 0 - for _, tmpHdr := range sortedMetaHdrs { - if nextBlocksVerified >= sp.metaBlockFinality { - break - } - - // found a header with the next nonce - if tmpHdr.hdr.GetNonce() == lastVerifiedHdr.GetNonce()+1 { - err = sp.isHdrConstructionValid(tmpHdr.hdr, lastVerifiedHdr) - if err != nil { - log.Debug(err.Error()) - continue - } - - lastVerifiedHdr = tmpHdr.hdr - nextBlocksVerified += 1 - } - } - - if nextBlocksVerified < sp.metaBlockFinality { - go sp.onRequestHeaderHandlerByNonce(lastVerifiedHdr.GetShardID(), lastVerifiedHdr.GetNonce()+1) - return process.ErrHeaderNotFinal - } - - return nil + if header == nil || header.IsInterfaceNil() { + return process.ErrNilBlockHeader + } + + sortedMetaHdrs, err := sp.getOrderedMetaBlocks(round) + if err != nil { + return err + } + + lastVerifiedHdr := header + // verify if there are "K" block after current to make this one final + nextBlocksVerified := 0 + for _, tmpHdr := range sortedMetaHdrs { + if nextBlocksVerified >= sp.metaBlockFinality { + break + } + + // found a header with the next nonce + if tmpHdr.hdr.GetNonce() == lastVerifiedHdr.GetNonce()+1 { + err = sp.isHdrConstructionValid(tmpHdr.hdr, lastVerifiedHdr) + if err != nil { + log.Debug(err.Error()) + continue + } + + lastVerifiedHdr = tmpHdr.hdr + nextBlocksVerified += 1 + } + } + + if nextBlocksVerified < sp.metaBlockFinality { + go sp.onRequestHeaderHandlerByNonce(lastVerifiedHdr.GetShardID(), lastVerifiedHdr.GetNonce()+1) + return process.ErrHeaderNotFinal + } + + return nil } // check if header has the same miniblocks as presented in body func (sp *shardProcessor) checkHeaderBodyCorrelation(hdr *block.Header, body block.Body) error { - mbHashesFromHdr := make(map[string]*block.MiniBlockHeader) - for i := 0; i < len(hdr.MiniBlockHeaders); i++ { - mbHashesFromHdr[string(hdr.MiniBlockHeaders[i].Hash)] = &hdr.MiniBlockHeaders[i] - } - - if len(hdr.MiniBlockHeaders) != len(body) { - return process.ErrHeaderBodyMismatch - } - - for i := 0; i < len(body); i++ { - miniBlock := body[i] - - mbBytes, err := 
sp.marshalizer.Marshal(miniBlock) - if err != nil { - return err - } - mbHash := sp.hasher.Compute(string(mbBytes)) - - mbHdr, ok := mbHashesFromHdr[string(mbHash)] - if !ok { - return process.ErrHeaderBodyMismatch - } - - if mbHdr.TxCount != uint32(len(miniBlock.TxHashes)) { - return process.ErrHeaderBodyMismatch - } - - if mbHdr.ReceiverShardID != miniBlock.ReceiverShardID { - return process.ErrHeaderBodyMismatch - } - - if mbHdr.SenderShardID != miniBlock.SenderShardID { - return process.ErrHeaderBodyMismatch - } - } - - return nil + mbHashesFromHdr := make(map[string]*block.MiniBlockHeader) + for i := 0; i < len(hdr.MiniBlockHeaders); i++ { + mbHashesFromHdr[string(hdr.MiniBlockHeaders[i].Hash)] = &hdr.MiniBlockHeaders[i] + } + + if len(hdr.MiniBlockHeaders) != len(body) { + return process.ErrHeaderBodyMismatch + } + + for i := 0; i < len(body); i++ { + miniBlock := body[i] + + mbBytes, err := sp.marshalizer.Marshal(miniBlock) + if err != nil { + return err + } + mbHash := sp.hasher.Compute(string(mbBytes)) + + mbHdr, ok := mbHashesFromHdr[string(mbHash)] + if !ok { + return process.ErrHeaderBodyMismatch + } + + if mbHdr.TxCount != uint32(len(miniBlock.TxHashes)) { + return process.ErrHeaderBodyMismatch + } + + if mbHdr.ReceiverShardID != miniBlock.ReceiverShardID { + return process.ErrHeaderBodyMismatch + } + + if mbHdr.SenderShardID != miniBlock.SenderShardID { + return process.ErrHeaderBodyMismatch + } + } + + return nil } func (sp *shardProcessor) checkAndRequestIfMetaHeadersMissing(round uint64) { - orderedMetaBlocks, err := sp.getOrderedMetaBlocks(round) - if err != nil { - log.Debug(err.Error()) - return - } - - sortedHdrs := make([]data.HeaderHandler, 0) - for i := 0; i < len(orderedMetaBlocks); i++ { - hdr, ok := orderedMetaBlocks[i].hdr.(*block.MetaBlock) - if !ok { - continue - } - sortedHdrs = append(sortedHdrs, hdr) - } - - err = sp.requestHeadersIfMissing(sortedHdrs, sharding.MetachainShardId, round) - if err != nil { - log.Info(err.Error()) - } - - return + orderedMetaBlocks, err := sp.getOrderedMetaBlocks(round) + if err != nil { + log.Debug(err.Error()) + return + } + + sortedHdrs := make([]data.HeaderHandler, 0) + for i := 0; i < len(orderedMetaBlocks); i++ { + hdr, ok := orderedMetaBlocks[i].hdr.(*block.MetaBlock) + if !ok { + continue + } + sortedHdrs = append(sortedHdrs, hdr) + } + + err = sp.requestHeadersIfMissing(sortedHdrs, sharding.MetachainShardId, round) + if err != nil { + log.Info(err.Error()) + } + + return } func (sp *shardProcessor) indexBlockIfNeeded( - body data.BodyHandler, - header data.HeaderHandler) { - if sp.core == nil || sp.core.Indexer() == nil { - return - } - - txPool := sp.txCoordinator.GetAllCurrentUsedTxs(block.TxBlock) - scPool := sp.txCoordinator.GetAllCurrentUsedTxs(block.SmartContractResultBlock) - rewardPool := sp.txCoordinator.GetAllCurrentUsedTxs(block.RewardsBlock) - - - for hash, tx := range scPool { - txPool[hash] = tx - } - for hash, tx := range rewardPool { - txPool[hash] = tx - } - - go sp.core.Indexer().SaveBlock(body, header, txPool) + body data.BodyHandler, + header data.HeaderHandler) { + if sp.core == nil || sp.core.Indexer() == nil { + return + } + + txPool := sp.txCoordinator.GetAllCurrentUsedTxs(block.TxBlock) + scPool := sp.txCoordinator.GetAllCurrentUsedTxs(block.SmartContractResultBlock) + rewardPool := sp.txCoordinator.GetAllCurrentUsedTxs(block.RewardsBlock) + + for hash, tx := range scPool { + txPool[hash] = tx + } + for hash, tx := range rewardPool { + txPool[hash] = tx + } + + go 
sp.core.Indexer().SaveBlock(body, header, txPool) } // RestoreBlockIntoPools restores the TxBlock and MetaBlock into associated pools func (sp *shardProcessor) RestoreBlockIntoPools(headerHandler data.HeaderHandler, bodyHandler data.BodyHandler) error { - if headerHandler == nil || headerHandler.IsInterfaceNil() { - return process.ErrNilBlockHeader - } - if bodyHandler == nil || bodyHandler.IsInterfaceNil() { - return process.ErrNilTxBlockBody - } - - body, ok := bodyHandler.(block.Body) - if !ok { - return process.ErrWrongTypeAssertion - } - - header, ok := headerHandler.(*block.Header) - if !ok { - return process.ErrWrongTypeAssertion - } - - restoredTxNr, _, err := sp.txCoordinator.RestoreBlockDataFromStorage(body) - go sp.txCounter.substractRestoredTxs(restoredTxNr) - if err != nil { - return err - } - - miniBlockHashes := header.MapMiniBlockHashesToShards() - err = sp.restoreMetaBlockIntoPool(miniBlockHashes, header.MetaBlockHashes) - if err != nil { - return err - } - - sp.removeLastNotarized() - - return nil + if headerHandler == nil || headerHandler.IsInterfaceNil() { + return process.ErrNilBlockHeader + } + if bodyHandler == nil || bodyHandler.IsInterfaceNil() { + return process.ErrNilTxBlockBody + } + + body, ok := bodyHandler.(block.Body) + if !ok { + return process.ErrWrongTypeAssertion + } + + header, ok := headerHandler.(*block.Header) + if !ok { + return process.ErrWrongTypeAssertion + } + + restoredTxNr, _, err := sp.txCoordinator.RestoreBlockDataFromStorage(body) + go sp.txCounter.substractRestoredTxs(restoredTxNr) + if err != nil { + return err + } + + miniBlockHashes := header.MapMiniBlockHashesToShards() + err = sp.restoreMetaBlockIntoPool(miniBlockHashes, header.MetaBlockHashes) + if err != nil { + return err + } + + sp.removeLastNotarized() + + return nil } func (sp *shardProcessor) restoreMetaBlockIntoPool(miniBlockHashes map[string]uint32, metaBlockHashes [][]byte) error { - metaBlockPool := sp.dataPool.MetaBlocks() - if metaBlockPool == nil { - return process.ErrNilMetaBlockPool - } - - metaHeaderNoncesPool := sp.dataPool.HeadersNonces() - if metaHeaderNoncesPool == nil { - return process.ErrNilMetaHeadersNoncesDataPool - } - - for _, metaBlockHash := range metaBlockHashes { - buff, err := sp.store.Get(dataRetriever.MetaBlockUnit, metaBlockHash) - if err != nil { - continue - } - - metaBlock := block.MetaBlock{} - err = sp.marshalizer.Unmarshal(&metaBlock, buff) - if err != nil { - log.Error(err.Error()) - continue - } - - metaBlockPool.Put(metaBlockHash, &metaBlock) - syncMap := &dataPool.ShardIdHashSyncMap{} - syncMap.Store(metaBlock.GetShardID(), metaBlockHash) - metaHeaderNoncesPool.Merge(metaBlock.Nonce, syncMap) - - err = sp.store.GetStorer(dataRetriever.MetaBlockUnit).Remove(metaBlockHash) - if err != nil { - log.Error(err.Error()) - } - - nonceToByteSlice := sp.uint64Converter.ToByteSlice(metaBlock.Nonce) - err = sp.store.GetStorer(dataRetriever.MetaHdrNonceHashDataUnit).Remove(nonceToByteSlice) - if err != nil { - log.Error(err.Error()) - } - } - - for _, metaBlockKey := range metaBlockPool.Keys() { - if len(miniBlockHashes) == 0 { - break - } - metaBlock, ok := metaBlockPool.Peek(metaBlockKey) - if !ok { - log.Error(process.ErrNilMetaBlockHeader.Error()) - continue - } - - hdr, ok := metaBlock.(data.HeaderHandler) - if !ok { - metaBlockPool.Remove(metaBlockKey) - log.Error(process.ErrWrongTypeAssertion.Error()) - continue - } - - crossMiniBlockHashes := hdr.GetMiniBlockHeadersWithDst(sp.shardCoordinator.SelfId()) - for key := range miniBlockHashes { - _, 
ok := crossMiniBlockHashes[key] - if !ok { - continue - } - - hdr.SetMiniBlockProcessed([]byte(key), false) - } - } - - return nil + metaBlockPool := sp.dataPool.MetaBlocks() + if metaBlockPool == nil { + return process.ErrNilMetaBlockPool + } + + metaHeaderNoncesPool := sp.dataPool.HeadersNonces() + if metaHeaderNoncesPool == nil { + return process.ErrNilMetaHeadersNoncesDataPool + } + + for _, metaBlockHash := range metaBlockHashes { + buff, err := sp.store.Get(dataRetriever.MetaBlockUnit, metaBlockHash) + if err != nil { + continue + } + + metaBlock := block.MetaBlock{} + err = sp.marshalizer.Unmarshal(&metaBlock, buff) + if err != nil { + log.Error(err.Error()) + continue + } + + metaBlockPool.Put(metaBlockHash, &metaBlock) + syncMap := &dataPool.ShardIdHashSyncMap{} + syncMap.Store(metaBlock.GetShardID(), metaBlockHash) + metaHeaderNoncesPool.Merge(metaBlock.Nonce, syncMap) + + err = sp.store.GetStorer(dataRetriever.MetaBlockUnit).Remove(metaBlockHash) + if err != nil { + log.Error(err.Error()) + } + + nonceToByteSlice := sp.uint64Converter.ToByteSlice(metaBlock.Nonce) + err = sp.store.GetStorer(dataRetriever.MetaHdrNonceHashDataUnit).Remove(nonceToByteSlice) + if err != nil { + log.Error(err.Error()) + } + } + + for _, metaBlockKey := range metaBlockPool.Keys() { + if len(miniBlockHashes) == 0 { + break + } + metaBlock, ok := metaBlockPool.Peek(metaBlockKey) + if !ok { + log.Error(process.ErrNilMetaBlockHeader.Error()) + continue + } + + hdr, ok := metaBlock.(data.HeaderHandler) + if !ok { + metaBlockPool.Remove(metaBlockKey) + log.Error(process.ErrWrongTypeAssertion.Error()) + continue + } + + crossMiniBlockHashes := hdr.GetMiniBlockHeadersWithDst(sp.shardCoordinator.SelfId()) + for key := range miniBlockHashes { + _, ok := crossMiniBlockHashes[key] + if !ok { + continue + } + + hdr.SetMiniBlockProcessed([]byte(key), false) + } + } + + return nil } // CreateBlockBody creates a a list of miniblocks by filling them with transactions out of the transactions pools // as long as the transactions limit for the block has not been reached and there is still time to add transactions func (sp *shardProcessor) CreateBlockBody(round uint64, haveTime func() bool) (data.BodyHandler, error) { - log.Debug(fmt.Sprintf("started creating block body in round %d\n", round)) + log.Debug(fmt.Sprintf("started creating block body in round %d\n", round)) - sp.txCoordinator.CreateBlockStarted() - sp.blockSizeThrottler.ComputeMaxItems() + sp.txCoordinator.CreateBlockStarted() + sp.blockSizeThrottler.ComputeMaxItems() - miniBlocks, err := sp.createMiniBlocks(sp.shardCoordinator.NumberOfShards(), sp.blockSizeThrottler.MaxItemsToAdd(), round, haveTime) - if err != nil { - return nil, err - } + miniBlocks, err := sp.createMiniBlocks(sp.shardCoordinator.NumberOfShards(), sp.blockSizeThrottler.MaxItemsToAdd(), round, haveTime) + if err != nil { + return nil, err + } - return miniBlocks, nil + return miniBlocks, nil } // CommitBlock commits the block in the blockchain if everything was checked successfully func (sp *shardProcessor) CommitBlock( - chainHandler data.ChainHandler, - headerHandler data.HeaderHandler, - bodyHandler data.BodyHandler, + chainHandler data.ChainHandler, + headerHandler data.HeaderHandler, + bodyHandler data.BodyHandler, ) error { - var err error - defer func() { - if err != nil { - sp.RevertAccountState() - } - }() - - err = checkForNils(chainHandler, headerHandler, bodyHandler) - if err != nil { - return err - } - - log.Debug(fmt.Sprintf("started committing block with round %d and nonce 
%d\n", - headerHandler.GetRound(), - headerHandler.GetNonce())) - - err = sp.checkBlockValidity(chainHandler, headerHandler, bodyHandler) - if err != nil { - return err - } - - header, ok := headerHandler.(*block.Header) - if !ok { - err = process.ErrWrongTypeAssertion - return err - } - - buff, err := sp.marshalizer.Marshal(header) - if err != nil { - return err - } - - headerHash := sp.hasher.Compute(string(buff)) - nonceToByteSlice := sp.uint64Converter.ToByteSlice(header.Nonce) - hdrNonceHashDataUnit := dataRetriever.ShardHdrNonceHashDataUnit + dataRetriever.UnitType(header.ShardId) - - errNotCritical := sp.store.Put(hdrNonceHashDataUnit, nonceToByteSlice, headerHash) - log.LogIfError(errNotCritical) - - errNotCritical = sp.store.Put(dataRetriever.BlockHeaderUnit, headerHash, buff) - log.LogIfError(errNotCritical) - - headerNoncePool := sp.dataPool.HeadersNonces() - if headerNoncePool == nil { - err = process.ErrNilDataPoolHolder - return err - } - - //TODO: Should be analyzed if put in pool is really necessary or not (right now there is no action of removing them) - syncMap := &dataPool.ShardIdHashSyncMap{} - syncMap.Store(headerHandler.GetShardID(), headerHash) - headerNoncePool.Merge(headerHandler.GetNonce(), syncMap) - - body, ok := bodyHandler.(block.Body) - if !ok { - err = process.ErrWrongTypeAssertion - return err - } - - err = sp.txCoordinator.SaveBlockDataToStorage(body) - if err != nil { - return err - } - - for i := 0; i < len(body); i++ { - buff, err = sp.marshalizer.Marshal(body[i]) - if err != nil { - return err - } - - miniBlockHash := sp.hasher.Compute(string(buff)) - errNotCritical = sp.store.Put(dataRetriever.MiniBlockUnit, miniBlockHash, buff) - log.LogIfError(errNotCritical) - } - - processedMetaHdrs, errNotCritical := sp.getProcessedMetaBlocksFromPool(body, header) - if errNotCritical != nil { - log.Debug(errNotCritical.Error()) - } - - err = sp.saveLastNotarizedHeader(sharding.MetachainShardId, processedMetaHdrs) - if err != nil { - return err - } - - headerMeta, err := sp.getLastNotarizedHdr(sharding.MetachainShardId) - if err != nil { - return err - } - - sp.appStatusHandler.SetStringValue(core.MetricCrossCheckBlockHeight, fmt.Sprintf("meta %d", headerMeta.GetNonce())) - - _, err = sp.accounts.Commit() - if err != nil { - return err - } - - log.Info(fmt.Sprintf("shardBlock with nonce %d and hash %s has been committed successfully\n", - header.Nonce, - core.ToB64(headerHash))) - - sp.blocksTracker.AddBlock(header) - - errNotCritical = sp.txCoordinator.RemoveBlockDataFromPool(body) - if errNotCritical != nil { - log.Debug(errNotCritical.Error()) - } - - errNotCritical = sp.removeProcessedMetablocksFromPool(processedMetaHdrs) - if errNotCritical != nil { - log.Debug(errNotCritical.Error()) - } - - finalHeader, finalHeaderHash, errNotCritical := sp.getHighestHdrForOwnShardFromMetachain(header.Round) - if errNotCritical != nil { - log.Debug(errNotCritical.Error()) - } - - errNotCritical = sp.forkDetector.AddHeader(header, headerHash, process.BHProcessed, finalHeader, finalHeaderHash) - if errNotCritical != nil { - log.Debug(errNotCritical.Error()) - } - - hdrsToAttestFinality := uint32(header.Nonce - finalHeader.Nonce) - sp.removeNotarizedHdrsBehindFinal(hdrsToAttestFinality) - - err = chainHandler.SetCurrentBlockBody(body) - if err != nil { - return err - } - - err = chainHandler.SetCurrentBlockHeader(header) - if err != nil { - return err - } - - chainHandler.SetCurrentBlockHeaderHash(headerHash) - - sp.indexBlockIfNeeded(bodyHandler, headerHandler) - - // write 
data to log - go sp.txCounter.displayLogInfo( - header, - body, - headerHash, - sp.shardCoordinator.NumberOfShards(), - sp.shardCoordinator.SelfId(), - sp.dataPool, - ) - - sp.blockSizeThrottler.Succeed(header.Round) - - return nil + var err error + defer func() { + if err != nil { + sp.RevertAccountState() + } + }() + + err = checkForNils(chainHandler, headerHandler, bodyHandler) + if err != nil { + return err + } + + log.Debug(fmt.Sprintf("started committing block with round %d and nonce %d\n", + headerHandler.GetRound(), + headerHandler.GetNonce())) + + err = sp.checkBlockValidity(chainHandler, headerHandler, bodyHandler) + if err != nil { + return err + } + + header, ok := headerHandler.(*block.Header) + if !ok { + err = process.ErrWrongTypeAssertion + return err + } + + buff, err := sp.marshalizer.Marshal(header) + if err != nil { + return err + } + + headerHash := sp.hasher.Compute(string(buff)) + nonceToByteSlice := sp.uint64Converter.ToByteSlice(header.Nonce) + hdrNonceHashDataUnit := dataRetriever.ShardHdrNonceHashDataUnit + dataRetriever.UnitType(header.ShardId) + + errNotCritical := sp.store.Put(hdrNonceHashDataUnit, nonceToByteSlice, headerHash) + log.LogIfError(errNotCritical) + + errNotCritical = sp.store.Put(dataRetriever.BlockHeaderUnit, headerHash, buff) + log.LogIfError(errNotCritical) + + headerNoncePool := sp.dataPool.HeadersNonces() + if headerNoncePool == nil { + err = process.ErrNilDataPoolHolder + return err + } + + //TODO: Should be analyzed if put in pool is really necessary or not (right now there is no action of removing them) + syncMap := &dataPool.ShardIdHashSyncMap{} + syncMap.Store(headerHandler.GetShardID(), headerHash) + headerNoncePool.Merge(headerHandler.GetNonce(), syncMap) + + body, ok := bodyHandler.(block.Body) + if !ok { + err = process.ErrWrongTypeAssertion + return err + } + + err = sp.txCoordinator.SaveBlockDataToStorage(body) + if err != nil { + return err + } + + for i := 0; i < len(body); i++ { + buff, err = sp.marshalizer.Marshal(body[i]) + if err != nil { + return err + } + + miniBlockHash := sp.hasher.Compute(string(buff)) + errNotCritical = sp.store.Put(dataRetriever.MiniBlockUnit, miniBlockHash, buff) + log.LogIfError(errNotCritical) + } + + processedMetaHdrs, errNotCritical := sp.getProcessedMetaBlocksFromPool(body, header) + if errNotCritical != nil { + log.Debug(errNotCritical.Error()) + } + + err = sp.saveLastNotarizedHeader(sharding.MetachainShardId, processedMetaHdrs) + if err != nil { + return err + } + + headerMeta, err := sp.getLastNotarizedHdr(sharding.MetachainShardId) + if err != nil { + return err + } + + sp.appStatusHandler.SetStringValue(core.MetricCrossCheckBlockHeight, fmt.Sprintf("meta %d", headerMeta.GetNonce())) + + _, err = sp.accounts.Commit() + if err != nil { + return err + } + + log.Info(fmt.Sprintf("shardBlock with nonce %d and hash %s has been committed successfully\n", + header.Nonce, + core.ToB64(headerHash))) + + sp.blocksTracker.AddBlock(header) + + errNotCritical = sp.txCoordinator.RemoveBlockDataFromPool(body) + if errNotCritical != nil { + log.Debug(errNotCritical.Error()) + } + + errNotCritical = sp.removeProcessedMetablocksFromPool(processedMetaHdrs) + if errNotCritical != nil { + log.Debug(errNotCritical.Error()) + } + + finalHeader, finalHeaderHash, errNotCritical := sp.getHighestHdrForOwnShardFromMetachain(header.Round) + if errNotCritical != nil { + log.Debug(errNotCritical.Error()) + } + + errNotCritical = sp.forkDetector.AddHeader(header, headerHash, process.BHProcessed, finalHeader, 
finalHeaderHash) + if errNotCritical != nil { + log.Debug(errNotCritical.Error()) + } + + hdrsToAttestFinality := uint32(header.Nonce - finalHeader.Nonce) + sp.removeNotarizedHdrsBehindFinal(hdrsToAttestFinality) + + err = chainHandler.SetCurrentBlockBody(body) + if err != nil { + return err + } + + err = chainHandler.SetCurrentBlockHeader(header) + if err != nil { + return err + } + + chainHandler.SetCurrentBlockHeaderHash(headerHash) + + sp.indexBlockIfNeeded(bodyHandler, headerHandler) + + // write data to log + go sp.txCounter.displayLogInfo( + header, + body, + headerHash, + sp.shardCoordinator.NumberOfShards(), + sp.shardCoordinator.SelfId(), + sp.dataPool, + ) + + sp.blockSizeThrottler.Succeed(header.Round) + + return nil } // getHighestHdrForOwnShardFromMetachain calculates the highest shard header notarized by metachain func (sp *shardProcessor) getHighestHdrForOwnShardFromMetachain(round uint64) (*block.Header, []byte, error) { - highestNonceOwnShIdHdr := &block.Header{} - highestNonceOwnShIdHdrHash, _ := core.CalculateHash(sp.marshalizer, sp.hasher, highestNonceOwnShIdHdr) + highestNonceOwnShIdHdr := &block.Header{} + highestNonceOwnShIdHdrHash, _ := core.CalculateHash(sp.marshalizer, sp.hasher, highestNonceOwnShIdHdr) - orderedMetaBlocks, err := sp.getOrderedMetaBlocks(round) - if err != nil { - return highestNonceOwnShIdHdr, highestNonceOwnShIdHdrHash, err - } + orderedMetaBlocks, err := sp.getOrderedMetaBlocks(round) + if err != nil { + return highestNonceOwnShIdHdr, highestNonceOwnShIdHdrHash, err + } - lastNotarizedMetaHdr, err := sp.getLastNotarizedHdr(sharding.MetachainShardId) - if err != nil { - return highestNonceOwnShIdHdr, highestNonceOwnShIdHdrHash, err - } + lastNotarizedMetaHdr, err := sp.getLastNotarizedHdr(sharding.MetachainShardId) + if err != nil { + return highestNonceOwnShIdHdr, highestNonceOwnShIdHdrHash, err + } - metaHdr, ok := lastNotarizedMetaHdr.(*block.MetaBlock) - if !ok { - return highestNonceOwnShIdHdr, highestNonceOwnShIdHdrHash, process.ErrWrongTypeAssertion - } + metaHdr, ok := lastNotarizedMetaHdr.(*block.MetaBlock) + if !ok { + return highestNonceOwnShIdHdr, highestNonceOwnShIdHdrHash, process.ErrWrongTypeAssertion + } - highestNonceOwnShIdHdr = sp.getHighestHdrForShardFromMetachain(sp.shardCoordinator.SelfId(), metaHdr) + highestNonceOwnShIdHdr = sp.getHighestHdrForShardFromMetachain(sp.shardCoordinator.SelfId(), metaHdr) - for i := 0; i < len(orderedMetaBlocks); i++ { - hdr, ok := orderedMetaBlocks[i].hdr.(*block.MetaBlock) - if !ok { - continue - } + for i := 0; i < len(orderedMetaBlocks); i++ { + hdr, ok := orderedMetaBlocks[i].hdr.(*block.MetaBlock) + if !ok { + continue + } - err = sp.isHdrConstructionValid(hdr, lastNotarizedMetaHdr) - if err != nil { - continue - } + err = sp.isHdrConstructionValid(hdr, lastNotarizedMetaHdr) + if err != nil { + continue + } - isFinal := sp.isMetaHeaderFinal(hdr, orderedMetaBlocks, i+1) - if !isFinal { - continue - } + isFinal := sp.isMetaHeaderFinal(hdr, orderedMetaBlocks, i+1) + if !isFinal { + continue + } - lastNotarizedMetaHdr = hdr + lastNotarizedMetaHdr = hdr - highestHdr := sp.getHighestHdrForShardFromMetachain(sp.shardCoordinator.SelfId(), hdr) - if highestHdr.Nonce > highestNonceOwnShIdHdr.Nonce { - highestNonceOwnShIdHdr = highestHdr - } - } + highestHdr := sp.getHighestHdrForShardFromMetachain(sp.shardCoordinator.SelfId(), hdr) + if highestHdr.Nonce > highestNonceOwnShIdHdr.Nonce { + highestNonceOwnShIdHdr = highestHdr + } + } - highestNonceOwnShIdHdrHash, _ = 
core.CalculateHash(sp.marshalizer, sp.hasher, highestNonceOwnShIdHdr) + highestNonceOwnShIdHdrHash, _ = core.CalculateHash(sp.marshalizer, sp.hasher, highestNonceOwnShIdHdr) - return highestNonceOwnShIdHdr, highestNonceOwnShIdHdrHash, nil + return highestNonceOwnShIdHdr, highestNonceOwnShIdHdrHash, nil } func (sp *shardProcessor) getHighestHdrForShardFromMetachain(shardId uint32, hdr *block.MetaBlock) *block.Header { - highestNonceOwnShIdHdr := &block.Header{} - // search for own shard id in shardInfo from metaHeaders - for _, shardInfo := range hdr.ShardInfo { - if shardInfo.ShardId != shardId { - continue - } - - ownHdr, err := process.GetShardHeader(shardInfo.HeaderHash, sp.dataPool.Headers(), sp.marshalizer, sp.store) - if err != nil { - continue - } - - // save the highest nonce - if ownHdr.GetNonce() > highestNonceOwnShIdHdr.GetNonce() { - highestNonceOwnShIdHdr = ownHdr - } - } - - return highestNonceOwnShIdHdr + highestNonceOwnShIdHdr := &block.Header{} + // search for own shard id in shardInfo from metaHeaders + for _, shardInfo := range hdr.ShardInfo { + if shardInfo.ShardId != shardId { + continue + } + + ownHdr, err := process.GetShardHeader(shardInfo.HeaderHash, sp.dataPool.Headers(), sp.marshalizer, sp.store) + if err != nil { + continue + } + + // save the highest nonce + if ownHdr.GetNonce() > highestNonceOwnShIdHdr.GetNonce() { + highestNonceOwnShIdHdr = ownHdr + } + } + + return highestNonceOwnShIdHdr } // getProcessedMetaBlocksFromPool returns all the meta blocks fully processed func (sp *shardProcessor) getProcessedMetaBlocksFromPool(body block.Body, header *block.Header) ([]data.HeaderHandler, error) { - if body == nil { - return nil, process.ErrNilTxBlockBody - } - if header == nil { - return nil, process.ErrNilBlockHeader - } - - miniBlockHashes := make(map[int][]byte, 0) - for i := 0; i < len(body); i++ { - miniBlock := body[i] - if miniBlock.SenderShardID == sp.shardCoordinator.SelfId() { - continue - } - - mbHash, err := core.CalculateHash(sp.marshalizer, sp.hasher, miniBlock) - if err != nil { - log.Debug(err.Error()) - continue - } - - miniBlockHashes[i] = mbHash - } - - log.Debug(fmt.Sprintf("cross mini blocks in body: %d\n", len(miniBlockHashes))) - - processedMetaHdrs := make([]data.HeaderHandler, 0) - for _, metaBlockKey := range header.MetaBlockHashes { - metaBlock, _ := sp.dataPool.MetaBlocks().Peek(metaBlockKey) - if metaBlock == nil { - log.Debug(process.ErrNilMetaBlockHeader.Error()) - continue - } - - hdr, ok := metaBlock.(*block.MetaBlock) - if !ok { - log.Debug(process.ErrWrongTypeAssertion.Error()) - continue - } - - log.Debug(fmt.Sprintf("meta header nonce: %d\n", hdr.Nonce)) - - crossMiniBlockHashes := hdr.GetMiniBlockHeadersWithDst(sp.shardCoordinator.SelfId()) - for key := range miniBlockHashes { - _, ok = crossMiniBlockHashes[string(miniBlockHashes[key])] - if !ok { - continue - } - - hdr.SetMiniBlockProcessed(miniBlockHashes[key], true) - delete(miniBlockHashes, key) - } - - log.Debug(fmt.Sprintf("cross mini blocks in meta header: %d\n", len(crossMiniBlockHashes))) - - processedAll := true - for key := range crossMiniBlockHashes { - if !hdr.GetMiniBlockProcessed([]byte(key)) { - processedAll = false - break - } - } - - if processedAll { - processedMetaHdrs = append(processedMetaHdrs, hdr) - } - } - - return processedMetaHdrs, nil + if body == nil { + return nil, process.ErrNilTxBlockBody + } + if header == nil { + return nil, process.ErrNilBlockHeader + } + + miniBlockHashes := make(map[int][]byte, 0) + for i := 0; i < len(body); i++ { + 
miniBlock := body[i] + if miniBlock.SenderShardID == sp.shardCoordinator.SelfId() { + continue + } + + mbHash, err := core.CalculateHash(sp.marshalizer, sp.hasher, miniBlock) + if err != nil { + log.Debug(err.Error()) + continue + } + + miniBlockHashes[i] = mbHash + } + + log.Debug(fmt.Sprintf("cross mini blocks in body: %d\n", len(miniBlockHashes))) + + processedMetaHdrs := make([]data.HeaderHandler, 0) + for _, metaBlockKey := range header.MetaBlockHashes { + metaBlock, _ := sp.dataPool.MetaBlocks().Peek(metaBlockKey) + if metaBlock == nil { + log.Debug(process.ErrNilMetaBlockHeader.Error()) + continue + } + + hdr, ok := metaBlock.(*block.MetaBlock) + if !ok { + log.Debug(process.ErrWrongTypeAssertion.Error()) + continue + } + + log.Debug(fmt.Sprintf("meta header nonce: %d\n", hdr.Nonce)) + + crossMiniBlockHashes := hdr.GetMiniBlockHeadersWithDst(sp.shardCoordinator.SelfId()) + for key := range miniBlockHashes { + _, ok = crossMiniBlockHashes[string(miniBlockHashes[key])] + if !ok { + continue + } + + hdr.SetMiniBlockProcessed(miniBlockHashes[key], true) + delete(miniBlockHashes, key) + } + + log.Debug(fmt.Sprintf("cross mini blocks in meta header: %d\n", len(crossMiniBlockHashes))) + + processedAll := true + for key := range crossMiniBlockHashes { + if !hdr.GetMiniBlockProcessed([]byte(key)) { + processedAll = false + break + } + } + + if processedAll { + processedMetaHdrs = append(processedMetaHdrs, hdr) + } + } + + return processedMetaHdrs, nil } func (sp *shardProcessor) removeProcessedMetablocksFromPool(processedMetaHdrs []data.HeaderHandler) error { - lastNotarizedMetaHdr, err := sp.getLastNotarizedHdr(sharding.MetachainShardId) - if err != nil { - return err - } - - processed := 0 - unnotarized := len(sp.blocksTracker.UnnotarisedBlocks()) - // processedMetaHdrs is also sorted - for i := 0; i < len(processedMetaHdrs); i++ { - hdr := processedMetaHdrs[i] - - // remove process finished - if hdr.GetNonce() > lastNotarizedMetaHdr.GetNonce() { - continue - } - - errNotCritical := sp.blocksTracker.RemoveNotarisedBlocks(hdr) - log.LogIfError(errNotCritical) - - // metablock was processed and finalized - buff, err := sp.marshalizer.Marshal(hdr) - if err != nil { - log.Error(err.Error()) - continue - } - - headerHash := sp.hasher.Compute(string(buff)) - nonceToByteSlice := sp.uint64Converter.ToByteSlice(hdr.GetNonce()) - err = sp.store.Put(dataRetriever.MetaHdrNonceHashDataUnit, nonceToByteSlice, headerHash) - if err != nil { - log.Error(err.Error()) - continue - } - - err = sp.store.Put(dataRetriever.MetaBlockUnit, headerHash, buff) - if err != nil { - log.Error(err.Error()) - continue - } - - sp.dataPool.MetaBlocks().Remove(headerHash) - sp.dataPool.HeadersNonces().Remove(hdr.GetNonce(), sharding.MetachainShardId) - - log.Debug(fmt.Sprintf("metaBlock with round %d nonce %d and hash %s has been processed completely and removed from pool\n", - hdr.GetRound(), - hdr.GetNonce(), - core.ToB64(headerHash))) - - processed++ - } - - if processed > 0 { - log.Debug(fmt.Sprintf("%d meta blocks have been processed completely and removed from pool\n", processed)) - } - - notarized := unnotarized - len(sp.blocksTracker.UnnotarisedBlocks()) - if notarized > 0 { - log.Debug(fmt.Sprintf("%d shard blocks have been notarised by metachain\n", notarized)) - } - - return nil + lastNotarizedMetaHdr, err := sp.getLastNotarizedHdr(sharding.MetachainShardId) + if err != nil { + return err + } + + processed := 0 + unnotarized := len(sp.blocksTracker.UnnotarisedBlocks()) + // processedMetaHdrs is also sorted + for i 
:= 0; i < len(processedMetaHdrs); i++ { + hdr := processedMetaHdrs[i] + + // remove process finished + if hdr.GetNonce() > lastNotarizedMetaHdr.GetNonce() { + continue + } + + errNotCritical := sp.blocksTracker.RemoveNotarisedBlocks(hdr) + log.LogIfError(errNotCritical) + + // metablock was processed and finalized + buff, err := sp.marshalizer.Marshal(hdr) + if err != nil { + log.Error(err.Error()) + continue + } + + headerHash := sp.hasher.Compute(string(buff)) + nonceToByteSlice := sp.uint64Converter.ToByteSlice(hdr.GetNonce()) + err = sp.store.Put(dataRetriever.MetaHdrNonceHashDataUnit, nonceToByteSlice, headerHash) + if err != nil { + log.Error(err.Error()) + continue + } + + err = sp.store.Put(dataRetriever.MetaBlockUnit, headerHash, buff) + if err != nil { + log.Error(err.Error()) + continue + } + + sp.dataPool.MetaBlocks().Remove(headerHash) + sp.dataPool.HeadersNonces().Remove(hdr.GetNonce(), sharding.MetachainShardId) + + log.Debug(fmt.Sprintf("metaBlock with round %d nonce %d and hash %s has been processed completely and removed from pool\n", + hdr.GetRound(), + hdr.GetNonce(), + core.ToB64(headerHash))) + + processed++ + } + + if processed > 0 { + log.Debug(fmt.Sprintf("%d meta blocks have been processed completely and removed from pool\n", processed)) + } + + notarized := unnotarized - len(sp.blocksTracker.UnnotarisedBlocks()) + if notarized > 0 { + log.Debug(fmt.Sprintf("%d shard blocks have been notarised by metachain\n", notarized)) + } + + return nil } // receivedMetaBlock is a callback function when a new metablock was received // upon receiving, it parses the new metablock and requests miniblocks and transactions // which destination is the current shard func (sp *shardProcessor) receivedMetaBlock(metaBlockHash []byte) { - metaBlksCache := sp.dataPool.MetaBlocks() - if metaBlksCache == nil { - return - } - - metaHdrsNoncesCache := sp.dataPool.HeadersNonces() - if metaHdrsNoncesCache == nil && sp.metaBlockFinality > 0 { - return - } - - miniBlksCache := sp.dataPool.MiniBlocks() - if miniBlksCache == nil || miniBlksCache.IsInterfaceNil() { - return - } - - obj, ok := metaBlksCache.Peek(metaBlockHash) - if !ok { - return - } - - metaBlock, ok := obj.(data.HeaderHandler) - if !ok { - return - } - - log.Debug(fmt.Sprintf("received metablock with hash %s and nonce %d from network\n", - core.ToB64(metaBlockHash), - metaBlock.GetNonce())) - - sp.mutRequestedMetaHdrsHashes.Lock() - - if !sp.allNeededMetaHdrsFound { - if sp.requestedMetaHdrsHashes[string(metaBlockHash)] { - delete(sp.requestedMetaHdrsHashes, string(metaBlockHash)) - - if metaBlock.GetNonce() > sp.currHighestMetaHdrNonce { - sp.currHighestMetaHdrNonce = metaBlock.GetNonce() - } - } - - lenReqMetaHdrsHashes := len(sp.requestedMetaHdrsHashes) - areFinalAttestingHdrsInCache := false - if lenReqMetaHdrsHashes == 0 { - requestedBlockHeaders := sp.requestFinalMissingHeaders() - if requestedBlockHeaders == 0 { - areFinalAttestingHdrsInCache = true - } else { - log.Info(fmt.Sprintf("requested %d missing final meta headers\n", requestedBlockHeaders)) - } - } - - sp.allNeededMetaHdrsFound = lenReqMetaHdrsHashes == 0 && areFinalAttestingHdrsInCache - - sp.mutRequestedMetaHdrsHashes.Unlock() - - if lenReqMetaHdrsHashes == 0 && areFinalAttestingHdrsInCache { - sp.chRcvAllMetaHdrs <- true - } - } else { - sp.mutRequestedMetaHdrsHashes.Unlock() - } - - lastNotarizedHdr, err := sp.getLastNotarizedHdr(sharding.MetachainShardId) - if err != nil { - return - } - if metaBlock.GetNonce() <= lastNotarizedHdr.GetNonce() { - return - } - 
if metaBlock.GetRound() <= lastNotarizedHdr.GetRound() { - return - } - - sp.txCoordinator.RequestMiniBlocks(metaBlock) + metaBlksCache := sp.dataPool.MetaBlocks() + if metaBlksCache == nil { + return + } + + metaHdrsNoncesCache := sp.dataPool.HeadersNonces() + if metaHdrsNoncesCache == nil && sp.metaBlockFinality > 0 { + return + } + + miniBlksCache := sp.dataPool.MiniBlocks() + if miniBlksCache == nil || miniBlksCache.IsInterfaceNil() { + return + } + + obj, ok := metaBlksCache.Peek(metaBlockHash) + if !ok { + return + } + + metaBlock, ok := obj.(data.HeaderHandler) + if !ok { + return + } + + log.Debug(fmt.Sprintf("received metablock with hash %s and nonce %d from network\n", + core.ToB64(metaBlockHash), + metaBlock.GetNonce())) + + sp.mutRequestedMetaHdrsHashes.Lock() + + if !sp.allNeededMetaHdrsFound { + if sp.requestedMetaHdrsHashes[string(metaBlockHash)] { + delete(sp.requestedMetaHdrsHashes, string(metaBlockHash)) + + if metaBlock.GetNonce() > sp.currHighestMetaHdrNonce { + sp.currHighestMetaHdrNonce = metaBlock.GetNonce() + } + } + + lenReqMetaHdrsHashes := len(sp.requestedMetaHdrsHashes) + areFinalAttestingHdrsInCache := false + if lenReqMetaHdrsHashes == 0 { + requestedBlockHeaders := sp.requestFinalMissingHeaders() + if requestedBlockHeaders == 0 { + areFinalAttestingHdrsInCache = true + } else { + log.Info(fmt.Sprintf("requested %d missing final meta headers\n", requestedBlockHeaders)) + } + } + + sp.allNeededMetaHdrsFound = lenReqMetaHdrsHashes == 0 && areFinalAttestingHdrsInCache + + sp.mutRequestedMetaHdrsHashes.Unlock() + + if lenReqMetaHdrsHashes == 0 && areFinalAttestingHdrsInCache { + sp.chRcvAllMetaHdrs <- true + } + } else { + sp.mutRequestedMetaHdrsHashes.Unlock() + } + + lastNotarizedHdr, err := sp.getLastNotarizedHdr(sharding.MetachainShardId) + if err != nil { + return + } + if metaBlock.GetNonce() <= lastNotarizedHdr.GetNonce() { + return + } + if metaBlock.GetRound() <= lastNotarizedHdr.GetRound() { + return + } + + sp.txCoordinator.RequestMiniBlocks(metaBlock) } // requestFinalMissingHeaders requests the headers needed to accept the current selected headers for processing the // current block. 
It requests the metaBlockFinality headers greater than the highest meta header related to the block // which should be processed func (sp *shardProcessor) requestFinalMissingHeaders() uint32 { - requestedBlockHeaders := uint32(0) - for i := sp.currHighestMetaHdrNonce + 1; i <= sp.currHighestMetaHdrNonce+uint64(sp.metaBlockFinality); i++ { - if sp.currHighestMetaHdrNonce == uint64(0) { - continue - } - - _, _, err := process.GetMetaHeaderFromPoolWithNonce( - i, - sp.dataPool.MetaBlocks(), - sp.dataPool.HeadersNonces()) - if err != nil { - requestedBlockHeaders++ - go sp.onRequestHeaderHandlerByNonce(sharding.MetachainShardId, i) - } - } - - return requestedBlockHeaders + requestedBlockHeaders := uint32(0) + for i := sp.currHighestMetaHdrNonce + 1; i <= sp.currHighestMetaHdrNonce+uint64(sp.metaBlockFinality); i++ { + if sp.currHighestMetaHdrNonce == uint64(0) { + continue + } + + _, _, err := process.GetMetaHeaderFromPoolWithNonce( + i, + sp.dataPool.MetaBlocks(), + sp.dataPool.HeadersNonces()) + if err != nil { + requestedBlockHeaders++ + go sp.onRequestHeaderHandlerByNonce(sharding.MetachainShardId, i) + } + } + + return requestedBlockHeaders } func (sp *shardProcessor) requestMetaHeaders(header *block.Header) (uint32, uint32) { - sp.mutRequestedMetaHdrsHashes.Lock() - - sp.allNeededMetaHdrsFound = true - - if len(header.MetaBlockHashes) == 0 { - sp.mutRequestedMetaHdrsHashes.Unlock() - return 0, 0 - } - - missingHeaderHashes := sp.computeMissingHeaders(header) - - requestedBlockHeaders := uint32(0) - sp.requestedMetaHdrsHashes = make(map[string]bool) - for _, hash := range missingHeaderHashes { - requestedBlockHeaders++ - sp.requestedMetaHdrsHashes[string(hash)] = true - go sp.onRequestHeaderHandler(sharding.MetachainShardId, hash) - } - - requestedFinalBlockHeaders := uint32(0) - if requestedBlockHeaders > 0 { - sp.allNeededMetaHdrsFound = false - } else { - requestedFinalBlockHeaders = sp.requestFinalMissingHeaders() - if requestedFinalBlockHeaders > 0 { - sp.allNeededMetaHdrsFound = false - } - } - - if !sp.allNeededMetaHdrsFound { - process.EmptyChannel(sp.chRcvAllMetaHdrs) - } - - sp.mutRequestedMetaHdrsHashes.Unlock() - - return requestedBlockHeaders, requestedFinalBlockHeaders + sp.mutRequestedMetaHdrsHashes.Lock() + + sp.allNeededMetaHdrsFound = true + + if len(header.MetaBlockHashes) == 0 { + sp.mutRequestedMetaHdrsHashes.Unlock() + return 0, 0 + } + + missingHeaderHashes := sp.computeMissingHeaders(header) + + requestedBlockHeaders := uint32(0) + sp.requestedMetaHdrsHashes = make(map[string]bool) + for _, hash := range missingHeaderHashes { + requestedBlockHeaders++ + sp.requestedMetaHdrsHashes[string(hash)] = true + go sp.onRequestHeaderHandler(sharding.MetachainShardId, hash) + } + + requestedFinalBlockHeaders := uint32(0) + if requestedBlockHeaders > 0 { + sp.allNeededMetaHdrsFound = false + } else { + requestedFinalBlockHeaders = sp.requestFinalMissingHeaders() + if requestedFinalBlockHeaders > 0 { + sp.allNeededMetaHdrsFound = false + } + } + + if !sp.allNeededMetaHdrsFound { + process.EmptyChannel(sp.chRcvAllMetaHdrs) + } + + sp.mutRequestedMetaHdrsHashes.Unlock() + + return requestedBlockHeaders, requestedFinalBlockHeaders } func (sp *shardProcessor) computeMissingHeaders(header *block.Header) [][]byte { - missingHeaders := make([][]byte, 0) - sp.currHighestMetaHdrNonce = uint64(0) - - for i := 0; i < len(header.MetaBlockHashes); i++ { - hdr, err := process.GetMetaHeaderFromPool( - header.MetaBlockHashes[i], - sp.dataPool.MetaBlocks()) - if err != nil { - missingHeaders = 
append(missingHeaders, header.MetaBlockHashes[i]) - continue - } - - if hdr.Nonce > sp.currHighestMetaHdrNonce { - sp.currHighestMetaHdrNonce = hdr.Nonce - } - } - - return missingHeaders + missingHeaders := make([][]byte, 0) + sp.currHighestMetaHdrNonce = uint64(0) + + for i := 0; i < len(header.MetaBlockHashes); i++ { + hdr, err := process.GetMetaHeaderFromPool( + header.MetaBlockHashes[i], + sp.dataPool.MetaBlocks()) + if err != nil { + missingHeaders = append(missingHeaders, header.MetaBlockHashes[i]) + continue + } + + if hdr.Nonce > sp.currHighestMetaHdrNonce { + sp.currHighestMetaHdrNonce = hdr.Nonce + } + } + + return missingHeaders } func (sp *shardProcessor) verifyCrossShardMiniBlockDstMe(hdr *block.Header) error { - mMiniBlockMeta, err := sp.getAllMiniBlockDstMeFromMeta(hdr.Round, hdr.MetaBlockHashes) - if err != nil { - return err - } - - miniBlockDstMe := hdr.GetMiniBlockHeadersWithDst(sp.shardCoordinator.SelfId()) - for mbHash := range miniBlockDstMe { - if _, ok := mMiniBlockMeta[mbHash]; !ok { - return process.ErrCrossShardMBWithoutConfirmationFromMeta - } - } - - return nil + mMiniBlockMeta, err := sp.getAllMiniBlockDstMeFromMeta(hdr.Round, hdr.MetaBlockHashes) + if err != nil { + return err + } + + miniBlockDstMe := hdr.GetMiniBlockHeadersWithDst(sp.shardCoordinator.SelfId()) + for mbHash := range miniBlockDstMe { + if _, ok := mMiniBlockMeta[mbHash]; !ok { + return process.ErrCrossShardMBWithoutConfirmationFromMeta + } + } + + return nil } func (sp *shardProcessor) getAllMiniBlockDstMeFromMeta(round uint64, metaHashes [][]byte) (map[string][]byte, error) { - metaBlockCache := sp.dataPool.MetaBlocks() - if metaBlockCache == nil { - return nil, process.ErrNilMetaBlockPool - } - - lastHdr, err := sp.getLastNotarizedHdr(sharding.MetachainShardId) - if err != nil { - return nil, err - } - - mMiniBlockMeta := make(map[string][]byte) - for _, metaHash := range metaHashes { - val, _ := metaBlockCache.Peek(metaHash) - if val == nil { - continue - } - - hdr, ok := val.(*block.MetaBlock) - if !ok { - continue - } - - if hdr.GetRound() > round { - continue - } - if hdr.GetRound() <= lastHdr.GetRound() { - continue - } - if hdr.GetNonce() <= lastHdr.GetNonce() { - continue - } - - miniBlockDstMe := hdr.GetMiniBlockHeadersWithDst(sp.shardCoordinator.SelfId()) - for mbHash := range miniBlockDstMe { - mMiniBlockMeta[mbHash] = metaHash - } - } - - return mMiniBlockMeta, nil + metaBlockCache := sp.dataPool.MetaBlocks() + if metaBlockCache == nil { + return nil, process.ErrNilMetaBlockPool + } + + lastHdr, err := sp.getLastNotarizedHdr(sharding.MetachainShardId) + if err != nil { + return nil, err + } + + mMiniBlockMeta := make(map[string][]byte) + for _, metaHash := range metaHashes { + val, _ := metaBlockCache.Peek(metaHash) + if val == nil { + continue + } + + hdr, ok := val.(*block.MetaBlock) + if !ok { + continue + } + + if hdr.GetRound() > round { + continue + } + if hdr.GetRound() <= lastHdr.GetRound() { + continue + } + if hdr.GetNonce() <= lastHdr.GetNonce() { + continue + } + + miniBlockDstMe := hdr.GetMiniBlockHeadersWithDst(sp.shardCoordinator.SelfId()) + for mbHash := range miniBlockDstMe { + mMiniBlockMeta[mbHash] = metaHash + } + } + + return mMiniBlockMeta, nil } func (sp *shardProcessor) getOrderedMetaBlocks(round uint64) ([]*hashAndHdr, error) { - metaBlockCache := sp.dataPool.MetaBlocks() - if metaBlockCache == nil { - return nil, process.ErrNilMetaBlockPool - } - - lastHdr, err := sp.getLastNotarizedHdr(sharding.MetachainShardId) - if err != nil { - return nil, err - } 
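	// The loop below keeps only meta blocks that are strictly newer than the last notarized
	// metachain header (both round and nonce) and not newer than the requested round, then
	// sorts the survivors by ascending nonce. Written out as a single condition (illustrative):
	//   keep := hdr.GetRound() <= round &&
	//       hdr.GetRound() > lastHdr.GetRound() &&
	//       hdr.GetNonce() > lastHdr.GetNonce()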
- - orderedMetaBlocks := make([]*hashAndHdr, 0) - for _, key := range metaBlockCache.Keys() { - val, _ := metaBlockCache.Peek(key) - if val == nil { - continue - } - - hdr, ok := val.(*block.MetaBlock) - if !ok { - continue - } - - if hdr.GetRound() > round { - continue - } - if hdr.GetRound() <= lastHdr.GetRound() { - continue - } - if hdr.GetNonce() <= lastHdr.GetNonce() { - continue - } - - orderedMetaBlocks = append(orderedMetaBlocks, &hashAndHdr{hdr: hdr, hash: key}) - } - - sort.Slice(orderedMetaBlocks, func(i, j int) bool { - return orderedMetaBlocks[i].hdr.GetNonce() < orderedMetaBlocks[j].hdr.GetNonce() - }) - - return orderedMetaBlocks, nil + metaBlockCache := sp.dataPool.MetaBlocks() + if metaBlockCache == nil { + return nil, process.ErrNilMetaBlockPool + } + + lastHdr, err := sp.getLastNotarizedHdr(sharding.MetachainShardId) + if err != nil { + return nil, err + } + + orderedMetaBlocks := make([]*hashAndHdr, 0) + for _, key := range metaBlockCache.Keys() { + val, _ := metaBlockCache.Peek(key) + if val == nil { + continue + } + + hdr, ok := val.(*block.MetaBlock) + if !ok { + continue + } + + if hdr.GetRound() > round { + continue + } + if hdr.GetRound() <= lastHdr.GetRound() { + continue + } + if hdr.GetNonce() <= lastHdr.GetNonce() { + continue + } + + orderedMetaBlocks = append(orderedMetaBlocks, &hashAndHdr{hdr: hdr, hash: key}) + } + + sort.Slice(orderedMetaBlocks, func(i, j int) bool { + return orderedMetaBlocks[i].hdr.GetNonce() < orderedMetaBlocks[j].hdr.GetNonce() + }) + + return orderedMetaBlocks, nil } // isMetaHeaderFinal verifies if meta is trully final, in order to not do rollbacks func (sp *shardProcessor) isMetaHeaderFinal(currHdr data.HeaderHandler, sortedHdrs []*hashAndHdr, startPos int) bool { - if currHdr == nil || currHdr.IsInterfaceNil() { - return false - } - if sortedHdrs == nil { - return false - } - - // verify if there are "K" block after current to make this one final - lastVerifiedHdr := currHdr - nextBlocksVerified := 0 - - for i := startPos; i < len(sortedHdrs); i++ { - if nextBlocksVerified >= sp.metaBlockFinality { - return true - } - - // found a header with the next nonce - tmpHdr := sortedHdrs[i].hdr - if tmpHdr.GetNonce() == lastVerifiedHdr.GetNonce()+1 { - err := sp.isHdrConstructionValid(tmpHdr, lastVerifiedHdr) - if err != nil { - continue - } - - lastVerifiedHdr = tmpHdr - nextBlocksVerified += 1 - } - } - - if nextBlocksVerified >= sp.metaBlockFinality { - return true - } - - return false + if currHdr == nil || currHdr.IsInterfaceNil() { + return false + } + if sortedHdrs == nil { + return false + } + + // verify if there are "K" block after current to make this one final + lastVerifiedHdr := currHdr + nextBlocksVerified := 0 + + for i := startPos; i < len(sortedHdrs); i++ { + if nextBlocksVerified >= sp.metaBlockFinality { + return true + } + + // found a header with the next nonce + tmpHdr := sortedHdrs[i].hdr + if tmpHdr.GetNonce() == lastVerifiedHdr.GetNonce()+1 { + err := sp.isHdrConstructionValid(tmpHdr, lastVerifiedHdr) + if err != nil { + continue + } + + lastVerifiedHdr = tmpHdr + nextBlocksVerified += 1 + } + } + + if nextBlocksVerified >= sp.metaBlockFinality { + return true + } + + return false } // full verification through metachain header func (sp *shardProcessor) createAndProcessCrossMiniBlocksDstMe( - noShards uint32, - maxItemsInBlock uint32, - round uint64, - haveTime func() bool, + noShards uint32, + maxItemsInBlock uint32, + round uint64, + haveTime func() bool, ) (block.MiniBlockSlice, [][]byte, uint32, error) { - 
metaBlockCache := sp.dataPool.MetaBlocks() - if metaBlockCache == nil || metaBlockCache.IsInterfaceNil() { - return nil, nil, 0, process.ErrNilMetaBlockPool - } - - miniBlockCache := sp.dataPool.MiniBlocks() - if miniBlockCache == nil || miniBlockCache.IsInterfaceNil() { - return nil, nil, 0, process.ErrNilMiniBlockPool - } - - txPool := sp.dataPool.Transactions() - if txPool == nil || txPool.IsInterfaceNil() { - return nil, nil, 0, process.ErrNilTransactionPool - } - - miniBlocks := make(block.MiniBlockSlice, 0) - nrTxAdded := uint32(0) - - orderedMetaBlocks, err := sp.getOrderedMetaBlocks(round) - if err != nil { - return nil, nil, 0, err - } - - log.Info(fmt.Sprintf("meta blocks ordered: %d\n", len(orderedMetaBlocks))) - - lastMetaHdr, err := sp.getLastNotarizedHdr(sharding.MetachainShardId) - if err != nil { - return nil, nil, 0, err - } - - // do processing in order - usedMetaHdrsHashes := make([][]byte, 0) - for i := 0; i < len(orderedMetaBlocks); i++ { - if !haveTime() { - log.Info(fmt.Sprintf("time is up after putting %d cross txs with destination to current shard\n", nrTxAdded)) - break - } - - itemsAddedInHeader := uint32(len(usedMetaHdrsHashes) + len(miniBlocks)) - if itemsAddedInHeader >= maxItemsInBlock { - log.Info(fmt.Sprintf("%d max records allowed to be added in shard header has been reached\n", maxItemsInBlock)) - break - } - - hdr, ok := orderedMetaBlocks[i].hdr.(*block.MetaBlock) - if !ok { - continue - } - - err = sp.isHdrConstructionValid(hdr, lastMetaHdr) - if err != nil { - continue - } - - isFinal := sp.isMetaHeaderFinal(hdr, orderedMetaBlocks, i+1) - if !isFinal { - continue - } - - if len(hdr.GetMiniBlockHeadersWithDst(sp.shardCoordinator.SelfId())) == 0 { - usedMetaHdrsHashes = append(usedMetaHdrsHashes, orderedMetaBlocks[i].hash) - lastMetaHdr = hdr - continue - } - - itemsAddedInBody := nrTxAdded - if itemsAddedInBody >= maxItemsInBlock { - continue - } - - maxTxSpaceRemained := int32(maxItemsInBlock) - int32(itemsAddedInBody) - maxMbSpaceRemained := int32(maxItemsInBlock) - int32(itemsAddedInHeader) - 1 - - if maxTxSpaceRemained > 0 && maxMbSpaceRemained > 0 { - currMBProcessed, currTxsAdded, hdrProcessFinished := sp.txCoordinator.CreateMbsAndProcessCrossShardTransactionsDstMe( - hdr, - uint32(maxTxSpaceRemained), - uint32(maxMbSpaceRemained), - round, - haveTime) - - // all txs processed, add to processed miniblocks - miniBlocks = append(miniBlocks, currMBProcessed...) 
- nrTxAdded = nrTxAdded + currTxsAdded - - if currTxsAdded > 0 { - usedMetaHdrsHashes = append(usedMetaHdrsHashes, orderedMetaBlocks[i].hash) - } - - if !hdrProcessFinished { - break - } - - lastMetaHdr = hdr - } - } - - sp.mutUsedMetaHdrsHashes.Lock() - sp.usedMetaHdrsHashes[round] = usedMetaHdrsHashes - sp.mutUsedMetaHdrsHashes.Unlock() - - return miniBlocks, usedMetaHdrsHashes, nrTxAdded, nil + metaBlockCache := sp.dataPool.MetaBlocks() + if metaBlockCache == nil || metaBlockCache.IsInterfaceNil() { + return nil, nil, 0, process.ErrNilMetaBlockPool + } + + miniBlockCache := sp.dataPool.MiniBlocks() + if miniBlockCache == nil || miniBlockCache.IsInterfaceNil() { + return nil, nil, 0, process.ErrNilMiniBlockPool + } + + txPool := sp.dataPool.Transactions() + if txPool == nil || txPool.IsInterfaceNil() { + return nil, nil, 0, process.ErrNilTransactionPool + } + + miniBlocks := make(block.MiniBlockSlice, 0) + nrTxAdded := uint32(0) + + orderedMetaBlocks, err := sp.getOrderedMetaBlocks(round) + if err != nil { + return nil, nil, 0, err + } + + log.Info(fmt.Sprintf("meta blocks ordered: %d\n", len(orderedMetaBlocks))) + + lastMetaHdr, err := sp.getLastNotarizedHdr(sharding.MetachainShardId) + if err != nil { + return nil, nil, 0, err + } + + // do processing in order + usedMetaHdrsHashes := make([][]byte, 0) + for i := 0; i < len(orderedMetaBlocks); i++ { + if !haveTime() { + log.Info(fmt.Sprintf("time is up after putting %d cross txs with destination to current shard\n", nrTxAdded)) + break + } + + itemsAddedInHeader := uint32(len(usedMetaHdrsHashes) + len(miniBlocks)) + if itemsAddedInHeader >= maxItemsInBlock { + log.Info(fmt.Sprintf("%d max records allowed to be added in shard header has been reached\n", maxItemsInBlock)) + break + } + + hdr, ok := orderedMetaBlocks[i].hdr.(*block.MetaBlock) + if !ok { + continue + } + + err = sp.isHdrConstructionValid(hdr, lastMetaHdr) + if err != nil { + continue + } + + isFinal := sp.isMetaHeaderFinal(hdr, orderedMetaBlocks, i+1) + if !isFinal { + continue + } + + if len(hdr.GetMiniBlockHeadersWithDst(sp.shardCoordinator.SelfId())) == 0 { + usedMetaHdrsHashes = append(usedMetaHdrsHashes, orderedMetaBlocks[i].hash) + lastMetaHdr = hdr + continue + } + + itemsAddedInBody := nrTxAdded + if itemsAddedInBody >= maxItemsInBlock { + continue + } + + maxTxSpaceRemained := int32(maxItemsInBlock) - int32(itemsAddedInBody) + maxMbSpaceRemained := int32(maxItemsInBlock) - int32(itemsAddedInHeader) - 1 + + if maxTxSpaceRemained > 0 && maxMbSpaceRemained > 0 { + currMBProcessed, currTxsAdded, hdrProcessFinished := sp.txCoordinator.CreateMbsAndProcessCrossShardTransactionsDstMe( + hdr, + uint32(maxTxSpaceRemained), + uint32(maxMbSpaceRemained), + round, + haveTime) + + // all txs processed, add to processed miniblocks + miniBlocks = append(miniBlocks, currMBProcessed...) 
+ nrTxAdded = nrTxAdded + currTxsAdded + + if currTxsAdded > 0 { + usedMetaHdrsHashes = append(usedMetaHdrsHashes, orderedMetaBlocks[i].hash) + } + + if !hdrProcessFinished { + break + } + + lastMetaHdr = hdr + } + } + + sp.mutUsedMetaHdrsHashes.Lock() + sp.usedMetaHdrsHashes[round] = usedMetaHdrsHashes + sp.mutUsedMetaHdrsHashes.Unlock() + + return miniBlocks, usedMetaHdrsHashes, nrTxAdded, nil } func (sp *shardProcessor) createMiniBlocks( - noShards uint32, - maxItemsInBlock uint32, - round uint64, - haveTime func() bool, + noShards uint32, + maxItemsInBlock uint32, + round uint64, + haveTime func() bool, ) (block.Body, error) { - miniBlocks := make(block.Body, 0) + miniBlocks := make(block.Body, 0) - if sp.accounts.JournalLen() != 0 { - return nil, process.ErrAccountStateDirty - } + if sp.accounts.JournalLen() != 0 { + return nil, process.ErrAccountStateDirty + } - if !haveTime() { - log.Info(fmt.Sprintf("time is up after entered in createMiniBlocks method\n")) - return nil, process.ErrTimeIsOut - } + if !haveTime() { + log.Info(fmt.Sprintf("time is up after entered in createMiniBlocks method\n")) + return nil, process.ErrTimeIsOut + } - txPool := sp.dataPool.Transactions() - if txPool == nil { - return nil, process.ErrNilTransactionPool - } + txPool := sp.dataPool.Transactions() + if txPool == nil { + return nil, process.ErrNilTransactionPool + } - destMeMiniBlocks, usedMetaHdrsHashes, txs, err := sp.createAndProcessCrossMiniBlocksDstMe(noShards, maxItemsInBlock, round, haveTime) - if err != nil { - log.Info(err.Error()) - } + destMeMiniBlocks, usedMetaHdrsHashes, txs, err := sp.createAndProcessCrossMiniBlocksDstMe(noShards, maxItemsInBlock, round, haveTime) + if err != nil { + log.Info(err.Error()) + } - log.Debug(fmt.Sprintf("processed %d miniblocks and %d txs with destination in self shard\n", len(destMeMiniBlocks), txs)) + log.Debug(fmt.Sprintf("processed %d miniblocks and %d txs with destination in self shard\n", len(destMeMiniBlocks), txs)) - if len(destMeMiniBlocks) > 0 { - miniBlocks = append(miniBlocks, destMeMiniBlocks...) - } + if len(destMeMiniBlocks) > 0 { + miniBlocks = append(miniBlocks, destMeMiniBlocks...) + } - if !haveTime() { - log.Info(fmt.Sprintf("time is up added %d transactions\n", txs)) - return miniBlocks, nil - } + if !haveTime() { + log.Info(fmt.Sprintf("time is up added %d transactions\n", txs)) + return miniBlocks, nil + } - maxTxSpaceRemained := int32(maxItemsInBlock) - int32(txs) - maxMbSpaceRemained := int32(maxItemsInBlock) - int32(len(destMeMiniBlocks)) - int32(len(usedMetaHdrsHashes)) + maxTxSpaceRemained := int32(maxItemsInBlock) - int32(txs) + maxMbSpaceRemained := int32(maxItemsInBlock) - int32(len(destMeMiniBlocks)) - int32(len(usedMetaHdrsHashes)) - if maxTxSpaceRemained > 0 && maxMbSpaceRemained > 0 { - mbFromMe := sp.txCoordinator.CreateMbsAndProcessTransactionsFromMe( - uint32(maxTxSpaceRemained), - uint32(maxMbSpaceRemained), - round, - haveTime) + if maxTxSpaceRemained > 0 && maxMbSpaceRemained > 0 { + mbFromMe := sp.txCoordinator.CreateMbsAndProcessTransactionsFromMe( + uint32(maxTxSpaceRemained), + uint32(maxMbSpaceRemained), + round, + haveTime) - if len(mbFromMe) > 0 { - miniBlocks = append(miniBlocks, mbFromMe...) - } - } + if len(mbFromMe) > 0 { + miniBlocks = append(miniBlocks, mbFromMe...) 
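Editorial aside (illustrative sketch, not part of the patch): before asking the transaction coordinator for more mini-blocks, createMiniBlocks above checks two budgets derived from maxItemsInBlock: one for transactions already added and one for header items (destination-me mini-blocks plus used meta header hashes). A hedged sketch of that guard, with assumed parameter names, is:

package budget

// remainingSpace mirrors the guard in createMiniBlocks: both the transaction
// budget and the header-item budget must stay positive before
// CreateMbsAndProcessTransactionsFromMe is invoked.
func remainingSpace(maxItems, txsAdded, mbsAdded, hdrsUsed uint32) (txSpace, mbSpace int32, ok bool) {
	txSpace = int32(maxItems) - int32(txsAdded)
	mbSpace = int32(maxItems) - int32(mbsAdded) - int32(hdrsUsed)
	ok = txSpace > 0 && mbSpace > 0
	return txSpace, mbSpace, ok
}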
+ } + } - log.Info(fmt.Sprintf("creating mini blocks has been finished: created %d mini blocks\n", len(miniBlocks))) - return miniBlocks, nil + log.Info(fmt.Sprintf("creating mini blocks has been finished: created %d mini blocks\n", len(miniBlocks))) + return miniBlocks, nil } // CreateBlockHeader creates a miniblock header list given a block body func (sp *shardProcessor) CreateBlockHeader(bodyHandler data.BodyHandler, round uint64, haveTime func() bool) (data.HeaderHandler, error) { - log.Debug(fmt.Sprintf("started creating block header in round %d\n", round)) - header := &block.Header{ - MiniBlockHeaders: make([]block.MiniBlockHeader, 0), - RootHash: sp.getRootHash(), - ShardId: sp.shardCoordinator.SelfId(), - PrevRandSeed: make([]byte, 0), - RandSeed: make([]byte, 0), - } - - defer func() { - go sp.checkAndRequestIfMetaHeadersMissing(round) - }() - - if bodyHandler == nil || bodyHandler.IsInterfaceNil() { - return header, nil - } - - body, ok := bodyHandler.(block.Body) - if !ok { - return nil, process.ErrWrongTypeAssertion - } - - mbLen := len(body) - totalTxCount := 0 - miniBlockHeaders := make([]block.MiniBlockHeader, mbLen) - for i := 0; i < mbLen; i++ { - txCount := len(body[i].TxHashes) - totalTxCount += txCount - mbBytes, err := sp.marshalizer.Marshal(body[i]) - if err != nil { - return nil, err - } - mbHash := sp.hasher.Compute(string(mbBytes)) - - miniBlockHeaders[i] = block.MiniBlockHeader{ - Hash: mbHash, - SenderShardID: body[i].SenderShardID, - ReceiverShardID: body[i].ReceiverShardID, - TxCount: uint32(txCount), - Type: body[i].Type, - } - } - - header.MiniBlockHeaders = miniBlockHeaders - header.TxCount = uint32(totalTxCount) - - sp.appStatusHandler.SetUInt64Value(core.MetricNumTxInBlock, uint64(totalTxCount)) - sp.appStatusHandler.SetUInt64Value(core.MetricNumMiniBlocks, uint64(mbLen)) - - sp.mutUsedMetaHdrsHashes.Lock() - - if usedMetaHdrsHashes, ok := sp.usedMetaHdrsHashes[round]; ok { - header.MetaBlockHashes = usedMetaHdrsHashes - delete(sp.usedMetaHdrsHashes, round) - } - - sp.mutUsedMetaHdrsHashes.Unlock() - - sp.blockSizeThrottler.Add( - round, - core.Max(header.ItemsInBody(), header.ItemsInHeader())) - - return header, nil + log.Debug(fmt.Sprintf("started creating block header in round %d\n", round)) + header := &block.Header{ + MiniBlockHeaders: make([]block.MiniBlockHeader, 0), + RootHash: sp.getRootHash(), + ShardId: sp.shardCoordinator.SelfId(), + PrevRandSeed: make([]byte, 0), + RandSeed: make([]byte, 0), + } + + defer func() { + go sp.checkAndRequestIfMetaHeadersMissing(round) + }() + + if bodyHandler == nil || bodyHandler.IsInterfaceNil() { + return header, nil + } + + body, ok := bodyHandler.(block.Body) + if !ok { + return nil, process.ErrWrongTypeAssertion + } + + mbLen := len(body) + totalTxCount := 0 + miniBlockHeaders := make([]block.MiniBlockHeader, mbLen) + for i := 0; i < mbLen; i++ { + txCount := len(body[i].TxHashes) + totalTxCount += txCount + mbBytes, err := sp.marshalizer.Marshal(body[i]) + if err != nil { + return nil, err + } + mbHash := sp.hasher.Compute(string(mbBytes)) + + miniBlockHeaders[i] = block.MiniBlockHeader{ + Hash: mbHash, + SenderShardID: body[i].SenderShardID, + ReceiverShardID: body[i].ReceiverShardID, + TxCount: uint32(txCount), + Type: body[i].Type, + } + } + + header.MiniBlockHeaders = miniBlockHeaders + header.TxCount = uint32(totalTxCount) + + sp.appStatusHandler.SetUInt64Value(core.MetricNumTxInBlock, uint64(totalTxCount)) + sp.appStatusHandler.SetUInt64Value(core.MetricNumMiniBlocks, uint64(mbLen)) + + 
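Editorial aside (illustrative sketch, not part of the patch): the loop in CreateBlockHeader above turns each mini-block of the body into a MiniBlockHeader by marshalling it, hashing the bytes, and recording sender/receiver shard and transaction count. A self-contained approximation, substituting sha256 for the injected hasher and a hypothetical marshal function, is:

package headers

import "crypto/sha256"

// marshalFn stands in for the node's Marshalizer; an assumption for the sketch.
type marshalFn func(obj interface{}) ([]byte, error)

type miniBlock struct {
	TxHashes        [][]byte
	SenderShardID   uint32
	ReceiverShardID uint32
}

type miniBlockHeader struct {
	Hash            []byte
	SenderShardID   uint32
	ReceiverShardID uint32
	TxCount         uint32
}

// buildMiniBlockHeaders mirrors the loop in CreateBlockHeader: each mini-block
// is marshalled, hashed, and summarised into a header entry; the total
// transaction count is accumulated for header.TxCount.
func buildMiniBlockHeaders(body []miniBlock, marshal marshalFn) ([]miniBlockHeader, uint32, error) {
	headers := make([]miniBlockHeader, 0, len(body))
	totalTxs := uint32(0)
	for _, mb := range body {
		buff, err := marshal(mb)
		if err != nil {
			return nil, 0, err
		}
		hash := sha256.Sum256(buff) // the real code uses the injected hasher
		headers = append(headers, miniBlockHeader{
			Hash:            hash[:],
			SenderShardID:   mb.SenderShardID,
			ReceiverShardID: mb.ReceiverShardID,
			TxCount:         uint32(len(mb.TxHashes)),
		})
		totalTxs += uint32(len(mb.TxHashes))
	}
	return headers, totalTxs, nil
}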
sp.mutUsedMetaHdrsHashes.Lock() + + if usedMetaHdrsHashes, ok := sp.usedMetaHdrsHashes[round]; ok { + header.MetaBlockHashes = usedMetaHdrsHashes + delete(sp.usedMetaHdrsHashes, round) + } + + sp.mutUsedMetaHdrsHashes.Unlock() + + sp.blockSizeThrottler.Add( + round, + core.Max(header.ItemsInBody(), header.ItemsInHeader())) + + return header, nil } func (sp *shardProcessor) waitForMetaHdrHashes(waitTime time.Duration) error { - select { - case <-sp.chRcvAllMetaHdrs: - return nil - case <-time.After(waitTime): - return process.ErrTimeIsOut - } + select { + case <-sp.chRcvAllMetaHdrs: + return nil + case <-time.After(waitTime): + return process.ErrTimeIsOut + } } // MarshalizedDataToBroadcast prepares underlying data into a marshalized object according to destination func (sp *shardProcessor) MarshalizedDataToBroadcast( - header data.HeaderHandler, - bodyHandler data.BodyHandler, + header data.HeaderHandler, + bodyHandler data.BodyHandler, ) (map[uint32][]byte, map[string][][]byte, error) { - if bodyHandler == nil || bodyHandler.IsInterfaceNil() { - return nil, nil, process.ErrNilMiniBlocks - } + if bodyHandler == nil || bodyHandler.IsInterfaceNil() { + return nil, nil, process.ErrNilMiniBlocks + } - body, ok := bodyHandler.(block.Body) - if !ok { - return nil, nil, process.ErrWrongTypeAssertion - } + body, ok := bodyHandler.(block.Body) + if !ok { + return nil, nil, process.ErrWrongTypeAssertion + } - mrsData := make(map[uint32][]byte) - bodies, mrsTxs := sp.txCoordinator.CreateMarshalizedData(body) + mrsData := make(map[uint32][]byte) + bodies, mrsTxs := sp.txCoordinator.CreateMarshalizedData(body) - for shardId, subsetBlockBody := range bodies { - buff, err := sp.marshalizer.Marshal(subsetBlockBody) - if err != nil { - log.Debug(process.ErrMarshalWithoutSuccess.Error()) - continue - } - mrsData[shardId] = buff - } + for shardId, subsetBlockBody := range bodies { + buff, err := sp.marshalizer.Marshal(subsetBlockBody) + if err != nil { + log.Debug(process.ErrMarshalWithoutSuccess.Error()) + continue + } + mrsData[shardId] = buff + } - return mrsData, mrsTxs, nil + return mrsData, mrsTxs, nil } // DecodeBlockBody method decodes block body from a given byte array func (sp *shardProcessor) DecodeBlockBody(dta []byte) data.BodyHandler { - if dta == nil { - return nil - } + if dta == nil { + return nil + } - var body block.Body + var body block.Body - err := sp.marshalizer.Unmarshal(&body, dta) - if err != nil { - log.Error(err.Error()) - return nil - } + err := sp.marshalizer.Unmarshal(&body, dta) + if err != nil { + log.Error(err.Error()) + return nil + } - return body + return body } // DecodeBlockHeader method decodes block header from a given byte array func (sp *shardProcessor) DecodeBlockHeader(dta []byte) data.HeaderHandler { - if dta == nil { - return nil - } + if dta == nil { + return nil + } - var header block.Header + var header block.Header - err := sp.marshalizer.Unmarshal(&header, dta) - if err != nil { - log.Error(err.Error()) - return nil - } + err := sp.marshalizer.Unmarshal(&header, dta) + if err != nil { + log.Error(err.Error()) + return nil + } - return &header + return &header } // IsInterfaceNil returns true if there is no value under the interface func (sp *shardProcessor) IsInterfaceNil() bool { - if sp == nil { - return true - } - return false + if sp == nil { + return true + } + return false } diff --git a/process/block/shardblock_test.go b/process/block/shardblock_test.go index 86e36ee40e6..e7cf1065e76 100644 --- a/process/block/shardblock_test.go +++ 
b/process/block/shardblock_test.go @@ -4658,11 +4658,11 @@ func TestShardProcessor_GetHighestHdrForOwnShardFromMetachainNothingToProcess(t func TestShardProcessor_GetHighestHdrForOwnShardFromMetachaiMetaHdrsWithoutOwnHdr(t *testing.T) { t.Parallel() - dataPool := integrationTests.CreateTestShardDataPool(nil, 3) - store := initStore() - hasher := &mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} - genesisBlocks := createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)) + dataPool := integrationTests.CreateTestShardDataPool(nil, 3) + store := initStore() + hasher := &mock.HasherMock{} + marshalizer := &mock.MarshalizerMock{} + genesisBlocks := createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)) sp, _ := blproc.NewShardProcessor( &mock.ServiceContainerMock{}, @@ -4724,11 +4724,11 @@ func TestShardProcessor_GetHighestHdrForOwnShardFromMetachaiMetaHdrsWithoutOwnHd func TestShardProcessor_GetHighestHdrForOwnShardFromMetachaiMetaHdrsWithOwnHdrButNotStored(t *testing.T) { t.Parallel() - dataPool := integrationTests.CreateTestShardDataPool(nil, 3) - store := initStore() - hasher := &mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} - genesisBlocks := createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)) + dataPool := integrationTests.CreateTestShardDataPool(nil, 3) + store := initStore() + hasher := &mock.HasherMock{} + marshalizer := &mock.MarshalizerMock{} + genesisBlocks := createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)) sp, _ := blproc.NewShardProcessor( &mock.ServiceContainerMock{}, @@ -4789,11 +4789,11 @@ func TestShardProcessor_GetHighestHdrForOwnShardFromMetachaiMetaHdrsWithOwnHdrBu func TestShardProcessor_GetHighestHdrForOwnShardFromMetachaiMetaHdrsWithOwnHdrStored(t *testing.T) { t.Parallel() - dataPool := integrationTests.CreateTestShardDataPool(nil, 3) - store := initStore() - hasher := &mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} - genesisBlocks := createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)) + dataPool := integrationTests.CreateTestShardDataPool(nil, 3) + store := initStore() + hasher := &mock.HasherMock{} + marshalizer := &mock.MarshalizerMock{} + genesisBlocks := createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)) sp, _ := blproc.NewShardProcessor( &mock.ServiceContainerMock{}, diff --git a/process/coordinator/process.go b/process/coordinator/process.go index 4db7cfafb1f..d8d8c15b749 100644 --- a/process/coordinator/process.go +++ b/process/coordinator/process.go @@ -2,7 +2,7 @@ package coordinator import ( "fmt" - "sort" + "sort" "sync" "time" @@ -71,12 +71,12 @@ func NewTransactionCoordinator( accounts: accounts, } - tc.miniBlockPool = dataPool.MiniBlocks() - if tc.miniBlockPool == nil || tc.miniBlockPool.IsInterfaceNil() { - return nil, process.ErrNilMiniBlockPool - } + tc.miniBlockPool = dataPool.MiniBlocks() + if tc.miniBlockPool == nil || tc.miniBlockPool.IsInterfaceNil() { + return nil, process.ErrNilMiniBlockPool + } - tc.miniBlockPool.RegisterHandler(tc.receivedMiniBlock) + tc.miniBlockPool.RegisterHandler(tc.receivedMiniBlock) tc.onRequestMiniBlock = requestHandler.RequestMiniBlock tc.requestedTxs = make(map[block.Type]int) @@ -234,24 +234,24 @@ func (tc *transactionCoordinator) SaveBlockDataToStorage(body block.Body) error wg.Wait() - intermediatePreprocSC := tc.getInterimProcessor(block.SmartContractResultBlock) - if intermediatePreprocSC == nil { - return errFound - } + intermediatePreprocSC := tc.getInterimProcessor(block.SmartContractResultBlock) + if intermediatePreprocSC == nil { + 
return errFound + } - err := intermediatePreprocSC.SaveCurrentIntermediateTxToStorage() - if err != nil { - log.Debug(err.Error()) + err := intermediatePreprocSC.SaveCurrentIntermediateTxToStorage() + if err != nil { + log.Debug(err.Error()) - errMutex.Lock() - errFound = err - errMutex.Unlock() - } + errMutex.Lock() + errFound = err + errMutex.Unlock() + } - intermediatePreproc := tc.getInterimProcessor(block.RewardsBlock) - if intermediatePreproc == nil { - return errFound - } + intermediatePreproc := tc.getInterimProcessor(block.RewardsBlock) + if intermediatePreproc == nil { + return errFound + } err = intermediatePreproc.SaveCurrentIntermediateTxToStorage() if err != nil { @@ -362,9 +362,9 @@ func (tc *transactionCoordinator) ProcessBlockTransaction( if separatedBodies[blockType] == nil { continue } - if blockType == block.RewardsBlock { - continue - } + if blockType == block.RewardsBlock { + continue + } preproc := tc.getPreProcessor(blockType) if preproc == nil || preproc.IsInterfaceNil() { @@ -378,21 +378,21 @@ func (tc *transactionCoordinator) ProcessBlockTransaction( } // create the reward txs and make them available for processing - mbRewards := tc.createRewardsMiniBlocks() - preproc := tc.getPreProcessor(block.RewardsBlock) - rewardsPreProc, ok := preproc.(process.RewardTransactionPreProcessor) - if !ok { - return process.ErrWrongTypeAssertion - } + mbRewards := tc.createRewardsMiniBlocks() + preproc := tc.getPreProcessor(block.RewardsBlock) + rewardsPreProc, ok := preproc.(process.RewardTransactionPreProcessor) + if !ok { + return process.ErrWrongTypeAssertion + } - rewardsPreProc.AddComputedRewardMiniBlocks(mbRewards) + rewardsPreProc.AddComputedRewardMiniBlocks(mbRewards) - err := preproc.ProcessBlockTransactions(separatedBodies[block.RewardsBlock], round, haveTime) - if err != nil { - return err - } + err := preproc.ProcessBlockTransactions(separatedBodies[block.RewardsBlock], round, haveTime) + if err != nil { + return err + } - return nil + return nil } // CreateMbsAndProcessCrossShardTransactionsDstMe creates miniblocks and processes cross shard transaction @@ -523,19 +523,19 @@ func (tc *transactionCoordinator) CreateMbsAndProcessTransactionsFromMe( miniBlocks = append(miniBlocks, interMBs...) } - rewardMb := tc.createRewardsMiniBlocks() - if len(rewardMb) == 0 { - log.Error("could not create reward mini-blocks") - } + rewardMb := tc.createRewardsMiniBlocks() + if len(rewardMb) == 0 { + log.Error("could not create reward mini-blocks") + } - rewardsPreProc := tc.getPreProcessor(block.RewardsBlock) - for _, mb := range rewardMb { - err := tc.processCompleteMiniBlock(rewardsPreProc, mb, round, haveTime) - if err != nil { - log.Error(fmt.Sprintf("could not process created reward miniblock: %s", err.Error())) - } - } - miniBlocks = append(miniBlocks, rewardMb...) + rewardsPreProc := tc.getPreProcessor(block.RewardsBlock) + for _, mb := range rewardMb { + err := tc.processCompleteMiniBlock(rewardsPreProc, mb, round, haveTime) + if err != nil { + log.Error(fmt.Sprintf("could not process created reward miniblock: %s", err.Error())) + } + } + miniBlocks = append(miniBlocks, rewardMb...) 
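Editorial aside (illustrative sketch, not part of the patch): the lines added just above extend CreateMbsAndProcessTransactionsFromMe so that reward mini-blocks are created, run through the rewards pre-processor, and appended to the block body, with processing errors only logged. A hedged sketch of that flow, using assumed interface names, is:

package rewards

// These minimal types stand in for the real coordinator collaborators; they
// are assumptions for the sketch only.
type miniBlock struct{}

type rewardsCreator interface {
	CreateRewardsMiniBlocks() []*miniBlock
}

type preProcessor interface {
	ProcessCompleteMiniBlock(mb *miniBlock) error
}

// appendRewardMiniBlocks mirrors the new flow: build the reward mini-blocks,
// process each one completely, and append them all to the body even when an
// individual mini-block fails (the real code only logs such errors).
func appendRewardMiniBlocks(body []*miniBlock, c rewardsCreator, p preProcessor) []*miniBlock {
	rewardMbs := c.CreateRewardsMiniBlocks()
	for _, mb := range rewardMbs {
		if err := p.ProcessCompleteMiniBlock(mb); err != nil {
			// the real code logs the error and still appends the mini-block
			continue
		}
	}
	return append(body, rewardMbs...)
}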
return miniBlocks } diff --git a/process/coordinator/process_test.go b/process/coordinator/process_test.go index 6d41da82251..558a0ba9587 100644 --- a/process/coordinator/process_test.go +++ b/process/coordinator/process_test.go @@ -1,1865 +1,1865 @@ package coordinator import ( - "bytes" - "encoding/hex" - "errors" - "math/big" - "reflect" - "sync" - "sync/atomic" - "testing" - "time" - - "github.com/ElrondNetwork/elrond-go/core" - "github.com/ElrondNetwork/elrond-go/data" - "github.com/ElrondNetwork/elrond-go/data/block" - "github.com/ElrondNetwork/elrond-go/data/rewardTx" - "github.com/ElrondNetwork/elrond-go/data/smartContractResult" - "github.com/ElrondNetwork/elrond-go/data/state" - "github.com/ElrondNetwork/elrond-go/data/transaction" - "github.com/ElrondNetwork/elrond-go/dataRetriever" - "github.com/ElrondNetwork/elrond-go/dataRetriever/shardedData" - "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/process/factory" - "github.com/ElrondNetwork/elrond-go/process/factory/shard" - "github.com/ElrondNetwork/elrond-go/process/mock" - "github.com/ElrondNetwork/elrond-go/storage" - "github.com/ElrondNetwork/elrond-go/storage/memorydb" - "github.com/ElrondNetwork/elrond-go/storage/storageUnit" - "github.com/stretchr/testify/assert" + "bytes" + "encoding/hex" + "errors" + "math/big" + "reflect" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" + "github.com/ElrondNetwork/elrond-go/data/smartContractResult" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/data/transaction" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/dataRetriever/shardedData" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/factory" + "github.com/ElrondNetwork/elrond-go/process/factory/shard" + "github.com/ElrondNetwork/elrond-go/process/mock" + "github.com/ElrondNetwork/elrond-go/storage" + "github.com/ElrondNetwork/elrond-go/storage/memorydb" + "github.com/ElrondNetwork/elrond-go/storage/storageUnit" + "github.com/stretchr/testify/assert" ) func createShardedDataChacherNotifier( - handler data.TransactionHandler, - testHash []byte, + handler data.TransactionHandler, + testHash []byte, ) (func() dataRetriever.ShardedDataCacherNotifier ) { - return func() dataRetriever.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{ - RegisterHandlerCalled: func(i func(key []byte)) {}, - ShardDataStoreCalled: func(id string) (c storage.Cacher) { - return &mock.CacherStub{ - PeekCalled: func(key []byte) (value interface{}, ok bool) { - if reflect.DeepEqual(key, testHash) { - return handler, true - } - return nil, false - }, - KeysCalled: func() [][]byte { - return [][]byte{[]byte("key1"), []byte("key2")} - }, - LenCalled: func() int { - return 0 - }, - } - }, - RemoveSetOfDataFromPoolCalled: func(keys [][]byte, id string) {}, - SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { - if reflect.DeepEqual(key, []byte("tx1_hash")) { - return handler, true - } - return nil, false - }, - AddDataCalled: func(key []byte, data interface{}, cacheId string) { - }, - } - } + return func() dataRetriever.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{ + RegisterHandlerCalled: func(i func(key []byte)) {}, + ShardDataStoreCalled: func(id string) (c storage.Cacher) { + 
return &mock.CacherStub{ + PeekCalled: func(key []byte) (value interface{}, ok bool) { + if reflect.DeepEqual(key, testHash) { + return handler, true + } + return nil, false + }, + KeysCalled: func() [][]byte { + return [][]byte{[]byte("key1"), []byte("key2")} + }, + LenCalled: func() int { + return 0 + }, + } + }, + RemoveSetOfDataFromPoolCalled: func(keys [][]byte, id string) {}, + SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { + if reflect.DeepEqual(key, []byte("tx1_hash")) { + return handler, true + } + return nil, false + }, + AddDataCalled: func(key []byte, data interface{}, cacheId string) { + }, + } + } } func initDataPool(testHash []byte) *mock.PoolsHolderStub { - tx := &transaction.Transaction{Nonce: 10} - sc := &smartContractResult.SmartContractResult{Nonce: 10, SndAddr: []byte("0"), RcvAddr: []byte("1")} - rTx := &rewardTx.RewardTx{Epoch: 0, Round: 1, RcvAddr: []byte("1")} - - txCalled := createShardedDataChacherNotifier(tx, testHash) - unsignedTxHandler := createShardedDataChacherNotifier(sc, testHash) - rewardTxCalled := createShardedDataChacherNotifier(rTx, testHash) - - sdp := &mock.PoolsHolderStub{ - TransactionsCalled: txCalled, - UnsignedTransactionsCalled: unsignedTxHandler, - RewardTransactionsCalled: rewardTxCalled, - HeadersNoncesCalled: func() dataRetriever.Uint64SyncMapCacher { - return &mock.Uint64SyncMapCacherStub{ - MergeCalled: func(u uint64, hashMap dataRetriever.ShardIdHashMap) {}, - HasCalled: func(nonce uint64, shardId uint32) bool { - return true - }, - } - }, - MetaBlocksCalled: func() storage.Cacher { - return &mock.CacherStub{ - GetCalled: func(key []byte) (value interface{}, ok bool) { - if reflect.DeepEqual(key, []byte("tx1_hash")) { - return &transaction.Transaction{Nonce: 10}, true - } - return nil, false - }, - KeysCalled: func() [][]byte { - return nil - }, - LenCalled: func() int { - return 0 - }, - PeekCalled: func(key []byte) (value interface{}, ok bool) { - if reflect.DeepEqual(key, []byte("tx1_hash")) { - return &transaction.Transaction{Nonce: 10}, true - } - return nil, false - }, - RegisterHandlerCalled: func(i func(key []byte)) {}, - } - }, - MiniBlocksCalled: func() storage.Cacher { - cs := &mock.CacherStub{} - cs.RegisterHandlerCalled = func(i func(key []byte)) { - } - cs.GetCalled = func(key []byte) (value interface{}, ok bool) { - if bytes.Equal([]byte("bbb"), key) { - return make(block.MiniBlockSlice, 0), true - } - - return nil, false - } - cs.PeekCalled = func(key []byte) (value interface{}, ok bool) { - if bytes.Equal([]byte("bbb"), key) { - return make(block.MiniBlockSlice, 0), true - } - - return nil, false - } - cs.RegisterHandlerCalled = func(i func(key []byte)) {} - cs.RemoveCalled = func(key []byte) {} - cs.PutCalled = func(key []byte, value interface{}) (evicted bool) { - return false - } - return cs - }, - HeadersCalled: func() storage.Cacher { - cs := &mock.CacherStub{} - cs.RegisterHandlerCalled = func(i func(key []byte)) { - } - return cs - }, - } - return sdp + tx := &transaction.Transaction{Nonce: 10} + sc := &smartContractResult.SmartContractResult{Nonce: 10, SndAddr: []byte("0"), RcvAddr: []byte("1")} + rTx := &rewardTx.RewardTx{Epoch: 0, Round: 1, RcvAddr: []byte("1")} + + txCalled := createShardedDataChacherNotifier(tx, testHash) + unsignedTxHandler := createShardedDataChacherNotifier(sc, testHash) + rewardTxCalled := createShardedDataChacherNotifier(rTx, testHash) + + sdp := &mock.PoolsHolderStub{ + TransactionsCalled: txCalled, + UnsignedTransactionsCalled: unsignedTxHandler, + 
RewardTransactionsCalled: rewardTxCalled, + HeadersNoncesCalled: func() dataRetriever.Uint64SyncMapCacher { + return &mock.Uint64SyncMapCacherStub{ + MergeCalled: func(u uint64, hashMap dataRetriever.ShardIdHashMap) {}, + HasCalled: func(nonce uint64, shardId uint32) bool { + return true + }, + } + }, + MetaBlocksCalled: func() storage.Cacher { + return &mock.CacherStub{ + GetCalled: func(key []byte) (value interface{}, ok bool) { + if reflect.DeepEqual(key, []byte("tx1_hash")) { + return &transaction.Transaction{Nonce: 10}, true + } + return nil, false + }, + KeysCalled: func() [][]byte { + return nil + }, + LenCalled: func() int { + return 0 + }, + PeekCalled: func(key []byte) (value interface{}, ok bool) { + if reflect.DeepEqual(key, []byte("tx1_hash")) { + return &transaction.Transaction{Nonce: 10}, true + } + return nil, false + }, + RegisterHandlerCalled: func(i func(key []byte)) {}, + } + }, + MiniBlocksCalled: func() storage.Cacher { + cs := &mock.CacherStub{} + cs.RegisterHandlerCalled = func(i func(key []byte)) { + } + cs.GetCalled = func(key []byte) (value interface{}, ok bool) { + if bytes.Equal([]byte("bbb"), key) { + return make(block.MiniBlockSlice, 0), true + } + + return nil, false + } + cs.PeekCalled = func(key []byte) (value interface{}, ok bool) { + if bytes.Equal([]byte("bbb"), key) { + return make(block.MiniBlockSlice, 0), true + } + + return nil, false + } + cs.RegisterHandlerCalled = func(i func(key []byte)) {} + cs.RemoveCalled = func(key []byte) {} + cs.PutCalled = func(key []byte, value interface{}) (evicted bool) { + return false + } + return cs + }, + HeadersCalled: func() storage.Cacher { + cs := &mock.CacherStub{} + cs.RegisterHandlerCalled = func(i func(key []byte)) { + } + return cs + }, + } + return sdp } func containsHash(txHashes [][]byte, hash []byte) bool { - for _, txHash := range txHashes { - if bytes.Equal(hash, txHash) { - return true - } - } - return false + for _, txHash := range txHashes { + if bytes.Equal(hash, txHash) { + return true + } + } + return false } func initStore() *dataRetriever.ChainStorer { - store := dataRetriever.NewChainStorer() - store.AddStorer(dataRetriever.TransactionUnit, generateTestUnit()) - store.AddStorer(dataRetriever.MiniBlockUnit, generateTestUnit()) - store.AddStorer(dataRetriever.MetaBlockUnit, generateTestUnit()) - store.AddStorer(dataRetriever.PeerChangesUnit, generateTestUnit()) - store.AddStorer(dataRetriever.BlockHeaderUnit, generateTestUnit()) - store.AddStorer(dataRetriever.ShardHdrNonceHashDataUnit, generateTestUnit()) - store.AddStorer(dataRetriever.MetaHdrNonceHashDataUnit, generateTestUnit()) - return store + store := dataRetriever.NewChainStorer() + store.AddStorer(dataRetriever.TransactionUnit, generateTestUnit()) + store.AddStorer(dataRetriever.MiniBlockUnit, generateTestUnit()) + store.AddStorer(dataRetriever.MetaBlockUnit, generateTestUnit()) + store.AddStorer(dataRetriever.PeerChangesUnit, generateTestUnit()) + store.AddStorer(dataRetriever.BlockHeaderUnit, generateTestUnit()) + store.AddStorer(dataRetriever.ShardHdrNonceHashDataUnit, generateTestUnit()) + store.AddStorer(dataRetriever.MetaHdrNonceHashDataUnit, generateTestUnit()) + return store } func generateTestCache() storage.Cacher { - cache, _ := storageUnit.NewCache(storageUnit.LRUCache, 1000, 1) - return cache + cache, _ := storageUnit.NewCache(storageUnit.LRUCache, 1000, 1) + return cache } func generateTestUnit() storage.Storer { - memDB, _ := memorydb.New() + memDB, _ := memorydb.New() - storer, _ := storageUnit.NewStorageUnit( - 
generateTestCache(), - memDB, - ) + storer, _ := storageUnit.NewStorageUnit( + generateTestCache(), + memDB, + ) - return storer + return storer } func initAccountsMock() *mock.AccountsStub { - rootHashCalled := func() ([]byte, error) { - return []byte("rootHash"), nil - } - return &mock.AccountsStub{ - RootHashCalled: rootHashCalled, - } + rootHashCalled := func() ([]byte, error) { + return []byte("rootHash"), nil + } + return &mock.AccountsStub{ + RootHashCalled: rootHashCalled, + } } func TestNewTransactionCoordinator_NilShardCoordinator(t *testing.T) { - t.Parallel() - - tc, err := NewTransactionCoordinator( - nil, - &mock.AccountsStub{}, - mock.NewPoolsHolderMock(), - &mock.RequestHandlerMock{}, - &mock.PreProcessorContainerMock{}, - &mock.InterimProcessorContainerMock{}, - ) - - assert.Nil(t, tc) - assert.Equal(t, process.ErrNilShardCoordinator, err) + t.Parallel() + + tc, err := NewTransactionCoordinator( + nil, + &mock.AccountsStub{}, + mock.NewPoolsHolderMock(), + &mock.RequestHandlerMock{}, + &mock.PreProcessorContainerMock{}, + &mock.InterimProcessorContainerMock{}, + ) + + assert.Nil(t, tc) + assert.Equal(t, process.ErrNilShardCoordinator, err) } func TestNewTransactionCoordinator_NilAccountsStub(t *testing.T) { - t.Parallel() - - tc, err := NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(5), - nil, - mock.NewPoolsHolderMock(), - &mock.RequestHandlerMock{}, - &mock.PreProcessorContainerMock{}, - &mock.InterimProcessorContainerMock{}, - ) - - assert.Nil(t, tc) - assert.Equal(t, process.ErrNilAccountsAdapter, err) + t.Parallel() + + tc, err := NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(5), + nil, + mock.NewPoolsHolderMock(), + &mock.RequestHandlerMock{}, + &mock.PreProcessorContainerMock{}, + &mock.InterimProcessorContainerMock{}, + ) + + assert.Nil(t, tc) + assert.Equal(t, process.ErrNilAccountsAdapter, err) } func TestNewTransactionCoordinator_NilDataPool(t *testing.T) { - t.Parallel() - - tc, err := NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(5), - &mock.AccountsStub{}, - nil, - &mock.RequestHandlerMock{}, - &mock.PreProcessorContainerMock{}, - &mock.InterimProcessorContainerMock{}, - ) - - assert.Nil(t, tc) - assert.Equal(t, process.ErrNilDataPoolHolder, err) + t.Parallel() + + tc, err := NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(5), + &mock.AccountsStub{}, + nil, + &mock.RequestHandlerMock{}, + &mock.PreProcessorContainerMock{}, + &mock.InterimProcessorContainerMock{}, + ) + + assert.Nil(t, tc) + assert.Equal(t, process.ErrNilDataPoolHolder, err) } func TestNewTransactionCoordinator_NilRequestHandler(t *testing.T) { - t.Parallel() - - tc, err := NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(5), - &mock.AccountsStub{}, - mock.NewPoolsHolderMock(), - nil, - &mock.PreProcessorContainerMock{}, - &mock.InterimProcessorContainerMock{}, - ) - - assert.Nil(t, tc) - assert.Equal(t, process.ErrNilRequestHandler, err) + t.Parallel() + + tc, err := NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(5), + &mock.AccountsStub{}, + mock.NewPoolsHolderMock(), + nil, + &mock.PreProcessorContainerMock{}, + &mock.InterimProcessorContainerMock{}, + ) + + assert.Nil(t, tc) + assert.Equal(t, process.ErrNilRequestHandler, err) } func TestNewTransactionCoordinator_NilHasher(t *testing.T) { - t.Parallel() - - tc, err := NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(5), - &mock.AccountsStub{}, - mock.NewPoolsHolderMock(), - &mock.RequestHandlerMock{}, - nil, - 
&mock.InterimProcessorContainerMock{}, - ) - - assert.Nil(t, tc) - assert.Equal(t, process.ErrNilPreProcessorsContainer, err) + t.Parallel() + + tc, err := NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(5), + &mock.AccountsStub{}, + mock.NewPoolsHolderMock(), + &mock.RequestHandlerMock{}, + nil, + &mock.InterimProcessorContainerMock{}, + ) + + assert.Nil(t, tc) + assert.Equal(t, process.ErrNilPreProcessorsContainer, err) } func TestNewTransactionCoordinator_NilMarshalizer(t *testing.T) { - t.Parallel() - - tc, err := NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(5), - &mock.AccountsStub{}, - mock.NewPoolsHolderMock(), - &mock.RequestHandlerMock{}, - &mock.PreProcessorContainerMock{}, - nil, - ) - - assert.Nil(t, tc) - assert.Equal(t, process.ErrNilIntermediateProcessorContainer, err) + t.Parallel() + + tc, err := NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(5), + &mock.AccountsStub{}, + mock.NewPoolsHolderMock(), + &mock.RequestHandlerMock{}, + &mock.PreProcessorContainerMock{}, + nil, + ) + + assert.Nil(t, tc) + assert.Equal(t, process.ErrNilIntermediateProcessorContainer, err) } func TestNewTransactionCoordinator_OK(t *testing.T) { - t.Parallel() - - tc, err := NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(5), - &mock.AccountsStub{}, - mock.NewPoolsHolderMock(), - &mock.RequestHandlerMock{}, - &mock.PreProcessorContainerMock{}, - &mock.InterimProcessorContainerMock{}, - ) - - assert.Nil(t, err) - assert.NotNil(t, tc) + t.Parallel() + + tc, err := NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(5), + &mock.AccountsStub{}, + mock.NewPoolsHolderMock(), + &mock.RequestHandlerMock{}, + &mock.PreProcessorContainerMock{}, + &mock.InterimProcessorContainerMock{}, + ) + + assert.Nil(t, err) + assert.NotNil(t, tc) } func TestTransactionCoordinator_SeparateBodyNil(t *testing.T) { - t.Parallel() - - tc, err := NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(5), - &mock.AccountsStub{}, - mock.NewPoolsHolderMock(), - &mock.RequestHandlerMock{}, - &mock.PreProcessorContainerMock{}, - &mock.InterimProcessorContainerMock{}, - ) - assert.Nil(t, err) - assert.NotNil(t, tc) - - separated := tc.separateBodyByType(nil) - assert.Equal(t, 0, len(separated)) + t.Parallel() + + tc, err := NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(5), + &mock.AccountsStub{}, + mock.NewPoolsHolderMock(), + &mock.RequestHandlerMock{}, + &mock.PreProcessorContainerMock{}, + &mock.InterimProcessorContainerMock{}, + ) + assert.Nil(t, err) + assert.NotNil(t, tc) + + separated := tc.separateBodyByType(nil) + assert.Equal(t, 0, len(separated)) } func TestTransactionCoordinator_SeparateBody(t *testing.T) { - t.Parallel() - - tc, err := NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(5), - &mock.AccountsStub{}, - mock.NewPoolsHolderMock(), - &mock.RequestHandlerMock{}, - &mock.PreProcessorContainerMock{}, - &mock.InterimProcessorContainerMock{}, - ) - assert.Nil(t, err) - assert.NotNil(t, tc) - - body := block.Body{} - body = append(body, &block.MiniBlock{Type: block.TxBlock}) - body = append(body, &block.MiniBlock{Type: block.TxBlock}) - body = append(body, &block.MiniBlock{Type: block.TxBlock}) - body = append(body, &block.MiniBlock{Type: block.SmartContractResultBlock}) - body = append(body, &block.MiniBlock{Type: block.SmartContractResultBlock}) - body = append(body, &block.MiniBlock{Type: block.SmartContractResultBlock}) - body = append(body, &block.MiniBlock{Type: 
block.SmartContractResultBlock}) - - separated := tc.separateBodyByType(body) - assert.Equal(t, 2, len(separated)) - assert.Equal(t, 3, len(separated[block.TxBlock])) - assert.Equal(t, 4, len(separated[block.SmartContractResultBlock])) + t.Parallel() + + tc, err := NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(5), + &mock.AccountsStub{}, + mock.NewPoolsHolderMock(), + &mock.RequestHandlerMock{}, + &mock.PreProcessorContainerMock{}, + &mock.InterimProcessorContainerMock{}, + ) + assert.Nil(t, err) + assert.NotNil(t, tc) + + body := block.Body{} + body = append(body, &block.MiniBlock{Type: block.TxBlock}) + body = append(body, &block.MiniBlock{Type: block.TxBlock}) + body = append(body, &block.MiniBlock{Type: block.TxBlock}) + body = append(body, &block.MiniBlock{Type: block.SmartContractResultBlock}) + body = append(body, &block.MiniBlock{Type: block.SmartContractResultBlock}) + body = append(body, &block.MiniBlock{Type: block.SmartContractResultBlock}) + body = append(body, &block.MiniBlock{Type: block.SmartContractResultBlock}) + + separated := tc.separateBodyByType(body) + assert.Equal(t, 2, len(separated)) + assert.Equal(t, 3, len(separated[block.TxBlock])) + assert.Equal(t, 4, len(separated[block.SmartContractResultBlock])) } func createPreProcessorContainer() process.PreProcessorsContainer { - preFactory, _ := shard.NewPreProcessorsContainerFactory( - mock.NewMultiShardsCoordinatorMock(5), - initStore(), - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - initDataPool([]byte("tx_hash0")), - &mock.AddressConverterMock{}, - &mock.AccountsStub{}, - &mock.RequestHandlerMock{}, - &mock.TxProcessorMock{ - ProcessTransactionCalled: func(transaction *transaction.Transaction, round uint64) error { - return nil - }, - }, - &mock.SCProcessorMock{}, - &mock.SmartContractResultsProcessorMock{}, - &mock.RewardTxProcessorMock{}, - ) - container, _ := preFactory.Create() - - return container + preFactory, _ := shard.NewPreProcessorsContainerFactory( + mock.NewMultiShardsCoordinatorMock(5), + initStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + initDataPool([]byte("tx_hash0")), + &mock.AddressConverterMock{}, + &mock.AccountsStub{}, + &mock.RequestHandlerMock{}, + &mock.TxProcessorMock{ + ProcessTransactionCalled: func(transaction *transaction.Transaction, round uint64) error { + return nil + }, + }, + &mock.SCProcessorMock{}, + &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + ) + container, _ := preFactory.Create() + + return container } func createInterimProcessorContainer() process.IntermediateProcessorContainer { - preFactory, _ := shard.NewIntermediateProcessorsContainerFactory( - mock.NewMultiShardsCoordinatorMock(5), - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - &mock.AddressConverterMock{}, - &mock.SpecialAddressHandlerMock{}, - initStore(), - initDataPool([]byte("test_hash1")), - ) - container, _ := preFactory.Create() - - return container + preFactory, _ := shard.NewIntermediateProcessorsContainerFactory( + mock.NewMultiShardsCoordinatorMock(5), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + &mock.AddressConverterMock{}, + &mock.SpecialAddressHandlerMock{}, + initStore(), + initDataPool([]byte("test_hash1")), + ) + container, _ := preFactory.Create() + + return container } func createPreProcessorContainerWithDataPool(dataPool dataRetriever.PoolsHolder) process.PreProcessorsContainer { - preFactory, _ := shard.NewPreProcessorsContainerFactory( - mock.NewMultiShardsCoordinatorMock(5), - initStore(), - &mock.MarshalizerMock{}, - 
&mock.HasherMock{}, - dataPool, - &mock.AddressConverterMock{}, - &mock.AccountsStub{}, - &mock.RequestHandlerMock{}, - &mock.TxProcessorMock{ - ProcessTransactionCalled: func(transaction *transaction.Transaction, round uint64) error { - return nil - }, - }, - &mock.SCProcessorMock{}, - &mock.SmartContractResultsProcessorMock{}, - &mock.RewardTxProcessorMock{}, - ) - container, _ := preFactory.Create() - - return container + preFactory, _ := shard.NewPreProcessorsContainerFactory( + mock.NewMultiShardsCoordinatorMock(5), + initStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + dataPool, + &mock.AddressConverterMock{}, + &mock.AccountsStub{}, + &mock.RequestHandlerMock{}, + &mock.TxProcessorMock{ + ProcessTransactionCalled: func(transaction *transaction.Transaction, round uint64) error { + return nil + }, + }, + &mock.SCProcessorMock{}, + &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + ) + container, _ := preFactory.Create() + + return container } func TestTransactionCoordinator_CreateBlockStarted(t *testing.T) { - t.Parallel() - - tc, err := NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(5), - &mock.AccountsStub{}, - mock.NewPoolsHolderMock(), - &mock.RequestHandlerMock{}, - createPreProcessorContainer(), - &mock.InterimProcessorContainerMock{}, - ) - assert.Nil(t, err) - assert.NotNil(t, tc) - - tc.CreateBlockStarted() - - tc.mutPreProcessor.Lock() - for _, value := range tc.txPreProcessors { - txs := value.GetAllCurrentUsedTxs() - assert.Equal(t, 0, len(txs)) - } - tc.mutPreProcessor.Unlock() + t.Parallel() + + tc, err := NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(5), + &mock.AccountsStub{}, + mock.NewPoolsHolderMock(), + &mock.RequestHandlerMock{}, + createPreProcessorContainer(), + &mock.InterimProcessorContainerMock{}, + ) + assert.Nil(t, err) + assert.NotNil(t, tc) + + tc.CreateBlockStarted() + + tc.mutPreProcessor.Lock() + for _, value := range tc.txPreProcessors { + txs := value.GetAllCurrentUsedTxs() + assert.Equal(t, 0, len(txs)) + } + tc.mutPreProcessor.Unlock() } func TestTransactionCoordinator_CreateMarshalizedDataNilBody(t *testing.T) { - t.Parallel() - - tc, err := NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(5), - &mock.AccountsStub{}, - mock.NewPoolsHolderMock(), - &mock.RequestHandlerMock{}, - createPreProcessorContainer(), - &mock.InterimProcessorContainerMock{}, - ) - assert.Nil(t, err) - assert.NotNil(t, tc) - - mrBody, mrTxs := tc.CreateMarshalizedData(nil) - assert.Equal(t, 0, len(mrTxs)) - assert.Equal(t, 0, len(mrBody)) + t.Parallel() + + tc, err := NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(5), + &mock.AccountsStub{}, + mock.NewPoolsHolderMock(), + &mock.RequestHandlerMock{}, + createPreProcessorContainer(), + &mock.InterimProcessorContainerMock{}, + ) + assert.Nil(t, err) + assert.NotNil(t, tc) + + mrBody, mrTxs := tc.CreateMarshalizedData(nil) + assert.Equal(t, 0, len(mrTxs)) + assert.Equal(t, 0, len(mrBody)) } func createMiniBlockWithOneTx(sndId, dstId uint32, blockType block.Type, txHash []byte) *block.MiniBlock { - txHashes := make([][]byte, 0) - txHashes = append(txHashes, txHash) + txHashes := make([][]byte, 0) + txHashes = append(txHashes, txHash) - return &block.MiniBlock{Type: blockType, SenderShardID: sndId, ReceiverShardID: dstId, TxHashes: txHashes} + return &block.MiniBlock{Type: blockType, SenderShardID: sndId, ReceiverShardID: dstId, TxHashes: txHashes} } func createTestBody() block.Body { - body := block.Body{} + body := 
block.Body{} - body = append(body, createMiniBlockWithOneTx(0, 1, block.TxBlock, []byte("tx_hash1"))) - body = append(body, createMiniBlockWithOneTx(0, 1, block.TxBlock, []byte("tx_hash2"))) - body = append(body, createMiniBlockWithOneTx(0, 1, block.TxBlock, []byte("tx_hash3"))) - body = append(body, createMiniBlockWithOneTx(0, 1, block.SmartContractResultBlock, []byte("tx_hash4"))) - body = append(body, createMiniBlockWithOneTx(0, 1, block.SmartContractResultBlock, []byte("tx_hash5"))) - body = append(body, createMiniBlockWithOneTx(0, 1, block.SmartContractResultBlock, []byte("tx_hash6"))) + body = append(body, createMiniBlockWithOneTx(0, 1, block.TxBlock, []byte("tx_hash1"))) + body = append(body, createMiniBlockWithOneTx(0, 1, block.TxBlock, []byte("tx_hash2"))) + body = append(body, createMiniBlockWithOneTx(0, 1, block.TxBlock, []byte("tx_hash3"))) + body = append(body, createMiniBlockWithOneTx(0, 1, block.SmartContractResultBlock, []byte("tx_hash4"))) + body = append(body, createMiniBlockWithOneTx(0, 1, block.SmartContractResultBlock, []byte("tx_hash5"))) + body = append(body, createMiniBlockWithOneTx(0, 1, block.SmartContractResultBlock, []byte("tx_hash6"))) - return body + return body } func TestTransactionCoordinator_CreateMarshalizedData(t *testing.T) { - t.Parallel() - - tc, err := NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(5), - &mock.AccountsStub{}, - mock.NewPoolsHolderMock(), - &mock.RequestHandlerMock{}, - createPreProcessorContainer(), - &mock.InterimProcessorContainerMock{}, - ) - assert.Nil(t, err) - assert.NotNil(t, tc) - - mrBody, mrTxs := tc.CreateMarshalizedData(createTestBody()) - assert.Equal(t, 0, len(mrTxs)) - assert.Equal(t, 1, len(mrBody)) - assert.Equal(t, len(createTestBody()), len(mrBody[1])) + t.Parallel() + + tc, err := NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(5), + &mock.AccountsStub{}, + mock.NewPoolsHolderMock(), + &mock.RequestHandlerMock{}, + createPreProcessorContainer(), + &mock.InterimProcessorContainerMock{}, + ) + assert.Nil(t, err) + assert.NotNil(t, tc) + + mrBody, mrTxs := tc.CreateMarshalizedData(createTestBody()) + assert.Equal(t, 0, len(mrTxs)) + assert.Equal(t, 1, len(mrBody)) + assert.Equal(t, len(createTestBody()), len(mrBody[1])) } func TestTransactionCoordinator_CreateMarshalizedDataWithTxsAndScr(t *testing.T) { - t.Parallel() - - interimContainer := createInterimProcessorContainer() - tc, err := NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(5), - &mock.AccountsStub{}, - mock.NewPoolsHolderMock(), - &mock.RequestHandlerMock{}, - createPreProcessorContainer(), - interimContainer, - ) - assert.Nil(t, err) - assert.NotNil(t, tc) - - scrs := make([]data.TransactionHandler, 0) - body := block.Body{} - body = append(body, createMiniBlockWithOneTx(0, 1, block.TxBlock, []byte("tx_hash1"))) - - scr := &smartContractResult.SmartContractResult{SndAddr: []byte("snd"), RcvAddr: []byte("rcv"), Value: big.NewInt(99)} - scrHash, _ := core.CalculateHash(&mock.MarshalizerMock{}, &mock.HasherMock{}, scr) - scrs = append(scrs, scr) - body = append(body, createMiniBlockWithOneTx(0, 1, block.SmartContractResultBlock, scrHash)) - - scr = &smartContractResult.SmartContractResult{SndAddr: []byte("snd"), RcvAddr: []byte("rcv"), Value: big.NewInt(199)} - scrHash, _ = core.CalculateHash(&mock.MarshalizerMock{}, &mock.HasherMock{}, scr) - scrs = append(scrs, scr) - body = append(body, createMiniBlockWithOneTx(0, 1, block.SmartContractResultBlock, scrHash)) - - scr = 
&smartContractResult.SmartContractResult{SndAddr: []byte("snd"), RcvAddr: []byte("rcv"), Value: big.NewInt(299)} - scrHash, _ = core.CalculateHash(&mock.MarshalizerMock{}, &mock.HasherMock{}, scr) - scrs = append(scrs, scr) - body = append(body, createMiniBlockWithOneTx(0, 1, block.SmartContractResultBlock, scrHash)) - - scrInterimProc, _ := interimContainer.Get(block.SmartContractResultBlock) - _ = scrInterimProc.AddIntermediateTransactions(scrs) - - mrBody, mrTxs := tc.CreateMarshalizedData(body) - assert.Equal(t, 1, len(mrTxs)) - - marshalizer := &mock.MarshalizerMock{} - topic := factory.UnsignedTransactionTopic + "_0_1" - assert.Equal(t, len(scrs), len(mrTxs[topic])) - for i := 0; i < len(mrTxs[topic]); i++ { - unMrsScr := &smartContractResult.SmartContractResult{} - _ = marshalizer.Unmarshal(unMrsScr, mrTxs[topic][i]) - - assert.Equal(t, unMrsScr, scrs[i]) - } - - assert.Equal(t, 1, len(mrBody)) + t.Parallel() + + interimContainer := createInterimProcessorContainer() + tc, err := NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(5), + &mock.AccountsStub{}, + mock.NewPoolsHolderMock(), + &mock.RequestHandlerMock{}, + createPreProcessorContainer(), + interimContainer, + ) + assert.Nil(t, err) + assert.NotNil(t, tc) + + scrs := make([]data.TransactionHandler, 0) + body := block.Body{} + body = append(body, createMiniBlockWithOneTx(0, 1, block.TxBlock, []byte("tx_hash1"))) + + scr := &smartContractResult.SmartContractResult{SndAddr: []byte("snd"), RcvAddr: []byte("rcv"), Value: big.NewInt(99)} + scrHash, _ := core.CalculateHash(&mock.MarshalizerMock{}, &mock.HasherMock{}, scr) + scrs = append(scrs, scr) + body = append(body, createMiniBlockWithOneTx(0, 1, block.SmartContractResultBlock, scrHash)) + + scr = &smartContractResult.SmartContractResult{SndAddr: []byte("snd"), RcvAddr: []byte("rcv"), Value: big.NewInt(199)} + scrHash, _ = core.CalculateHash(&mock.MarshalizerMock{}, &mock.HasherMock{}, scr) + scrs = append(scrs, scr) + body = append(body, createMiniBlockWithOneTx(0, 1, block.SmartContractResultBlock, scrHash)) + + scr = &smartContractResult.SmartContractResult{SndAddr: []byte("snd"), RcvAddr: []byte("rcv"), Value: big.NewInt(299)} + scrHash, _ = core.CalculateHash(&mock.MarshalizerMock{}, &mock.HasherMock{}, scr) + scrs = append(scrs, scr) + body = append(body, createMiniBlockWithOneTx(0, 1, block.SmartContractResultBlock, scrHash)) + + scrInterimProc, _ := interimContainer.Get(block.SmartContractResultBlock) + _ = scrInterimProc.AddIntermediateTransactions(scrs) + + mrBody, mrTxs := tc.CreateMarshalizedData(body) + assert.Equal(t, 1, len(mrTxs)) + + marshalizer := &mock.MarshalizerMock{} + topic := factory.UnsignedTransactionTopic + "_0_1" + assert.Equal(t, len(scrs), len(mrTxs[topic])) + for i := 0; i < len(mrTxs[topic]); i++ { + unMrsScr := &smartContractResult.SmartContractResult{} + _ = marshalizer.Unmarshal(unMrsScr, mrTxs[topic][i]) + + assert.Equal(t, unMrsScr, scrs[i]) + } + + assert.Equal(t, 1, len(mrBody)) } func TestTransactionCoordinator_CreateMbsAndProcessCrossShardTransactionsDstMeNilHeader(t *testing.T) { - t.Parallel() - - tc, err := NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(5), - &mock.AccountsStub{}, - mock.NewPoolsHolderMock(), - &mock.RequestHandlerMock{}, - createPreProcessorContainer(), - &mock.InterimProcessorContainerMock{}, - ) - assert.Nil(t, err) - assert.NotNil(t, tc) - - maxTxRemaining := uint32(15000) - maxMbRemaining := uint32(15000) - haveTime := func() bool { - return true - } - mbs, txs, finalized := 
tc.CreateMbsAndProcessCrossShardTransactionsDstMe(nil, maxTxRemaining, maxMbRemaining, 10, haveTime) - - assert.Equal(t, 0, len(mbs)) - assert.Equal(t, uint32(0), txs) - assert.True(t, finalized) + t.Parallel() + + tc, err := NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(5), + &mock.AccountsStub{}, + mock.NewPoolsHolderMock(), + &mock.RequestHandlerMock{}, + createPreProcessorContainer(), + &mock.InterimProcessorContainerMock{}, + ) + assert.Nil(t, err) + assert.NotNil(t, tc) + + maxTxRemaining := uint32(15000) + maxMbRemaining := uint32(15000) + haveTime := func() bool { + return true + } + mbs, txs, finalized := tc.CreateMbsAndProcessCrossShardTransactionsDstMe(nil, maxTxRemaining, maxMbRemaining, 10, haveTime) + + assert.Equal(t, 0, len(mbs)) + assert.Equal(t, uint32(0), txs) + assert.True(t, finalized) } func createTestMetablock() *block.MetaBlock { - meta := &block.MetaBlock{} + meta := &block.MetaBlock{} - meta.ShardInfo = make([]block.ShardData, 0) + meta.ShardInfo = make([]block.ShardData, 0) - shardMbs := make([]block.ShardMiniBlockHeader, 0) - shardMbs = append(shardMbs, block.ShardMiniBlockHeader{Hash: []byte("mb0"), SenderShardId: 0, ReceiverShardId: 0, TxCount: 1}) - shardMbs = append(shardMbs, block.ShardMiniBlockHeader{Hash: []byte("mb1"), SenderShardId: 0, ReceiverShardId: 1, TxCount: 1}) - shardData := block.ShardData{ShardId: 0, HeaderHash: []byte("header0"), TxCount: 2, ShardMiniBlockHeaders: shardMbs} + shardMbs := make([]block.ShardMiniBlockHeader, 0) + shardMbs = append(shardMbs, block.ShardMiniBlockHeader{Hash: []byte("mb0"), SenderShardId: 0, ReceiverShardId: 0, TxCount: 1}) + shardMbs = append(shardMbs, block.ShardMiniBlockHeader{Hash: []byte("mb1"), SenderShardId: 0, ReceiverShardId: 1, TxCount: 1}) + shardData := block.ShardData{ShardId: 0, HeaderHash: []byte("header0"), TxCount: 2, ShardMiniBlockHeaders: shardMbs} - meta.ShardInfo = append(meta.ShardInfo, shardData) + meta.ShardInfo = append(meta.ShardInfo, shardData) - shardMbs = make([]block.ShardMiniBlockHeader, 0) - shardMbs = append(shardMbs, block.ShardMiniBlockHeader{Hash: []byte("mb2"), SenderShardId: 1, ReceiverShardId: 0, TxCount: 1}) - shardMbs = append(shardMbs, block.ShardMiniBlockHeader{Hash: []byte("mb3"), SenderShardId: 1, ReceiverShardId: 1, TxCount: 1}) - shardData = block.ShardData{ShardId: 1, HeaderHash: []byte("header0"), TxCount: 2, ShardMiniBlockHeaders: shardMbs} + shardMbs = make([]block.ShardMiniBlockHeader, 0) + shardMbs = append(shardMbs, block.ShardMiniBlockHeader{Hash: []byte("mb2"), SenderShardId: 1, ReceiverShardId: 0, TxCount: 1}) + shardMbs = append(shardMbs, block.ShardMiniBlockHeader{Hash: []byte("mb3"), SenderShardId: 1, ReceiverShardId: 1, TxCount: 1}) + shardData = block.ShardData{ShardId: 1, HeaderHash: []byte("header0"), TxCount: 2, ShardMiniBlockHeaders: shardMbs} - meta.ShardInfo = append(meta.ShardInfo, shardData) + meta.ShardInfo = append(meta.ShardInfo, shardData) - return meta + return meta } func TestTransactionCoordinator_CreateMbsAndProcessCrossShardTransactionsDstMeNoTime(t *testing.T) { - t.Parallel() - - tc, err := NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(5), - &mock.AccountsStub{}, - mock.NewPoolsHolderMock(), - &mock.RequestHandlerMock{}, - createPreProcessorContainer(), - &mock.InterimProcessorContainerMock{}, - ) - assert.Nil(t, err) - assert.NotNil(t, tc) - - maxTxRemaining := uint32(15000) - maxMbRemaining := uint32(15000) - haveTime := func() bool { - return false - } - mbs, txs, finalized := 
tc.CreateMbsAndProcessCrossShardTransactionsDstMe(createTestMetablock(), maxTxRemaining, maxMbRemaining, 10, haveTime) - - assert.Equal(t, 0, len(mbs)) - assert.Equal(t, uint32(0), txs) - assert.False(t, finalized) + t.Parallel() + + tc, err := NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(5), + &mock.AccountsStub{}, + mock.NewPoolsHolderMock(), + &mock.RequestHandlerMock{}, + createPreProcessorContainer(), + &mock.InterimProcessorContainerMock{}, + ) + assert.Nil(t, err) + assert.NotNil(t, tc) + + maxTxRemaining := uint32(15000) + maxMbRemaining := uint32(15000) + haveTime := func() bool { + return false + } + mbs, txs, finalized := tc.CreateMbsAndProcessCrossShardTransactionsDstMe(createTestMetablock(), maxTxRemaining, maxMbRemaining, 10, haveTime) + + assert.Equal(t, 0, len(mbs)) + assert.Equal(t, uint32(0), txs) + assert.False(t, finalized) } func TestTransactionCoordinator_CreateMbsAndProcessCrossShardTransactionsNothingInPool(t *testing.T) { - t.Parallel() - - tc, err := NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(5), - &mock.AccountsStub{}, - mock.NewPoolsHolderMock(), - &mock.RequestHandlerMock{}, - createPreProcessorContainer(), - &mock.InterimProcessorContainerMock{}, - ) - assert.Nil(t, err) - assert.NotNil(t, tc) - - maxTxRemaining := uint32(15000) - maxMbRemaining := uint32(15000) - haveTime := func() bool { - return true - } - mbs, txs, finalized := tc.CreateMbsAndProcessCrossShardTransactionsDstMe(createTestMetablock(), maxTxRemaining, maxMbRemaining, 10, haveTime) - - assert.Equal(t, 0, len(mbs)) - assert.Equal(t, uint32(0), txs) - assert.False(t, finalized) + t.Parallel() + + tc, err := NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(5), + &mock.AccountsStub{}, + mock.NewPoolsHolderMock(), + &mock.RequestHandlerMock{}, + createPreProcessorContainer(), + &mock.InterimProcessorContainerMock{}, + ) + assert.Nil(t, err) + assert.NotNil(t, tc) + + maxTxRemaining := uint32(15000) + maxMbRemaining := uint32(15000) + haveTime := func() bool { + return true + } + mbs, txs, finalized := tc.CreateMbsAndProcessCrossShardTransactionsDstMe(createTestMetablock(), maxTxRemaining, maxMbRemaining, 10, haveTime) + + assert.Equal(t, 0, len(mbs)) + assert.Equal(t, uint32(0), txs) + assert.False(t, finalized) } func TestTransactionCoordinator_CreateMbsAndProcessCrossShardTransactions(t *testing.T) { - t.Parallel() - - txHash := []byte("txHash") - tdp := initDataPool(txHash) - cacherCfg := storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache} - hdrPool, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - tdp.MiniBlocksCalled = func() storage.Cacher { - return hdrPool - } - - preFactory, _ := shard.NewPreProcessorsContainerFactory( - mock.NewMultiShardsCoordinatorMock(5), - initStore(), - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - tdp, - &mock.AddressConverterMock{}, - &mock.AccountsStub{}, - &mock.RequestHandlerMock{}, - &mock.TxProcessorMock{ - ProcessTransactionCalled: func(transaction *transaction.Transaction, round uint64) error { - return nil - }, - }, - &mock.SCProcessorMock{}, - &mock.SmartContractResultsProcessorMock{}, - &mock.RewardTxProcessorMock{}, - ) - container, _ := preFactory.Create() - - tc, err := NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(5), - &mock.AccountsStub{}, - tdp, - &mock.RequestHandlerMock{}, - container, - &mock.InterimProcessorContainerMock{}, - ) - assert.Nil(t, err) - assert.NotNil(t, tc) - - maxTxRemaining := uint32(15000) - maxMbRemaining := 
uint32(15000) - haveTime := func() bool { - return true - } - metaHdr := createTestMetablock() - - for i := 0; i < len(metaHdr.ShardInfo); i++ { - for j := 0; j < len(metaHdr.ShardInfo[i].ShardMiniBlockHeaders); j++ { - mbHdr := metaHdr.ShardInfo[i].ShardMiniBlockHeaders[j] - mb := block.MiniBlock{SenderShardID: mbHdr.SenderShardId, ReceiverShardID: mbHdr.ReceiverShardId, Type: block.TxBlock, TxHashes: [][]byte{txHash}} - tdp.MiniBlocks().Put(mbHdr.Hash, &mb) - } - } - - mbs, txs, finalized := tc.CreateMbsAndProcessCrossShardTransactionsDstMe(metaHdr, maxTxRemaining, maxMbRemaining, 10, haveTime) - - assert.Equal(t, 1, len(mbs)) - assert.Equal(t, uint32(1), txs) - assert.True(t, finalized) + t.Parallel() + + txHash := []byte("txHash") + tdp := initDataPool(txHash) + cacherCfg := storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache} + hdrPool, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) + tdp.MiniBlocksCalled = func() storage.Cacher { + return hdrPool + } + + preFactory, _ := shard.NewPreProcessorsContainerFactory( + mock.NewMultiShardsCoordinatorMock(5), + initStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + tdp, + &mock.AddressConverterMock{}, + &mock.AccountsStub{}, + &mock.RequestHandlerMock{}, + &mock.TxProcessorMock{ + ProcessTransactionCalled: func(transaction *transaction.Transaction, round uint64) error { + return nil + }, + }, + &mock.SCProcessorMock{}, + &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + ) + container, _ := preFactory.Create() + + tc, err := NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(5), + &mock.AccountsStub{}, + tdp, + &mock.RequestHandlerMock{}, + container, + &mock.InterimProcessorContainerMock{}, + ) + assert.Nil(t, err) + assert.NotNil(t, tc) + + maxTxRemaining := uint32(15000) + maxMbRemaining := uint32(15000) + haveTime := func() bool { + return true + } + metaHdr := createTestMetablock() + + for i := 0; i < len(metaHdr.ShardInfo); i++ { + for j := 0; j < len(metaHdr.ShardInfo[i].ShardMiniBlockHeaders); j++ { + mbHdr := metaHdr.ShardInfo[i].ShardMiniBlockHeaders[j] + mb := block.MiniBlock{SenderShardID: mbHdr.SenderShardId, ReceiverShardID: mbHdr.ReceiverShardId, Type: block.TxBlock, TxHashes: [][]byte{txHash}} + tdp.MiniBlocks().Put(mbHdr.Hash, &mb) + } + } + + mbs, txs, finalized := tc.CreateMbsAndProcessCrossShardTransactionsDstMe(metaHdr, maxTxRemaining, maxMbRemaining, 10, haveTime) + + assert.Equal(t, 1, len(mbs)) + assert.Equal(t, uint32(1), txs) + assert.True(t, finalized) } func TestTransactionCoordinator_CreateMbsAndProcessTransactionsFromMeNothingToProcess(t *testing.T) { - t.Parallel() - - shardedCacheMock := &mock.ShardedDataStub{ - RegisterHandlerCalled: func(i func(key []byte)) {}, - ShardDataStoreCalled: func(id string) (c storage.Cacher) { - return &mock.CacherStub{ - PeekCalled: func(key []byte) (value interface{}, ok bool) { - return nil, false - }, - KeysCalled: func() [][]byte { - return nil - }, - LenCalled: func() int { - return 0 - }, - } - }, - RemoveSetOfDataFromPoolCalled: func(keys [][]byte, id string) {}, - SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { - return nil, false - }, - AddDataCalled: func(key []byte, data interface{}, cacheId string) { - }, - } - - preFactory, _ := shard.NewPreProcessorsContainerFactory( - mock.NewMultiShardsCoordinatorMock(5), - initStore(), - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - &mock.PoolsHolderStub{ - TransactionsCalled: func() 
dataRetriever.ShardedDataCacherNotifier { - return shardedCacheMock - }, - UnsignedTransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { - return shardedCacheMock - }, - RewardTransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { - return shardedCacheMock - }, - }, - &mock.AddressConverterMock{}, - &mock.AccountsStub{}, - &mock.RequestHandlerMock{}, - &mock.TxProcessorMock{ - ProcessTransactionCalled: func(transaction *transaction.Transaction, round uint64) error { - return nil - }, - }, - &mock.SCProcessorMock{}, - &mock.SmartContractResultsProcessorMock{}, - &mock.RewardTxProcessorMock{}, - ) - container, _ := preFactory.Create() - - tc, err := NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(5), - &mock.AccountsStub{}, - mock.NewPoolsHolderMock(), - &mock.RequestHandlerMock{}, - container, - &mock.InterimProcessorContainerMock{}, - ) - assert.Nil(t, err) - assert.NotNil(t, tc) - - maxTxRemaining := uint32(15000) - maxMbRemaining := uint32(15000) - haveTime := func() bool { - return true - } - mbs := tc.CreateMbsAndProcessTransactionsFromMe(maxTxRemaining, maxMbRemaining, 10, haveTime) - - assert.Equal(t, 0, len(mbs)) + t.Parallel() + + shardedCacheMock := &mock.ShardedDataStub{ + RegisterHandlerCalled: func(i func(key []byte)) {}, + ShardDataStoreCalled: func(id string) (c storage.Cacher) { + return &mock.CacherStub{ + PeekCalled: func(key []byte) (value interface{}, ok bool) { + return nil, false + }, + KeysCalled: func() [][]byte { + return nil + }, + LenCalled: func() int { + return 0 + }, + } + }, + RemoveSetOfDataFromPoolCalled: func(keys [][]byte, id string) {}, + SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { + return nil, false + }, + AddDataCalled: func(key []byte, data interface{}, cacheId string) { + }, + } + + preFactory, _ := shard.NewPreProcessorsContainerFactory( + mock.NewMultiShardsCoordinatorMock(5), + initStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + &mock.PoolsHolderStub{ + TransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { + return shardedCacheMock + }, + UnsignedTransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { + return shardedCacheMock + }, + RewardTransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { + return shardedCacheMock + }, + }, + &mock.AddressConverterMock{}, + &mock.AccountsStub{}, + &mock.RequestHandlerMock{}, + &mock.TxProcessorMock{ + ProcessTransactionCalled: func(transaction *transaction.Transaction, round uint64) error { + return nil + }, + }, + &mock.SCProcessorMock{}, + &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + ) + container, _ := preFactory.Create() + + tc, err := NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(5), + &mock.AccountsStub{}, + mock.NewPoolsHolderMock(), + &mock.RequestHandlerMock{}, + container, + &mock.InterimProcessorContainerMock{}, + ) + assert.Nil(t, err) + assert.NotNil(t, tc) + + maxTxRemaining := uint32(15000) + maxMbRemaining := uint32(15000) + haveTime := func() bool { + return true + } + mbs := tc.CreateMbsAndProcessTransactionsFromMe(maxTxRemaining, maxMbRemaining, 10, haveTime) + + assert.Equal(t, 0, len(mbs)) } func TestTransactionCoordinator_CreateMbsAndProcessTransactionsFromMeNoTime(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - tc, err := NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(5), - &mock.AccountsStub{}, - tdp, - &mock.RequestHandlerMock{}, - 
createPreProcessorContainerWithDataPool(tdp), - &mock.InterimProcessorContainerMock{}, - ) - assert.Nil(t, err) - assert.NotNil(t, tc) - - maxTxRemaining := uint32(15000) - maxMbRemaining := uint32(15000) - haveTime := func() bool { - return false - } - mbs := tc.CreateMbsAndProcessTransactionsFromMe(maxTxRemaining, maxMbRemaining, 10, haveTime) - - assert.Equal(t, 0, len(mbs)) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + tc, err := NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(5), + &mock.AccountsStub{}, + tdp, + &mock.RequestHandlerMock{}, + createPreProcessorContainerWithDataPool(tdp), + &mock.InterimProcessorContainerMock{}, + ) + assert.Nil(t, err) + assert.NotNil(t, tc) + + maxTxRemaining := uint32(15000) + maxMbRemaining := uint32(15000) + haveTime := func() bool { + return false + } + mbs := tc.CreateMbsAndProcessTransactionsFromMe(maxTxRemaining, maxMbRemaining, 10, haveTime) + + assert.Equal(t, 0, len(mbs)) } func TestTransactionCoordinator_CreateMbsAndProcessTransactionsFromMeNoSpace(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - tc, err := NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(5), - &mock.AccountsStub{}, - tdp, - &mock.RequestHandlerMock{}, - createPreProcessorContainerWithDataPool(tdp), - &mock.InterimProcessorContainerMock{}, - ) - assert.Nil(t, err) - assert.NotNil(t, tc) - - maxTxRemaining := uint32(0) - maxMbRemaining := uint32(15000) - haveTime := func() bool { - return true - } - mbs := tc.CreateMbsAndProcessTransactionsFromMe(maxTxRemaining, maxMbRemaining, 10, haveTime) - - assert.Equal(t, 0, len(mbs)) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + tc, err := NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(5), + &mock.AccountsStub{}, + tdp, + &mock.RequestHandlerMock{}, + createPreProcessorContainerWithDataPool(tdp), + &mock.InterimProcessorContainerMock{}, + ) + assert.Nil(t, err) + assert.NotNil(t, tc) + + maxTxRemaining := uint32(0) + maxMbRemaining := uint32(15000) + haveTime := func() bool { + return true + } + mbs := tc.CreateMbsAndProcessTransactionsFromMe(maxTxRemaining, maxMbRemaining, 10, haveTime) + + assert.Equal(t, 0, len(mbs)) } func TestTransactionCoordinator_CreateMbsAndProcessTransactionsFromMe(t *testing.T) { - t.Parallel() - - txPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache}) - tdp := initDataPool([]byte("tx_hash1")) - tdp.TransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { - return txPool - } - nrShards := uint32(5) - - tc, err := NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(nrShards), - &mock.AccountsStub{}, - tdp, - &mock.RequestHandlerMock{}, - createPreProcessorContainerWithDataPool(tdp), - &mock.InterimProcessorContainerMock{}, - ) - assert.Nil(t, err) - assert.NotNil(t, tc) - - maxTxRemaining := uint32(15000) - maxMbRemaining := uint32(15000) - haveTime := func() bool { - return true - } - - marshalizer := &mock.MarshalizerMock{} - hasher := &mock.HasherMock{} - for shId := uint32(0); shId < nrShards; shId++ { - strCache := process.ShardCacherIdentifier(0, shId) - newTx := &transaction.Transaction{GasLimit: uint64(shId)} - - txHash, _ := core.CalculateHash(marshalizer, hasher, newTx) - txPool.AddData(txHash, newTx, strCache) - } - - // we have one tx per shard. 
- mbs := tc.CreateMbsAndProcessTransactionsFromMe(maxTxRemaining, maxMbRemaining, 10, haveTime) - - assert.Equal(t, int(nrShards), len(mbs)) + t.Parallel() + + txPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache}) + tdp := initDataPool([]byte("tx_hash1")) + tdp.TransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { + return txPool + } + nrShards := uint32(5) + + tc, err := NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(nrShards), + &mock.AccountsStub{}, + tdp, + &mock.RequestHandlerMock{}, + createPreProcessorContainerWithDataPool(tdp), + &mock.InterimProcessorContainerMock{}, + ) + assert.Nil(t, err) + assert.NotNil(t, tc) + + maxTxRemaining := uint32(15000) + maxMbRemaining := uint32(15000) + haveTime := func() bool { + return true + } + + marshalizer := &mock.MarshalizerMock{} + hasher := &mock.HasherMock{} + for shId := uint32(0); shId < nrShards; shId++ { + strCache := process.ShardCacherIdentifier(0, shId) + newTx := &transaction.Transaction{GasLimit: uint64(shId)} + + txHash, _ := core.CalculateHash(marshalizer, hasher, newTx) + txPool.AddData(txHash, newTx, strCache) + } + + // we have one tx per shard. + mbs := tc.CreateMbsAndProcessTransactionsFromMe(maxTxRemaining, maxMbRemaining, 10, haveTime) + + assert.Equal(t, int(nrShards), len(mbs)) } func TestTransactionCoordinator_CreateMbsAndProcessTransactionsFromMeMultipleMiniblocks(t *testing.T) { - t.Parallel() - - nrShards := uint32(5) - txPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: nrShards}) - tdp := initDataPool([]byte("tx_hash1")) - tdp.TransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { - return txPool - } - - tc, err := NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(nrShards), - &mock.AccountsStub{}, - tdp, - &mock.RequestHandlerMock{}, - createPreProcessorContainerWithDataPool(tdp), - &mock.InterimProcessorContainerMock{}, - ) - assert.Nil(t, err) - assert.NotNil(t, tc) - - maxTxRemaining := uint32(15000) - maxMbRemaining := uint32(15000) - haveTime := func() bool { - return true - } - - marshalizer := &mock.MarshalizerMock{} - hasher := &mock.HasherMock{} - - sndShardId := uint32(0) - dstShardId := uint32(1) - strCache := process.ShardCacherIdentifier(sndShardId, dstShardId) - - numTxsToAdd := 5 - gasLimit := process.MaxGasLimitPerMiniBlock / uint64(numTxsToAdd) - - scAddress, _ := hex.DecodeString("000000000000000000005fed9c659422cd8429ce92f8973bba2a9fb51e0eb3a1") - addedTxs := make([]*transaction.Transaction, 0) - - allTxs := 100 - for i := 0; i < allTxs; i++ { - newTx := &transaction.Transaction{GasLimit: gasLimit, GasPrice: uint64(i), RcvAddr: scAddress} - - txHash, _ := core.CalculateHash(marshalizer, hasher, newTx) - txPool.AddData(txHash, newTx, strCache) - - addedTxs = append(addedTxs, newTx) - } - - // we have one tx per shard. 
- mbs := tc.CreateMbsAndProcessTransactionsFromMe(maxTxRemaining, maxMbRemaining, 10, haveTime) - - assert.Equal(t, allTxs/numTxsToAdd, len(mbs)) + t.Parallel() + + nrShards := uint32(5) + txPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: nrShards}) + tdp := initDataPool([]byte("tx_hash1")) + tdp.TransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { + return txPool + } + + tc, err := NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(nrShards), + &mock.AccountsStub{}, + tdp, + &mock.RequestHandlerMock{}, + createPreProcessorContainerWithDataPool(tdp), + &mock.InterimProcessorContainerMock{}, + ) + assert.Nil(t, err) + assert.NotNil(t, tc) + + maxTxRemaining := uint32(15000) + maxMbRemaining := uint32(15000) + haveTime := func() bool { + return true + } + + marshalizer := &mock.MarshalizerMock{} + hasher := &mock.HasherMock{} + + sndShardId := uint32(0) + dstShardId := uint32(1) + strCache := process.ShardCacherIdentifier(sndShardId, dstShardId) + + numTxsToAdd := 5 + gasLimit := process.MaxGasLimitPerMiniBlock / uint64(numTxsToAdd) + + scAddress, _ := hex.DecodeString("000000000000000000005fed9c659422cd8429ce92f8973bba2a9fb51e0eb3a1") + addedTxs := make([]*transaction.Transaction, 0) + + allTxs := 100 + for i := 0; i < allTxs; i++ { + newTx := &transaction.Transaction{GasLimit: gasLimit, GasPrice: uint64(i), RcvAddr: scAddress} + + txHash, _ := core.CalculateHash(marshalizer, hasher, newTx) + txPool.AddData(txHash, newTx, strCache) + + addedTxs = append(addedTxs, newTx) + } + + // we have one tx per shard. + mbs := tc.CreateMbsAndProcessTransactionsFromMe(maxTxRemaining, maxMbRemaining, 10, haveTime) + + assert.Equal(t, allTxs/numTxsToAdd, len(mbs)) } func TestTransactionCoordinator_GetAllCurrentUsedTxs(t *testing.T) { - t.Parallel() - - txPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache}) - tdp := initDataPool([]byte("tx_hash1")) - tdp.TransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { - return txPool - } - nrShards := uint32(5) - - tc, err := NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(nrShards), - &mock.AccountsStub{}, - tdp, - &mock.RequestHandlerMock{}, - createPreProcessorContainerWithDataPool(tdp), - &mock.InterimProcessorContainerMock{}, - ) - assert.Nil(t, err) - assert.NotNil(t, tc) - - usedTxs := tc.GetAllCurrentUsedTxs(block.TxBlock) - assert.Equal(t, 0, len(usedTxs)) - - // create block to have some txs - maxTxRemaining := uint32(15000) - maxMbRemaining := uint32(15000) - haveTime := func() bool { - return true - } - - marshalizer := &mock.MarshalizerMock{} - hasher := &mock.HasherMock{} - for i := uint32(0); i < nrShards; i++ { - strCache := process.ShardCacherIdentifier(0, i) - newTx := &transaction.Transaction{GasLimit: uint64(i)} - - txHash, _ := core.CalculateHash(marshalizer, hasher, newTx) - txPool.AddData(txHash, newTx, strCache) - } - - mbs := tc.CreateMbsAndProcessTransactionsFromMe(maxTxRemaining, maxMbRemaining, 10, haveTime) - assert.Equal(t, int(nrShards), len(mbs)) - - usedTxs = tc.GetAllCurrentUsedTxs(block.TxBlock) - assert.Equal(t, 5, len(usedTxs)) + t.Parallel() + + txPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache}) + tdp := initDataPool([]byte("tx_hash1")) + tdp.TransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { + return txPool + } + nrShards := uint32(5) + + tc, err := NewTransactionCoordinator( + 
mock.NewMultiShardsCoordinatorMock(nrShards), + &mock.AccountsStub{}, + tdp, + &mock.RequestHandlerMock{}, + createPreProcessorContainerWithDataPool(tdp), + &mock.InterimProcessorContainerMock{}, + ) + assert.Nil(t, err) + assert.NotNil(t, tc) + + usedTxs := tc.GetAllCurrentUsedTxs(block.TxBlock) + assert.Equal(t, 0, len(usedTxs)) + + // create block to have some txs + maxTxRemaining := uint32(15000) + maxMbRemaining := uint32(15000) + haveTime := func() bool { + return true + } + + marshalizer := &mock.MarshalizerMock{} + hasher := &mock.HasherMock{} + for i := uint32(0); i < nrShards; i++ { + strCache := process.ShardCacherIdentifier(0, i) + newTx := &transaction.Transaction{GasLimit: uint64(i)} + + txHash, _ := core.CalculateHash(marshalizer, hasher, newTx) + txPool.AddData(txHash, newTx, strCache) + } + + mbs := tc.CreateMbsAndProcessTransactionsFromMe(maxTxRemaining, maxMbRemaining, 10, haveTime) + assert.Equal(t, int(nrShards), len(mbs)) + + usedTxs = tc.GetAllCurrentUsedTxs(block.TxBlock) + assert.Equal(t, 5, len(usedTxs)) } func TestTransactionCoordinator_RequestBlockTransactionsNilBody(t *testing.T) { - t.Parallel() - - tdp := initDataPool([]byte("tx_hash1")) - nrShards := uint32(5) - tc, err := NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(nrShards), - &mock.AccountsStub{}, - tdp, - &mock.RequestHandlerMock{}, - createPreProcessorContainerWithDataPool(tdp), - &mock.InterimProcessorContainerMock{}, - ) - assert.Nil(t, err) - assert.NotNil(t, tc) - - tc.RequestBlockTransactions(nil) - - tc.mutRequestedTxs.Lock() - for _, value := range tc.requestedTxs { - assert.Equal(t, 0, value) - } - tc.mutRequestedTxs.Unlock() + t.Parallel() + + tdp := initDataPool([]byte("tx_hash1")) + nrShards := uint32(5) + tc, err := NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(nrShards), + &mock.AccountsStub{}, + tdp, + &mock.RequestHandlerMock{}, + createPreProcessorContainerWithDataPool(tdp), + &mock.InterimProcessorContainerMock{}, + ) + assert.Nil(t, err) + assert.NotNil(t, tc) + + tc.RequestBlockTransactions(nil) + + tc.mutRequestedTxs.Lock() + for _, value := range tc.requestedTxs { + assert.Equal(t, 0, value) + } + tc.mutRequestedTxs.Unlock() } func TestTransactionCoordinator_RequestBlockTransactionsRequestOne(t *testing.T) { - t.Parallel() - - txHashInPool := []byte("tx_hash1") - tdp := initDataPool(txHashInPool) - nrShards := uint32(5) - tc, err := NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(nrShards), - &mock.AccountsStub{}, - tdp, - &mock.RequestHandlerMock{}, - createPreProcessorContainerWithDataPool(tdp), - &mock.InterimProcessorContainerMock{}, - ) - assert.Nil(t, err) - assert.NotNil(t, tc) - - body := block.Body{} - txHashToAsk := []byte("tx_hashnotinPool") - miniBlock := &block.MiniBlock{SenderShardID: 0, ReceiverShardID: 0, Type: block.TxBlock, TxHashes: [][]byte{txHashInPool, txHashToAsk}} - body = append(body, miniBlock) - tc.RequestBlockTransactions(body) - - tc.mutRequestedTxs.Lock() - assert.Equal(t, 1, tc.requestedTxs[block.TxBlock]) - tc.mutRequestedTxs.Unlock() - - haveTime := func() time.Duration { - return time.Second - } - err = tc.IsDataPreparedForProcessing(haveTime) - assert.Equal(t, process.ErrTimeIsOut, err) + t.Parallel() + + txHashInPool := []byte("tx_hash1") + tdp := initDataPool(txHashInPool) + nrShards := uint32(5) + tc, err := NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(nrShards), + &mock.AccountsStub{}, + tdp, + &mock.RequestHandlerMock{}, + createPreProcessorContainerWithDataPool(tdp), + 
&mock.InterimProcessorContainerMock{}, + ) + assert.Nil(t, err) + assert.NotNil(t, tc) + + body := block.Body{} + txHashToAsk := []byte("tx_hashnotinPool") + miniBlock := &block.MiniBlock{SenderShardID: 0, ReceiverShardID: 0, Type: block.TxBlock, TxHashes: [][]byte{txHashInPool, txHashToAsk}} + body = append(body, miniBlock) + tc.RequestBlockTransactions(body) + + tc.mutRequestedTxs.Lock() + assert.Equal(t, 1, tc.requestedTxs[block.TxBlock]) + tc.mutRequestedTxs.Unlock() + + haveTime := func() time.Duration { + return time.Second + } + err = tc.IsDataPreparedForProcessing(haveTime) + assert.Equal(t, process.ErrTimeIsOut, err) } func TestTransactionCoordinator_IsDataPreparedForProcessing(t *testing.T) { - t.Parallel() - - tdp := initDataPool([]byte("tx_hash1")) - nrShards := uint32(5) - tc, err := NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(nrShards), - &mock.AccountsStub{}, - tdp, - &mock.RequestHandlerMock{}, - createPreProcessorContainerWithDataPool(tdp), - &mock.InterimProcessorContainerMock{}, - ) - assert.Nil(t, err) - assert.NotNil(t, tc) - - haveTime := func() time.Duration { - return time.Second - } - err = tc.IsDataPreparedForProcessing(haveTime) - assert.Nil(t, err) + t.Parallel() + + tdp := initDataPool([]byte("tx_hash1")) + nrShards := uint32(5) + tc, err := NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(nrShards), + &mock.AccountsStub{}, + tdp, + &mock.RequestHandlerMock{}, + createPreProcessorContainerWithDataPool(tdp), + &mock.InterimProcessorContainerMock{}, + ) + assert.Nil(t, err) + assert.NotNil(t, tc) + + haveTime := func() time.Duration { + return time.Second + } + err = tc.IsDataPreparedForProcessing(haveTime) + assert.Nil(t, err) } func TestTransactionCoordinator_receivedMiniBlockRequestTxs(t *testing.T) { - t.Parallel() - - hasher := mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} - dataPool := mock.NewPoolsHolderMock() - - //we will have a miniblock that will have 3 tx hashes - //1 tx hash will be in cache - //2 will be requested on network - - txHash1 := []byte("tx hash 1 found in cache") - txHash2 := []byte("tx hash 2") - txHash3 := []byte("tx hash 3") - - senderShardId := uint32(1) - receiverShardId := uint32(2) - - miniBlock := block.MiniBlock{ - SenderShardID: senderShardId, - ReceiverShardID: receiverShardId, - TxHashes: [][]byte{txHash1, txHash2, txHash3}, - } - - //put this miniblock inside datapool - miniBlockHash := []byte("miniblock hash") - dataPool.MiniBlocks().Put(miniBlockHash, miniBlock) - - //put the existing tx inside datapool - cacheId := process.ShardCacherIdentifier(senderShardId, receiverShardId) - dataPool.Transactions().AddData(txHash1, &transaction.Transaction{}, cacheId) - - txHash1Requested := int32(0) - txHash2Requested := int32(0) - txHash3Requested := int32(0) - - requestHandler := &mock.RequestHandlerMock{} - requestHandler.RequestTransactionHandlerCalled = func(destShardID uint32, txHashes [][]byte) { - if containsHash(txHashes, txHash1) { - atomic.AddInt32(&txHash1Requested, 1) - } - if containsHash(txHashes, txHash2) { - atomic.AddInt32(&txHash2Requested, 1) - } - if containsHash(txHashes, txHash3) { - atomic.AddInt32(&txHash3Requested, 1) - } - } - accounts := initAccountsMock() - preFactory, _ := shard.NewPreProcessorsContainerFactory( - mock.NewMultiShardsCoordinatorMock(5), - initStore(), - marshalizer, - hasher, - dataPool, - &mock.AddressConverterMock{}, - accounts, - requestHandler, - &mock.TxProcessorMock{}, - &mock.SCProcessorMock{}, - 
&mock.SmartContractResultsProcessorMock{},
-		&mock.RewardTxProcessorMock{},
-	)
-	container, _ := preFactory.Create()
-
-	tc, err := NewTransactionCoordinator(
-		mock.NewMultiShardsCoordinatorMock(3),
-		accounts,
-		dataPool,
-		requestHandler,
-		container,
-		&mock.InterimProcessorContainerMock{},
-	)
-	assert.Nil(t, err)
-	tc.receivedMiniBlock(miniBlockHash)
-
-	//we have to wait to be sure txHash1Requested is not incremented by a late call
-	time.Sleep(time.Second)
-
-	assert.Equal(t, int32(0), atomic.LoadInt32(&txHash1Requested))
-	assert.Equal(t, int32(1), atomic.LoadInt32(&txHash2Requested))
-	assert.Equal(t, int32(1), atomic.LoadInt32(&txHash2Requested))
+	t.Parallel()
+
+	hasher := mock.HasherMock{}
+	marshalizer := &mock.MarshalizerMock{}
+	dataPool := mock.NewPoolsHolderMock()
+
+	//we will have a miniblock that will have 3 tx hashes
+	//1 tx hash will be in cache
+	//2 will be requested on network
+
+	txHash1 := []byte("tx hash 1 found in cache")
+	txHash2 := []byte("tx hash 2")
+	txHash3 := []byte("tx hash 3")
+
+	senderShardId := uint32(1)
+	receiverShardId := uint32(2)
+
+	miniBlock := block.MiniBlock{
+		SenderShardID: senderShardId,
+		ReceiverShardID: receiverShardId,
+		TxHashes: [][]byte{txHash1, txHash2, txHash3},
+	}
+
+	//put this miniblock inside datapool
+	miniBlockHash := []byte("miniblock hash")
+	dataPool.MiniBlocks().Put(miniBlockHash, miniBlock)
+
+	//put the existing tx inside datapool
+	cacheId := process.ShardCacherIdentifier(senderShardId, receiverShardId)
+	dataPool.Transactions().AddData(txHash1, &transaction.Transaction{}, cacheId)
+
+	txHash1Requested := int32(0)
+	txHash2Requested := int32(0)
+	txHash3Requested := int32(0)
+
+	requestHandler := &mock.RequestHandlerMock{}
+	requestHandler.RequestTransactionHandlerCalled = func(destShardID uint32, txHashes [][]byte) {
+		if containsHash(txHashes, txHash1) {
+			atomic.AddInt32(&txHash1Requested, 1)
+		}
+		if containsHash(txHashes, txHash2) {
+			atomic.AddInt32(&txHash2Requested, 1)
+		}
+		if containsHash(txHashes, txHash3) {
+			atomic.AddInt32(&txHash3Requested, 1)
+		}
+	}
+	accounts := initAccountsMock()
+	preFactory, _ := shard.NewPreProcessorsContainerFactory(
+		mock.NewMultiShardsCoordinatorMock(5),
+		initStore(),
+		marshalizer,
+		hasher,
+		dataPool,
+		&mock.AddressConverterMock{},
+		accounts,
+		requestHandler,
+		&mock.TxProcessorMock{},
+		&mock.SCProcessorMock{},
+		&mock.SmartContractResultsProcessorMock{},
+		&mock.RewardTxProcessorMock{},
+	)
+	container, _ := preFactory.Create()
+
+	tc, err := NewTransactionCoordinator(
+		mock.NewMultiShardsCoordinatorMock(3),
+		accounts,
+		dataPool,
+		requestHandler,
+		container,
+		&mock.InterimProcessorContainerMock{},
+	)
+	assert.Nil(t, err)
+	tc.receivedMiniBlock(miniBlockHash)
+
+	//we have to wait to be sure txHash1Requested is not incremented by a late call
+	time.Sleep(time.Second)
+
+	assert.Equal(t, int32(0), atomic.LoadInt32(&txHash1Requested))
+	assert.Equal(t, int32(1), atomic.LoadInt32(&txHash2Requested))
+	assert.Equal(t, int32(1), atomic.LoadInt32(&txHash3Requested))
}

func TestTransactionCoordinator_SaveBlockDataToStorage(t *testing.T) {
-	t.Parallel()
-
-	txHash := []byte("tx_hash1")
-	tdp := initDataPool(txHash)
-	tc, err := NewTransactionCoordinator(
-		mock.NewMultiShardsCoordinatorMock(3),
-		initAccountsMock(),
-		tdp,
-		&mock.RequestHandlerMock{},
-		createPreProcessorContainerWithDataPool(tdp),
-		&mock.InterimProcessorContainerMock{},
-	)
-	assert.Nil(t, err)
-	assert.NotNil(t, tc)
-
-	err = tc.SaveBlockDataToStorage(nil)
-	assert.Nil(t, err)
-
-	body := 
block.Body{} - miniBlock := &block.MiniBlock{SenderShardID: 0, ReceiverShardID: 0, Type: block.TxBlock, TxHashes: [][]byte{txHash}} - body = append(body, miniBlock) - - tc.RequestBlockTransactions(body) - - err = tc.SaveBlockDataToStorage(body) - assert.Nil(t, err) - - txHashToAsk := []byte("tx_hashnotinPool") - miniBlock = &block.MiniBlock{SenderShardID: 0, ReceiverShardID: 0, Type: block.TxBlock, TxHashes: [][]byte{txHashToAsk}} - body = append(body, miniBlock) - - err = tc.SaveBlockDataToStorage(body) - assert.Equal(t, process.ErrMissingTransaction, err) + t.Parallel() + + txHash := []byte("tx_hash1") + tdp := initDataPool(txHash) + tc, err := NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(3), + initAccountsMock(), + tdp, + &mock.RequestHandlerMock{}, + createPreProcessorContainerWithDataPool(tdp), + &mock.InterimProcessorContainerMock{}, + ) + assert.Nil(t, err) + assert.NotNil(t, tc) + + err = tc.SaveBlockDataToStorage(nil) + assert.Nil(t, err) + + body := block.Body{} + miniBlock := &block.MiniBlock{SenderShardID: 0, ReceiverShardID: 0, Type: block.TxBlock, TxHashes: [][]byte{txHash}} + body = append(body, miniBlock) + + tc.RequestBlockTransactions(body) + + err = tc.SaveBlockDataToStorage(body) + assert.Nil(t, err) + + txHashToAsk := []byte("tx_hashnotinPool") + miniBlock = &block.MiniBlock{SenderShardID: 0, ReceiverShardID: 0, Type: block.TxBlock, TxHashes: [][]byte{txHashToAsk}} + body = append(body, miniBlock) + + err = tc.SaveBlockDataToStorage(body) + assert.Equal(t, process.ErrMissingTransaction, err) } func TestTransactionCoordinator_RestoreBlockDataFromStorage(t *testing.T) { - t.Parallel() - - txHash := []byte("tx_hash1") - tdp := initDataPool(txHash) - tc, err := NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(3), - initAccountsMock(), - tdp, - &mock.RequestHandlerMock{}, - createPreProcessorContainerWithDataPool(tdp), - &mock.InterimProcessorContainerMock{}, - ) - assert.Nil(t, err) - assert.NotNil(t, tc) - - nrTxs, mbs, err := tc.RestoreBlockDataFromStorage(nil) - assert.Nil(t, err) - assert.Equal(t, 0, nrTxs) - assert.Equal(t, 0, len(mbs)) - - body := block.Body{} - miniBlock := &block.MiniBlock{SenderShardID: 1, ReceiverShardID: 0, Type: block.TxBlock, TxHashes: [][]byte{txHash}} - body = append(body, miniBlock) - - tc.RequestBlockTransactions(body) - err = tc.SaveBlockDataToStorage(body) - assert.Nil(t, err) - nrTxs, mbs, err = tc.RestoreBlockDataFromStorage(body) - assert.Equal(t, 1, nrTxs) - assert.Equal(t, 1, len(mbs)) - assert.Nil(t, err) - - txHashToAsk := []byte("tx_hashnotinPool") - miniBlock = &block.MiniBlock{SenderShardID: 0, ReceiverShardID: 0, Type: block.TxBlock, TxHashes: [][]byte{txHashToAsk}} - body = append(body, miniBlock) - - err = tc.SaveBlockDataToStorage(body) - assert.Equal(t, process.ErrMissingTransaction, err) - - nrTxs, mbs, err = tc.RestoreBlockDataFromStorage(body) - assert.Equal(t, 1, nrTxs) - assert.Equal(t, 1, len(mbs)) - assert.NotNil(t, err) + t.Parallel() + + txHash := []byte("tx_hash1") + tdp := initDataPool(txHash) + tc, err := NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(3), + initAccountsMock(), + tdp, + &mock.RequestHandlerMock{}, + createPreProcessorContainerWithDataPool(tdp), + &mock.InterimProcessorContainerMock{}, + ) + assert.Nil(t, err) + assert.NotNil(t, tc) + + nrTxs, mbs, err := tc.RestoreBlockDataFromStorage(nil) + assert.Nil(t, err) + assert.Equal(t, 0, nrTxs) + assert.Equal(t, 0, len(mbs)) + + body := block.Body{} + miniBlock := &block.MiniBlock{SenderShardID: 1, 
ReceiverShardID: 0, Type: block.TxBlock, TxHashes: [][]byte{txHash}} + body = append(body, miniBlock) + + tc.RequestBlockTransactions(body) + err = tc.SaveBlockDataToStorage(body) + assert.Nil(t, err) + nrTxs, mbs, err = tc.RestoreBlockDataFromStorage(body) + assert.Equal(t, 1, nrTxs) + assert.Equal(t, 1, len(mbs)) + assert.Nil(t, err) + + txHashToAsk := []byte("tx_hashnotinPool") + miniBlock = &block.MiniBlock{SenderShardID: 0, ReceiverShardID: 0, Type: block.TxBlock, TxHashes: [][]byte{txHashToAsk}} + body = append(body, miniBlock) + + err = tc.SaveBlockDataToStorage(body) + assert.Equal(t, process.ErrMissingTransaction, err) + + nrTxs, mbs, err = tc.RestoreBlockDataFromStorage(body) + assert.Equal(t, 1, nrTxs) + assert.Equal(t, 1, len(mbs)) + assert.NotNil(t, err) } func TestTransactionCoordinator_RemoveBlockDataFromPool(t *testing.T) { - t.Parallel() - - txHash := []byte("tx_hash1") - dataPool := initDataPool(txHash) - tc, err := NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(3), - initAccountsMock(), - dataPool, - &mock.RequestHandlerMock{}, - createPreProcessorContainerWithDataPool(dataPool), - &mock.InterimProcessorContainerMock{}, - ) - assert.Nil(t, err) - assert.NotNil(t, tc) - - err = tc.RemoveBlockDataFromPool(nil) - assert.Nil(t, err) - - body := block.Body{} - miniBlock := &block.MiniBlock{SenderShardID: 1, ReceiverShardID: 0, Type: block.TxBlock, TxHashes: [][]byte{txHash}} - body = append(body, miniBlock) - - tc.RequestBlockTransactions(body) - err = tc.RemoveBlockDataFromPool(body) - assert.Nil(t, err) + t.Parallel() + + txHash := []byte("tx_hash1") + dataPool := initDataPool(txHash) + tc, err := NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(3), + initAccountsMock(), + dataPool, + &mock.RequestHandlerMock{}, + createPreProcessorContainerWithDataPool(dataPool), + &mock.InterimProcessorContainerMock{}, + ) + assert.Nil(t, err) + assert.NotNil(t, tc) + + err = tc.RemoveBlockDataFromPool(nil) + assert.Nil(t, err) + + body := block.Body{} + miniBlock := &block.MiniBlock{SenderShardID: 1, ReceiverShardID: 0, Type: block.TxBlock, TxHashes: [][]byte{txHash}} + body = append(body, miniBlock) + + tc.RequestBlockTransactions(body) + err = tc.RemoveBlockDataFromPool(body) + assert.Nil(t, err) } func TestTransactionCoordinator_ProcessBlockTransactionProcessTxError(t *testing.T) { - t.Parallel() - - txHash := []byte("tx_hash1") - dataPool := initDataPool(txHash) - - accounts := initAccountsMock() - preFactory, _ := shard.NewPreProcessorsContainerFactory( - mock.NewMultiShardsCoordinatorMock(5), - initStore(), - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - dataPool, - &mock.AddressConverterMock{}, - accounts, - &mock.RequestHandlerMock{}, - &mock.TxProcessorMock{ - ProcessTransactionCalled: func(transaction *transaction.Transaction, round uint64) error { - return process.ErrHigherNonceInTransaction - }, - }, - &mock.SCProcessorMock{}, - &mock.SmartContractResultsProcessorMock{}, - &mock.RewardTxProcessorMock{}, - ) - container, _ := preFactory.Create() - - tc, err := NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(3), - initAccountsMock(), - dataPool, - &mock.RequestHandlerMock{}, - container, - &mock.InterimProcessorContainerMock{}, - ) - assert.Nil(t, err) - assert.NotNil(t, tc) - - haveTime := func() time.Duration { - return time.Second - } - err = tc.ProcessBlockTransaction(nil, 10, haveTime) - assert.Nil(t, err) - - body := block.Body{} - miniBlock := &block.MiniBlock{SenderShardID: 1, ReceiverShardID: 0, Type: block.TxBlock, 
TxHashes: [][]byte{txHash}} - body = append(body, miniBlock) - - tc.RequestBlockTransactions(body) - err = tc.ProcessBlockTransaction(body, 10, haveTime) - assert.Equal(t, process.ErrHigherNonceInTransaction, err) - - noTime := func() time.Duration { - return 0 - } - err = tc.ProcessBlockTransaction(body, 10, noTime) - assert.Equal(t, process.ErrHigherNonceInTransaction, err) - - txHashToAsk := []byte("tx_hashnotinPool") - miniBlock = &block.MiniBlock{SenderShardID: 0, ReceiverShardID: 0, Type: block.TxBlock, TxHashes: [][]byte{txHashToAsk}} - body = append(body, miniBlock) - err = tc.ProcessBlockTransaction(body, 10, haveTime) - assert.Equal(t, process.ErrHigherNonceInTransaction, err) + t.Parallel() + + txHash := []byte("tx_hash1") + dataPool := initDataPool(txHash) + + accounts := initAccountsMock() + preFactory, _ := shard.NewPreProcessorsContainerFactory( + mock.NewMultiShardsCoordinatorMock(5), + initStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + dataPool, + &mock.AddressConverterMock{}, + accounts, + &mock.RequestHandlerMock{}, + &mock.TxProcessorMock{ + ProcessTransactionCalled: func(transaction *transaction.Transaction, round uint64) error { + return process.ErrHigherNonceInTransaction + }, + }, + &mock.SCProcessorMock{}, + &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + ) + container, _ := preFactory.Create() + + tc, err := NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(3), + initAccountsMock(), + dataPool, + &mock.RequestHandlerMock{}, + container, + &mock.InterimProcessorContainerMock{}, + ) + assert.Nil(t, err) + assert.NotNil(t, tc) + + haveTime := func() time.Duration { + return time.Second + } + err = tc.ProcessBlockTransaction(nil, 10, haveTime) + assert.Nil(t, err) + + body := block.Body{} + miniBlock := &block.MiniBlock{SenderShardID: 1, ReceiverShardID: 0, Type: block.TxBlock, TxHashes: [][]byte{txHash}} + body = append(body, miniBlock) + + tc.RequestBlockTransactions(body) + err = tc.ProcessBlockTransaction(body, 10, haveTime) + assert.Equal(t, process.ErrHigherNonceInTransaction, err) + + noTime := func() time.Duration { + return 0 + } + err = tc.ProcessBlockTransaction(body, 10, noTime) + assert.Equal(t, process.ErrHigherNonceInTransaction, err) + + txHashToAsk := []byte("tx_hashnotinPool") + miniBlock = &block.MiniBlock{SenderShardID: 0, ReceiverShardID: 0, Type: block.TxBlock, TxHashes: [][]byte{txHashToAsk}} + body = append(body, miniBlock) + err = tc.ProcessBlockTransaction(body, 10, haveTime) + assert.Equal(t, process.ErrHigherNonceInTransaction, err) } func TestTransactionCoordinator_ProcessBlockTransaction(t *testing.T) { - t.Parallel() - - txHash := []byte("tx_hash1") - dataPool := initDataPool(txHash) - tc, err := NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(3), - initAccountsMock(), - dataPool, - &mock.RequestHandlerMock{}, - createPreProcessorContainerWithDataPool(dataPool), - &mock.InterimProcessorContainerMock{}, - ) - assert.Nil(t, err) - assert.NotNil(t, tc) - - haveTime := func() time.Duration { - return time.Second - } - err = tc.ProcessBlockTransaction(nil, 10, haveTime) - assert.Nil(t, err) - - body := block.Body{} - miniBlock := &block.MiniBlock{SenderShardID: 1, ReceiverShardID: 0, Type: block.TxBlock, TxHashes: [][]byte{txHash}} - body = append(body, miniBlock) - - tc.RequestBlockTransactions(body) - err = tc.ProcessBlockTransaction(body, 10, haveTime) - assert.Nil(t, err) - - noTime := func() time.Duration { - return -1 - } - err = tc.ProcessBlockTransaction(body, 
10, noTime) - assert.Equal(t, process.ErrTimeIsOut, err) - - txHashToAsk := []byte("tx_hashnotinPool") - miniBlock = &block.MiniBlock{SenderShardID: 0, ReceiverShardID: 0, Type: block.TxBlock, TxHashes: [][]byte{txHashToAsk}} - body = append(body, miniBlock) - err = tc.ProcessBlockTransaction(body, 10, haveTime) - assert.Equal(t, process.ErrMissingTransaction, err) + t.Parallel() + + txHash := []byte("tx_hash1") + dataPool := initDataPool(txHash) + tc, err := NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(3), + initAccountsMock(), + dataPool, + &mock.RequestHandlerMock{}, + createPreProcessorContainerWithDataPool(dataPool), + &mock.InterimProcessorContainerMock{}, + ) + assert.Nil(t, err) + assert.NotNil(t, tc) + + haveTime := func() time.Duration { + return time.Second + } + err = tc.ProcessBlockTransaction(nil, 10, haveTime) + assert.Nil(t, err) + + body := block.Body{} + miniBlock := &block.MiniBlock{SenderShardID: 1, ReceiverShardID: 0, Type: block.TxBlock, TxHashes: [][]byte{txHash}} + body = append(body, miniBlock) + + tc.RequestBlockTransactions(body) + err = tc.ProcessBlockTransaction(body, 10, haveTime) + assert.Nil(t, err) + + noTime := func() time.Duration { + return -1 + } + err = tc.ProcessBlockTransaction(body, 10, noTime) + assert.Equal(t, process.ErrTimeIsOut, err) + + txHashToAsk := []byte("tx_hashnotinPool") + miniBlock = &block.MiniBlock{SenderShardID: 0, ReceiverShardID: 0, Type: block.TxBlock, TxHashes: [][]byte{txHashToAsk}} + body = append(body, miniBlock) + err = tc.ProcessBlockTransaction(body, 10, haveTime) + assert.Equal(t, process.ErrMissingTransaction, err) } func TestTransactionCoordinator_RequestMiniblocks(t *testing.T) { - t.Parallel() - - txHash := []byte("tx_hash1") - dataPool := initDataPool(txHash) - shardCoordinator := mock.NewMultiShardsCoordinatorMock(3) - nrCalled := 0 - mutex := sync.Mutex{} - - requestHandler := &mock.RequestHandlerMock{ - RequestMiniBlockHandlerCalled: func(destShardID uint32, miniblockHash []byte) { - mutex.Lock() - nrCalled++ - mutex.Unlock() - }, - } - - accounts := initAccountsMock() - preFactory, _ := shard.NewPreProcessorsContainerFactory( - mock.NewMultiShardsCoordinatorMock(5), - initStore(), - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - dataPool, - &mock.AddressConverterMock{}, - accounts, - requestHandler, - &mock.TxProcessorMock{ - ProcessTransactionCalled: func(transaction *transaction.Transaction, round uint64) error { - return nil - }, - }, - &mock.SCProcessorMock{}, - &mock.SmartContractResultsProcessorMock{}, - &mock.RewardTxProcessorMock{}, - ) - container, _ := preFactory.Create() - - tc, err := NewTransactionCoordinator( - shardCoordinator, - accounts, - dataPool, - requestHandler, - container, - &mock.InterimProcessorContainerMock{}, - ) - assert.Nil(t, err) - assert.NotNil(t, tc) - - tc.RequestMiniBlocks(nil) - time.Sleep(time.Second) - mutex.Lock() - assert.Equal(t, 0, nrCalled) - mutex.Unlock() - - header := createTestMetablock() - tc.RequestMiniBlocks(header) - - crossMbs := header.GetMiniBlockHeadersWithDst(shardCoordinator.SelfId()) - time.Sleep(time.Second) - mutex.Lock() - assert.Equal(t, len(crossMbs), nrCalled) - mutex.Unlock() + t.Parallel() + + txHash := []byte("tx_hash1") + dataPool := initDataPool(txHash) + shardCoordinator := mock.NewMultiShardsCoordinatorMock(3) + nrCalled := 0 + mutex := sync.Mutex{} + + requestHandler := &mock.RequestHandlerMock{ + RequestMiniBlockHandlerCalled: func(destShardID uint32, miniblockHash []byte) { + mutex.Lock() + nrCalled++ + mutex.Unlock() 
+ }, + } + + accounts := initAccountsMock() + preFactory, _ := shard.NewPreProcessorsContainerFactory( + mock.NewMultiShardsCoordinatorMock(5), + initStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + dataPool, + &mock.AddressConverterMock{}, + accounts, + requestHandler, + &mock.TxProcessorMock{ + ProcessTransactionCalled: func(transaction *transaction.Transaction, round uint64) error { + return nil + }, + }, + &mock.SCProcessorMock{}, + &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + ) + container, _ := preFactory.Create() + + tc, err := NewTransactionCoordinator( + shardCoordinator, + accounts, + dataPool, + requestHandler, + container, + &mock.InterimProcessorContainerMock{}, + ) + assert.Nil(t, err) + assert.NotNil(t, tc) + + tc.RequestMiniBlocks(nil) + time.Sleep(time.Second) + mutex.Lock() + assert.Equal(t, 0, nrCalled) + mutex.Unlock() + + header := createTestMetablock() + tc.RequestMiniBlocks(header) + + crossMbs := header.GetMiniBlockHeadersWithDst(shardCoordinator.SelfId()) + time.Sleep(time.Second) + mutex.Lock() + assert.Equal(t, len(crossMbs), nrCalled) + mutex.Unlock() } func TestShardProcessor_ProcessMiniBlockCompleteWithOkTxsShouldExecuteThemAndNotRevertAccntState(t *testing.T) { - t.Parallel() - - hasher := mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} - dataPool := mock.NewPoolsHolderMock() - - //we will have a miniblock that will have 3 tx hashes - //all txs will be in datapool and none of them will return err when processed - //so, tx processor will return nil on processing tx - - txHash1 := []byte("tx hash 1") - txHash2 := []byte("tx hash 2") - txHash3 := []byte("tx hash 3") - - senderShardId := uint32(1) - receiverShardId := uint32(2) - - miniBlock := block.MiniBlock{ - SenderShardID: senderShardId, - ReceiverShardID: receiverShardId, - TxHashes: [][]byte{txHash1, txHash2, txHash3}, - } - - tx1Nonce := uint64(45) - tx2Nonce := uint64(46) - tx3Nonce := uint64(47) - - //put the existing tx inside datapool - cacheId := process.ShardCacherIdentifier(senderShardId, receiverShardId) - dataPool.Transactions().AddData(txHash1, &transaction.Transaction{ - Nonce: tx1Nonce, - Data: string(txHash1), - }, cacheId) - dataPool.Transactions().AddData(txHash2, &transaction.Transaction{ - Nonce: tx2Nonce, - Data: string(txHash2), - }, cacheId) - dataPool.Transactions().AddData(txHash3, &transaction.Transaction{ - Nonce: tx3Nonce, - Data: string(txHash3), - }, cacheId) - - tx1ExecutionResult := uint64(0) - tx2ExecutionResult := uint64(0) - tx3ExecutionResult := uint64(0) - - accounts := &mock.AccountsStub{ - RevertToSnapshotCalled: func(snapshot int) error { - assert.Fail(t, "revert should have not been called") - return nil - }, - JournalLenCalled: func() int { - return 0 - }, - } - preFactory, _ := shard.NewPreProcessorsContainerFactory( - mock.NewMultiShardsCoordinatorMock(5), - initStore(), - marshalizer, - hasher, - dataPool, - &mock.AddressConverterMock{}, - accounts, - &mock.RequestHandlerMock{}, - &mock.TxProcessorMock{ - ProcessTransactionCalled: func(transaction *transaction.Transaction, round uint64) error { - //execution, in this context, means moving the tx nonce to itx corresponding execution result variable - if transaction.Data == string(txHash1) { - tx1ExecutionResult = transaction.Nonce - } - if transaction.Data == string(txHash2) { - tx2ExecutionResult = transaction.Nonce - } - if transaction.Data == string(txHash3) { - tx3ExecutionResult = transaction.Nonce - } - - return nil - }, - }, - &mock.SCProcessorMock{}, - 
&mock.SmartContractResultsProcessorMock{}, - &mock.RewardTxProcessorMock{}, - ) - container, _ := preFactory.Create() - - tc, err := NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(3), - accounts, - dataPool, - &mock.RequestHandlerMock{}, - container, - &mock.InterimProcessorContainerMock{}, - ) - assert.Nil(t, err) - assert.NotNil(t, tc) - - haveTime := func() bool { - return true - } - preproc := tc.getPreProcessor(block.TxBlock) - err = tc.processCompleteMiniBlock(preproc, &miniBlock, 0, haveTime) - - assert.Nil(t, err) - assert.Equal(t, tx1Nonce, tx1ExecutionResult) - assert.Equal(t, tx2Nonce, tx2ExecutionResult) - assert.Equal(t, tx3Nonce, tx3ExecutionResult) + t.Parallel() + + hasher := mock.HasherMock{} + marshalizer := &mock.MarshalizerMock{} + dataPool := mock.NewPoolsHolderMock() + + //we will have a miniblock that will have 3 tx hashes + //all txs will be in datapool and none of them will return err when processed + //so, tx processor will return nil on processing tx + + txHash1 := []byte("tx hash 1") + txHash2 := []byte("tx hash 2") + txHash3 := []byte("tx hash 3") + + senderShardId := uint32(1) + receiverShardId := uint32(2) + + miniBlock := block.MiniBlock{ + SenderShardID: senderShardId, + ReceiverShardID: receiverShardId, + TxHashes: [][]byte{txHash1, txHash2, txHash3}, + } + + tx1Nonce := uint64(45) + tx2Nonce := uint64(46) + tx3Nonce := uint64(47) + + //put the existing tx inside datapool + cacheId := process.ShardCacherIdentifier(senderShardId, receiverShardId) + dataPool.Transactions().AddData(txHash1, &transaction.Transaction{ + Nonce: tx1Nonce, + Data: string(txHash1), + }, cacheId) + dataPool.Transactions().AddData(txHash2, &transaction.Transaction{ + Nonce: tx2Nonce, + Data: string(txHash2), + }, cacheId) + dataPool.Transactions().AddData(txHash3, &transaction.Transaction{ + Nonce: tx3Nonce, + Data: string(txHash3), + }, cacheId) + + tx1ExecutionResult := uint64(0) + tx2ExecutionResult := uint64(0) + tx3ExecutionResult := uint64(0) + + accounts := &mock.AccountsStub{ + RevertToSnapshotCalled: func(snapshot int) error { + assert.Fail(t, "revert should have not been called") + return nil + }, + JournalLenCalled: func() int { + return 0 + }, + } + preFactory, _ := shard.NewPreProcessorsContainerFactory( + mock.NewMultiShardsCoordinatorMock(5), + initStore(), + marshalizer, + hasher, + dataPool, + &mock.AddressConverterMock{}, + accounts, + &mock.RequestHandlerMock{}, + &mock.TxProcessorMock{ + ProcessTransactionCalled: func(transaction *transaction.Transaction, round uint64) error { + //execution, in this context, means moving the tx nonce to itx corresponding execution result variable + if transaction.Data == string(txHash1) { + tx1ExecutionResult = transaction.Nonce + } + if transaction.Data == string(txHash2) { + tx2ExecutionResult = transaction.Nonce + } + if transaction.Data == string(txHash3) { + tx3ExecutionResult = transaction.Nonce + } + + return nil + }, + }, + &mock.SCProcessorMock{}, + &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + ) + container, _ := preFactory.Create() + + tc, err := NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(3), + accounts, + dataPool, + &mock.RequestHandlerMock{}, + container, + &mock.InterimProcessorContainerMock{}, + ) + assert.Nil(t, err) + assert.NotNil(t, tc) + + haveTime := func() bool { + return true + } + preproc := tc.getPreProcessor(block.TxBlock) + err = tc.processCompleteMiniBlock(preproc, &miniBlock, 0, haveTime) + + assert.Nil(t, err) + assert.Equal(t, 
tx1Nonce, tx1ExecutionResult) + assert.Equal(t, tx2Nonce, tx2ExecutionResult) + assert.Equal(t, tx3Nonce, tx3ExecutionResult) } func TestShardProcessor_ProcessMiniBlockCompleteWithErrorWhileProcessShouldCallRevertAccntState(t *testing.T) { - t.Parallel() - - hasher := mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} - dataPool := mock.NewPoolsHolderMock() - - //we will have a miniblock that will have 3 tx hashes - //all txs will be in datapool and none of them will return err when processed - //so, tx processor will return nil on processing tx - - txHash1 := []byte("tx hash 1") - txHash2 := []byte("tx hash 2 - this will cause the tx processor to err") - txHash3 := []byte("tx hash 3") - - senderShardId := uint32(1) - receiverShardId := uint32(2) - - miniBlock := block.MiniBlock{ - SenderShardID: senderShardId, - ReceiverShardID: receiverShardId, - TxHashes: [][]byte{txHash1, txHash2, txHash3}, - } - - tx1Nonce := uint64(45) - tx2Nonce := uint64(46) - tx3Nonce := uint64(47) - - //put the existing tx inside datapool - cacheId := process.ShardCacherIdentifier(senderShardId, receiverShardId) - dataPool.Transactions().AddData(txHash1, &transaction.Transaction{ - Nonce: tx1Nonce, - Data: string(txHash1), - }, cacheId) - dataPool.Transactions().AddData(txHash2, &transaction.Transaction{ - Nonce: tx2Nonce, - Data: string(txHash2), - }, cacheId) - dataPool.Transactions().AddData(txHash3, &transaction.Transaction{ - Nonce: tx3Nonce, - Data: string(txHash3), - }, cacheId) - - currentJournalLen := 445 - revertAccntStateCalled := false - - accounts := &mock.AccountsStub{ - RevertToSnapshotCalled: func(snapshot int) error { - if snapshot == currentJournalLen { - revertAccntStateCalled = true - } - - return nil - }, - JournalLenCalled: func() int { - return currentJournalLen - }, - } - - preFactory, _ := shard.NewPreProcessorsContainerFactory( - mock.NewMultiShardsCoordinatorMock(5), - initStore(), - marshalizer, - hasher, - dataPool, - &mock.AddressConverterMock{}, - accounts, - &mock.RequestHandlerMock{}, - &mock.TxProcessorMock{ - ProcessTransactionCalled: func(transaction *transaction.Transaction, round uint64) error { - if transaction.Data == string(txHash2) { - return process.ErrHigherNonceInTransaction - } - return nil - }, - }, - &mock.SCProcessorMock{}, - &mock.SmartContractResultsProcessorMock{}, - &mock.RewardTxProcessorMock{}, - ) - container, _ := preFactory.Create() - - tc, err := NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(3), - accounts, - dataPool, - &mock.RequestHandlerMock{}, - container, - &mock.InterimProcessorContainerMock{}, - ) - assert.Nil(t, err) - assert.NotNil(t, tc) - - haveTime := func() bool { - return true - } - preproc := tc.getPreProcessor(block.TxBlock) - err = tc.processCompleteMiniBlock(preproc, &miniBlock, 0, haveTime) - - assert.Equal(t, process.ErrHigherNonceInTransaction, err) - assert.True(t, revertAccntStateCalled) + t.Parallel() + + hasher := mock.HasherMock{} + marshalizer := &mock.MarshalizerMock{} + dataPool := mock.NewPoolsHolderMock() + + //we will have a miniblock that will have 3 tx hashes + //all txs will be in datapool and none of them will return err when processed + //so, tx processor will return nil on processing tx + + txHash1 := []byte("tx hash 1") + txHash2 := []byte("tx hash 2 - this will cause the tx processor to err") + txHash3 := []byte("tx hash 3") + + senderShardId := uint32(1) + receiverShardId := uint32(2) + + miniBlock := block.MiniBlock{ + SenderShardID: senderShardId, + ReceiverShardID: receiverShardId, + 
TxHashes: [][]byte{txHash1, txHash2, txHash3}, + } + + tx1Nonce := uint64(45) + tx2Nonce := uint64(46) + tx3Nonce := uint64(47) + + //put the existing tx inside datapool + cacheId := process.ShardCacherIdentifier(senderShardId, receiverShardId) + dataPool.Transactions().AddData(txHash1, &transaction.Transaction{ + Nonce: tx1Nonce, + Data: string(txHash1), + }, cacheId) + dataPool.Transactions().AddData(txHash2, &transaction.Transaction{ + Nonce: tx2Nonce, + Data: string(txHash2), + }, cacheId) + dataPool.Transactions().AddData(txHash3, &transaction.Transaction{ + Nonce: tx3Nonce, + Data: string(txHash3), + }, cacheId) + + currentJournalLen := 445 + revertAccntStateCalled := false + + accounts := &mock.AccountsStub{ + RevertToSnapshotCalled: func(snapshot int) error { + if snapshot == currentJournalLen { + revertAccntStateCalled = true + } + + return nil + }, + JournalLenCalled: func() int { + return currentJournalLen + }, + } + + preFactory, _ := shard.NewPreProcessorsContainerFactory( + mock.NewMultiShardsCoordinatorMock(5), + initStore(), + marshalizer, + hasher, + dataPool, + &mock.AddressConverterMock{}, + accounts, + &mock.RequestHandlerMock{}, + &mock.TxProcessorMock{ + ProcessTransactionCalled: func(transaction *transaction.Transaction, round uint64) error { + if transaction.Data == string(txHash2) { + return process.ErrHigherNonceInTransaction + } + return nil + }, + }, + &mock.SCProcessorMock{}, + &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + ) + container, _ := preFactory.Create() + + tc, err := NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(3), + accounts, + dataPool, + &mock.RequestHandlerMock{}, + container, + &mock.InterimProcessorContainerMock{}, + ) + assert.Nil(t, err) + assert.NotNil(t, tc) + + haveTime := func() bool { + return true + } + preproc := tc.getPreProcessor(block.TxBlock) + err = tc.processCompleteMiniBlock(preproc, &miniBlock, 0, haveTime) + + assert.Equal(t, process.ErrHigherNonceInTransaction, err) + assert.True(t, revertAccntStateCalled) } func TestTransactionCoordinator_VerifyCreatedBlockTransactionsNilOrMiss(t *testing.T) { - t.Parallel() - - txHash := []byte("txHash") - tdp := initDataPool(txHash) - shardCoordinator := mock.NewMultiShardsCoordinatorMock(5) - adrConv := &mock.AddressConverterMock{} - preFactory, _ := shard.NewIntermediateProcessorsContainerFactory( - shardCoordinator, - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - adrConv, - &mock.SpecialAddressHandlerMock{}, - &mock.ChainStorerMock{}, - tdp, - ) - container, _ := preFactory.Create() - - tc, err := NewTransactionCoordinator( - shardCoordinator, - &mock.AccountsStub{}, - tdp, - &mock.RequestHandlerMock{}, - &mock.PreProcessorContainerMock{}, - container, - ) - assert.Nil(t, err) - assert.NotNil(t, tc) - - err = tc.VerifyCreatedBlockTransactions(nil) - assert.Nil(t, err) - - body := block.Body{&block.MiniBlock{Type: block.TxBlock}} - err = tc.VerifyCreatedBlockTransactions(body) - assert.Nil(t, err) - - body = block.Body{&block.MiniBlock{Type: block.SmartContractResultBlock, ReceiverShardID: shardCoordinator.SelfId()}} - err = tc.VerifyCreatedBlockTransactions(body) - assert.Nil(t, err) - - body = block.Body{&block.MiniBlock{Type: block.SmartContractResultBlock, ReceiverShardID: shardCoordinator.SelfId() + 1}} - err = tc.VerifyCreatedBlockTransactions(body) - assert.Equal(t, process.ErrNilMiniBlocks, err) + t.Parallel() + + txHash := []byte("txHash") + tdp := initDataPool(txHash) + shardCoordinator := 
mock.NewMultiShardsCoordinatorMock(5) + adrConv := &mock.AddressConverterMock{} + preFactory, _ := shard.NewIntermediateProcessorsContainerFactory( + shardCoordinator, + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + adrConv, + &mock.SpecialAddressHandlerMock{}, + &mock.ChainStorerMock{}, + tdp, + ) + container, _ := preFactory.Create() + + tc, err := NewTransactionCoordinator( + shardCoordinator, + &mock.AccountsStub{}, + tdp, + &mock.RequestHandlerMock{}, + &mock.PreProcessorContainerMock{}, + container, + ) + assert.Nil(t, err) + assert.NotNil(t, tc) + + err = tc.VerifyCreatedBlockTransactions(nil) + assert.Nil(t, err) + + body := block.Body{&block.MiniBlock{Type: block.TxBlock}} + err = tc.VerifyCreatedBlockTransactions(body) + assert.Nil(t, err) + + body = block.Body{&block.MiniBlock{Type: block.SmartContractResultBlock, ReceiverShardID: shardCoordinator.SelfId()}} + err = tc.VerifyCreatedBlockTransactions(body) + assert.Nil(t, err) + + body = block.Body{&block.MiniBlock{Type: block.SmartContractResultBlock, ReceiverShardID: shardCoordinator.SelfId() + 1}} + err = tc.VerifyCreatedBlockTransactions(body) + assert.Equal(t, process.ErrNilMiniBlocks, err) } func TestTransactionCoordinator_VerifyCreatedBlockTransactionsOk(t *testing.T) { - t.Parallel() - - txHash := []byte("txHash") - tdp := initDataPool(txHash) - shardCoordinator := mock.NewMultiShardsCoordinatorMock(5) - adrConv := &mock.AddressConverterMock{} - preFactory, _ := shard.NewIntermediateProcessorsContainerFactory( - shardCoordinator, - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - adrConv, - &mock.SpecialAddressHandlerMock{}, - &mock.ChainStorerMock{}, - tdp, - ) - container, _ := preFactory.Create() - - tc, err := NewTransactionCoordinator( - shardCoordinator, - &mock.AccountsStub{}, - tdp, - &mock.RequestHandlerMock{}, - &mock.PreProcessorContainerMock{}, - container, - ) - assert.Nil(t, err) - assert.NotNil(t, tc) - - sndAddr := []byte("0") - rcvAddr := []byte("1") - scr := &smartContractResult.SmartContractResult{Nonce: 10, SndAddr: sndAddr, RcvAddr: rcvAddr} - scrHash, _ := core.CalculateHash(&mock.MarshalizerMock{}, &mock.HasherMock{}, scr) - - shardCoordinator.ComputeIdCalled = func(address state.AddressContainer) uint32 { - if bytes.Equal(address.Bytes(), sndAddr) { - return shardCoordinator.SelfId() - } - if bytes.Equal(address.Bytes(), rcvAddr) { - return shardCoordinator.SelfId() + 1 - } - return shardCoordinator.SelfId() + 2 - } - - tdp.UnsignedTransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{ - RegisterHandlerCalled: func(i func(key []byte)) {}, - ShardDataStoreCalled: func(id string) (c storage.Cacher) { - return &mock.CacherStub{ - PeekCalled: func(key []byte) (value interface{}, ok bool) { - if reflect.DeepEqual(key, scrHash) { - return scr, true - } - return nil, false - }, - KeysCalled: func() [][]byte { - return [][]byte{[]byte("key1"), []byte("key2")} - }, - LenCalled: func() int { - return 0 - }, - } - }, - RemoveSetOfDataFromPoolCalled: func(keys [][]byte, id string) {}, - SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { - if reflect.DeepEqual(key, scrHash) { - return scr, true - } - return nil, false - }, - AddDataCalled: func(key []byte, data interface{}, cacheId string) { - }, - } - } - - interProc, _ := container.Get(block.SmartContractResultBlock) - tx, _ := tdp.UnsignedTransactions().SearchFirstData(scrHash) - txs := make([]data.TransactionHandler, 0) - txs = append(txs, tx.(data.TransactionHandler)) - err = 
interProc.AddIntermediateTransactions(txs) - assert.Nil(t, err) - - body := block.Body{&block.MiniBlock{Type: block.SmartContractResultBlock, ReceiverShardID: shardCoordinator.SelfId() + 1, TxHashes: [][]byte{scrHash}}} - err = tc.VerifyCreatedBlockTransactions(body) - assert.Nil(t, err) + t.Parallel() + + txHash := []byte("txHash") + tdp := initDataPool(txHash) + shardCoordinator := mock.NewMultiShardsCoordinatorMock(5) + adrConv := &mock.AddressConverterMock{} + preFactory, _ := shard.NewIntermediateProcessorsContainerFactory( + shardCoordinator, + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + adrConv, + &mock.SpecialAddressHandlerMock{}, + &mock.ChainStorerMock{}, + tdp, + ) + container, _ := preFactory.Create() + + tc, err := NewTransactionCoordinator( + shardCoordinator, + &mock.AccountsStub{}, + tdp, + &mock.RequestHandlerMock{}, + &mock.PreProcessorContainerMock{}, + container, + ) + assert.Nil(t, err) + assert.NotNil(t, tc) + + sndAddr := []byte("0") + rcvAddr := []byte("1") + scr := &smartContractResult.SmartContractResult{Nonce: 10, SndAddr: sndAddr, RcvAddr: rcvAddr} + scrHash, _ := core.CalculateHash(&mock.MarshalizerMock{}, &mock.HasherMock{}, scr) + + shardCoordinator.ComputeIdCalled = func(address state.AddressContainer) uint32 { + if bytes.Equal(address.Bytes(), sndAddr) { + return shardCoordinator.SelfId() + } + if bytes.Equal(address.Bytes(), rcvAddr) { + return shardCoordinator.SelfId() + 1 + } + return shardCoordinator.SelfId() + 2 + } + + tdp.UnsignedTransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{ + RegisterHandlerCalled: func(i func(key []byte)) {}, + ShardDataStoreCalled: func(id string) (c storage.Cacher) { + return &mock.CacherStub{ + PeekCalled: func(key []byte) (value interface{}, ok bool) { + if reflect.DeepEqual(key, scrHash) { + return scr, true + } + return nil, false + }, + KeysCalled: func() [][]byte { + return [][]byte{[]byte("key1"), []byte("key2")} + }, + LenCalled: func() int { + return 0 + }, + } + }, + RemoveSetOfDataFromPoolCalled: func(keys [][]byte, id string) {}, + SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { + if reflect.DeepEqual(key, scrHash) { + return scr, true + } + return nil, false + }, + AddDataCalled: func(key []byte, data interface{}, cacheId string) { + }, + } + } + + interProc, _ := container.Get(block.SmartContractResultBlock) + tx, _ := tdp.UnsignedTransactions().SearchFirstData(scrHash) + txs := make([]data.TransactionHandler, 0) + txs = append(txs, tx.(data.TransactionHandler)) + err = interProc.AddIntermediateTransactions(txs) + assert.Nil(t, err) + + body := block.Body{&block.MiniBlock{Type: block.SmartContractResultBlock, ReceiverShardID: shardCoordinator.SelfId() + 1, TxHashes: [][]byte{scrHash}}} + err = tc.VerifyCreatedBlockTransactions(body) + assert.Nil(t, err) } func TestTransactionCoordinator_SaveBlockDataToStorageSaveIntermediateTxsErrors(t *testing.T) { - t.Parallel() - - txHash := []byte("tx_hash1") - tdp := initDataPool(txHash) - retError := errors.New("save error") - tc, err := NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(3), - initAccountsMock(), - tdp, - &mock.RequestHandlerMock{}, - createPreProcessorContainerWithDataPool(tdp), - &mock.InterimProcessorContainerMock{ - KeysCalled: func() []block.Type { - return []block.Type{block.SmartContractResultBlock} - }, - GetCalled: func(key block.Type) (handler process.IntermediateTransactionHandler, e error) { - if key == block.SmartContractResultBlock { - return 
&mock.IntermediateTransactionHandlerMock{ - SaveCurrentIntermediateTxToStorageCalled: func() error { - return retError - }, - }, nil - } - return nil, errors.New("invalid handler type") - }, - }, - ) - assert.Nil(t, err) - assert.NotNil(t, tc) - - body := block.Body{} - miniBlock := &block.MiniBlock{SenderShardID: 0, ReceiverShardID: 0, Type: block.TxBlock, TxHashes: [][]byte{txHash}} - body = append(body, miniBlock) - - tc.RequestBlockTransactions(body) - - err = tc.SaveBlockDataToStorage(body) - assert.Equal(t, retError, err) + t.Parallel() + + txHash := []byte("tx_hash1") + tdp := initDataPool(txHash) + retError := errors.New("save error") + tc, err := NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(3), + initAccountsMock(), + tdp, + &mock.RequestHandlerMock{}, + createPreProcessorContainerWithDataPool(tdp), + &mock.InterimProcessorContainerMock{ + KeysCalled: func() []block.Type { + return []block.Type{block.SmartContractResultBlock} + }, + GetCalled: func(key block.Type) (handler process.IntermediateTransactionHandler, e error) { + if key == block.SmartContractResultBlock { + return &mock.IntermediateTransactionHandlerMock{ + SaveCurrentIntermediateTxToStorageCalled: func() error { + return retError + }, + }, nil + } + return nil, errors.New("invalid handler type") + }, + }, + ) + assert.Nil(t, err) + assert.NotNil(t, tc) + + body := block.Body{} + miniBlock := &block.MiniBlock{SenderShardID: 0, ReceiverShardID: 0, Type: block.TxBlock, TxHashes: [][]byte{txHash}} + body = append(body, miniBlock) + + tc.RequestBlockTransactions(body) + + err = tc.SaveBlockDataToStorage(body) + assert.Equal(t, retError, err) } func TestTransactionCoordinator_SaveBlockDataToStorageCallsSaveIntermediate(t *testing.T) { - t.Parallel() - - txHash := []byte("tx_hash1") - tdp := initDataPool(txHash) - intermediateTxWereSaved := false - tc, err := NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(3), - initAccountsMock(), - tdp, - &mock.RequestHandlerMock{}, - createPreProcessorContainerWithDataPool(tdp), - &mock.InterimProcessorContainerMock{ - KeysCalled: func() []block.Type { - return []block.Type{block.SmartContractResultBlock} - }, - GetCalled: func(key block.Type) (handler process.IntermediateTransactionHandler, e error) { - if key == block.SmartContractResultBlock { - return &mock.IntermediateTransactionHandlerMock{ - SaveCurrentIntermediateTxToStorageCalled: func() error { - intermediateTxWereSaved = true - return nil - }, - }, nil - } - return nil, errors.New("invalid handler type") - }, - }, - ) - assert.Nil(t, err) - assert.NotNil(t, tc) - - body := block.Body{} - miniBlock := &block.MiniBlock{SenderShardID: 0, ReceiverShardID: 0, Type: block.TxBlock, TxHashes: [][]byte{txHash}} - body = append(body, miniBlock) - - tc.RequestBlockTransactions(body) - - err = tc.SaveBlockDataToStorage(body) - assert.Nil(t, err) - - assert.True(t, intermediateTxWereSaved) + t.Parallel() + + txHash := []byte("tx_hash1") + tdp := initDataPool(txHash) + intermediateTxWereSaved := false + tc, err := NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(3), + initAccountsMock(), + tdp, + &mock.RequestHandlerMock{}, + createPreProcessorContainerWithDataPool(tdp), + &mock.InterimProcessorContainerMock{ + KeysCalled: func() []block.Type { + return []block.Type{block.SmartContractResultBlock} + }, + GetCalled: func(key block.Type) (handler process.IntermediateTransactionHandler, e error) { + if key == block.SmartContractResultBlock { + return &mock.IntermediateTransactionHandlerMock{ + 
SaveCurrentIntermediateTxToStorageCalled: func() error { + intermediateTxWereSaved = true + return nil + }, + }, nil + } + return nil, errors.New("invalid handler type") + }, + }, + ) + assert.Nil(t, err) + assert.NotNil(t, tc) + + body := block.Body{} + miniBlock := &block.MiniBlock{SenderShardID: 0, ReceiverShardID: 0, Type: block.TxBlock, TxHashes: [][]byte{txHash}} + body = append(body, miniBlock) + + tc.RequestBlockTransactions(body) + + err = tc.SaveBlockDataToStorage(body) + assert.Nil(t, err) + + assert.True(t, intermediateTxWereSaved) } diff --git a/process/errors.go b/process/errors.go index 9f32c03795a..34c6d052b4c 100644 --- a/process/errors.go +++ b/process/errors.go @@ -1,7 +1,7 @@ package process import ( - "errors" + "errors" ) // ErrNilMessage signals that a nil message has been received diff --git a/process/factory/shard/interceptorsContainerFactory.go b/process/factory/shard/interceptorsContainerFactory.go index af20dad71e3..d78da24f9f3 100644 --- a/process/factory/shard/interceptorsContainerFactory.go +++ b/process/factory/shard/interceptorsContainerFactory.go @@ -1,494 +1,494 @@ package shard import ( - "github.com/ElrondNetwork/elrond-go/crypto" - "github.com/ElrondNetwork/elrond-go/data/state" - "github.com/ElrondNetwork/elrond-go/dataRetriever" - "github.com/ElrondNetwork/elrond-go/hashing" - "github.com/ElrondNetwork/elrond-go/marshal" - "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/process/block/interceptors" - "github.com/ElrondNetwork/elrond-go/process/dataValidators" - "github.com/ElrondNetwork/elrond-go/process/factory" - "github.com/ElrondNetwork/elrond-go/process/factory/containers" - "github.com/ElrondNetwork/elrond-go/process/rewardTransaction" - "github.com/ElrondNetwork/elrond-go/process/transaction" - "github.com/ElrondNetwork/elrond-go/process/unsigned" - "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/crypto" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/block/interceptors" + "github.com/ElrondNetwork/elrond-go/process/dataValidators" + "github.com/ElrondNetwork/elrond-go/process/factory" + "github.com/ElrondNetwork/elrond-go/process/factory/containers" + "github.com/ElrondNetwork/elrond-go/process/rewardTransaction" + "github.com/ElrondNetwork/elrond-go/process/transaction" + "github.com/ElrondNetwork/elrond-go/process/unsigned" + "github.com/ElrondNetwork/elrond-go/sharding" ) type interceptorsContainerFactory struct { - shardCoordinator sharding.Coordinator - messenger process.TopicHandler - store dataRetriever.StorageService - marshalizer marshal.Marshalizer - hasher hashing.Hasher - keyGen crypto.KeyGenerator - singleSigner crypto.SingleSigner - multiSigner crypto.MultiSigner - dataPool dataRetriever.PoolsHolder - addrConverter state.AddressConverter - nodesCoordinator sharding.NodesCoordinator + shardCoordinator sharding.Coordinator + messenger process.TopicHandler + store dataRetriever.StorageService + marshalizer marshal.Marshalizer + hasher hashing.Hasher + keyGen crypto.KeyGenerator + singleSigner crypto.SingleSigner + multiSigner crypto.MultiSigner + dataPool dataRetriever.PoolsHolder + addrConverter state.AddressConverter + nodesCoordinator sharding.NodesCoordinator } // NewInterceptorsContainerFactory is responsible for 
creating a new interceptors factory object func NewInterceptorsContainerFactory( - shardCoordinator sharding.Coordinator, - nodesCoordinator sharding.NodesCoordinator, - messenger process.TopicHandler, - store dataRetriever.StorageService, - marshalizer marshal.Marshalizer, - hasher hashing.Hasher, - keyGen crypto.KeyGenerator, - singleSigner crypto.SingleSigner, - multiSigner crypto.MultiSigner, - dataPool dataRetriever.PoolsHolder, - addrConverter state.AddressConverter, + shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, + messenger process.TopicHandler, + store dataRetriever.StorageService, + marshalizer marshal.Marshalizer, + hasher hashing.Hasher, + keyGen crypto.KeyGenerator, + singleSigner crypto.SingleSigner, + multiSigner crypto.MultiSigner, + dataPool dataRetriever.PoolsHolder, + addrConverter state.AddressConverter, ) (*interceptorsContainerFactory, error) { - if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { - return nil, process.ErrNilShardCoordinator - } - if messenger == nil { - return nil, process.ErrNilMessenger - } - if store == nil || store.IsInterfaceNil() { - return nil, process.ErrNilBlockChain - } - if marshalizer == nil || marshalizer.IsInterfaceNil() { - return nil, process.ErrNilMarshalizer - } - if hasher == nil || hasher.IsInterfaceNil() { - return nil, process.ErrNilHasher - } - if keyGen == nil || keyGen.IsInterfaceNil() { - return nil, process.ErrNilKeyGen - } - if singleSigner == nil || singleSigner.IsInterfaceNil() { - return nil, process.ErrNilSingleSigner - } - if multiSigner == nil || multiSigner.IsInterfaceNil() { - return nil, process.ErrNilMultiSigVerifier - } - if dataPool == nil || dataPool.IsInterfaceNil() { - return nil, process.ErrNilDataPoolHolder - } - if addrConverter == nil || addrConverter.IsInterfaceNil() { - return nil, process.ErrNilAddressConverter - } - if nodesCoordinator == nil || nodesCoordinator.IsInterfaceNil() { - return nil, process.ErrNilNodesCoordinator - } - - return &interceptorsContainerFactory{ - shardCoordinator: shardCoordinator, - nodesCoordinator: nodesCoordinator, - messenger: messenger, - store: store, - marshalizer: marshalizer, - hasher: hasher, - keyGen: keyGen, - singleSigner: singleSigner, - multiSigner: multiSigner, - dataPool: dataPool, - addrConverter: addrConverter, - }, nil + if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { + return nil, process.ErrNilShardCoordinator + } + if messenger == nil { + return nil, process.ErrNilMessenger + } + if store == nil || store.IsInterfaceNil() { + return nil, process.ErrNilBlockChain + } + if marshalizer == nil || marshalizer.IsInterfaceNil() { + return nil, process.ErrNilMarshalizer + } + if hasher == nil || hasher.IsInterfaceNil() { + return nil, process.ErrNilHasher + } + if keyGen == nil || keyGen.IsInterfaceNil() { + return nil, process.ErrNilKeyGen + } + if singleSigner == nil || singleSigner.IsInterfaceNil() { + return nil, process.ErrNilSingleSigner + } + if multiSigner == nil || multiSigner.IsInterfaceNil() { + return nil, process.ErrNilMultiSigVerifier + } + if dataPool == nil || dataPool.IsInterfaceNil() { + return nil, process.ErrNilDataPoolHolder + } + if addrConverter == nil || addrConverter.IsInterfaceNil() { + return nil, process.ErrNilAddressConverter + } + if nodesCoordinator == nil || nodesCoordinator.IsInterfaceNil() { + return nil, process.ErrNilNodesCoordinator + } + + return &interceptorsContainerFactory{ + shardCoordinator: shardCoordinator, + nodesCoordinator: nodesCoordinator, + 
messenger: messenger, + store: store, + marshalizer: marshalizer, + hasher: hasher, + keyGen: keyGen, + singleSigner: singleSigner, + multiSigner: multiSigner, + dataPool: dataPool, + addrConverter: addrConverter, + }, nil } // Create returns an interceptor container that will hold all interceptors in the system func (icf *interceptorsContainerFactory) Create() (process.InterceptorsContainer, error) { - container := containers.NewInterceptorsContainer() - - keys, interceptorSlice, err := icf.generateTxInterceptors() - if err != nil { - return nil, err - } - - err = container.AddMultiple(keys, interceptorSlice) - if err != nil { - return nil, err - } - - keys, interceptorSlice, err = icf.generateUnsignedTxsInterceptors() - if err != nil { - return nil, err - } - - err = container.AddMultiple(keys, interceptorSlice) - if err != nil { - return nil, err - } - - keys, interceptorSlice, err = icf.generateRewardTxInterceptors() - if err != nil { - return nil, err - } - - err = container.AddMultiple(keys, interceptorSlice) - if err != nil { - return nil, err - } - - keys, interceptorSlice, err = icf.generateHdrInterceptor() - if err != nil { - return nil, err - } - - err = container.AddMultiple(keys, interceptorSlice) - if err != nil { - return nil, err - } - - keys, interceptorSlice, err = icf.generateMiniBlocksInterceptors() - if err != nil { - return nil, err - } - - err = container.AddMultiple(keys, interceptorSlice) - if err != nil { - return nil, err - } - - keys, interceptorSlice, err = icf.generatePeerChBlockBodyInterceptor() - if err != nil { - return nil, err - } - - err = container.AddMultiple(keys, interceptorSlice) - if err != nil { - return nil, err - } - - keys, interceptorSlice, err = icf.generateMetachainHeaderInterceptor() - if err != nil { - return nil, err - } - - err = container.AddMultiple(keys, interceptorSlice) - if err != nil { - return nil, err - } - - return container, nil + container := containers.NewInterceptorsContainer() + + keys, interceptorSlice, err := icf.generateTxInterceptors() + if err != nil { + return nil, err + } + + err = container.AddMultiple(keys, interceptorSlice) + if err != nil { + return nil, err + } + + keys, interceptorSlice, err = icf.generateUnsignedTxsInterceptors() + if err != nil { + return nil, err + } + + err = container.AddMultiple(keys, interceptorSlice) + if err != nil { + return nil, err + } + + keys, interceptorSlice, err = icf.generateRewardTxInterceptors() + if err != nil { + return nil, err + } + + err = container.AddMultiple(keys, interceptorSlice) + if err != nil { + return nil, err + } + + keys, interceptorSlice, err = icf.generateHdrInterceptor() + if err != nil { + return nil, err + } + + err = container.AddMultiple(keys, interceptorSlice) + if err != nil { + return nil, err + } + + keys, interceptorSlice, err = icf.generateMiniBlocksInterceptors() + if err != nil { + return nil, err + } + + err = container.AddMultiple(keys, interceptorSlice) + if err != nil { + return nil, err + } + + keys, interceptorSlice, err = icf.generatePeerChBlockBodyInterceptor() + if err != nil { + return nil, err + } + + err = container.AddMultiple(keys, interceptorSlice) + if err != nil { + return nil, err + } + + keys, interceptorSlice, err = icf.generateMetachainHeaderInterceptor() + if err != nil { + return nil, err + } + + err = container.AddMultiple(keys, interceptorSlice) + if err != nil { + return nil, err + } + + return container, nil } func (icf *interceptorsContainerFactory) createTopicAndAssignHandler( - topic string, - interceptor 
process.Interceptor, - createChannel bool, + topic string, + interceptor process.Interceptor, + createChannel bool, ) (process.Interceptor, error) { - err := icf.messenger.CreateTopic(topic, createChannel) - if err != nil { - return nil, err - } + err := icf.messenger.CreateTopic(topic, createChannel) + if err != nil { + return nil, err + } - return interceptor, icf.messenger.RegisterMessageProcessor(topic, interceptor) + return interceptor, icf.messenger.RegisterMessageProcessor(topic, interceptor) } //------- Tx interceptors func (icf *interceptorsContainerFactory) generateTxInterceptors() ([]string, []process.Interceptor, error) { - shardC := icf.shardCoordinator + shardC := icf.shardCoordinator - noOfShards := shardC.NumberOfShards() + noOfShards := shardC.NumberOfShards() - keys := make([]string, noOfShards) - interceptorSlice := make([]process.Interceptor, noOfShards) + keys := make([]string, noOfShards) + interceptorSlice := make([]process.Interceptor, noOfShards) - for idx := uint32(0); idx < noOfShards; idx++ { - identifierTx := factory.TransactionTopic + shardC.CommunicationIdentifier(idx) + for idx := uint32(0); idx < noOfShards; idx++ { + identifierTx := factory.TransactionTopic + shardC.CommunicationIdentifier(idx) - interceptor, err := icf.createOneTxInterceptor(identifierTx) - if err != nil { - return nil, nil, err - } + interceptor, err := icf.createOneTxInterceptor(identifierTx) + if err != nil { + return nil, nil, err + } - keys[int(idx)] = identifierTx - interceptorSlice[int(idx)] = interceptor - } + keys[int(idx)] = identifierTx + interceptorSlice[int(idx)] = interceptor + } - //tx interceptor for metachain topic - identifierTx := factory.TransactionTopic + shardC.CommunicationIdentifier(sharding.MetachainShardId) + //tx interceptor for metachain topic + identifierTx := factory.TransactionTopic + shardC.CommunicationIdentifier(sharding.MetachainShardId) - interceptor, err := icf.createOneTxInterceptor(identifierTx) - if err != nil { - return nil, nil, err - } + interceptor, err := icf.createOneTxInterceptor(identifierTx) + if err != nil { + return nil, nil, err + } - keys = append(keys, identifierTx) - interceptorSlice = append(interceptorSlice, interceptor) - return keys, interceptorSlice, nil + keys = append(keys, identifierTx) + interceptorSlice = append(interceptorSlice, interceptor) + return keys, interceptorSlice, nil } func (icf *interceptorsContainerFactory) createOneTxInterceptor(identifier string) (process.Interceptor, error) { - //TODO implement other TxHandlerProcessValidator that will check the tx nonce against account's nonce - txValidator, err := dataValidators.NewNilTxValidator() - if err != nil { - return nil, err - } - - interceptor, err := transaction.NewTxInterceptor( - icf.marshalizer, - icf.dataPool.Transactions(), - txValidator, - icf.addrConverter, - icf.hasher, - icf.singleSigner, - icf.keyGen, - icf.shardCoordinator) - - if err != nil { - return nil, err - } - - return icf.createTopicAndAssignHandler(identifier, interceptor, true) + //TODO implement other TxHandlerProcessValidator that will check the tx nonce against account's nonce + txValidator, err := dataValidators.NewNilTxValidator() + if err != nil { + return nil, err + } + + interceptor, err := transaction.NewTxInterceptor( + icf.marshalizer, + icf.dataPool.Transactions(), + txValidator, + icf.addrConverter, + icf.hasher, + icf.singleSigner, + icf.keyGen, + icf.shardCoordinator) + + if err != nil { + return nil, err + } + + return icf.createTopicAndAssignHandler(identifier, interceptor, 
true) } //------- Reward transactions interceptors func (icf *interceptorsContainerFactory) generateRewardTxInterceptors() ([]string, []process.Interceptor, error) { - shardC := icf.shardCoordinator + shardC := icf.shardCoordinator - noOfShards := shardC.NumberOfShards() + noOfShards := shardC.NumberOfShards() - keys := make([]string, noOfShards) - interceptorSlice := make([]process.Interceptor, noOfShards) + keys := make([]string, noOfShards) + interceptorSlice := make([]process.Interceptor, noOfShards) - for idx := uint32(0); idx < noOfShards; idx++ { - identifierScr := factory.RewardsTransactionTopic + shardC.CommunicationIdentifier(idx) + for idx := uint32(0); idx < noOfShards; idx++ { + identifierScr := factory.RewardsTransactionTopic + shardC.CommunicationIdentifier(idx) - interceptor, err := icf.createOneRewardTxInterceptor(identifierScr) - if err != nil { - return nil, nil, err - } + interceptor, err := icf.createOneRewardTxInterceptor(identifierScr) + if err != nil { + return nil, nil, err + } - keys[int(idx)] = identifierScr - interceptorSlice[int(idx)] = interceptor - } + keys[int(idx)] = identifierScr + interceptorSlice[int(idx)] = interceptor + } - identifierTx := factory.RewardsTransactionTopic + shardC.CommunicationIdentifier(sharding.MetachainShardId) + identifierTx := factory.RewardsTransactionTopic + shardC.CommunicationIdentifier(sharding.MetachainShardId) - interceptor, err := icf.createOneRewardTxInterceptor(identifierTx) - if err != nil { - return nil, nil, err - } + interceptor, err := icf.createOneRewardTxInterceptor(identifierTx) + if err != nil { + return nil, nil, err + } - keys = append(keys, identifierTx) - interceptorSlice = append(interceptorSlice, interceptor) + keys = append(keys, identifierTx) + interceptorSlice = append(interceptorSlice, interceptor) - return keys, interceptorSlice, nil + return keys, interceptorSlice, nil } func (icf *interceptorsContainerFactory) createOneRewardTxInterceptor(identifier string) (process.Interceptor, error) { - rewardTxStorer := icf.store.GetStorer(dataRetriever.RewardTransactionUnit) - - interceptor, err := rewardTransaction.NewRewardTxInterceptor( - icf.marshalizer, - icf.dataPool.RewardTransactions(), - rewardTxStorer, - icf.addrConverter, - icf.hasher, - icf.shardCoordinator, - ) - - if err != nil { - return nil, err - } - - return icf.createTopicAndAssignHandler(identifier, interceptor, true) + rewardTxStorer := icf.store.GetStorer(dataRetriever.RewardTransactionUnit) + + interceptor, err := rewardTransaction.NewRewardTxInterceptor( + icf.marshalizer, + icf.dataPool.RewardTransactions(), + rewardTxStorer, + icf.addrConverter, + icf.hasher, + icf.shardCoordinator, + ) + + if err != nil { + return nil, err + } + + return icf.createTopicAndAssignHandler(identifier, interceptor, true) } //------- Unsigned transactions interceptors func (icf *interceptorsContainerFactory) generateUnsignedTxsInterceptors() ([]string, []process.Interceptor, error) { - shardC := icf.shardCoordinator + shardC := icf.shardCoordinator - noOfShards := shardC.NumberOfShards() + noOfShards := shardC.NumberOfShards() - keys := make([]string, noOfShards) - interceptorSlice := make([]process.Interceptor, noOfShards) + keys := make([]string, noOfShards) + interceptorSlice := make([]process.Interceptor, noOfShards) - for idx := uint32(0); idx < noOfShards; idx++ { - identifierScr := factory.UnsignedTransactionTopic + shardC.CommunicationIdentifier(idx) + for idx := uint32(0); idx < noOfShards; idx++ { + identifierScr := factory.UnsignedTransactionTopic 
+ shardC.CommunicationIdentifier(idx) - interceptor, err := icf.createOneUnsignedTxInterceptor(identifierScr) - if err != nil { - return nil, nil, err - } + interceptor, err := icf.createOneUnsignedTxInterceptor(identifierScr) + if err != nil { + return nil, nil, err + } - keys[int(idx)] = identifierScr - interceptorSlice[int(idx)] = interceptor - } + keys[int(idx)] = identifierScr + interceptorSlice[int(idx)] = interceptor + } - identifierTx := factory.UnsignedTransactionTopic + shardC.CommunicationIdentifier(sharding.MetachainShardId) + identifierTx := factory.UnsignedTransactionTopic + shardC.CommunicationIdentifier(sharding.MetachainShardId) - interceptor, err := icf.createOneUnsignedTxInterceptor(identifierTx) - if err != nil { - return nil, nil, err - } + interceptor, err := icf.createOneUnsignedTxInterceptor(identifierTx) + if err != nil { + return nil, nil, err + } - keys = append(keys, identifierTx) - interceptorSlice = append(interceptorSlice, interceptor) - return keys, interceptorSlice, nil + keys = append(keys, identifierTx) + interceptorSlice = append(interceptorSlice, interceptor) + return keys, interceptorSlice, nil } func (icf *interceptorsContainerFactory) createOneUnsignedTxInterceptor(identifier string) (process.Interceptor, error) { - uTxStorer := icf.store.GetStorer(dataRetriever.UnsignedTransactionUnit) + uTxStorer := icf.store.GetStorer(dataRetriever.UnsignedTransactionUnit) - interceptor, err := unsigned.NewUnsignedTxInterceptor( - icf.marshalizer, - icf.dataPool.UnsignedTransactions(), - uTxStorer, - icf.addrConverter, - icf.hasher, - icf.shardCoordinator) + interceptor, err := unsigned.NewUnsignedTxInterceptor( + icf.marshalizer, + icf.dataPool.UnsignedTransactions(), + uTxStorer, + icf.addrConverter, + icf.hasher, + icf.shardCoordinator) - if err != nil { - return nil, err - } + if err != nil { + return nil, err + } - return icf.createTopicAndAssignHandler(identifier, interceptor, true) + return icf.createTopicAndAssignHandler(identifier, interceptor, true) } //------- Hdr interceptor func (icf *interceptorsContainerFactory) generateHdrInterceptor() ([]string, []process.Interceptor, error) { - shardC := icf.shardCoordinator - //TODO implement other HeaderHandlerProcessValidator that will check the header's nonce - // against blockchain's latest nonce - k finality - hdrValidator, err := dataValidators.NewNilHeaderValidator() - if err != nil { - return nil, nil, err - } - - //only one intrashard header topic - identifierHdr := factory.HeadersTopic + shardC.CommunicationIdentifier(shardC.SelfId()) - interceptor, err := interceptors.NewHeaderInterceptor( - icf.marshalizer, - icf.dataPool.Headers(), - icf.dataPool.HeadersNonces(), - hdrValidator, - icf.multiSigner, - icf.hasher, - icf.shardCoordinator, - icf.nodesCoordinator, - ) - if err != nil { - return nil, nil, err - } - _, err = icf.createTopicAndAssignHandler(identifierHdr, interceptor, true) - if err != nil { - return nil, nil, err - } - - return []string{identifierHdr}, []process.Interceptor{interceptor}, nil + shardC := icf.shardCoordinator + //TODO implement other HeaderHandlerProcessValidator that will check the header's nonce + // against blockchain's latest nonce - k finality + hdrValidator, err := dataValidators.NewNilHeaderValidator() + if err != nil { + return nil, nil, err + } + + //only one intrashard header topic + identifierHdr := factory.HeadersTopic + shardC.CommunicationIdentifier(shardC.SelfId()) + interceptor, err := interceptors.NewHeaderInterceptor( + icf.marshalizer, + 
icf.dataPool.Headers(), + icf.dataPool.HeadersNonces(), + hdrValidator, + icf.multiSigner, + icf.hasher, + icf.shardCoordinator, + icf.nodesCoordinator, + ) + if err != nil { + return nil, nil, err + } + _, err = icf.createTopicAndAssignHandler(identifierHdr, interceptor, true) + if err != nil { + return nil, nil, err + } + + return []string{identifierHdr}, []process.Interceptor{interceptor}, nil } //------- MiniBlocks interceptors func (icf *interceptorsContainerFactory) generateMiniBlocksInterceptors() ([]string, []process.Interceptor, error) { - shardC := icf.shardCoordinator - noOfShards := shardC.NumberOfShards() - keys := make([]string, noOfShards) - interceptorSlice := make([]process.Interceptor, noOfShards) + shardC := icf.shardCoordinator + noOfShards := shardC.NumberOfShards() + keys := make([]string, noOfShards) + interceptorSlice := make([]process.Interceptor, noOfShards) - for idx := uint32(0); idx < noOfShards; idx++ { - identifierMiniBlocks := factory.MiniBlocksTopic + shardC.CommunicationIdentifier(idx) + for idx := uint32(0); idx < noOfShards; idx++ { + identifierMiniBlocks := factory.MiniBlocksTopic + shardC.CommunicationIdentifier(idx) - interceptor, err := icf.createOneMiniBlocksInterceptor(identifierMiniBlocks) - if err != nil { - return nil, nil, err - } + interceptor, err := icf.createOneMiniBlocksInterceptor(identifierMiniBlocks) + if err != nil { + return nil, nil, err + } - keys[int(idx)] = identifierMiniBlocks - interceptorSlice[int(idx)] = interceptor - } + keys[int(idx)] = identifierMiniBlocks + interceptorSlice[int(idx)] = interceptor + } - return keys, interceptorSlice, nil + return keys, interceptorSlice, nil } func (icf *interceptorsContainerFactory) createOneMiniBlocksInterceptor(identifier string) (process.Interceptor, error) { - txBlockBodyStorer := icf.store.GetStorer(dataRetriever.MiniBlockUnit) + txBlockBodyStorer := icf.store.GetStorer(dataRetriever.MiniBlockUnit) - interceptor, err := interceptors.NewTxBlockBodyInterceptor( - icf.marshalizer, - icf.dataPool.MiniBlocks(), - txBlockBodyStorer, - icf.hasher, - icf.shardCoordinator, - ) + interceptor, err := interceptors.NewTxBlockBodyInterceptor( + icf.marshalizer, + icf.dataPool.MiniBlocks(), + txBlockBodyStorer, + icf.hasher, + icf.shardCoordinator, + ) - if err != nil { - return nil, err - } + if err != nil { + return nil, err + } - return icf.createTopicAndAssignHandler(identifier, interceptor, true) + return icf.createTopicAndAssignHandler(identifier, interceptor, true) } //------- PeerChBlocks interceptor func (icf *interceptorsContainerFactory) generatePeerChBlockBodyInterceptor() ([]string, []process.Interceptor, error) { - shardC := icf.shardCoordinator - - //only one intrashard peer change blocks topic - identifierPeerCh := factory.PeerChBodyTopic + shardC.CommunicationIdentifier(shardC.SelfId()) - peerBlockBodyStorer := icf.store.GetStorer(dataRetriever.PeerChangesUnit) - - interceptor, err := interceptors.NewPeerBlockBodyInterceptor( - icf.marshalizer, - icf.dataPool.PeerChangesBlocks(), - peerBlockBodyStorer, - icf.hasher, - shardC, - ) - if err != nil { - return nil, nil, err - } - _, err = icf.createTopicAndAssignHandler(identifierPeerCh, interceptor, true) - if err != nil { - return nil, nil, err - } - - return []string{identifierPeerCh}, []process.Interceptor{interceptor}, nil + shardC := icf.shardCoordinator + + //only one intrashard peer change blocks topic + identifierPeerCh := factory.PeerChBodyTopic + shardC.CommunicationIdentifier(shardC.SelfId()) + peerBlockBodyStorer := 
icf.store.GetStorer(dataRetriever.PeerChangesUnit) + + interceptor, err := interceptors.NewPeerBlockBodyInterceptor( + icf.marshalizer, + icf.dataPool.PeerChangesBlocks(), + peerBlockBodyStorer, + icf.hasher, + shardC, + ) + if err != nil { + return nil, nil, err + } + _, err = icf.createTopicAndAssignHandler(identifierPeerCh, interceptor, true) + if err != nil { + return nil, nil, err + } + + return []string{identifierPeerCh}, []process.Interceptor{interceptor}, nil } //------- MetachainHeader interceptors func (icf *interceptorsContainerFactory) generateMetachainHeaderInterceptor() ([]string, []process.Interceptor, error) { - identifierHdr := factory.MetachainBlocksTopic - //TODO implement other HeaderHandlerProcessValidator that will check the header's nonce - // against blockchain's latest nonce - k finality - hdrValidator, err := dataValidators.NewNilHeaderValidator() - if err != nil { - return nil, nil, err - } - - interceptor, err := interceptors.NewMetachainHeaderInterceptor( - icf.marshalizer, - icf.dataPool.MetaBlocks(), - icf.dataPool.HeadersNonces(), - hdrValidator, - icf.multiSigner, - icf.hasher, - icf.shardCoordinator, - icf.nodesCoordinator, - ) - if err != nil { - return nil, nil, err - } - _, err = icf.createTopicAndAssignHandler(identifierHdr, interceptor, true) - if err != nil { - return nil, nil, err - } - - return []string{identifierHdr}, []process.Interceptor{interceptor}, nil + identifierHdr := factory.MetachainBlocksTopic + //TODO implement other HeaderHandlerProcessValidator that will check the header's nonce + // against blockchain's latest nonce - k finality + hdrValidator, err := dataValidators.NewNilHeaderValidator() + if err != nil { + return nil, nil, err + } + + interceptor, err := interceptors.NewMetachainHeaderInterceptor( + icf.marshalizer, + icf.dataPool.MetaBlocks(), + icf.dataPool.HeadersNonces(), + hdrValidator, + icf.multiSigner, + icf.hasher, + icf.shardCoordinator, + icf.nodesCoordinator, + ) + if err != nil { + return nil, nil, err + } + _, err = icf.createTopicAndAssignHandler(identifierHdr, interceptor, true) + if err != nil { + return nil, nil, err + } + + return []string{identifierHdr}, []process.Interceptor{interceptor}, nil } // IsInterfaceNil returns true if there is no value under the interface func (icf *interceptorsContainerFactory) IsInterfaceNil() bool { - if icf == nil { - return true - } - return false + if icf == nil { + return true + } + return false } diff --git a/process/factory/shard/interceptorsContainerFactory_test.go b/process/factory/shard/interceptorsContainerFactory_test.go index 9d762fbdf14..1032e3509e4 100644 --- a/process/factory/shard/interceptorsContainerFactory_test.go +++ b/process/factory/shard/interceptorsContainerFactory_test.go @@ -1,649 +1,649 @@ package shard_test import ( - "errors" - "strings" - "testing" - - "github.com/ElrondNetwork/elrond-go/dataRetriever" - "github.com/ElrondNetwork/elrond-go/p2p" - "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/process/factory" - "github.com/ElrondNetwork/elrond-go/process/factory/shard" - "github.com/ElrondNetwork/elrond-go/process/mock" - "github.com/ElrondNetwork/elrond-go/storage" - "github.com/stretchr/testify/assert" + "errors" + "strings" + "testing" + + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/p2p" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/factory" + "github.com/ElrondNetwork/elrond-go/process/factory/shard" + 
"github.com/ElrondNetwork/elrond-go/process/mock" + "github.com/ElrondNetwork/elrond-go/storage" + "github.com/stretchr/testify/assert" ) var errExpected = errors.New("expected error") func createStubTopicHandler(matchStrToErrOnCreate string, matchStrToErrOnRegister string) process.TopicHandler { - return &mock.TopicHandlerStub{ - CreateTopicCalled: func(name string, createChannelForTopic bool) error { - if matchStrToErrOnCreate == "" { - return nil - } - if strings.Contains(name, matchStrToErrOnCreate) { - return errExpected - } - - return nil - }, - RegisterMessageProcessorCalled: func(topic string, handler p2p.MessageProcessor) error { - if matchStrToErrOnRegister == "" { - return nil - } - if strings.Contains(topic, matchStrToErrOnRegister) { - return errExpected - } - - return nil - }, - } + return &mock.TopicHandlerStub{ + CreateTopicCalled: func(name string, createChannelForTopic bool) error { + if matchStrToErrOnCreate == "" { + return nil + } + if strings.Contains(name, matchStrToErrOnCreate) { + return errExpected + } + + return nil + }, + RegisterMessageProcessorCalled: func(topic string, handler p2p.MessageProcessor) error { + if matchStrToErrOnRegister == "" { + return nil + } + if strings.Contains(topic, matchStrToErrOnRegister) { + return errExpected + } + + return nil + }, + } } func createDataPools() dataRetriever.PoolsHolder { - pools := &mock.PoolsHolderStub{} - pools.TransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{} - } - pools.HeadersCalled = func() storage.Cacher { - return &mock.CacherStub{} - } - pools.HeadersNoncesCalled = func() dataRetriever.Uint64SyncMapCacher { - return &mock.Uint64SyncMapCacherStub{} - } - pools.MiniBlocksCalled = func() storage.Cacher { - return &mock.CacherStub{} - } - pools.PeerChangesBlocksCalled = func() storage.Cacher { - return &mock.CacherStub{} - } - pools.MetaBlocksCalled = func() storage.Cacher { - return &mock.CacherStub{} - } - pools.UnsignedTransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{} - } - pools.RewardTransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{} - } - return pools + pools := &mock.PoolsHolderStub{} + pools.TransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{} + } + pools.HeadersCalled = func() storage.Cacher { + return &mock.CacherStub{} + } + pools.HeadersNoncesCalled = func() dataRetriever.Uint64SyncMapCacher { + return &mock.Uint64SyncMapCacherStub{} + } + pools.MiniBlocksCalled = func() storage.Cacher { + return &mock.CacherStub{} + } + pools.PeerChangesBlocksCalled = func() storage.Cacher { + return &mock.CacherStub{} + } + pools.MetaBlocksCalled = func() storage.Cacher { + return &mock.CacherStub{} + } + pools.UnsignedTransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{} + } + pools.RewardTransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{} + } + return pools } func createStore() *mock.ChainStorerMock { - return &mock.ChainStorerMock{ - GetStorerCalled: func(unitType dataRetriever.UnitType) storage.Storer { - return &mock.StorerStub{} - }, - } + return &mock.ChainStorerMock{ + GetStorerCalled: func(unitType dataRetriever.UnitType) storage.Storer { + return &mock.StorerStub{} + }, + } } //------- NewInterceptorsContainerFactory func TestNewInterceptorsContainerFactory_NilShardCoordinatorShouldErr(t *testing.T) { 
- t.Parallel() - - icf, err := shard.NewInterceptorsContainerFactory( - nil, - mock.NewNodesCoordinatorMock(), - &mock.TopicHandlerStub{}, - createStore(), - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - &mock.SingleSignKeyGenMock{}, - &mock.SignerMock{}, - mock.NewMultiSigner(), - createDataPools(), - &mock.AddressConverterMock{}, - ) - - assert.Nil(t, icf) - assert.Equal(t, process.ErrNilShardCoordinator, err) + t.Parallel() + + icf, err := shard.NewInterceptorsContainerFactory( + nil, + mock.NewNodesCoordinatorMock(), + &mock.TopicHandlerStub{}, + createStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + &mock.SingleSignKeyGenMock{}, + &mock.SignerMock{}, + mock.NewMultiSigner(), + createDataPools(), + &mock.AddressConverterMock{}, + ) + + assert.Nil(t, icf) + assert.Equal(t, process.ErrNilShardCoordinator, err) } func TestNewInterceptorsContainerFactory_NilNodesCoordinatorShouldErr(t *testing.T) { - t.Parallel() - - icf, err := shard.NewInterceptorsContainerFactory( - mock.NewOneShardCoordinatorMock(), - nil, - &mock.TopicHandlerStub{}, - createStore(), - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - &mock.SingleSignKeyGenMock{}, - &mock.SignerMock{}, - mock.NewMultiSigner(), - createDataPools(), - &mock.AddressConverterMock{}, - ) - - assert.Nil(t, icf) - assert.Equal(t, process.ErrNilNodesCoordinator, err) + t.Parallel() + + icf, err := shard.NewInterceptorsContainerFactory( + mock.NewOneShardCoordinatorMock(), + nil, + &mock.TopicHandlerStub{}, + createStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + &mock.SingleSignKeyGenMock{}, + &mock.SignerMock{}, + mock.NewMultiSigner(), + createDataPools(), + &mock.AddressConverterMock{}, + ) + + assert.Nil(t, icf) + assert.Equal(t, process.ErrNilNodesCoordinator, err) } func TestNewInterceptorsContainerFactory_NilTopicHandlerShouldErr(t *testing.T) { - t.Parallel() - - icf, err := shard.NewInterceptorsContainerFactory( - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - nil, - createStore(), - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - &mock.SingleSignKeyGenMock{}, - &mock.SignerMock{}, - mock.NewMultiSigner(), - createDataPools(), - &mock.AddressConverterMock{}, - ) - - assert.Nil(t, icf) - assert.Equal(t, process.ErrNilMessenger, err) + t.Parallel() + + icf, err := shard.NewInterceptorsContainerFactory( + mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + nil, + createStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + &mock.SingleSignKeyGenMock{}, + &mock.SignerMock{}, + mock.NewMultiSigner(), + createDataPools(), + &mock.AddressConverterMock{}, + ) + + assert.Nil(t, icf) + assert.Equal(t, process.ErrNilMessenger, err) } func TestNewInterceptorsContainerFactory_NilBlockchainShouldErr(t *testing.T) { - t.Parallel() - - icf, err := shard.NewInterceptorsContainerFactory( - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - &mock.TopicHandlerStub{}, - nil, - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - &mock.SingleSignKeyGenMock{}, - &mock.SignerMock{}, - mock.NewMultiSigner(), - createDataPools(), - &mock.AddressConverterMock{}, - ) - - assert.Nil(t, icf) - assert.Equal(t, process.ErrNilBlockChain, err) + t.Parallel() + + icf, err := shard.NewInterceptorsContainerFactory( + mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.TopicHandlerStub{}, + nil, + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + &mock.SingleSignKeyGenMock{}, + &mock.SignerMock{}, + mock.NewMultiSigner(), + createDataPools(), + 
&mock.AddressConverterMock{}, + ) + + assert.Nil(t, icf) + assert.Equal(t, process.ErrNilBlockChain, err) } func TestNewInterceptorsContainerFactory_NilMarshalizerShouldErr(t *testing.T) { - t.Parallel() - - icf, err := shard.NewInterceptorsContainerFactory( - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - &mock.TopicHandlerStub{}, - createStore(), - nil, - &mock.HasherMock{}, - &mock.SingleSignKeyGenMock{}, - &mock.SignerMock{}, - mock.NewMultiSigner(), - createDataPools(), - &mock.AddressConverterMock{}, - ) - - assert.Nil(t, icf) - assert.Equal(t, process.ErrNilMarshalizer, err) + t.Parallel() + + icf, err := shard.NewInterceptorsContainerFactory( + mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.TopicHandlerStub{}, + createStore(), + nil, + &mock.HasherMock{}, + &mock.SingleSignKeyGenMock{}, + &mock.SignerMock{}, + mock.NewMultiSigner(), + createDataPools(), + &mock.AddressConverterMock{}, + ) + + assert.Nil(t, icf) + assert.Equal(t, process.ErrNilMarshalizer, err) } func TestNewInterceptorsContainerFactory_NilHasherShouldErr(t *testing.T) { - t.Parallel() - - icf, err := shard.NewInterceptorsContainerFactory( - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - &mock.TopicHandlerStub{}, - createStore(), - &mock.MarshalizerMock{}, - nil, - &mock.SingleSignKeyGenMock{}, - &mock.SignerMock{}, - mock.NewMultiSigner(), - createDataPools(), - &mock.AddressConverterMock{}, - ) - - assert.Nil(t, icf) - assert.Equal(t, process.ErrNilHasher, err) + t.Parallel() + + icf, err := shard.NewInterceptorsContainerFactory( + mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.TopicHandlerStub{}, + createStore(), + &mock.MarshalizerMock{}, + nil, + &mock.SingleSignKeyGenMock{}, + &mock.SignerMock{}, + mock.NewMultiSigner(), + createDataPools(), + &mock.AddressConverterMock{}, + ) + + assert.Nil(t, icf) + assert.Equal(t, process.ErrNilHasher, err) } func TestNewInterceptorsContainerFactory_NilKeyGenShouldErr(t *testing.T) { - t.Parallel() - - icf, err := shard.NewInterceptorsContainerFactory( - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - &mock.TopicHandlerStub{}, - createStore(), - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - nil, - &mock.SignerMock{}, - mock.NewMultiSigner(), - createDataPools(), - &mock.AddressConverterMock{}, - ) - - assert.Nil(t, icf) - assert.Equal(t, process.ErrNilKeyGen, err) + t.Parallel() + + icf, err := shard.NewInterceptorsContainerFactory( + mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.TopicHandlerStub{}, + createStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + nil, + &mock.SignerMock{}, + mock.NewMultiSigner(), + createDataPools(), + &mock.AddressConverterMock{}, + ) + + assert.Nil(t, icf) + assert.Equal(t, process.ErrNilKeyGen, err) } func TestNewInterceptorsContainerFactory_NilSingleSignerShouldErr(t *testing.T) { - t.Parallel() - - icf, err := shard.NewInterceptorsContainerFactory( - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - &mock.TopicHandlerStub{}, - createStore(), - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - &mock.SingleSignKeyGenMock{}, - nil, - mock.NewMultiSigner(), - createDataPools(), - &mock.AddressConverterMock{}, - ) - - assert.Nil(t, icf) - assert.Equal(t, process.ErrNilSingleSigner, err) + t.Parallel() + + icf, err := shard.NewInterceptorsContainerFactory( + mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.TopicHandlerStub{}, + 
createStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + &mock.SingleSignKeyGenMock{}, + nil, + mock.NewMultiSigner(), + createDataPools(), + &mock.AddressConverterMock{}, + ) + + assert.Nil(t, icf) + assert.Equal(t, process.ErrNilSingleSigner, err) } func TestNewInterceptorsContainerFactory_NilMultiSignerShouldErr(t *testing.T) { - t.Parallel() - - icf, err := shard.NewInterceptorsContainerFactory( - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - &mock.TopicHandlerStub{}, - createStore(), - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - &mock.SingleSignKeyGenMock{}, - &mock.SignerMock{}, - nil, - createDataPools(), - &mock.AddressConverterMock{}, - ) - - assert.Nil(t, icf) - assert.Equal(t, process.ErrNilMultiSigVerifier, err) + t.Parallel() + + icf, err := shard.NewInterceptorsContainerFactory( + mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.TopicHandlerStub{}, + createStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + &mock.SingleSignKeyGenMock{}, + &mock.SignerMock{}, + nil, + createDataPools(), + &mock.AddressConverterMock{}, + ) + + assert.Nil(t, icf) + assert.Equal(t, process.ErrNilMultiSigVerifier, err) } func TestNewInterceptorsContainerFactory_NilDataPoolShouldErr(t *testing.T) { - t.Parallel() - - icf, err := shard.NewInterceptorsContainerFactory( - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - &mock.TopicHandlerStub{}, - createStore(), - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - &mock.SingleSignKeyGenMock{}, - &mock.SignerMock{}, - mock.NewMultiSigner(), - nil, - &mock.AddressConverterMock{}, - ) - - assert.Nil(t, icf) - assert.Equal(t, process.ErrNilDataPoolHolder, err) + t.Parallel() + + icf, err := shard.NewInterceptorsContainerFactory( + mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.TopicHandlerStub{}, + createStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + &mock.SingleSignKeyGenMock{}, + &mock.SignerMock{}, + mock.NewMultiSigner(), + nil, + &mock.AddressConverterMock{}, + ) + + assert.Nil(t, icf) + assert.Equal(t, process.ErrNilDataPoolHolder, err) } func TestNewInterceptorsContainerFactory_NilAddrConverterShouldErr(t *testing.T) { - t.Parallel() - - icf, err := shard.NewInterceptorsContainerFactory( - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - &mock.TopicHandlerStub{}, - createStore(), - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - &mock.SingleSignKeyGenMock{}, - &mock.SignerMock{}, - mock.NewMultiSigner(), - createDataPools(), - nil, - ) - - assert.Nil(t, icf) - assert.Equal(t, process.ErrNilAddressConverter, err) + t.Parallel() + + icf, err := shard.NewInterceptorsContainerFactory( + mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.TopicHandlerStub{}, + createStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + &mock.SingleSignKeyGenMock{}, + &mock.SignerMock{}, + mock.NewMultiSigner(), + createDataPools(), + nil, + ) + + assert.Nil(t, icf) + assert.Equal(t, process.ErrNilAddressConverter, err) } func TestNewInterceptorsContainerFactory_ShouldWork(t *testing.T) { - t.Parallel() - - icf, err := shard.NewInterceptorsContainerFactory( - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - &mock.TopicHandlerStub{}, - createStore(), - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - &mock.SingleSignKeyGenMock{}, - &mock.SignerMock{}, - mock.NewMultiSigner(), - createDataPools(), - &mock.AddressConverterMock{}, - ) - - assert.NotNil(t, icf) - 
assert.Nil(t, err) + t.Parallel() + + icf, err := shard.NewInterceptorsContainerFactory( + mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.TopicHandlerStub{}, + createStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + &mock.SingleSignKeyGenMock{}, + &mock.SignerMock{}, + mock.NewMultiSigner(), + createDataPools(), + &mock.AddressConverterMock{}, + ) + + assert.NotNil(t, icf) + assert.Nil(t, err) } //------- Create func TestInterceptorsContainerFactory_CreateTopicCreationTxFailsShouldErr(t *testing.T) { - t.Parallel() - - icf, _ := shard.NewInterceptorsContainerFactory( - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - createStubTopicHandler(factory.TransactionTopic, ""), - createStore(), - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - &mock.SingleSignKeyGenMock{}, - &mock.SignerMock{}, - mock.NewMultiSigner(), - createDataPools(), - &mock.AddressConverterMock{}, - ) - - container, err := icf.Create() - - assert.Nil(t, container) - assert.Equal(t, errExpected, err) + t.Parallel() + + icf, _ := shard.NewInterceptorsContainerFactory( + mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + createStubTopicHandler(factory.TransactionTopic, ""), + createStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + &mock.SingleSignKeyGenMock{}, + &mock.SignerMock{}, + mock.NewMultiSigner(), + createDataPools(), + &mock.AddressConverterMock{}, + ) + + container, err := icf.Create() + + assert.Nil(t, container) + assert.Equal(t, errExpected, err) } func TestInterceptorsContainerFactory_CreateTopicCreationHdrFailsShouldErr(t *testing.T) { - t.Parallel() - - icf, _ := shard.NewInterceptorsContainerFactory( - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - createStubTopicHandler(factory.HeadersTopic, ""), - createStore(), - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - &mock.SingleSignKeyGenMock{}, - &mock.SignerMock{}, - mock.NewMultiSigner(), - createDataPools(), - &mock.AddressConverterMock{}, - ) - - container, err := icf.Create() - - assert.Nil(t, container) - assert.Equal(t, errExpected, err) + t.Parallel() + + icf, _ := shard.NewInterceptorsContainerFactory( + mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + createStubTopicHandler(factory.HeadersTopic, ""), + createStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + &mock.SingleSignKeyGenMock{}, + &mock.SignerMock{}, + mock.NewMultiSigner(), + createDataPools(), + &mock.AddressConverterMock{}, + ) + + container, err := icf.Create() + + assert.Nil(t, container) + assert.Equal(t, errExpected, err) } func TestInterceptorsContainerFactory_CreateTopicCreationMiniBlocksFailsShouldErr(t *testing.T) { - t.Parallel() - - icf, _ := shard.NewInterceptorsContainerFactory( - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - createStubTopicHandler(factory.MiniBlocksTopic, ""), - createStore(), - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - &mock.SingleSignKeyGenMock{}, - &mock.SignerMock{}, - mock.NewMultiSigner(), - createDataPools(), - &mock.AddressConverterMock{}, - ) - - container, err := icf.Create() - - assert.Nil(t, container) - assert.Equal(t, errExpected, err) + t.Parallel() + + icf, _ := shard.NewInterceptorsContainerFactory( + mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + createStubTopicHandler(factory.MiniBlocksTopic, ""), + createStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + &mock.SingleSignKeyGenMock{}, + &mock.SignerMock{}, + mock.NewMultiSigner(), 
+ createDataPools(), + &mock.AddressConverterMock{}, + ) + + container, err := icf.Create() + + assert.Nil(t, container) + assert.Equal(t, errExpected, err) } func TestInterceptorsContainerFactory_CreateTopicCreationPeerChBlocksFailsShouldErr(t *testing.T) { - t.Parallel() - - icf, _ := shard.NewInterceptorsContainerFactory( - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - createStubTopicHandler(factory.PeerChBodyTopic, ""), - createStore(), - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - &mock.SingleSignKeyGenMock{}, - &mock.SignerMock{}, - mock.NewMultiSigner(), - createDataPools(), - &mock.AddressConverterMock{}, - ) - - container, err := icf.Create() - - assert.Nil(t, container) - assert.Equal(t, errExpected, err) + t.Parallel() + + icf, _ := shard.NewInterceptorsContainerFactory( + mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + createStubTopicHandler(factory.PeerChBodyTopic, ""), + createStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + &mock.SingleSignKeyGenMock{}, + &mock.SignerMock{}, + mock.NewMultiSigner(), + createDataPools(), + &mock.AddressConverterMock{}, + ) + + container, err := icf.Create() + + assert.Nil(t, container) + assert.Equal(t, errExpected, err) } func TestInterceptorsContainerFactory_CreateTopicCreationMetachainHeadersFailsShouldErr(t *testing.T) { - t.Parallel() - - icf, _ := shard.NewInterceptorsContainerFactory( - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - createStubTopicHandler(factory.MetachainBlocksTopic, ""), - createStore(), - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - &mock.SingleSignKeyGenMock{}, - &mock.SignerMock{}, - mock.NewMultiSigner(), - createDataPools(), - &mock.AddressConverterMock{}, - ) - - container, err := icf.Create() - - assert.Nil(t, container) - assert.Equal(t, errExpected, err) + t.Parallel() + + icf, _ := shard.NewInterceptorsContainerFactory( + mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + createStubTopicHandler(factory.MetachainBlocksTopic, ""), + createStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + &mock.SingleSignKeyGenMock{}, + &mock.SignerMock{}, + mock.NewMultiSigner(), + createDataPools(), + &mock.AddressConverterMock{}, + ) + + container, err := icf.Create() + + assert.Nil(t, container) + assert.Equal(t, errExpected, err) } func TestInterceptorsContainerFactory_CreateRegisterTxFailsShouldErr(t *testing.T) { - t.Parallel() - - icf, _ := shard.NewInterceptorsContainerFactory( - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - createStubTopicHandler("", factory.TransactionTopic), - createStore(), - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - &mock.SingleSignKeyGenMock{}, - &mock.SignerMock{}, - mock.NewMultiSigner(), - createDataPools(), - &mock.AddressConverterMock{}, - ) - - container, err := icf.Create() - - assert.Nil(t, container) - assert.Equal(t, errExpected, err) + t.Parallel() + + icf, _ := shard.NewInterceptorsContainerFactory( + mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + createStubTopicHandler("", factory.TransactionTopic), + createStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + &mock.SingleSignKeyGenMock{}, + &mock.SignerMock{}, + mock.NewMultiSigner(), + createDataPools(), + &mock.AddressConverterMock{}, + ) + + container, err := icf.Create() + + assert.Nil(t, container) + assert.Equal(t, errExpected, err) } func TestInterceptorsContainerFactory_CreateRegisterHdrFailsShouldErr(t *testing.T) { - t.Parallel() - - icf, _ 
:= shard.NewInterceptorsContainerFactory( - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - createStubTopicHandler("", factory.HeadersTopic), - createStore(), - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - &mock.SingleSignKeyGenMock{}, - &mock.SignerMock{}, - mock.NewMultiSigner(), - createDataPools(), - &mock.AddressConverterMock{}, - ) - - container, err := icf.Create() - - assert.Nil(t, container) - assert.Equal(t, errExpected, err) + t.Parallel() + + icf, _ := shard.NewInterceptorsContainerFactory( + mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + createStubTopicHandler("", factory.HeadersTopic), + createStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + &mock.SingleSignKeyGenMock{}, + &mock.SignerMock{}, + mock.NewMultiSigner(), + createDataPools(), + &mock.AddressConverterMock{}, + ) + + container, err := icf.Create() + + assert.Nil(t, container) + assert.Equal(t, errExpected, err) } func TestInterceptorsContainerFactory_CreateRegisterMiniBlocksFailsShouldErr(t *testing.T) { - t.Parallel() - - icf, _ := shard.NewInterceptorsContainerFactory( - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - createStubTopicHandler("", factory.MiniBlocksTopic), - createStore(), - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - &mock.SingleSignKeyGenMock{}, - &mock.SignerMock{}, - mock.NewMultiSigner(), - createDataPools(), - &mock.AddressConverterMock{}, - ) - - container, err := icf.Create() - - assert.Nil(t, container) - assert.Equal(t, errExpected, err) + t.Parallel() + + icf, _ := shard.NewInterceptorsContainerFactory( + mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + createStubTopicHandler("", factory.MiniBlocksTopic), + createStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + &mock.SingleSignKeyGenMock{}, + &mock.SignerMock{}, + mock.NewMultiSigner(), + createDataPools(), + &mock.AddressConverterMock{}, + ) + + container, err := icf.Create() + + assert.Nil(t, container) + assert.Equal(t, errExpected, err) } func TestInterceptorsContainerFactory_CreateRegisterPeerChBlocksFailsShouldErr(t *testing.T) { - t.Parallel() - - icf, _ := shard.NewInterceptorsContainerFactory( - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - createStubTopicHandler("", factory.PeerChBodyTopic), - createStore(), - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - &mock.SingleSignKeyGenMock{}, - &mock.SignerMock{}, - mock.NewMultiSigner(), - createDataPools(), - &mock.AddressConverterMock{}, - ) - - container, err := icf.Create() - - assert.Nil(t, container) - assert.Equal(t, errExpected, err) + t.Parallel() + + icf, _ := shard.NewInterceptorsContainerFactory( + mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + createStubTopicHandler("", factory.PeerChBodyTopic), + createStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + &mock.SingleSignKeyGenMock{}, + &mock.SignerMock{}, + mock.NewMultiSigner(), + createDataPools(), + &mock.AddressConverterMock{}, + ) + + container, err := icf.Create() + + assert.Nil(t, container) + assert.Equal(t, errExpected, err) } func TestInterceptorsContainerFactory_CreateRegisterMetachainHeadersShouldErr(t *testing.T) { - t.Parallel() - - icf, _ := shard.NewInterceptorsContainerFactory( - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - createStubTopicHandler("", factory.MetachainBlocksTopic), - createStore(), - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - &mock.SingleSignKeyGenMock{}, - &mock.SignerMock{}, - 
mock.NewMultiSigner(), - createDataPools(), - &mock.AddressConverterMock{}, - ) - - container, err := icf.Create() - - assert.Nil(t, container) - assert.Equal(t, errExpected, err) + t.Parallel() + + icf, _ := shard.NewInterceptorsContainerFactory( + mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + createStubTopicHandler("", factory.MetachainBlocksTopic), + createStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + &mock.SingleSignKeyGenMock{}, + &mock.SignerMock{}, + mock.NewMultiSigner(), + createDataPools(), + &mock.AddressConverterMock{}, + ) + + container, err := icf.Create() + + assert.Nil(t, container) + assert.Equal(t, errExpected, err) } func TestInterceptorsContainerFactory_CreateShouldWork(t *testing.T) { - t.Parallel() - - icf, _ := shard.NewInterceptorsContainerFactory( - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - &mock.TopicHandlerStub{ - CreateTopicCalled: func(name string, createChannelForTopic bool) error { - return nil - }, - RegisterMessageProcessorCalled: func(topic string, handler p2p.MessageProcessor) error { - return nil - }, - }, - createStore(), - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - &mock.SingleSignKeyGenMock{}, - &mock.SignerMock{}, - mock.NewMultiSigner(), - createDataPools(), - &mock.AddressConverterMock{}, - ) - - container, err := icf.Create() - - assert.NotNil(t, container) - assert.Nil(t, err) + t.Parallel() + + icf, _ := shard.NewInterceptorsContainerFactory( + mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.TopicHandlerStub{ + CreateTopicCalled: func(name string, createChannelForTopic bool) error { + return nil + }, + RegisterMessageProcessorCalled: func(topic string, handler p2p.MessageProcessor) error { + return nil + }, + }, + createStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + &mock.SingleSignKeyGenMock{}, + &mock.SignerMock{}, + mock.NewMultiSigner(), + createDataPools(), + &mock.AddressConverterMock{}, + ) + + container, err := icf.Create() + + assert.NotNil(t, container) + assert.Nil(t, err) } func TestInterceptorsContainerFactory_With4ShardsShouldWork(t *testing.T) { - t.Parallel() - - noOfShards := 4 - - shardCoordinator := mock.NewMultipleShardsCoordinatorMock() - shardCoordinator.SetNoShards(uint32(noOfShards)) - shardCoordinator.CurrentShard = 1 - - nodesCoordinator := &mock.NodesCoordinatorMock{ - ShardId: 1, - ShardConsensusSize: 1, - MetaConsensusSize: 1, - NbShards: uint32(noOfShards), - } - - icf, _ := shard.NewInterceptorsContainerFactory( - shardCoordinator, - nodesCoordinator, - &mock.TopicHandlerStub{ - CreateTopicCalled: func(name string, createChannelForTopic bool) error { - return nil - }, - RegisterMessageProcessorCalled: func(topic string, handler p2p.MessageProcessor) error { - return nil - }, - }, - createStore(), - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - &mock.SingleSignKeyGenMock{}, - &mock.SignerMock{}, - mock.NewMultiSigner(), - createDataPools(), - &mock.AddressConverterMock{}, - ) - - container, _ := icf.Create() - - numInterceptorTxs := noOfShards + 1 - numInterceptorsUnsignedTxs := numInterceptorTxs - numInterceptorsRewardTxs := numInterceptorTxs - numInterceptorHeaders := 1 - numInterceptorMiniBlocks := noOfShards - numInterceptorPeerChanges := 1 - numInterceptorMetachainHeaders := 1 - totalInterceptors := numInterceptorTxs + numInterceptorHeaders + numInterceptorMiniBlocks + - numInterceptorPeerChanges + numInterceptorMetachainHeaders + numInterceptorsUnsignedTxs + - numInterceptorsRewardTxs - - 
assert.Equal(t, totalInterceptors, container.Len()) + t.Parallel() + + noOfShards := 4 + + shardCoordinator := mock.NewMultipleShardsCoordinatorMock() + shardCoordinator.SetNoShards(uint32(noOfShards)) + shardCoordinator.CurrentShard = 1 + + nodesCoordinator := &mock.NodesCoordinatorMock{ + ShardId: 1, + ShardConsensusSize: 1, + MetaConsensusSize: 1, + NbShards: uint32(noOfShards), + } + + icf, _ := shard.NewInterceptorsContainerFactory( + shardCoordinator, + nodesCoordinator, + &mock.TopicHandlerStub{ + CreateTopicCalled: func(name string, createChannelForTopic bool) error { + return nil + }, + RegisterMessageProcessorCalled: func(topic string, handler p2p.MessageProcessor) error { + return nil + }, + }, + createStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + &mock.SingleSignKeyGenMock{}, + &mock.SignerMock{}, + mock.NewMultiSigner(), + createDataPools(), + &mock.AddressConverterMock{}, + ) + + container, _ := icf.Create() + + numInterceptorTxs := noOfShards + 1 + numInterceptorsUnsignedTxs := numInterceptorTxs + numInterceptorsRewardTxs := numInterceptorTxs + numInterceptorHeaders := 1 + numInterceptorMiniBlocks := noOfShards + numInterceptorPeerChanges := 1 + numInterceptorMetachainHeaders := 1 + totalInterceptors := numInterceptorTxs + numInterceptorHeaders + numInterceptorMiniBlocks + + numInterceptorPeerChanges + numInterceptorMetachainHeaders + numInterceptorsUnsignedTxs + + numInterceptorsRewardTxs + + assert.Equal(t, totalInterceptors, container.Len()) } diff --git a/process/factory/shard/intermediateProcessorsContainerFactory.go b/process/factory/shard/intermediateProcessorsContainerFactory.go index 53de299d85b..a0e7d54ed70 100644 --- a/process/factory/shard/intermediateProcessorsContainerFactory.go +++ b/process/factory/shard/intermediateProcessorsContainerFactory.go @@ -1,129 +1,129 @@ package shard import ( - "github.com/ElrondNetwork/elrond-go/data/block" - "github.com/ElrondNetwork/elrond-go/data/state" - "github.com/ElrondNetwork/elrond-go/dataRetriever" - "github.com/ElrondNetwork/elrond-go/hashing" - "github.com/ElrondNetwork/elrond-go/marshal" - "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/process/block/preprocess" - "github.com/ElrondNetwork/elrond-go/process/factory/containers" - "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/block/preprocess" + "github.com/ElrondNetwork/elrond-go/process/factory/containers" + "github.com/ElrondNetwork/elrond-go/sharding" ) type intermediateProcessorsContainerFactory struct { - shardCoordinator sharding.Coordinator - marshalizer marshal.Marshalizer - hasher hashing.Hasher - addrConverter state.AddressConverter - specialAddressHandler process.SpecialAddressHandler - store dataRetriever.StorageService - poolsHolder dataRetriever.PoolsHolder + shardCoordinator sharding.Coordinator + marshalizer marshal.Marshalizer + hasher hashing.Hasher + addrConverter state.AddressConverter + specialAddressHandler process.SpecialAddressHandler + store dataRetriever.StorageService + poolsHolder dataRetriever.PoolsHolder } // NewIntermediateProcessorsContainerFactory is responsible for creating a new intermediate processors factory object func 
NewIntermediateProcessorsContainerFactory( - shardCoordinator sharding.Coordinator, - marshalizer marshal.Marshalizer, - hasher hashing.Hasher, - addrConverter state.AddressConverter, - specialAddressHandler process.SpecialAddressHandler, - store dataRetriever.StorageService, - poolsHolder dataRetriever.PoolsHolder, + shardCoordinator sharding.Coordinator, + marshalizer marshal.Marshalizer, + hasher hashing.Hasher, + addrConverter state.AddressConverter, + specialAddressHandler process.SpecialAddressHandler, + store dataRetriever.StorageService, + poolsHolder dataRetriever.PoolsHolder, ) (*intermediateProcessorsContainerFactory, error) { - if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { - return nil, process.ErrNilShardCoordinator - } - if marshalizer == nil || marshalizer.IsInterfaceNil() { - return nil, process.ErrNilMarshalizer - } - if hasher == nil || hasher.IsInterfaceNil() { - return nil, process.ErrNilHasher - } - if addrConverter == nil || addrConverter.IsInterfaceNil() { - return nil, process.ErrNilAddressConverter - } - if specialAddressHandler == nil || specialAddressHandler.IsInterfaceNil(){ - return nil, process.ErrNilSpecialAddressHandler - } - if store == nil || store.IsInterfaceNil(){ - return nil, process.ErrNilStorage - } - if poolsHolder == nil { - return nil, process.ErrNilPoolsHolder - } - - return &intermediateProcessorsContainerFactory{ - shardCoordinator: shardCoordinator, - marshalizer: marshalizer, - hasher: hasher, - addrConverter: addrConverter, - specialAddressHandler: specialAddressHandler, - store: store, - poolsHolder: poolsHolder, - }, nil + if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { + return nil, process.ErrNilShardCoordinator + } + if marshalizer == nil || marshalizer.IsInterfaceNil() { + return nil, process.ErrNilMarshalizer + } + if hasher == nil || hasher.IsInterfaceNil() { + return nil, process.ErrNilHasher + } + if addrConverter == nil || addrConverter.IsInterfaceNil() { + return nil, process.ErrNilAddressConverter + } + if specialAddressHandler == nil || specialAddressHandler.IsInterfaceNil() { + return nil, process.ErrNilSpecialAddressHandler + } + if store == nil || store.IsInterfaceNil() { + return nil, process.ErrNilStorage + } + if poolsHolder == nil { + return nil, process.ErrNilPoolsHolder + } + + return &intermediateProcessorsContainerFactory{ + shardCoordinator: shardCoordinator, + marshalizer: marshalizer, + hasher: hasher, + addrConverter: addrConverter, + specialAddressHandler: specialAddressHandler, + store: store, + poolsHolder: poolsHolder, + }, nil } // Create returns a preprocessor container that will hold all preprocessors in the system func (ppcm *intermediateProcessorsContainerFactory) Create() (process.IntermediateProcessorContainer, error) { - container := containers.NewIntermediateTransactionHandlersContainer() + container := containers.NewIntermediateTransactionHandlersContainer() - interproc, err := ppcm.createSmartContractResultsIntermediateProcessor() - if err != nil { - return nil, err - } + interproc, err := ppcm.createSmartContractResultsIntermediateProcessor() + if err != nil { + return nil, err + } - err = container.Add(block.SmartContractResultBlock, interproc) - if err != nil { - return nil, err - } + err = container.Add(block.SmartContractResultBlock, interproc) + if err != nil { + return nil, err + } - interproc, err = ppcm.createRewardsTxIntermediateProcessor() - if err != nil { - return nil, err - } + interproc, err = ppcm.createRewardsTxIntermediateProcessor() + if err 
!= nil { + return nil, err + } - err = container.Add(block.RewardsBlock, interproc) - if err != nil { - return nil, err - } + err = container.Add(block.RewardsBlock, interproc) + if err != nil { + return nil, err + } - return container, nil + return container, nil } func (ppcm *intermediateProcessorsContainerFactory) createSmartContractResultsIntermediateProcessor() (process.IntermediateTransactionHandler, error) { - irp, err := preprocess.NewIntermediateResultsProcessor( - ppcm.hasher, - ppcm.marshalizer, - ppcm.shardCoordinator, - ppcm.addrConverter, - ppcm.store, - block.SmartContractResultBlock, - ) - - return irp, err + irp, err := preprocess.NewIntermediateResultsProcessor( + ppcm.hasher, + ppcm.marshalizer, + ppcm.shardCoordinator, + ppcm.addrConverter, + ppcm.store, + block.SmartContractResultBlock, + ) + + return irp, err } func (ppcm *intermediateProcessorsContainerFactory) createRewardsTxIntermediateProcessor() (process.IntermediateTransactionHandler, error) { - irp, err := preprocess.NewRewardTxHandler( - ppcm.specialAddressHandler, - ppcm.hasher, - ppcm.marshalizer, - ppcm.shardCoordinator, - ppcm.addrConverter, - ppcm.store, - ppcm.poolsHolder.RewardTransactions(), - ) - - return irp, err + irp, err := preprocess.NewRewardTxHandler( + ppcm.specialAddressHandler, + ppcm.hasher, + ppcm.marshalizer, + ppcm.shardCoordinator, + ppcm.addrConverter, + ppcm.store, + ppcm.poolsHolder.RewardTransactions(), + ) + + return irp, err } // IsInterfaceNil returns true if there is no value under the interface func (ppcm *intermediateProcessorsContainerFactory) IsInterfaceNil() bool { - if ppcm == nil { - return true - } - return false -} \ No newline at end of file + if ppcm == nil { + return true + } + return false +} diff --git a/process/factory/shard/intermediateProcessorsContainerFactory_test.go b/process/factory/shard/intermediateProcessorsContainerFactory_test.go index 3c596728885..f2315293152 100644 --- a/process/factory/shard/intermediateProcessorsContainerFactory_test.go +++ b/process/factory/shard/intermediateProcessorsContainerFactory_test.go @@ -1,139 +1,139 @@ package shard_test import ( - "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/process/factory/shard" - "github.com/ElrondNetwork/elrond-go/process/mock" - "github.com/stretchr/testify/assert" - "testing" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/factory/shard" + "github.com/ElrondNetwork/elrond-go/process/mock" + "github.com/stretchr/testify/assert" + "testing" ) func TestNewIntermediateProcessorsContainerFactory_NilShardCoord(t *testing.T) { - t.Parallel() - - dPool := createDataPools() - ipcf, err := shard.NewIntermediateProcessorsContainerFactory( - nil, - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - &mock.AddressConverterMock{}, - &mock.SpecialAddressHandlerMock{}, - &mock.ChainStorerMock{}, - dPool, - ) - - assert.Nil(t, ipcf) - assert.Equal(t, process.ErrNilShardCoordinator, err) + t.Parallel() + + dPool := createDataPools() + ipcf, err := shard.NewIntermediateProcessorsContainerFactory( + nil, + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + &mock.AddressConverterMock{}, + &mock.SpecialAddressHandlerMock{}, + &mock.ChainStorerMock{}, + dPool, + ) + + assert.Nil(t, ipcf) + assert.Equal(t, process.ErrNilShardCoordinator, err) } func TestNewIntermediateProcessorsContainerFactory_NilMarshalizer(t *testing.T) { - t.Parallel() - - dPool := createDataPools() - ipcf, err := shard.NewIntermediateProcessorsContainerFactory( - 
mock.NewMultiShardsCoordinatorMock(3), - nil, - &mock.HasherMock{}, - &mock.AddressConverterMock{}, - &mock.SpecialAddressHandlerMock{}, - &mock.ChainStorerMock{}, - dPool, - ) - - assert.Nil(t, ipcf) - assert.Equal(t, process.ErrNilMarshalizer, err) + t.Parallel() + + dPool := createDataPools() + ipcf, err := shard.NewIntermediateProcessorsContainerFactory( + mock.NewMultiShardsCoordinatorMock(3), + nil, + &mock.HasherMock{}, + &mock.AddressConverterMock{}, + &mock.SpecialAddressHandlerMock{}, + &mock.ChainStorerMock{}, + dPool, + ) + + assert.Nil(t, ipcf) + assert.Equal(t, process.ErrNilMarshalizer, err) } func TestNewIntermediateProcessorsContainerFactory_NilHasher(t *testing.T) { - t.Parallel() - - dPool := createDataPools() - ipcf, err := shard.NewIntermediateProcessorsContainerFactory( - mock.NewMultiShardsCoordinatorMock(3), - &mock.MarshalizerMock{}, - nil, - &mock.AddressConverterMock{}, - &mock.SpecialAddressHandlerMock{}, - &mock.ChainStorerMock{}, - dPool, - ) - - assert.Nil(t, ipcf) - assert.Equal(t, process.ErrNilHasher, err) + t.Parallel() + + dPool := createDataPools() + ipcf, err := shard.NewIntermediateProcessorsContainerFactory( + mock.NewMultiShardsCoordinatorMock(3), + &mock.MarshalizerMock{}, + nil, + &mock.AddressConverterMock{}, + &mock.SpecialAddressHandlerMock{}, + &mock.ChainStorerMock{}, + dPool, + ) + + assert.Nil(t, ipcf) + assert.Equal(t, process.ErrNilHasher, err) } func TestNewIntermediateProcessorsContainerFactory_NilAdrConv(t *testing.T) { - t.Parallel() - - dPool := createDataPools() - ipcf, err := shard.NewIntermediateProcessorsContainerFactory( - mock.NewMultiShardsCoordinatorMock(3), - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - nil, - &mock.SpecialAddressHandlerMock{}, - &mock.ChainStorerMock{}, - dPool, - ) - - assert.Nil(t, ipcf) - assert.Equal(t, process.ErrNilAddressConverter, err) + t.Parallel() + + dPool := createDataPools() + ipcf, err := shard.NewIntermediateProcessorsContainerFactory( + mock.NewMultiShardsCoordinatorMock(3), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + nil, + &mock.SpecialAddressHandlerMock{}, + &mock.ChainStorerMock{}, + dPool, + ) + + assert.Nil(t, ipcf) + assert.Equal(t, process.ErrNilAddressConverter, err) } func TestNewIntermediateProcessorsContainerFactory_NilStorer(t *testing.T) { - t.Parallel() - - dPool := createDataPools() - ipcf, err := shard.NewIntermediateProcessorsContainerFactory( - mock.NewMultiShardsCoordinatorMock(3), - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - &mock.AddressConverterMock{}, - &mock.SpecialAddressHandlerMock{}, - nil, - dPool, - ) - - assert.Nil(t, ipcf) - assert.Equal(t, process.ErrNilStorage, err) + t.Parallel() + + dPool := createDataPools() + ipcf, err := shard.NewIntermediateProcessorsContainerFactory( + mock.NewMultiShardsCoordinatorMock(3), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + &mock.AddressConverterMock{}, + &mock.SpecialAddressHandlerMock{}, + nil, + dPool, + ) + + assert.Nil(t, ipcf) + assert.Equal(t, process.ErrNilStorage, err) } func TestNewIntermediateProcessorsContainerFactory(t *testing.T) { - t.Parallel() - - dPool := createDataPools() - ipcf, err := shard.NewIntermediateProcessorsContainerFactory( - mock.NewMultiShardsCoordinatorMock(3), - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - &mock.AddressConverterMock{}, - &mock.SpecialAddressHandlerMock{}, - &mock.ChainStorerMock{}, - dPool, - ) - - assert.Nil(t, err) - assert.NotNil(t, ipcf) + t.Parallel() + + dPool := createDataPools() + ipcf, err := 
shard.NewIntermediateProcessorsContainerFactory( + mock.NewMultiShardsCoordinatorMock(3), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + &mock.AddressConverterMock{}, + &mock.SpecialAddressHandlerMock{}, + &mock.ChainStorerMock{}, + dPool, + ) + + assert.Nil(t, err) + assert.NotNil(t, ipcf) } func TestIntermediateProcessorsContainerFactory_Create(t *testing.T) { - t.Parallel() - - dPool := createDataPools() - ipcf, err := shard.NewIntermediateProcessorsContainerFactory( - mock.NewMultiShardsCoordinatorMock(3), - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - &mock.AddressConverterMock{}, - &mock.SpecialAddressHandlerMock{}, - &mock.ChainStorerMock{}, - dPool, - ) - - assert.Nil(t, err) - assert.NotNil(t, ipcf) - - container, err := ipcf.Create() - assert.Nil(t, err) - assert.Equal(t, 2, container.Len()) + t.Parallel() + + dPool := createDataPools() + ipcf, err := shard.NewIntermediateProcessorsContainerFactory( + mock.NewMultiShardsCoordinatorMock(3), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + &mock.AddressConverterMock{}, + &mock.SpecialAddressHandlerMock{}, + &mock.ChainStorerMock{}, + dPool, + ) + + assert.Nil(t, err) + assert.NotNil(t, ipcf) + + container, err := ipcf.Create() + assert.Nil(t, err) + assert.Equal(t, 2, container.Len()) } diff --git a/process/interface.go b/process/interface.go index 740cfb23151..a162dbcf33e 100644 --- a/process/interface.go +++ b/process/interface.go @@ -31,7 +31,7 @@ type RewardTransactionProcessor interface { // RewardTransactionPreProcessor prepares the processing of reward transactions type RewardTransactionPreProcessor interface { - AddComputedRewardMiniBlocks(computedRewardMiniblocks block.MiniBlockSlice) + AddComputedRewardMiniBlocks(computedRewardMiniblocks block.MiniBlockSlice) IsInterfaceNil() bool } @@ -120,11 +120,11 @@ type SpecialAddressHandler interface { ElrondCommunityAddress() []byte SetConsensusData(consensusRewardAddresses []string, round uint64, epoch uint32) ConsensusRewardAddresses() []string - LeaderAddress() []byte - BurnAddress() []byte - ShardIdForAddress([]byte) (uint32, error) - Round() uint64 - Epoch() uint32 + LeaderAddress() []byte + BurnAddress() []byte + ShardIdForAddress([]byte) (uint32, error) + Round() uint64 + Epoch() uint32 IsInterfaceNil() bool } @@ -152,18 +152,18 @@ type PreProcessor interface { // BlockProcessor is the main interface for block execution engine type BlockProcessor interface { - ProcessBlock(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler, haveTime func() time.Duration) error - CommitBlock(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler) error - RevertAccountState() - CreateBlockBody(round uint64, haveTime func() bool) (data.BodyHandler, error) - RestoreBlockIntoPools(header data.HeaderHandler, body data.BodyHandler) error - CreateBlockHeader(body data.BodyHandler, round uint64, haveTime func() bool) (data.HeaderHandler, error) - MarshalizedDataToBroadcast(header data.HeaderHandler, body data.BodyHandler) (map[uint32][]byte, map[string][][]byte, error) - DecodeBlockBody(dta []byte) data.BodyHandler - DecodeBlockHeader(dta []byte) data.HeaderHandler - AddLastNotarizedHdr(shardId uint32, processedHdr data.HeaderHandler) - SetConsensusRewardAddresses(consensusRewardAddresses []string, round uint64) - IsInterfaceNil() bool + ProcessBlock(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler, haveTime func() time.Duration) error + CommitBlock(blockChain data.ChainHandler, header data.HeaderHandler, 
body data.BodyHandler) error + RevertAccountState() + CreateBlockBody(round uint64, haveTime func() bool) (data.BodyHandler, error) + RestoreBlockIntoPools(header data.HeaderHandler, body data.BodyHandler) error + CreateBlockHeader(body data.BodyHandler, round uint64, haveTime func() bool) (data.HeaderHandler, error) + MarshalizedDataToBroadcast(header data.HeaderHandler, body data.BodyHandler) (map[uint32][]byte, map[string][][]byte, error) + DecodeBlockBody(dta []byte) data.BodyHandler + DecodeBlockHeader(dta []byte) data.HeaderHandler + AddLastNotarizedHdr(shardId uint32, processedHdr data.HeaderHandler) + SetConsensusRewardAddresses(consensusRewardAddresses []string, round uint64) + IsInterfaceNil() bool } // Checker provides functionality to checks the integrity and validity of a data structure diff --git a/process/mock/blockProcessorMock.go b/process/mock/blockProcessorMock.go index 3e3687761d0..19d7073f4dd 100644 --- a/process/mock/blockProcessorMock.go +++ b/process/mock/blockProcessorMock.go @@ -1,80 +1,80 @@ package mock import ( - "math/big" - "time" + "math/big" + "time" - "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data" ) type BlockProcessorMock struct { - ProcessBlockCalled func(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler, haveTime func() time.Duration) error - CommitBlockCalled func(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler) error - RevertAccountStateCalled func() - CreateGenesisBlockCalled func(balances map[string]*big.Int) (data.HeaderHandler, error) - CreateBlockCalled func(round uint64, haveTime func() bool) (data.BodyHandler, error) - RestoreBlockIntoPoolsCalled func(header data.HeaderHandler, body data.BodyHandler) error - noShards uint32 - SetOnRequestTransactionCalled func(f func(destShardID uint32, txHash []byte)) - CreateBlockHeaderCalled func(body data.BodyHandler, round uint64, haveTime func() bool) (data.HeaderHandler, error) - MarshalizedDataToBroadcastCalled func(header data.HeaderHandler, body data.BodyHandler) (map[uint32][]byte, map[string][][]byte, error) - DecodeBlockBodyCalled func(dta []byte) data.BodyHandler - DecodeBlockHeaderCalled func(dta []byte) data.HeaderHandler - AddLastNotarizedHdrCalled func(shardId uint32, processedHdr data.HeaderHandler) + ProcessBlockCalled func(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler, haveTime func() time.Duration) error + CommitBlockCalled func(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler) error + RevertAccountStateCalled func() + CreateGenesisBlockCalled func(balances map[string]*big.Int) (data.HeaderHandler, error) + CreateBlockCalled func(round uint64, haveTime func() bool) (data.BodyHandler, error) + RestoreBlockIntoPoolsCalled func(header data.HeaderHandler, body data.BodyHandler) error + noShards uint32 + SetOnRequestTransactionCalled func(f func(destShardID uint32, txHash []byte)) + CreateBlockHeaderCalled func(body data.BodyHandler, round uint64, haveTime func() bool) (data.HeaderHandler, error) + MarshalizedDataToBroadcastCalled func(header data.HeaderHandler, body data.BodyHandler) (map[uint32][]byte, map[string][][]byte, error) + DecodeBlockBodyCalled func(dta []byte) data.BodyHandler + DecodeBlockHeaderCalled func(dta []byte) data.HeaderHandler + AddLastNotarizedHdrCalled func(shardId uint32, processedHdr data.HeaderHandler) } func (bpm *BlockProcessorMock) ProcessBlock(blockChain data.ChainHandler, header data.HeaderHandler, body 
data.BodyHandler, haveTime func() time.Duration) error { - return bpm.ProcessBlockCalled(blockChain, header, body, haveTime) + return bpm.ProcessBlockCalled(blockChain, header, body, haveTime) } func (bpm *BlockProcessorMock) CommitBlock(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler) error { - return bpm.CommitBlockCalled(blockChain, header, body) + return bpm.CommitBlockCalled(blockChain, header, body) } func (bpm *BlockProcessorMock) RevertAccountState() { - bpm.RevertAccountStateCalled() + bpm.RevertAccountStateCalled() } func (blProcMock BlockProcessorMock) CreateGenesisBlock(balances map[string]*big.Int) (data.HeaderHandler, error) { - return blProcMock.CreateGenesisBlockCalled(balances) + return blProcMock.CreateGenesisBlockCalled(balances) } func (blProcMock BlockProcessorMock) CreateBlockBody(round uint64, haveTime func() bool) (data.BodyHandler, error) { - return blProcMock.CreateBlockCalled(round, haveTime) + return blProcMock.CreateBlockCalled(round, haveTime) } func (blProcMock BlockProcessorMock) RestoreBlockIntoPools(header data.HeaderHandler, body data.BodyHandler) error { - return blProcMock.RestoreBlockIntoPoolsCalled(header, body) + return blProcMock.RestoreBlockIntoPoolsCalled(header, body) } func (blProcMock BlockProcessorMock) CreateBlockHeader(body data.BodyHandler, round uint64, haveTime func() bool) (data.HeaderHandler, error) { - return blProcMock.CreateBlockHeaderCalled(body, round, haveTime) + return blProcMock.CreateBlockHeaderCalled(body, round, haveTime) } func (blProcMock BlockProcessorMock) MarshalizedDataToBroadcast(header data.HeaderHandler, body data.BodyHandler) (map[uint32][]byte, map[string][][]byte, error) { - return blProcMock.MarshalizedDataToBroadcastCalled(header, body) + return blProcMock.MarshalizedDataToBroadcastCalled(header, body) } func (blProcMock BlockProcessorMock) DecodeBlockBody(dta []byte) data.BodyHandler { - return blProcMock.DecodeBlockBodyCalled(dta) + return blProcMock.DecodeBlockBodyCalled(dta) } func (blProcMock BlockProcessorMock) DecodeBlockHeader(dta []byte) data.HeaderHandler { - return blProcMock.DecodeBlockHeaderCalled(dta) + return blProcMock.DecodeBlockHeaderCalled(dta) } func (blProcMock BlockProcessorMock) AddLastNotarizedHdr(shardId uint32, processedHdr data.HeaderHandler) { - blProcMock.AddLastNotarizedHdrCalled(shardId, processedHdr) + blProcMock.AddLastNotarizedHdrCalled(shardId, processedHdr) } func (blProcMock BlockProcessorMock) SetConsensusRewardAddresses([]string, uint64) { - panic("implement me") + panic("implement me") } // IsInterfaceNil returns true if there is no value under the interface func (blProcMock *BlockProcessorMock) IsInterfaceNil() bool { - if blProcMock == nil { - return true - } - return false + if blProcMock == nil { + return true + } + return false } diff --git a/process/rewardTransaction/interceptedRewardTransaction.go b/process/rewardTransaction/interceptedRewardTransaction.go index 8a97f316786..774bc2e7b64 100644 --- a/process/rewardTransaction/interceptedRewardTransaction.go +++ b/process/rewardTransaction/interceptedRewardTransaction.go @@ -1,149 +1,149 @@ package rewardTransaction import ( - "math/big" - - "github.com/ElrondNetwork/elrond-go/data" - "github.com/ElrondNetwork/elrond-go/data/rewardTx" - "github.com/ElrondNetwork/elrond-go/data/state" - "github.com/ElrondNetwork/elrond-go/hashing" - "github.com/ElrondNetwork/elrond-go/marshal" - "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/sharding" + "math/big" + + 
"github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/sharding" ) // InterceptedRewardTransaction holds and manages a transaction based struct with extended functionality type InterceptedRewardTransaction struct { - rTx *rewardTx.RewardTx - marshalizer marshal.Marshalizer - hasher hashing.Hasher - addrConv state.AddressConverter - coordinator sharding.Coordinator - hash []byte - rcvShard uint32 - sndShard uint32 - isAddressedToOtherShards bool + rTx *rewardTx.RewardTx + marshalizer marshal.Marshalizer + hasher hashing.Hasher + addrConv state.AddressConverter + coordinator sharding.Coordinator + hash []byte + rcvShard uint32 + sndShard uint32 + isAddressedToOtherShards bool } // NewInterceptedRewardTransaction returns a new instance of InterceptedRewardTransaction func NewInterceptedRewardTransaction( - rewardTxBuff []byte, - marshalizer marshal.Marshalizer, - hasher hashing.Hasher, - addrConv state.AddressConverter, - coordinator sharding.Coordinator, + rewardTxBuff []byte, + marshalizer marshal.Marshalizer, + hasher hashing.Hasher, + addrConv state.AddressConverter, + coordinator sharding.Coordinator, ) (*InterceptedRewardTransaction, error) { - if rewardTxBuff == nil { - return nil, process.ErrNilBuffer - } - if marshalizer == nil { - return nil, process.ErrNilMarshalizer - } - if hasher == nil { - return nil, process.ErrNilHasher - } - if addrConv == nil { - return nil, process.ErrNilAddressConverter - } - if coordinator == nil { - return nil, process.ErrNilShardCoordinator - } - - rTx := &rewardTx.RewardTx{} - err := marshalizer.Unmarshal(rTx, rewardTxBuff) - if err != nil { - return nil, err - } - - inRewardTx := &InterceptedRewardTransaction{ - rTx: rTx, - marshalizer: marshalizer, - hasher: hasher, - addrConv: addrConv, - coordinator: coordinator, - } - - err = inRewardTx.processFields(rewardTxBuff) - if err != nil { - return nil, err - } - - err = inRewardTx.integrity() - if err != nil { - return nil, err - } - - err = inRewardTx.verifyIfNotarized(inRewardTx.hash) - if err != nil { - return nil, err - } - - return inRewardTx, nil + if rewardTxBuff == nil { + return nil, process.ErrNilBuffer + } + if marshalizer == nil { + return nil, process.ErrNilMarshalizer + } + if hasher == nil { + return nil, process.ErrNilHasher + } + if addrConv == nil { + return nil, process.ErrNilAddressConverter + } + if coordinator == nil { + return nil, process.ErrNilShardCoordinator + } + + rTx := &rewardTx.RewardTx{} + err := marshalizer.Unmarshal(rTx, rewardTxBuff) + if err != nil { + return nil, err + } + + inRewardTx := &InterceptedRewardTransaction{ + rTx: rTx, + marshalizer: marshalizer, + hasher: hasher, + addrConv: addrConv, + coordinator: coordinator, + } + + err = inRewardTx.processFields(rewardTxBuff) + if err != nil { + return nil, err + } + + err = inRewardTx.integrity() + if err != nil { + return nil, err + } + + err = inRewardTx.verifyIfNotarized(inRewardTx.hash) + if err != nil { + return nil, err + } + + return inRewardTx, nil } func (inRTx *InterceptedRewardTransaction) processFields(rewardTxBuff []byte) error { - inRTx.hash = inRTx.hasher.Compute(string(rewardTxBuff)) + inRTx.hash = inRTx.hasher.Compute(string(rewardTxBuff)) - rcvAddr, err := inRTx.addrConv.CreateAddressFromPublicKeyBytes(inRTx.rTx.RcvAddr) - if err != 
nil { - return process.ErrInvalidRcvAddr - } + rcvAddr, err := inRTx.addrConv.CreateAddressFromPublicKeyBytes(inRTx.rTx.RcvAddr) + if err != nil { + return process.ErrInvalidRcvAddr + } - inRTx.rcvShard = inRTx.coordinator.ComputeId(rcvAddr) - inRTx.sndShard = inRTx.rTx.ShardId + inRTx.rcvShard = inRTx.coordinator.ComputeId(rcvAddr) + inRTx.sndShard = inRTx.rTx.ShardId - inRTx.isAddressedToOtherShards = inRTx.rcvShard != inRTx.coordinator.SelfId() && - inRTx.sndShard != inRTx.coordinator.SelfId() + inRTx.isAddressedToOtherShards = inRTx.rcvShard != inRTx.coordinator.SelfId() && + inRTx.sndShard != inRTx.coordinator.SelfId() - return nil + return nil } // integrity checks for not nil fields and negative value func (inRTx *InterceptedRewardTransaction) integrity() error { - if len(inRTx.rTx.RcvAddr) == 0 { - return process.ErrNilRcvAddr - } + if len(inRTx.rTx.RcvAddr) == 0 { + return process.ErrNilRcvAddr + } - if inRTx.rTx.Value == nil { - return process.ErrNilValue - } + if inRTx.rTx.Value == nil { + return process.ErrNilValue + } - if inRTx.rTx.Value.Cmp(big.NewInt(0)) < 0 { - return process.ErrNegativeValue - } + if inRTx.rTx.Value.Cmp(big.NewInt(0)) < 0 { + return process.ErrNegativeValue + } - return nil + return nil } // verifyIfNotarized checks if the rewardTx was already notarized func (inRTx *InterceptedRewardTransaction) verifyIfNotarized(rTxBuff []byte) error { - // TODO: implement this for flood protection purposes - // could verify if the epoch/round is behind last committed metachain block - return nil + // TODO: implement this for flood protection purposes + // could verify if the epoch/round is behind last committed metachain block + return nil } // RcvShard returns the receiver shard func (inRTx *InterceptedRewardTransaction) RcvShard() uint32 { - return inRTx.rcvShard + return inRTx.rcvShard } // SndShard returns the sender shard func (inRTx *InterceptedRewardTransaction) SndShard() uint32 { - return inRTx.sndShard + return inRTx.sndShard } // IsAddressedToOtherShards returns true if this transaction is not meant to be processed by the node from this shard func (inRTx *InterceptedRewardTransaction) IsAddressedToOtherShards() bool { - return inRTx.isAddressedToOtherShards + return inRTx.isAddressedToOtherShards } // RewardTransaction returns the reward transaction pointer that actually holds the data func (inRTx *InterceptedRewardTransaction) RewardTransaction() data.TransactionHandler { - return inRTx.rTx + return inRTx.rTx } // Hash gets the hash of this transaction func (inRTx *InterceptedRewardTransaction) Hash() []byte { - return inRTx.hash + return inRTx.hash } diff --git a/process/rewardTransaction/interceptor.go b/process/rewardTransaction/interceptor.go index a0b855818fb..a10d3287aa4 100644 --- a/process/rewardTransaction/interceptor.go +++ b/process/rewardTransaction/interceptor.go @@ -1,151 +1,151 @@ package rewardTransaction import ( - "github.com/ElrondNetwork/elrond-go/core/logger" - "github.com/ElrondNetwork/elrond-go/data/state" - "github.com/ElrondNetwork/elrond-go/dataRetriever" - "github.com/ElrondNetwork/elrond-go/hashing" - "github.com/ElrondNetwork/elrond-go/marshal" - "github.com/ElrondNetwork/elrond-go/p2p" - "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/sharding" - "github.com/ElrondNetwork/elrond-go/storage" + "github.com/ElrondNetwork/elrond-go/core/logger" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/hashing" + 
"github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/p2p" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/storage" ) var log = logger.DefaultLogger() // RewardTxInterceptor is used for intercepting reward transactions and storing them into a datapool type RewardTxInterceptor struct { - marshalizer marshal.Marshalizer - rewardTxPool dataRetriever.ShardedDataCacherNotifier - rewardTxStorer storage.Storer - addrConverter state.AddressConverter - hasher hashing.Hasher - shardCoordinator sharding.Coordinator - broadcastCallbackHandler func(buffToSend []byte) + marshalizer marshal.Marshalizer + rewardTxPool dataRetriever.ShardedDataCacherNotifier + rewardTxStorer storage.Storer + addrConverter state.AddressConverter + hasher hashing.Hasher + shardCoordinator sharding.Coordinator + broadcastCallbackHandler func(buffToSend []byte) } // NewRewardTxInterceptor hooks a new interceptor for reward transactions func NewRewardTxInterceptor( - marshalizer marshal.Marshalizer, - rewardTxPool dataRetriever.ShardedDataCacherNotifier, - rewardTxStorer storage.Storer, - addrConverter state.AddressConverter, - hasher hashing.Hasher, - shardCoordinator sharding.Coordinator, + marshalizer marshal.Marshalizer, + rewardTxPool dataRetriever.ShardedDataCacherNotifier, + rewardTxStorer storage.Storer, + addrConverter state.AddressConverter, + hasher hashing.Hasher, + shardCoordinator sharding.Coordinator, ) (*RewardTxInterceptor, error) { - if marshalizer == nil { - return nil, process.ErrNilMarshalizer - } - if rewardTxPool == nil { - return nil, process.ErrNilRewardTxDataPool - } - if rewardTxStorer == nil { - return nil, process.ErrNilRewardsTxStorage - } - if addrConverter == nil { - return nil, process.ErrNilAddressConverter - } - if hasher == nil { - return nil, process.ErrNilHasher - } - if shardCoordinator == nil { - return nil, process.ErrNilShardCoordinator - } - - rewardTxIntercept := &RewardTxInterceptor{ - marshalizer: marshalizer, - rewardTxPool: rewardTxPool, - rewardTxStorer: rewardTxStorer, - hasher: hasher, - addrConverter: addrConverter, - shardCoordinator: shardCoordinator, - } - - return rewardTxIntercept, nil + if marshalizer == nil { + return nil, process.ErrNilMarshalizer + } + if rewardTxPool == nil { + return nil, process.ErrNilRewardTxDataPool + } + if rewardTxStorer == nil { + return nil, process.ErrNilRewardsTxStorage + } + if addrConverter == nil { + return nil, process.ErrNilAddressConverter + } + if hasher == nil { + return nil, process.ErrNilHasher + } + if shardCoordinator == nil { + return nil, process.ErrNilShardCoordinator + } + + rewardTxIntercept := &RewardTxInterceptor{ + marshalizer: marshalizer, + rewardTxPool: rewardTxPool, + rewardTxStorer: rewardTxStorer, + hasher: hasher, + addrConverter: addrConverter, + shardCoordinator: shardCoordinator, + } + + return rewardTxIntercept, nil } // ProcessReceivedMessage will be the callback func from the p2p.Messenger and will be called each time a new message was received // (for the topic this validator was registered to) func (rti *RewardTxInterceptor) ProcessReceivedMessage(message p2p.MessageP2P) error { - if message == nil { - return process.ErrNilMessage - } - - if message.Data() == nil { - return process.ErrNilDataToProcess - } - - rewardTxsBuff := make([][]byte, 0) - err := rti.marshalizer.Unmarshal(&rewardTxsBuff, message.Data()) - if err != nil { - return err - } - if len(rewardTxsBuff) == 0 { - return 
process.ErrNoRewardTransactionInMessage - } - - filteredRTxBuffs := make([][]byte, 0) - lastErrEncountered := error(nil) - for _, rewardTxBuff := range rewardTxsBuff { - rewardTxIntercepted, err := NewInterceptedRewardTransaction( - rewardTxBuff, - rti.marshalizer, - rti.hasher, - rti.addrConverter, - rti.shardCoordinator) - - if err != nil { - lastErrEncountered = err - continue - } - - //reward tx is validated, add it to filtered out reward txs - filteredRTxBuffs = append(filteredRTxBuffs, rewardTxBuff) - if rewardTxIntercepted.IsAddressedToOtherShards() { - log.Debug("intercepted reward transaction is for other shards") - - continue - } - - go rti.processRewardTransaction(rewardTxIntercepted) - } - - var buffToSend []byte - filteredOutRTxsNeedToBeSend := len(filteredRTxBuffs) > 0 && lastErrEncountered != nil - if filteredOutRTxsNeedToBeSend { - buffToSend, err = rti.marshalizer.Marshal(filteredRTxBuffs) - if err != nil { - return err - } - } - - if rti.broadcastCallbackHandler != nil { - rti.broadcastCallbackHandler(buffToSend) - } - - return lastErrEncountered + if message == nil { + return process.ErrNilMessage + } + + if message.Data() == nil { + return process.ErrNilDataToProcess + } + + rewardTxsBuff := make([][]byte, 0) + err := rti.marshalizer.Unmarshal(&rewardTxsBuff, message.Data()) + if err != nil { + return err + } + if len(rewardTxsBuff) == 0 { + return process.ErrNoRewardTransactionInMessage + } + + filteredRTxBuffs := make([][]byte, 0) + lastErrEncountered := error(nil) + for _, rewardTxBuff := range rewardTxsBuff { + rewardTxIntercepted, err := NewInterceptedRewardTransaction( + rewardTxBuff, + rti.marshalizer, + rti.hasher, + rti.addrConverter, + rti.shardCoordinator) + + if err != nil { + lastErrEncountered = err + continue + } + + //reward tx is validated, add it to filtered out reward txs + filteredRTxBuffs = append(filteredRTxBuffs, rewardTxBuff) + if rewardTxIntercepted.IsAddressedToOtherShards() { + log.Debug("intercepted reward transaction is for other shards") + + continue + } + + go rti.processRewardTransaction(rewardTxIntercepted) + } + + var buffToSend []byte + filteredOutRTxsNeedToBeSend := len(filteredRTxBuffs) > 0 && lastErrEncountered != nil + if filteredOutRTxsNeedToBeSend { + buffToSend, err = rti.marshalizer.Marshal(filteredRTxBuffs) + if err != nil { + return err + } + } + + if rti.broadcastCallbackHandler != nil { + rti.broadcastCallbackHandler(buffToSend) + } + + return lastErrEncountered } // SetBroadcastCallback sets the callback method to send filtered out message func (rti *RewardTxInterceptor) SetBroadcastCallback(callback func(buffToSend []byte)) { - rti.broadcastCallbackHandler = callback + rti.broadcastCallbackHandler = callback } func (rti *RewardTxInterceptor) processRewardTransaction(rTx *InterceptedRewardTransaction) { - cacherIdentifier := process.ShardCacherIdentifier(rTx.SndShard(), rTx.RcvShard()) - rti.rewardTxPool.AddData( - rTx.Hash(), - rTx.RewardTransaction(), - cacherIdentifier, - ) + cacherIdentifier := process.ShardCacherIdentifier(rTx.SndShard(), rTx.RcvShard()) + rti.rewardTxPool.AddData( + rTx.Hash(), + rTx.RewardTransaction(), + cacherIdentifier, + ) } // IsInterfaceNil returns true if there is no value under the interface func (rti *RewardTxInterceptor) IsInterfaceNil() bool { - if rti == nil { - return true - } - return false + if rti == nil { + return true + } + return false } diff --git a/process/rewardTransaction/process.go b/process/rewardTransaction/process.go index e95d9b9a9ed..75a9209be24 100644 --- 
a/process/rewardTransaction/process.go +++ b/process/rewardTransaction/process.go @@ -1,8 +1,8 @@ package rewardTransaction import ( - "math/big" - "sync" + "math/big" + "sync" "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/rewardTx" @@ -16,8 +16,8 @@ type rewardTxProcessor struct { adrConv state.AddressConverter shardCoordinator sharding.Coordinator - mutRewardsForwarder sync.Mutex - rewardTxForwarder process.IntermediateTransactionHandler + mutRewardsForwarder sync.Mutex + rewardTxForwarder process.IntermediateTransactionHandler } // NewRewardTxProcessor creates a rewardTxProcessor instance @@ -26,7 +26,7 @@ func NewRewardTxProcessor( accountsDB state.AccountsAdapter, adrConv state.AddressConverter, coordinator sharding.Coordinator, - rewardTxForwarder process.IntermediateTransactionHandler, + rewardTxForwarder process.IntermediateTransactionHandler, ) (*rewardTxProcessor, error) { if accountsDB == nil { return nil, process.ErrNilAccountsAdapter @@ -38,12 +38,12 @@ func NewRewardTxProcessor( return nil, process.ErrNilShardCoordinator } - return &rewardTxProcessor{ - accounts: accountsDB, - adrConv: adrConv, - shardCoordinator: coordinator, - rewardTxForwarder: rewardTxForwarder, - }, nil + return &rewardTxProcessor{ + accounts: accountsDB, + adrConv: adrConv, + shardCoordinator: coordinator, + rewardTxForwarder: rewardTxForwarder, + }, nil } func (rtp *rewardTxProcessor) getAccountFromAddress(address []byte) (state.AccountHandler, error) { @@ -68,40 +68,40 @@ func (rtp *rewardTxProcessor) getAccountFromAddress(address []byte) (state.Accou // ProcessRewardTransaction updates the account state from the reward transaction func (rtp *rewardTxProcessor) ProcessRewardTransaction(rTx *rewardTx.RewardTx) error { - if rTx == nil { - return process.ErrNilRewardTransaction - } - if rTx.Value == nil { - return process.ErrNilValueFromRewardTransaction - } - - rtp.mutRewardsForwarder.Lock() - err := rtp.rewardTxForwarder.AddIntermediateTransactions([]data.TransactionHandler{rTx}) - rtp.mutRewardsForwarder.Unlock() - if err != nil { - return err - } - - accHandler, err := rtp.getAccountFromAddress(rTx.RcvAddr) - if err != nil { - return err - } - - if accHandler == nil || accHandler.IsInterfaceNil() { - // address from different shard - return nil - } + if rTx == nil { + return process.ErrNilRewardTransaction + } + if rTx.Value == nil { + return process.ErrNilValueFromRewardTransaction + } + + rtp.mutRewardsForwarder.Lock() + err := rtp.rewardTxForwarder.AddIntermediateTransactions([]data.TransactionHandler{rTx}) + rtp.mutRewardsForwarder.Unlock() + if err != nil { + return err + } + + accHandler, err := rtp.getAccountFromAddress(rTx.RcvAddr) + if err != nil { + return err + } + + if accHandler == nil || accHandler.IsInterfaceNil() { + // address from different shard + return nil + } rewardAcc, ok := accHandler.(*state.Account) if !ok { return process.ErrWrongTypeAssertion } - operation := big.NewInt(0) - operation = operation.Add(rTx.Value, rewardAcc.Balance) - err = rewardAcc.SetBalanceWithJournal(operation) + operation := big.NewInt(0) + operation = operation.Add(rTx.Value, rewardAcc.Balance) + err = rewardAcc.SetBalanceWithJournal(operation) - return err + return err } // IsInterfaceNil returns true if there is no value under the interface diff --git a/process/smartContract/process.go b/process/smartContract/process.go index cfd5b36965d..0d3d1d8e3d5 100644 --- a/process/smartContract/process.go +++ b/process/smartContract/process.go @@ -1,808 +1,808 @@ package 
smartContract import ( - "bytes" - "encoding/hex" - "fmt" - "math/big" - "sync" - - "github.com/ElrondNetwork/elrond-go/core/logger" - "github.com/ElrondNetwork/elrond-go/data" - "github.com/ElrondNetwork/elrond-go/data/smartContractResult" - "github.com/ElrondNetwork/elrond-go/data/state" - "github.com/ElrondNetwork/elrond-go/data/transaction" - "github.com/ElrondNetwork/elrond-go/hashing" - "github.com/ElrondNetwork/elrond-go/marshal" - "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/process/factory" - "github.com/ElrondNetwork/elrond-go/sharding" - vmcommon "github.com/ElrondNetwork/elrond-vm-common" + "bytes" + "encoding/hex" + "fmt" + "math/big" + "sync" + + "github.com/ElrondNetwork/elrond-go/core/logger" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/smartContractResult" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/data/transaction" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/factory" + "github.com/ElrondNetwork/elrond-go/sharding" + vmcommon "github.com/ElrondNetwork/elrond-vm-common" ) type scExecutionState struct { - allLogs map[string][]*vmcommon.LogEntry - allReturnData map[string][]*big.Int - returnCodes map[string]vmcommon.ReturnCode - rootHash []byte + allLogs map[string][]*vmcommon.LogEntry + allReturnData map[string][]*big.Int + returnCodes map[string]vmcommon.ReturnCode + rootHash []byte } type scProcessor struct { - accounts state.AccountsAdapter - tempAccounts process.TemporaryAccountsHandler - adrConv state.AddressConverter - hasher hashing.Hasher - marshalizer marshal.Marshalizer - shardCoordinator sharding.Coordinator - vmContainer process.VirtualMachinesContainer - argsParser process.ArgumentsParser - - mutSCState sync.Mutex - mapExecState map[uint64]scExecutionState - - scrForwarder process.IntermediateTransactionHandler - txFeeHandler process.TransactionFeeHandler + accounts state.AccountsAdapter + tempAccounts process.TemporaryAccountsHandler + adrConv state.AddressConverter + hasher hashing.Hasher + marshalizer marshal.Marshalizer + shardCoordinator sharding.Coordinator + vmContainer process.VirtualMachinesContainer + argsParser process.ArgumentsParser + + mutSCState sync.Mutex + mapExecState map[uint64]scExecutionState + + scrForwarder process.IntermediateTransactionHandler + txFeeHandler process.TransactionFeeHandler } var log = logger.DefaultLogger() // NewSmartContractProcessor create a smart contract processor creates and interprets VM data func NewSmartContractProcessor( - vmContainer process.VirtualMachinesContainer, - argsParser process.ArgumentsParser, - hasher hashing.Hasher, - marshalizer marshal.Marshalizer, - accountsDB state.AccountsAdapter, - tempAccounts process.TemporaryAccountsHandler, - adrConv state.AddressConverter, - coordinator sharding.Coordinator, - scrForwarder process.IntermediateTransactionHandler, - txFeeHandler process.TransactionFeeHandler, + vmContainer process.VirtualMachinesContainer, + argsParser process.ArgumentsParser, + hasher hashing.Hasher, + marshalizer marshal.Marshalizer, + accountsDB state.AccountsAdapter, + tempAccounts process.TemporaryAccountsHandler, + adrConv state.AddressConverter, + coordinator sharding.Coordinator, + scrForwarder process.IntermediateTransactionHandler, + txFeeHandler process.TransactionFeeHandler, ) (*scProcessor, error) { - if 
vmContainer == nil || vmContainer.IsInterfaceNil() { - return nil, process.ErrNoVM - } - if argsParser == nil || argsParser.IsInterfaceNil() { - return nil, process.ErrNilArgumentParser - } - if hasher == nil || hasher.IsInterfaceNil() { - return nil, process.ErrNilHasher - } - if marshalizer == nil || marshalizer.IsInterfaceNil() { - return nil, process.ErrNilMarshalizer - } - if accountsDB == nil || accountsDB.IsInterfaceNil() { - return nil, process.ErrNilAccountsAdapter - } - if tempAccounts == nil || tempAccounts.IsInterfaceNil() { - return nil, process.ErrNilTemporaryAccountsHandler - } - if adrConv == nil || adrConv.IsInterfaceNil() { - return nil, process.ErrNilAddressConverter - } - if coordinator == nil || coordinator.IsInterfaceNil() { - return nil, process.ErrNilShardCoordinator - } - if scrForwarder == nil || scrForwarder.IsInterfaceNil() { - return nil, process.ErrNilIntermediateTransactionHandler - } - if txFeeHandler == nil { - return nil, process.ErrNilUnsignedTxHandler - } - - return &scProcessor{ - vmContainer: vmContainer, - argsParser: argsParser, - hasher: hasher, - marshalizer: marshalizer, - accounts: accountsDB, - tempAccounts: tempAccounts, - adrConv: adrConv, - shardCoordinator: coordinator, - scrForwarder: scrForwarder, - txFeeHandler: txFeeHandler, - mapExecState: make(map[uint64]scExecutionState)}, nil + if vmContainer == nil || vmContainer.IsInterfaceNil() { + return nil, process.ErrNoVM + } + if argsParser == nil || argsParser.IsInterfaceNil() { + return nil, process.ErrNilArgumentParser + } + if hasher == nil || hasher.IsInterfaceNil() { + return nil, process.ErrNilHasher + } + if marshalizer == nil || marshalizer.IsInterfaceNil() { + return nil, process.ErrNilMarshalizer + } + if accountsDB == nil || accountsDB.IsInterfaceNil() { + return nil, process.ErrNilAccountsAdapter + } + if tempAccounts == nil || tempAccounts.IsInterfaceNil() { + return nil, process.ErrNilTemporaryAccountsHandler + } + if adrConv == nil || adrConv.IsInterfaceNil() { + return nil, process.ErrNilAddressConverter + } + if coordinator == nil || coordinator.IsInterfaceNil() { + return nil, process.ErrNilShardCoordinator + } + if scrForwarder == nil || scrForwarder.IsInterfaceNil() { + return nil, process.ErrNilIntermediateTransactionHandler + } + if txFeeHandler == nil { + return nil, process.ErrNilUnsignedTxHandler + } + + return &scProcessor{ + vmContainer: vmContainer, + argsParser: argsParser, + hasher: hasher, + marshalizer: marshalizer, + accounts: accountsDB, + tempAccounts: tempAccounts, + adrConv: adrConv, + shardCoordinator: coordinator, + scrForwarder: scrForwarder, + txFeeHandler: txFeeHandler, + mapExecState: make(map[uint64]scExecutionState)}, nil } // ComputeTransactionType calculates the type of the transaction func (sc *scProcessor) ComputeTransactionType(tx *transaction.Transaction) (process.TransactionType, error) { - err := sc.checkTxValidity(tx) - if err != nil { - return 0, err - } - - isEmptyAddress := sc.isDestAddressEmpty(tx) - if isEmptyAddress { - if len(tx.Data) > 0 { - return process.SCDeployment, nil - } - return 0, process.ErrWrongTransaction - } - - acntDst, err := sc.getAccountFromAddress(tx.RcvAddr) - if err != nil { - return 0, err - } - - if acntDst == nil || acntDst.IsInterfaceNil() { - return process.MoveBalance, nil - } - - if !acntDst.IsInterfaceNil() && len(acntDst.GetCode()) > 0 { - return process.SCInvoking, nil - } - - return process.MoveBalance, nil + err := sc.checkTxValidity(tx) + if err != nil { + return 0, err + } + + isEmptyAddress := 
sc.isDestAddressEmpty(tx) + if isEmptyAddress { + if len(tx.Data) > 0 { + return process.SCDeployment, nil + } + return 0, process.ErrWrongTransaction + } + + acntDst, err := sc.getAccountFromAddress(tx.RcvAddr) + if err != nil { + return 0, err + } + + if acntDst == nil || acntDst.IsInterfaceNil() { + return process.MoveBalance, nil + } + + if !acntDst.IsInterfaceNil() && len(acntDst.GetCode()) > 0 { + return process.SCInvoking, nil + } + + return process.MoveBalance, nil } func (sc *scProcessor) checkTxValidity(tx *transaction.Transaction) error { - if tx == nil || tx.IsInterfaceNil() { - return process.ErrNilTransaction - } + if tx == nil || tx.IsInterfaceNil() { + return process.ErrNilTransaction + } - recvAddressIsInvalid := sc.adrConv.AddressLen() != len(tx.RcvAddr) - if recvAddressIsInvalid { - return process.ErrWrongTransaction - } + recvAddressIsInvalid := sc.adrConv.AddressLen() != len(tx.RcvAddr) + if recvAddressIsInvalid { + return process.ErrWrongTransaction + } - return nil + return nil } func (sc *scProcessor) isDestAddressEmpty(tx *transaction.Transaction) bool { - isEmptyAddress := bytes.Equal(tx.RcvAddr, make([]byte, sc.adrConv.AddressLen())) - return isEmptyAddress + isEmptyAddress := bytes.Equal(tx.RcvAddr, make([]byte, sc.adrConv.AddressLen())) + return isEmptyAddress } // ExecuteSmartContractTransaction processes the transaction, call the VM and processes the SC call output func (sc *scProcessor) ExecuteSmartContractTransaction( - tx *transaction.Transaction, - acntSnd, acntDst state.AccountHandler, - round uint64, + tx *transaction.Transaction, + acntSnd, acntDst state.AccountHandler, + round uint64, ) error { - defer sc.tempAccounts.CleanTempAccounts() - - if tx == nil || tx.IsInterfaceNil() { - return process.ErrNilTransaction - } - if acntDst == nil || acntDst.IsInterfaceNil() { - return process.ErrNilSCDestAccount - } - if acntDst.IsInterfaceNil() || acntDst.GetCode() == nil { - return process.ErrNilSCDestAccount - } - - err := sc.prepareSmartContractCall(tx, acntSnd) - if err != nil { - return err - } - - vmInput, err := sc.createVMCallInput(tx) - if err != nil { - return err - } - - vm, err := sc.getVMFromTransaction(tx) - if err != nil { - return err - } - - vmOutput, err := vm.RunSmartContractCall(vmInput) - if err != nil { - return err - } - - // VM is formally verified and the output is correct - crossTxs, consumedFee, err := sc.processVMOutput(vmOutput, tx, acntSnd, round) - if err != nil { - return err - } - - err = sc.scrForwarder.AddIntermediateTransactions(crossTxs) - if err != nil { - return err - } - - sc.txFeeHandler.ProcessTransactionFee(consumedFee) - - return nil + defer sc.tempAccounts.CleanTempAccounts() + + if tx == nil || tx.IsInterfaceNil() { + return process.ErrNilTransaction + } + if acntDst == nil || acntDst.IsInterfaceNil() { + return process.ErrNilSCDestAccount + } + if acntDst.IsInterfaceNil() || acntDst.GetCode() == nil { + return process.ErrNilSCDestAccount + } + + err := sc.prepareSmartContractCall(tx, acntSnd) + if err != nil { + return err + } + + vmInput, err := sc.createVMCallInput(tx) + if err != nil { + return err + } + + vm, err := sc.getVMFromTransaction(tx) + if err != nil { + return err + } + + vmOutput, err := vm.RunSmartContractCall(vmInput) + if err != nil { + return err + } + + // VM is formally verified and the output is correct + crossTxs, consumedFee, err := sc.processVMOutput(vmOutput, tx, acntSnd, round) + if err != nil { + return err + } + + err = sc.scrForwarder.AddIntermediateTransactions(crossTxs) + if err != 
nil { + return err + } + + sc.txFeeHandler.ProcessTransactionFee(consumedFee) + + return nil } func (sc *scProcessor) prepareSmartContractCall(tx *transaction.Transaction, acntSnd state.AccountHandler) error { - err := sc.argsParser.ParseData(tx.Data) - if err != nil { - return err - } - - err = sc.processSCPayment(tx, acntSnd) - if err != nil { - return err - } - - nonce := tx.Nonce - if acntSnd != nil && !acntSnd.IsInterfaceNil() { - nonce = acntSnd.GetNonce() - } - txValue := big.NewInt(0).Set(tx.Value) - sc.tempAccounts.AddTempAccount(tx.SndAddr, txValue, nonce) - - return nil + err := sc.argsParser.ParseData(tx.Data) + if err != nil { + return err + } + + err = sc.processSCPayment(tx, acntSnd) + if err != nil { + return err + } + + nonce := tx.Nonce + if acntSnd != nil && !acntSnd.IsInterfaceNil() { + nonce = acntSnd.GetNonce() + } + txValue := big.NewInt(0).Set(tx.Value) + sc.tempAccounts.AddTempAccount(tx.SndAddr, txValue, nonce) + + return nil } func (sc *scProcessor) getVMFromTransaction(tx *transaction.Transaction) (vmcommon.VMExecutionHandler, error) { - //TODO add processing here - like calculating what kind of VM does this contract call needs - vm, err := sc.vmContainer.Get([]byte(factory.IELEVirtualMachine)) - if err != nil { - return nil, err - } - return vm, nil + //TODO add processing here - like calculating what kind of VM does this contract call needs + vm, err := sc.vmContainer.Get([]byte(factory.IELEVirtualMachine)) + if err != nil { + return nil, err + } + return vm, nil } // DeploySmartContract processes the transaction, than deploy the smart contract into VM, final code is saved in account func (sc *scProcessor) DeploySmartContract( - tx *transaction.Transaction, - acntSnd state.AccountHandler, - round uint64, + tx *transaction.Transaction, + acntSnd state.AccountHandler, + round uint64, ) error { - defer sc.tempAccounts.CleanTempAccounts() - - err := sc.checkTxValidity(tx) - if err != nil { - return err - } - - isEmptyAddress := sc.isDestAddressEmpty(tx) - if !isEmptyAddress { - return process.ErrWrongTransaction - } - - err = sc.prepareSmartContractCall(tx, acntSnd) - if err != nil { - return err - } - - vmInput, err := sc.createVMDeployInput(tx) - if err != nil { - return err - } - - vm, err := sc.getVMFromTransaction(tx) - if err != nil { - return err - } - - // TODO: Smart contract address calculation - vmOutput, err := vm.RunSmartContractCreate(vmInput) - if err != nil { - return err - } - - // VM is formally verified, the output is correct - crossTxs, consumedFee, err := sc.processVMOutput(vmOutput, tx, acntSnd, round) - if err != nil { - return err - } - - err = sc.scrForwarder.AddIntermediateTransactions(crossTxs) - if err != nil { - return err - } - - sc.txFeeHandler.ProcessTransactionFee(consumedFee) - - return nil + defer sc.tempAccounts.CleanTempAccounts() + + err := sc.checkTxValidity(tx) + if err != nil { + return err + } + + isEmptyAddress := sc.isDestAddressEmpty(tx) + if !isEmptyAddress { + return process.ErrWrongTransaction + } + + err = sc.prepareSmartContractCall(tx, acntSnd) + if err != nil { + return err + } + + vmInput, err := sc.createVMDeployInput(tx) + if err != nil { + return err + } + + vm, err := sc.getVMFromTransaction(tx) + if err != nil { + return err + } + + // TODO: Smart contract address calculation + vmOutput, err := vm.RunSmartContractCreate(vmInput) + if err != nil { + return err + } + + // VM is formally verified, the output is correct + crossTxs, consumedFee, err := sc.processVMOutput(vmOutput, tx, acntSnd, round) + if err != 
nil { + return err + } + + err = sc.scrForwarder.AddIntermediateTransactions(crossTxs) + if err != nil { + return err + } + + sc.txFeeHandler.ProcessTransactionFee(consumedFee) + + return nil } func (sc *scProcessor) createVMCallInput(tx *transaction.Transaction) (*vmcommon.ContractCallInput, error) { - vmInput, err := sc.createVMInput(tx) - if err != nil { - return nil, err - } + vmInput, err := sc.createVMInput(tx) + if err != nil { + return nil, err + } - vmCallInput := &vmcommon.ContractCallInput{} - vmCallInput.VMInput = *vmInput - vmCallInput.Function, err = sc.argsParser.GetFunction() - if err != nil { - return nil, err - } + vmCallInput := &vmcommon.ContractCallInput{} + vmCallInput.VMInput = *vmInput + vmCallInput.Function, err = sc.argsParser.GetFunction() + if err != nil { + return nil, err + } - vmCallInput.RecipientAddr = tx.RcvAddr + vmCallInput.RecipientAddr = tx.RcvAddr - return vmCallInput, nil + return vmCallInput, nil } func (sc *scProcessor) createVMDeployInput(tx *transaction.Transaction) (*vmcommon.ContractCreateInput, error) { - vmInput, err := sc.createVMInput(tx) - if err != nil { - return nil, err - } + vmInput, err := sc.createVMInput(tx) + if err != nil { + return nil, err + } - vmCreateInput := &vmcommon.ContractCreateInput{} - hexCode, err := sc.argsParser.GetCode() - if err != nil { - return nil, err - } + vmCreateInput := &vmcommon.ContractCreateInput{} + hexCode, err := sc.argsParser.GetCode() + if err != nil { + return nil, err + } - vmCreateInput.ContractCode, err = hex.DecodeString(string(hexCode)) - if err != nil { - return nil, err - } + vmCreateInput.ContractCode, err = hex.DecodeString(string(hexCode)) + if err != nil { + return nil, err + } - vmCreateInput.VMInput = *vmInput + vmCreateInput.VMInput = *vmInput - return vmCreateInput, nil + return vmCreateInput, nil } func (sc *scProcessor) createVMInput(tx *transaction.Transaction) (*vmcommon.VMInput, error) { - var err error - vmInput := &vmcommon.VMInput{} - - vmInput.CallerAddr = tx.SndAddr - vmInput.Arguments, err = sc.argsParser.GetArguments() - if err != nil { - return nil, err - } - vmInput.CallValue = tx.Value - vmInput.GasPrice = big.NewInt(int64(tx.GasPrice)) - vmInput.GasProvided = big.NewInt(int64(tx.GasLimit)) - - //TODO: change this when we know for what they are used. - scCallHeader := &vmcommon.SCCallHeader{} - scCallHeader.GasLimit = big.NewInt(0) - scCallHeader.Number = big.NewInt(0) - scCallHeader.Timestamp = big.NewInt(0) - scCallHeader.Beneficiary = big.NewInt(0) - - vmInput.Header = scCallHeader - - return vmInput, nil + var err error + vmInput := &vmcommon.VMInput{} + + vmInput.CallerAddr = tx.SndAddr + vmInput.Arguments, err = sc.argsParser.GetArguments() + if err != nil { + return nil, err + } + vmInput.CallValue = tx.Value + vmInput.GasPrice = big.NewInt(int64(tx.GasPrice)) + vmInput.GasProvided = big.NewInt(int64(tx.GasLimit)) + + //TODO: change this when we know for what they are used. 
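// Illustrative sketch of what the placeholder header below could eventually carry (an
// assumption, not something this patch establishes): once real block data reaches the
// processor, the fields would presumably be taken from the current block instead of being
// zeroed, for example:
//
//	scCallHeader := &vmcommon.SCCallHeader{
//		Beneficiary: big.NewInt(0),                                         // proposer/reward address, once known
//		Number:      big.NewInt(0).SetUint64(currentHeader.GetNonce()),     // currentHeader is hypothetical here
//		GasLimit:    big.NewInt(0).SetUint64(currentBlockGasLimit),         // currentBlockGasLimit is hypothetical here
//		Timestamp:   big.NewInt(0).SetUint64(currentHeader.GetTimeStamp()),
//	}
//
// Until then, the zero values assigned below keep the VMInput well-formed.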
+ scCallHeader := &vmcommon.SCCallHeader{} + scCallHeader.GasLimit = big.NewInt(0) + scCallHeader.Number = big.NewInt(0) + scCallHeader.Timestamp = big.NewInt(0) + scCallHeader.Beneficiary = big.NewInt(0) + + vmInput.Header = scCallHeader + + return vmInput, nil } // taking money from sender, as VM might not have access to him because of state sharding func (sc *scProcessor) processSCPayment(tx *transaction.Transaction, acntSnd state.AccountHandler) error { - if acntSnd == nil || acntSnd.IsInterfaceNil() { - // transaction was already done at sender shard - return nil - } - - err := acntSnd.SetNonceWithJournal(acntSnd.GetNonce() + 1) - if err != nil { - return err - } - - cost := big.NewInt(0) - cost = cost.Mul(big.NewInt(0).SetUint64(tx.GasPrice), big.NewInt(0).SetUint64(tx.GasLimit)) - cost = cost.Add(cost, tx.Value) - - if cost.Cmp(big.NewInt(0)) == 0 { - return nil - } - - stAcc, ok := acntSnd.(*state.Account) - if !ok { - return process.ErrWrongTypeAssertion - } - - if stAcc.Balance.Cmp(cost) < 0 { - return process.ErrInsufficientFunds - } - - totalCost := big.NewInt(0) - err = stAcc.SetBalanceWithJournal(totalCost.Sub(stAcc.Balance, cost)) - if err != nil { - return err - } - - return nil + if acntSnd == nil || acntSnd.IsInterfaceNil() { + // transaction was already done at sender shard + return nil + } + + err := acntSnd.SetNonceWithJournal(acntSnd.GetNonce() + 1) + if err != nil { + return err + } + + cost := big.NewInt(0) + cost = cost.Mul(big.NewInt(0).SetUint64(tx.GasPrice), big.NewInt(0).SetUint64(tx.GasLimit)) + cost = cost.Add(cost, tx.Value) + + if cost.Cmp(big.NewInt(0)) == 0 { + return nil + } + + stAcc, ok := acntSnd.(*state.Account) + if !ok { + return process.ErrWrongTypeAssertion + } + + if stAcc.Balance.Cmp(cost) < 0 { + return process.ErrInsufficientFunds + } + + totalCost := big.NewInt(0) + err = stAcc.SetBalanceWithJournal(totalCost.Sub(stAcc.Balance, cost)) + if err != nil { + return err + } + + return nil } func (sc *scProcessor) processVMOutput( - vmOutput *vmcommon.VMOutput, - tx *transaction.Transaction, - acntSnd state.AccountHandler, - round uint64, + vmOutput *vmcommon.VMOutput, + tx *transaction.Transaction, + acntSnd state.AccountHandler, + round uint64, ) ([]data.TransactionHandler, *big.Int, error) { - if vmOutput == nil { - return nil, nil, process.ErrNilVMOutput - } - if tx == nil { - return nil, nil, process.ErrNilTransaction - } - - txBytes, err := sc.marshalizer.Marshal(tx) - if err != nil { - return nil, nil, err - } - txHash := sc.hasher.Compute(string(txBytes)) - - if vmOutput.ReturnCode != vmcommon.Ok { - log.Info(fmt.Sprintf( - "error processing tx %s in VM: return code: %s", - hex.EncodeToString(txHash), - vmOutput.ReturnCode), - ) - } - - err = sc.saveSCOutputToCurrentState(vmOutput, round, txHash) - if err != nil { - return nil, nil, err - } - - crossOutAccs, err := sc.processSCOutputAccounts(vmOutput.OutputAccounts) - if err != nil { - return nil, nil, err - } - - crossTxs, err := sc.createCrossShardTransactions(crossOutAccs, tx, txHash) - if err != nil { - return nil, nil, err - } - - acntSnd, err = sc.reloadLocalSndAccount(acntSnd) - if err != nil { - return nil, nil, err - } - - totalGasRefund := big.NewInt(0) - totalGasRefund = totalGasRefund.Add(vmOutput.GasRefund, vmOutput.GasRemaining) - scrIfCrossShard, consumedFee, err := sc.refundGasToSender(totalGasRefund, tx, txHash, acntSnd) - if err != nil { - return nil, nil, err - } - - if scrIfCrossShard != nil { - crossTxs = append(crossTxs, scrIfCrossShard) - } - - err = 
sc.deleteAccounts(vmOutput.DeletedAccounts) - if err != nil { - return nil, nil, err - } - - err = sc.processTouchedAccounts(vmOutput.TouchedAccounts) - if err != nil { - return nil, nil, err - } - - return crossTxs, consumedFee, nil + if vmOutput == nil { + return nil, nil, process.ErrNilVMOutput + } + if tx == nil { + return nil, nil, process.ErrNilTransaction + } + + txBytes, err := sc.marshalizer.Marshal(tx) + if err != nil { + return nil, nil, err + } + txHash := sc.hasher.Compute(string(txBytes)) + + if vmOutput.ReturnCode != vmcommon.Ok { + log.Info(fmt.Sprintf( + "error processing tx %s in VM: return code: %s", + hex.EncodeToString(txHash), + vmOutput.ReturnCode), + ) + } + + err = sc.saveSCOutputToCurrentState(vmOutput, round, txHash) + if err != nil { + return nil, nil, err + } + + crossOutAccs, err := sc.processSCOutputAccounts(vmOutput.OutputAccounts) + if err != nil { + return nil, nil, err + } + + crossTxs, err := sc.createCrossShardTransactions(crossOutAccs, tx, txHash) + if err != nil { + return nil, nil, err + } + + acntSnd, err = sc.reloadLocalSndAccount(acntSnd) + if err != nil { + return nil, nil, err + } + + totalGasRefund := big.NewInt(0) + totalGasRefund = totalGasRefund.Add(vmOutput.GasRefund, vmOutput.GasRemaining) + scrIfCrossShard, consumedFee, err := sc.refundGasToSender(totalGasRefund, tx, txHash, acntSnd) + if err != nil { + return nil, nil, err + } + + if scrIfCrossShard != nil { + crossTxs = append(crossTxs, scrIfCrossShard) + } + + err = sc.deleteAccounts(vmOutput.DeletedAccounts) + if err != nil { + return nil, nil, err + } + + err = sc.processTouchedAccounts(vmOutput.TouchedAccounts) + if err != nil { + return nil, nil, err + } + + return crossTxs, consumedFee, nil } // reloadLocalSndAccount will reload from current account state the sender account // this requirement is needed because in the case of refunding the exact account that was previously // modified in saveSCOutputToCurrentState, the modifications done there should be visible here func (sc *scProcessor) reloadLocalSndAccount(acntSnd state.AccountHandler) (state.AccountHandler, error) { - if acntSnd == nil || acntSnd.IsInterfaceNil() { - return acntSnd, nil - } + if acntSnd == nil || acntSnd.IsInterfaceNil() { + return acntSnd, nil + } - isAccountFromCurrentShard := acntSnd.AddressContainer() != nil - if !isAccountFromCurrentShard { - return acntSnd, nil - } + isAccountFromCurrentShard := acntSnd.AddressContainer() != nil + if !isAccountFromCurrentShard { + return acntSnd, nil + } - return sc.getAccountFromAddress(acntSnd.AddressContainer().Bytes()) + return sc.getAccountFromAddress(acntSnd.AddressContainer().Bytes()) } func (sc *scProcessor) createSmartContractResult( - outAcc *vmcommon.OutputAccount, - scAddress []byte, - txHash []byte, + outAcc *vmcommon.OutputAccount, + scAddress []byte, + txHash []byte, ) *smartContractResult.SmartContractResult { - crossSc := &smartContractResult.SmartContractResult{} + crossSc := &smartContractResult.SmartContractResult{} - crossSc.Value = outAcc.Balance - crossSc.Nonce = outAcc.Nonce.Uint64() - crossSc.RcvAddr = outAcc.Address - crossSc.SndAddr = scAddress - crossSc.Code = outAcc.Code - crossSc.Data = sc.argsParser.CreateDataFromStorageUpdate(outAcc.StorageUpdates) - crossSc.TxHash = txHash + crossSc.Value = outAcc.Balance + crossSc.Nonce = outAcc.Nonce.Uint64() + crossSc.RcvAddr = outAcc.Address + crossSc.SndAddr = scAddress + crossSc.Code = outAcc.Code + crossSc.Data = sc.argsParser.CreateDataFromStorageUpdate(outAcc.StorageUpdates) + crossSc.TxHash = 
txHash - return crossSc + return crossSc } func (sc *scProcessor) createCrossShardTransactions( - crossOutAccs []*vmcommon.OutputAccount, - tx *transaction.Transaction, - txHash []byte, + crossOutAccs []*vmcommon.OutputAccount, + tx *transaction.Transaction, + txHash []byte, ) ([]data.TransactionHandler, error) { - crossSCTxs := make([]data.TransactionHandler, 0) + crossSCTxs := make([]data.TransactionHandler, 0) - for i := 0; i < len(crossOutAccs); i++ { - scTx := sc.createSmartContractResult(crossOutAccs[i], tx.RcvAddr, txHash) - crossSCTxs = append(crossSCTxs, scTx) - } + for i := 0; i < len(crossOutAccs); i++ { + scTx := sc.createSmartContractResult(crossOutAccs[i], tx.RcvAddr, txHash) + crossSCTxs = append(crossSCTxs, scTx) + } - return crossSCTxs, nil + return crossSCTxs, nil } // give back the user the unused gas money func (sc *scProcessor) refundGasToSender( - gasRefund *big.Int, - tx *transaction.Transaction, - txHash []byte, - acntSnd state.AccountHandler, + gasRefund *big.Int, + tx *transaction.Transaction, + txHash []byte, + acntSnd state.AccountHandler, ) (*smartContractResult.SmartContractResult, *big.Int, error) { - consumedFee := big.NewInt(0) - consumedFee = consumedFee.Mul(big.NewInt(0).SetUint64(tx.GasPrice), big.NewInt(0).SetUint64(tx.GasLimit)) - if gasRefund == nil || gasRefund.Cmp(big.NewInt(0)) <= 0 { - return nil, consumedFee, nil - } - - refundErd := big.NewInt(0) - refundErd = refundErd.Mul(gasRefund, big.NewInt(int64(tx.GasPrice))) - consumedFee = consumedFee.Sub(consumedFee, refundErd) - - scTx := &smartContractResult.SmartContractResult{} - scTx.Value = refundErd - scTx.RcvAddr = tx.SndAddr - scTx.SndAddr = tx.RcvAddr - scTx.Nonce = tx.Nonce + 1 - scTx.TxHash = txHash - - if acntSnd == nil || acntSnd.IsInterfaceNil() { - return scTx, consumedFee, nil - } - - stAcc, ok := acntSnd.(*state.Account) - if !ok { - return nil, nil, process.ErrWrongTypeAssertion - } - - newBalance := big.NewInt(0).Add(stAcc.Balance, refundErd) - err := stAcc.SetBalanceWithJournal(newBalance) - if err != nil { - return nil, nil, err - } - - return scTx, consumedFee, nil + consumedFee := big.NewInt(0) + consumedFee = consumedFee.Mul(big.NewInt(0).SetUint64(tx.GasPrice), big.NewInt(0).SetUint64(tx.GasLimit)) + if gasRefund == nil || gasRefund.Cmp(big.NewInt(0)) <= 0 { + return nil, consumedFee, nil + } + + refundErd := big.NewInt(0) + refundErd = refundErd.Mul(gasRefund, big.NewInt(int64(tx.GasPrice))) + consumedFee = consumedFee.Sub(consumedFee, refundErd) + + scTx := &smartContractResult.SmartContractResult{} + scTx.Value = refundErd + scTx.RcvAddr = tx.SndAddr + scTx.SndAddr = tx.RcvAddr + scTx.Nonce = tx.Nonce + 1 + scTx.TxHash = txHash + + if acntSnd == nil || acntSnd.IsInterfaceNil() { + return scTx, consumedFee, nil + } + + stAcc, ok := acntSnd.(*state.Account) + if !ok { + return nil, nil, process.ErrWrongTypeAssertion + } + + newBalance := big.NewInt(0).Add(stAcc.Balance, refundErd) + err := stAcc.SetBalanceWithJournal(newBalance) + if err != nil { + return nil, nil, err + } + + return scTx, consumedFee, nil } // save account changes in state from vmOutput - protected by VM - every output can be treated as is. 
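// In short: output accounts that cannot be resolved in this shard are collected and returned
// so the caller can turn them into cross-shard results; for local accounts the storage
// updates, code, nonce and balance reported by the VM are applied and journalized, and the
// account is re-emitted as a cross-shard output only when the real balance change (the
// VM-reported balance minus the current one, adjusted for temp accounts that only saw the
// transaction value) is non-zero.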
func (sc *scProcessor) processSCOutputAccounts(outputAccounts []*vmcommon.OutputAccount) ([]*vmcommon.OutputAccount, error) { - crossOutAccs := make([]*vmcommon.OutputAccount, 0) - for i := 0; i < len(outputAccounts); i++ { - outAcc := outputAccounts[i] - acc, err := sc.getAccountFromAddress(outAcc.Address) - if err != nil { - return nil, err - } - - fakeAcc := sc.tempAccounts.TempAccount(outAcc.Address) - - if acc == nil || acc.IsInterfaceNil() { - crossOutAccs = append(crossOutAccs, outAcc) - continue - } - - for j := 0; j < len(outAcc.StorageUpdates); j++ { - storeUpdate := outAcc.StorageUpdates[j] - acc.DataTrieTracker().SaveKeyValue(storeUpdate.Offset, storeUpdate.Data) - } - - if len(outAcc.StorageUpdates) > 0 { - //SC with data variables - err := sc.accounts.SaveDataTrie(acc) - if err != nil { - return nil, err - } - } - - if len(outAcc.Code) > 0 { - err = sc.accounts.PutCode(acc, outAcc.Code) - if err != nil { - return nil, err - } - - //TODO remove this when receipts are implemented - log.Info(fmt.Sprintf("*** Generated/called SC account: %s ***", hex.EncodeToString(outAcc.Address))) - } - - if outAcc.Nonce == nil || outAcc.Nonce.Cmp(big.NewInt(int64(acc.GetNonce()))) < 0 { - return nil, process.ErrWrongNonceInVMOutput - } - - err = acc.SetNonceWithJournal(outAcc.Nonce.Uint64()) - if err != nil { - return nil, err - } - - if outAcc.Balance == nil { - return nil, process.ErrNilBalanceFromSC - } - - stAcc, ok := acc.(*state.Account) - if !ok { - return nil, process.ErrWrongTypeAssertion - } - - // if fake account, than VM only has transaction value as balance, so anything remaining is a plus - if fakeAcc != nil && !fakeAcc.IsInterfaceNil() { - outAcc.Balance = outAcc.Balance.Add(outAcc.Balance, stAcc.Balance) - } - - realBalanceChange := big.NewInt(0).Sub(outAcc.Balance, stAcc.Balance) - - // update the values according to SC output - err = stAcc.SetBalanceWithJournal(outAcc.Balance) - if err != nil { - return nil, err - } - - zero := big.NewInt(0) - if realBalanceChange.Cmp(zero) != 0 { - outAcc.Balance = realBalanceChange - crossOutAccs = append(crossOutAccs, outAcc) - } - } - - return crossOutAccs, nil + crossOutAccs := make([]*vmcommon.OutputAccount, 0) + for i := 0; i < len(outputAccounts); i++ { + outAcc := outputAccounts[i] + acc, err := sc.getAccountFromAddress(outAcc.Address) + if err != nil { + return nil, err + } + + fakeAcc := sc.tempAccounts.TempAccount(outAcc.Address) + + if acc == nil || acc.IsInterfaceNil() { + crossOutAccs = append(crossOutAccs, outAcc) + continue + } + + for j := 0; j < len(outAcc.StorageUpdates); j++ { + storeUpdate := outAcc.StorageUpdates[j] + acc.DataTrieTracker().SaveKeyValue(storeUpdate.Offset, storeUpdate.Data) + } + + if len(outAcc.StorageUpdates) > 0 { + //SC with data variables + err := sc.accounts.SaveDataTrie(acc) + if err != nil { + return nil, err + } + } + + if len(outAcc.Code) > 0 { + err = sc.accounts.PutCode(acc, outAcc.Code) + if err != nil { + return nil, err + } + + //TODO remove this when receipts are implemented + log.Info(fmt.Sprintf("*** Generated/called SC account: %s ***", hex.EncodeToString(outAcc.Address))) + } + + if outAcc.Nonce == nil || outAcc.Nonce.Cmp(big.NewInt(int64(acc.GetNonce()))) < 0 { + return nil, process.ErrWrongNonceInVMOutput + } + + err = acc.SetNonceWithJournal(outAcc.Nonce.Uint64()) + if err != nil { + return nil, err + } + + if outAcc.Balance == nil { + return nil, process.ErrNilBalanceFromSC + } + + stAcc, ok := acc.(*state.Account) + if !ok { + return nil, process.ErrWrongTypeAssertion + } + + // 
if fake account, than VM only has transaction value as balance, so anything remaining is a plus + if fakeAcc != nil && !fakeAcc.IsInterfaceNil() { + outAcc.Balance = outAcc.Balance.Add(outAcc.Balance, stAcc.Balance) + } + + realBalanceChange := big.NewInt(0).Sub(outAcc.Balance, stAcc.Balance) + + // update the values according to SC output + err = stAcc.SetBalanceWithJournal(outAcc.Balance) + if err != nil { + return nil, err + } + + zero := big.NewInt(0) + if realBalanceChange.Cmp(zero) != 0 { + outAcc.Balance = realBalanceChange + crossOutAccs = append(crossOutAccs, outAcc) + } + } + + return crossOutAccs, nil } // delete accounts - only suicide by current SC or another SC called by current SC - protected by VM func (sc *scProcessor) deleteAccounts(deletedAccounts [][]byte) error { - for _, value := range deletedAccounts { - acc, err := sc.getAccountFromAddress(value) - if err != nil { - return err - } - - if acc == nil || acc.IsInterfaceNil() { - //TODO: sharded Smart Contract processing - continue - } - - err = sc.accounts.RemoveAccount(acc.AddressContainer()) - if err != nil { - return err - } - } - return nil + for _, value := range deletedAccounts { + acc, err := sc.getAccountFromAddress(value) + if err != nil { + return err + } + + if acc == nil || acc.IsInterfaceNil() { + //TODO: sharded Smart Contract processing + continue + } + + err = sc.accounts.RemoveAccount(acc.AddressContainer()) + if err != nil { + return err + } + } + return nil } func (sc *scProcessor) processTouchedAccounts(touchedAccounts [][]byte) error { - //TODO: implement - return nil + //TODO: implement + return nil } func (sc *scProcessor) getAccountFromAddress(address []byte) (state.AccountHandler, error) { - adrSrc, err := sc.adrConv.CreateAddressFromPublicKeyBytes(address) - if err != nil { - return nil, err - } - - shardForCurrentNode := sc.shardCoordinator.SelfId() - shardForSrc := sc.shardCoordinator.ComputeId(adrSrc) - if shardForCurrentNode != shardForSrc { - return nil, nil - } - - acnt, err := sc.accounts.GetAccountWithJournal(adrSrc) - if err != nil { - return nil, err - } - - return acnt, nil + adrSrc, err := sc.adrConv.CreateAddressFromPublicKeyBytes(address) + if err != nil { + return nil, err + } + + shardForCurrentNode := sc.shardCoordinator.SelfId() + shardForSrc := sc.shardCoordinator.ComputeId(adrSrc) + if shardForCurrentNode != shardForSrc { + return nil, nil + } + + acnt, err := sc.accounts.GetAccountWithJournal(adrSrc) + if err != nil { + return nil, err + } + + return acnt, nil } // GetAllSmartContractCallRootHash returns the roothash of the state of the SC executions for defined round func (sc *scProcessor) GetAllSmartContractCallRootHash(round uint64) []byte { - return []byte("roothash") + return []byte("roothash") } // saves VM output into state func (sc *scProcessor) saveSCOutputToCurrentState(output *vmcommon.VMOutput, round uint64, txHash []byte) error { - var err error - - sc.mutSCState.Lock() - defer sc.mutSCState.Unlock() - - if _, ok := sc.mapExecState[round]; !ok { - sc.mapExecState[round] = scExecutionState{ - allLogs: make(map[string][]*vmcommon.LogEntry), - allReturnData: make(map[string][]*big.Int), - returnCodes: make(map[string]vmcommon.ReturnCode)} - } - - tmpCurrScState := sc.mapExecState[round] - defer func() { - if err != nil { - sc.mapExecState[round] = tmpCurrScState - } - }() - - err = sc.saveReturnData(output.ReturnData, round, txHash) - if err != nil { - return err - } - - err = sc.saveReturnCode(output.ReturnCode, round, txHash) - if err != nil { - return err - } 
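// Note on the snapshot/defer pair used by saveSCOutputToCurrentState: tmpCurrScState captures
// the current per-round entry and the deferred function writes it back whenever err ends up
// non-nil, the intent being to drop a partially written round entry on failure. Since the
// snapshot is a shallow struct copy, the inner maps (allLogs, allReturnData, returnCodes)
// remain shared with the live entry.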
- - err = sc.saveLogsIntoState(output.Logs, round, txHash) - if err != nil { - return err - } - - return nil + var err error + + sc.mutSCState.Lock() + defer sc.mutSCState.Unlock() + + if _, ok := sc.mapExecState[round]; !ok { + sc.mapExecState[round] = scExecutionState{ + allLogs: make(map[string][]*vmcommon.LogEntry), + allReturnData: make(map[string][]*big.Int), + returnCodes: make(map[string]vmcommon.ReturnCode)} + } + + tmpCurrScState := sc.mapExecState[round] + defer func() { + if err != nil { + sc.mapExecState[round] = tmpCurrScState + } + }() + + err = sc.saveReturnData(output.ReturnData, round, txHash) + if err != nil { + return err + } + + err = sc.saveReturnCode(output.ReturnCode, round, txHash) + if err != nil { + return err + } + + err = sc.saveLogsIntoState(output.Logs, round, txHash) + if err != nil { + return err + } + + return nil } // saves return data into account state func (sc *scProcessor) saveReturnData(returnData []*big.Int, round uint64, txHash []byte) error { - sc.mapExecState[round].allReturnData[string(txHash)] = returnData - return nil + sc.mapExecState[round].allReturnData[string(txHash)] = returnData + return nil } // saves smart contract return code into account state func (sc *scProcessor) saveReturnCode(returnCode vmcommon.ReturnCode, round uint64, txHash []byte) error { - sc.mapExecState[round].returnCodes[string(txHash)] = returnCode - return nil + sc.mapExecState[round].returnCodes[string(txHash)] = returnCode + return nil } // save vm output logs into accounts func (sc *scProcessor) saveLogsIntoState(logs []*vmcommon.LogEntry, round uint64, txHash []byte) error { - sc.mapExecState[round].allLogs[string(txHash)] = logs - return nil + sc.mapExecState[round].allLogs[string(txHash)] = logs + return nil } // ProcessSmartContractResult updates the account state from the smart contract result func (sc *scProcessor) ProcessSmartContractResult(scr *smartContractResult.SmartContractResult) error { - if scr == nil { - return process.ErrNilSmartContractResult - } - - accHandler, err := sc.getAccountFromAddress(scr.RcvAddr) - if err != nil { - return err - } - if accHandler == nil || accHandler.IsInterfaceNil() { - return process.ErrNilSCDestAccount - } - - stAcc, ok := accHandler.(*state.Account) - if !ok { - return process.ErrWrongTypeAssertion - } - - storageUpdates, err := sc.argsParser.GetStorageUpdates(scr.Data) - for i := 0; i < len(storageUpdates); i++ { - stAcc.DataTrieTracker().SaveKeyValue(storageUpdates[i].Offset, storageUpdates[i].Data) - } - - if len(scr.Data) > 0 { - //SC with data variables - err := sc.accounts.SaveDataTrie(stAcc) - if err != nil { - return err - } - } - - if len(scr.Code) > 0 { - err = sc.accounts.PutCode(stAcc, scr.Code) - if err != nil { - return err - } - } - - if scr.Value == nil { - return process.ErrNilBalanceFromSC - } - - operation := big.NewInt(0) - operation = operation.Add(scr.Value, stAcc.Balance) - err = stAcc.SetBalanceWithJournal(operation) - if err != nil { - return err - } - - return nil + if scr == nil { + return process.ErrNilSmartContractResult + } + + accHandler, err := sc.getAccountFromAddress(scr.RcvAddr) + if err != nil { + return err + } + if accHandler == nil || accHandler.IsInterfaceNil() { + return process.ErrNilSCDestAccount + } + + stAcc, ok := accHandler.(*state.Account) + if !ok { + return process.ErrWrongTypeAssertion + } + + storageUpdates, err := sc.argsParser.GetStorageUpdates(scr.Data) + for i := 0; i < len(storageUpdates); i++ { + stAcc.DataTrieTracker().SaveKeyValue(storageUpdates[i].Offset, 
storageUpdates[i].Data) + } + + if len(scr.Data) > 0 { + //SC with data variables + err := sc.accounts.SaveDataTrie(stAcc) + if err != nil { + return err + } + } + + if len(scr.Code) > 0 { + err = sc.accounts.PutCode(stAcc, scr.Code) + if err != nil { + return err + } + } + + if scr.Value == nil { + return process.ErrNilBalanceFromSC + } + + operation := big.NewInt(0) + operation = operation.Add(scr.Value, stAcc.Balance) + err = stAcc.SetBalanceWithJournal(operation) + if err != nil { + return err + } + + return nil } // IsInterfaceNil returns true if there is no value under the interface func (sc *scProcessor) IsInterfaceNil() bool { - if sc == nil { - return true - } - return false + if sc == nil { + return true + } + return false } diff --git a/process/transaction/process.go b/process/transaction/process.go index 86638aa9807..2903848424f 100644 --- a/process/transaction/process.go +++ b/process/transaction/process.go @@ -26,26 +26,26 @@ var mutTxFee sync.RWMutex // txProcessor implements TransactionProcessor interface and can modify account states according to a transaction type txProcessor struct { - accounts state.AccountsAdapter - adrConv state.AddressConverter - hasher hashing.Hasher - scProcessor process.SmartContractProcessor - marshalizer marshal.Marshalizer - txFeeHandler process.TransactionFeeHandler - shardCoordinator sharding.Coordinator - txTypeHandler process.TxTypeHandler + accounts state.AccountsAdapter + adrConv state.AddressConverter + hasher hashing.Hasher + scProcessor process.SmartContractProcessor + marshalizer marshal.Marshalizer + txFeeHandler process.TransactionFeeHandler + shardCoordinator sharding.Coordinator + txTypeHandler process.TxTypeHandler } // NewTxProcessor creates a new txProcessor engine func NewTxProcessor( - accounts state.AccountsAdapter, - hasher hashing.Hasher, - addressConv state.AddressConverter, - marshalizer marshal.Marshalizer, - shardCoordinator sharding.Coordinator, - scProcessor process.SmartContractProcessor, - txFeeHandler process.TransactionFeeHandler, - txTypeHandler process.TxTypeHandler, + accounts state.AccountsAdapter, + hasher hashing.Hasher, + addressConv state.AddressConverter, + marshalizer marshal.Marshalizer, + shardCoordinator sharding.Coordinator, + scProcessor process.SmartContractProcessor, + txFeeHandler process.TransactionFeeHandler, + txTypeHandler process.TxTypeHandler, ) (*txProcessor, error) { if accounts == nil || accounts.IsInterfaceNil() { @@ -73,16 +73,16 @@ func NewTxProcessor( return nil, process.ErrNilTxTypeHandler } - return &txProcessor{ - accounts: accounts, - hasher: hasher, - adrConv: addressConv, - marshalizer: marshalizer, - shardCoordinator: shardCoordinator, - scProcessor: scProcessor, - txFeeHandler: txFeeHandler, - txTypeHandler: txTypeHandler, - }, nil + return &txProcessor{ + accounts: accounts, + hasher: hasher, + adrConv: addressConv, + marshalizer: marshalizer, + shardCoordinator: shardCoordinator, + scProcessor: scProcessor, + txFeeHandler: txFeeHandler, + txTypeHandler: txTypeHandler, + }, nil } // ProcessTransaction modifies the account states in respect with the transaction data @@ -187,7 +187,7 @@ func (txProc *txProcessor) processMoveBalance( } } - txProc.txFeeHandler.ProcessTransactionFee(txFee) + txProc.txFeeHandler.ProcessTransactionFee(txFee) return nil } @@ -383,8 +383,8 @@ func (txProc *txProcessor) increaseNonce(acntSrc *state.Account) error { // IsInterfaceNil returns true if there is no value under the interface func (txProc *txProcessor) IsInterfaceNil() bool { - if txProc == 
nil { - return true - } - return false + if txProc == nil { + return true + } + return false } From ba9f8c1e8068ce399aeafbf9144683cb83485858 Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Tue, 10 Sep 2019 12:55:28 +0300 Subject: [PATCH 092/234] format spaces to tabs --- cmd/node/factory/structs.go | 3158 +++--- config/config.go | 208 +- consensus/mock/blockProcessorMock.go | 58 +- data/address/specialAddresses.go | 116 +- .../shard/resolversContainerFactory.go | 918 +- .../shard/resolversContainerFactory_test.go | 702 +- integrationTests/mock/blockProcessorMock.go | 100 +- .../mock/specialAddressHandlerMock.go | 74 +- .../interceptedHeadersSigVerification_test.go | 292 +- .../smartContract/testInitilalizer.go | 1588 +-- .../interceptedResolvedBulkTx_test.go | 638 +- .../transaction/interceptedResolvedTx_test.go | 344 +- integrationTests/testInitializer.go | 1634 +-- integrationTests/testProcessorNode.go | 1398 +-- .../testProcessorNodeWithMultisigner.go | 326 +- node/mock/blockProcessorStub.go | 62 +- process/block/baseProcess.go | 818 +- process/block/displayBlock.go | 346 +- process/block/metablock.go | 2486 ++--- process/block/preprocess/export_test.go | 54 +- .../block/preprocess/rewardTxPreProcessor.go | 744 +- process/block/preprocess/rewardsHandler.go | 642 +- .../block/preprocess/rewardsHandler_test.go | 658 +- process/block/preprocess/transactions.go | 2 +- process/block/preprocess/transactions_test.go | 1448 +-- process/block/shardblock.go | 2792 ++--- process/block/shardblock_test.go | 9142 ++++++++--------- process/coordinator/process.go | 1338 +-- process/coordinator/process_test.go | 3386 +++--- process/errors.go | 2 +- .../shard/interceptorsContainerFactory.go | 770 +- .../interceptorsContainerFactory_test.go | 1116 +- .../intermediateProcessorsContainerFactory.go | 194 +- ...rmediateProcessorsContainerFactory_test.go | 228 +- process/interface.go | 398 +- process/mock/blockProcessorMock.go | 64 +- process/mock/specialAddressHandlerMock.go | 104 +- .../interceptedRewardTransaction.go | 194 +- process/rewardTransaction/interceptor.go | 238 +- process/rewardTransaction/process.go | 176 +- process/smartContract/process.go | 1344 +-- process/transaction/process.go | 602 +- 42 files changed, 20451 insertions(+), 20451 deletions(-) diff --git a/cmd/node/factory/structs.go b/cmd/node/factory/structs.go index ab109a1418e..ff2e95610b0 100644 --- a/cmd/node/factory/structs.go +++ b/cmd/node/factory/structs.go @@ -1,1850 +1,1850 @@ package factory import ( - "context" - "crypto/ecdsa" - "crypto/rand" - "encoding/hex" - "errors" - "fmt" - "io" - "math/big" - "path/filepath" - "time" - - "github.com/ElrondNetwork/elrond-go/config" - "github.com/ElrondNetwork/elrond-go/consensus" - "github.com/ElrondNetwork/elrond-go/consensus/round" - "github.com/ElrondNetwork/elrond-go/core" - "github.com/ElrondNetwork/elrond-go/core/genesis" - "github.com/ElrondNetwork/elrond-go/core/logger" - "github.com/ElrondNetwork/elrond-go/core/partitioning" - "github.com/ElrondNetwork/elrond-go/core/serviceContainer" - "github.com/ElrondNetwork/elrond-go/crypto" - "github.com/ElrondNetwork/elrond-go/crypto/signing" - "github.com/ElrondNetwork/elrond-go/crypto/signing/kyber" - blsMultiSig "github.com/ElrondNetwork/elrond-go/crypto/signing/kyber/multisig" - "github.com/ElrondNetwork/elrond-go/crypto/signing/kyber/singlesig" - "github.com/ElrondNetwork/elrond-go/crypto/signing/multisig" - "github.com/ElrondNetwork/elrond-go/data" - "github.com/ElrondNetwork/elrond-go/data/address" - dataBlock 
"github.com/ElrondNetwork/elrond-go/data/block" - "github.com/ElrondNetwork/elrond-go/data/blockchain" - "github.com/ElrondNetwork/elrond-go/data/state" - "github.com/ElrondNetwork/elrond-go/data/state/addressConverters" - factoryState "github.com/ElrondNetwork/elrond-go/data/state/factory" - "github.com/ElrondNetwork/elrond-go/data/trie" - "github.com/ElrondNetwork/elrond-go/data/typeConverters" - "github.com/ElrondNetwork/elrond-go/data/typeConverters/uint64ByteSlice" - "github.com/ElrondNetwork/elrond-go/dataRetriever" - "github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool" - "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/containers" - metafactoryDataRetriever "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/metachain" - shardfactoryDataRetriever "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/shard" - "github.com/ElrondNetwork/elrond-go/dataRetriever/requestHandlers" - "github.com/ElrondNetwork/elrond-go/dataRetriever/shardedData" - "github.com/ElrondNetwork/elrond-go/hashing" - "github.com/ElrondNetwork/elrond-go/hashing/blake2b" - "github.com/ElrondNetwork/elrond-go/hashing/sha256" - "github.com/ElrondNetwork/elrond-go/marshal" - "github.com/ElrondNetwork/elrond-go/ntp" - "github.com/ElrondNetwork/elrond-go/p2p" - "github.com/ElrondNetwork/elrond-go/p2p/libp2p" - factoryP2P "github.com/ElrondNetwork/elrond-go/p2p/libp2p/factory" - "github.com/ElrondNetwork/elrond-go/p2p/loadBalancer" - "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/process/block" - "github.com/ElrondNetwork/elrond-go/process/coordinator" - "github.com/ElrondNetwork/elrond-go/process/factory" - "github.com/ElrondNetwork/elrond-go/process/factory/metachain" - "github.com/ElrondNetwork/elrond-go/process/factory/shard" - "github.com/ElrondNetwork/elrond-go/process/rewardTransaction" - "github.com/ElrondNetwork/elrond-go/process/smartContract" - processSync "github.com/ElrondNetwork/elrond-go/process/sync" - "github.com/ElrondNetwork/elrond-go/process/track" - "github.com/ElrondNetwork/elrond-go/process/transaction" - "github.com/ElrondNetwork/elrond-go/sharding" - "github.com/ElrondNetwork/elrond-go/statusHandler" - factoryViews "github.com/ElrondNetwork/elrond-go/statusHandler/factory" - "github.com/ElrondNetwork/elrond-go/statusHandler/view" - "github.com/ElrondNetwork/elrond-go/storage" - "github.com/ElrondNetwork/elrond-go/storage/memorydb" - "github.com/ElrondNetwork/elrond-go/storage/storageUnit" - "github.com/btcsuite/btcd/btcec" - libp2pCrypto "github.com/libp2p/go-libp2p-core/crypto" - "github.com/urfave/cli" + "context" + "crypto/ecdsa" + "crypto/rand" + "encoding/hex" + "errors" + "fmt" + "io" + "math/big" + "path/filepath" + "time" + + "github.com/ElrondNetwork/elrond-go/config" + "github.com/ElrondNetwork/elrond-go/consensus" + "github.com/ElrondNetwork/elrond-go/consensus/round" + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/core/genesis" + "github.com/ElrondNetwork/elrond-go/core/logger" + "github.com/ElrondNetwork/elrond-go/core/partitioning" + "github.com/ElrondNetwork/elrond-go/core/serviceContainer" + "github.com/ElrondNetwork/elrond-go/crypto" + "github.com/ElrondNetwork/elrond-go/crypto/signing" + "github.com/ElrondNetwork/elrond-go/crypto/signing/kyber" + blsMultiSig "github.com/ElrondNetwork/elrond-go/crypto/signing/kyber/multisig" + "github.com/ElrondNetwork/elrond-go/crypto/signing/kyber/singlesig" + "github.com/ElrondNetwork/elrond-go/crypto/signing/multisig" + 
"github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/address" + dataBlock "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/blockchain" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/data/state/addressConverters" + factoryState "github.com/ElrondNetwork/elrond-go/data/state/factory" + "github.com/ElrondNetwork/elrond-go/data/trie" + "github.com/ElrondNetwork/elrond-go/data/typeConverters" + "github.com/ElrondNetwork/elrond-go/data/typeConverters/uint64ByteSlice" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool" + "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/containers" + metafactoryDataRetriever "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/metachain" + shardfactoryDataRetriever "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/shard" + "github.com/ElrondNetwork/elrond-go/dataRetriever/requestHandlers" + "github.com/ElrondNetwork/elrond-go/dataRetriever/shardedData" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/hashing/blake2b" + "github.com/ElrondNetwork/elrond-go/hashing/sha256" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/ntp" + "github.com/ElrondNetwork/elrond-go/p2p" + "github.com/ElrondNetwork/elrond-go/p2p/libp2p" + factoryP2P "github.com/ElrondNetwork/elrond-go/p2p/libp2p/factory" + "github.com/ElrondNetwork/elrond-go/p2p/loadBalancer" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/block" + "github.com/ElrondNetwork/elrond-go/process/coordinator" + "github.com/ElrondNetwork/elrond-go/process/factory" + "github.com/ElrondNetwork/elrond-go/process/factory/metachain" + "github.com/ElrondNetwork/elrond-go/process/factory/shard" + "github.com/ElrondNetwork/elrond-go/process/rewardTransaction" + "github.com/ElrondNetwork/elrond-go/process/smartContract" + processSync "github.com/ElrondNetwork/elrond-go/process/sync" + "github.com/ElrondNetwork/elrond-go/process/track" + "github.com/ElrondNetwork/elrond-go/process/transaction" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/statusHandler" + factoryViews "github.com/ElrondNetwork/elrond-go/statusHandler/factory" + "github.com/ElrondNetwork/elrond-go/statusHandler/view" + "github.com/ElrondNetwork/elrond-go/storage" + "github.com/ElrondNetwork/elrond-go/storage/memorydb" + "github.com/ElrondNetwork/elrond-go/storage/storageUnit" + "github.com/btcsuite/btcd/btcec" + libp2pCrypto "github.com/libp2p/go-libp2p-core/crypto" + "github.com/urfave/cli" ) const ( - // BlsHashSize specifies the hash size for using bls scheme - BlsHashSize = 16 + // BlsHashSize specifies the hash size for using bls scheme + BlsHashSize = 16 - // BlsConsensusType specifies te signature scheme used in the consensus - BlsConsensusType = "bls" + // BlsConsensusType specifies te signature scheme used in the consensus + BlsConsensusType = "bls" - // BnConsensusType specifies te signature scheme used in the consensus - BnConsensusType = "bn" + // BnConsensusType specifies te signature scheme used in the consensus + BnConsensusType = "bn" - // MaxTxsToRequest specifies the maximum number of txs to request - MaxTxsToRequest = 100 + // MaxTxsToRequest specifies the maximum number of txs to request + MaxTxsToRequest = 100 ) var log = logger.DefaultLogger() // Network struct holds the network components of the 
Elrond protocol type Network struct { - NetMessenger p2p.Messenger + NetMessenger p2p.Messenger } // Core struct holds the core components of the Elrond protocol type Core struct { - Hasher hashing.Hasher - Marshalizer marshal.Marshalizer - Trie data.Trie - Uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter - StatusHandler core.AppStatusHandler + Hasher hashing.Hasher + Marshalizer marshal.Marshalizer + Trie data.Trie + Uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter + StatusHandler core.AppStatusHandler } // State struct holds the state components of the Elrond protocol type State struct { - AddressConverter state.AddressConverter - AccountsAdapter state.AccountsAdapter - InBalanceForShard map[string]*big.Int + AddressConverter state.AddressConverter + AccountsAdapter state.AccountsAdapter + InBalanceForShard map[string]*big.Int } // Data struct holds the data components of the Elrond protocol type Data struct { - Blkc data.ChainHandler - Store dataRetriever.StorageService - Datapool dataRetriever.PoolsHolder - MetaDatapool dataRetriever.MetaPoolsHolder + Blkc data.ChainHandler + Store dataRetriever.StorageService + Datapool dataRetriever.PoolsHolder + MetaDatapool dataRetriever.MetaPoolsHolder } // Crypto struct holds the crypto components of the Elrond protocol type Crypto struct { - TxSingleSigner crypto.SingleSigner - SingleSigner crypto.SingleSigner - MultiSigner crypto.MultiSigner - TxSignKeyGen crypto.KeyGenerator - TxSignPrivKey crypto.PrivateKey - TxSignPubKey crypto.PublicKey - InitialPubKeys map[uint32][]string + TxSingleSigner crypto.SingleSigner + SingleSigner crypto.SingleSigner + MultiSigner crypto.MultiSigner + TxSignKeyGen crypto.KeyGenerator + TxSignPrivKey crypto.PrivateKey + TxSignPubKey crypto.PublicKey + InitialPubKeys map[uint32][]string } // Process struct holds the process components of the Elrond protocol type Process struct { - InterceptorsContainer process.InterceptorsContainer - ResolversFinder dataRetriever.ResolversFinder - Rounder consensus.Rounder - ForkDetector process.ForkDetector - BlockProcessor process.BlockProcessor - BlockTracker process.BlocksTracker + InterceptorsContainer process.InterceptorsContainer + ResolversFinder dataRetriever.ResolversFinder + Rounder consensus.Rounder + ForkDetector process.ForkDetector + BlockProcessor process.BlockProcessor + BlockTracker process.BlocksTracker } type coreComponentsFactoryArgs struct { - config *config.Config - uniqueID string + config *config.Config + uniqueID string } // NewCoreComponentsFactoryArgs initializes the arguments necessary for creating the core components func NewCoreComponentsFactoryArgs(config *config.Config, uniqueID string) *coreComponentsFactoryArgs { - return &coreComponentsFactoryArgs{ - config: config, - uniqueID: uniqueID, - } + return &coreComponentsFactoryArgs{ + config: config, + uniqueID: uniqueID, + } } // CoreComponentsFactory creates the core components func CoreComponentsFactory(args *coreComponentsFactoryArgs) (*Core, error) { - hasher, err := getHasherFromConfig(args.config) - if err != nil { - return nil, errors.New("could not create hasher: " + err.Error()) - } - - marshalizer, err := getMarshalizerFromConfig(args.config) - if err != nil { - return nil, errors.New("could not create marshalizer: " + err.Error()) - } - - merkleTrie, err := getTrie(args.config.AccountsTrieStorage, marshalizer, hasher, args.uniqueID) - if err != nil { - return nil, errors.New("error creating trie: " + err.Error()) - } - uint64ByteSliceConverter := 
uint64ByteSlice.NewBigEndianConverter() - - return &Core{ - Hasher: hasher, - Marshalizer: marshalizer, - Trie: merkleTrie, - Uint64ByteSliceConverter: uint64ByteSliceConverter, - StatusHandler: statusHandler.NewNilStatusHandler(), - }, nil + hasher, err := getHasherFromConfig(args.config) + if err != nil { + return nil, errors.New("could not create hasher: " + err.Error()) + } + + marshalizer, err := getMarshalizerFromConfig(args.config) + if err != nil { + return nil, errors.New("could not create marshalizer: " + err.Error()) + } + + merkleTrie, err := getTrie(args.config.AccountsTrieStorage, marshalizer, hasher, args.uniqueID) + if err != nil { + return nil, errors.New("error creating trie: " + err.Error()) + } + uint64ByteSliceConverter := uint64ByteSlice.NewBigEndianConverter() + + return &Core{ + Hasher: hasher, + Marshalizer: marshalizer, + Trie: merkleTrie, + Uint64ByteSliceConverter: uint64ByteSliceConverter, + StatusHandler: statusHandler.NewNilStatusHandler(), + }, nil } type stateComponentsFactoryArgs struct { - config *config.Config - genesisConfig *sharding.Genesis - shardCoordinator sharding.Coordinator - core *Core + config *config.Config + genesisConfig *sharding.Genesis + shardCoordinator sharding.Coordinator + core *Core } // NewStateComponentsFactoryArgs initializes the arguments necessary for creating the state components func NewStateComponentsFactoryArgs( - config *config.Config, - genesisConfig *sharding.Genesis, - shardCoordinator sharding.Coordinator, - core *Core, + config *config.Config, + genesisConfig *sharding.Genesis, + shardCoordinator sharding.Coordinator, + core *Core, ) *stateComponentsFactoryArgs { - return &stateComponentsFactoryArgs{ - config: config, - genesisConfig: genesisConfig, - shardCoordinator: shardCoordinator, - core: core, - } + return &stateComponentsFactoryArgs{ + config: config, + genesisConfig: genesisConfig, + shardCoordinator: shardCoordinator, + core: core, + } } // StateComponentsFactory creates the state components func StateComponentsFactory(args *stateComponentsFactoryArgs) (*State, error) { - addressConverter, err := addressConverters.NewPlainAddressConverter( - args.config.Address.Length, - args.config.Address.Prefix, - ) - - if err != nil { - return nil, errors.New("could not create address converter: " + err.Error()) - } - - accountFactory, err := factoryState.NewAccountFactoryCreator(factoryState.UserAccount) - if err != nil { - return nil, errors.New("could not create account factory: " + err.Error()) - } - - accountsAdapter, err := state.NewAccountsDB(args.core.Trie, args.core.Hasher, args.core.Marshalizer, accountFactory) - if err != nil { - return nil, errors.New("could not create accounts adapter: " + err.Error()) - } - - inBalanceForShard, err := args.genesisConfig.InitialNodesBalances(args.shardCoordinator, addressConverter) - if err != nil { - return nil, errors.New("initial balances could not be processed " + err.Error()) - } - - return &State{ - AddressConverter: addressConverter, - AccountsAdapter: accountsAdapter, - InBalanceForShard: inBalanceForShard, - }, nil + addressConverter, err := addressConverters.NewPlainAddressConverter( + args.config.Address.Length, + args.config.Address.Prefix, + ) + + if err != nil { + return nil, errors.New("could not create address converter: " + err.Error()) + } + + accountFactory, err := factoryState.NewAccountFactoryCreator(factoryState.UserAccount) + if err != nil { + return nil, errors.New("could not create account factory: " + err.Error()) + } + + accountsAdapter, err := 
state.NewAccountsDB(args.core.Trie, args.core.Hasher, args.core.Marshalizer, accountFactory) + if err != nil { + return nil, errors.New("could not create accounts adapter: " + err.Error()) + } + + inBalanceForShard, err := args.genesisConfig.InitialNodesBalances(args.shardCoordinator, addressConverter) + if err != nil { + return nil, errors.New("initial balances could not be processed " + err.Error()) + } + + return &State{ + AddressConverter: addressConverter, + AccountsAdapter: accountsAdapter, + InBalanceForShard: inBalanceForShard, + }, nil } type dataComponentsFactoryArgs struct { - config *config.Config - shardCoordinator sharding.Coordinator - core *Core - uniqueID string + config *config.Config + shardCoordinator sharding.Coordinator + core *Core + uniqueID string } // NewDataComponentsFactoryArgs initializes the arguments necessary for creating the data components func NewDataComponentsFactoryArgs( - config *config.Config, - shardCoordinator sharding.Coordinator, - core *Core, - uniqueID string, + config *config.Config, + shardCoordinator sharding.Coordinator, + core *Core, + uniqueID string, ) *dataComponentsFactoryArgs { - return &dataComponentsFactoryArgs{ - config: config, - shardCoordinator: shardCoordinator, - core: core, - uniqueID: uniqueID, - } + return &dataComponentsFactoryArgs{ + config: config, + shardCoordinator: shardCoordinator, + core: core, + uniqueID: uniqueID, + } } // DataComponentsFactory creates the data components func DataComponentsFactory(args *dataComponentsFactoryArgs) (*Data, error) { - var datapool dataRetriever.PoolsHolder - var metaDatapool dataRetriever.MetaPoolsHolder - blkc, err := createBlockChainFromConfig(args.config, args.shardCoordinator, args.core.StatusHandler) - if err != nil { - return nil, errors.New("could not create block chain: " + err.Error()) - } - - store, err := createDataStoreFromConfig(args.config, args.shardCoordinator, args.uniqueID) - if err != nil { - return nil, errors.New("could not create local data store: " + err.Error()) - } - - if args.shardCoordinator.SelfId() < args.shardCoordinator.NumberOfShards() { - datapool, err = createShardDataPoolFromConfig(args.config, args.core.Uint64ByteSliceConverter) - if err != nil { - return nil, errors.New("could not create shard data pools: " + err.Error()) - } - } - if args.shardCoordinator.SelfId() == sharding.MetachainShardId { - metaDatapool, err = createMetaDataPoolFromConfig(args.config, args.core.Uint64ByteSliceConverter) - if err != nil { - return nil, errors.New("could not create shard data pools: " + err.Error()) - } - } - if datapool == nil && metaDatapool == nil { - return nil, errors.New("could not create data pools: ") - } - - return &Data{ - Blkc: blkc, - Store: store, - Datapool: datapool, - MetaDatapool: metaDatapool, - }, nil + var datapool dataRetriever.PoolsHolder + var metaDatapool dataRetriever.MetaPoolsHolder + blkc, err := createBlockChainFromConfig(args.config, args.shardCoordinator, args.core.StatusHandler) + if err != nil { + return nil, errors.New("could not create block chain: " + err.Error()) + } + + store, err := createDataStoreFromConfig(args.config, args.shardCoordinator, args.uniqueID) + if err != nil { + return nil, errors.New("could not create local data store: " + err.Error()) + } + + if args.shardCoordinator.SelfId() < args.shardCoordinator.NumberOfShards() { + datapool, err = createShardDataPoolFromConfig(args.config, args.core.Uint64ByteSliceConverter) + if err != nil { + return nil, errors.New("could not create shard data pools: " + 
err.Error()) + } + } + if args.shardCoordinator.SelfId() == sharding.MetachainShardId { + metaDatapool, err = createMetaDataPoolFromConfig(args.config, args.core.Uint64ByteSliceConverter) + if err != nil { + return nil, errors.New("could not create shard data pools: " + err.Error()) + } + } + if datapool == nil && metaDatapool == nil { + return nil, errors.New("could not create data pools: ") + } + + return &Data{ + Blkc: blkc, + Store: store, + Datapool: datapool, + MetaDatapool: metaDatapool, + }, nil } type cryptoComponentsFactoryArgs struct { - ctx *cli.Context - config *config.Config - nodesConfig *sharding.NodesSetup - shardCoordinator sharding.Coordinator - keyGen crypto.KeyGenerator - privKey crypto.PrivateKey - log *logger.Logger - initialBalancesSkPemFileName string - txSignSkName string - txSignSkIndexName string + ctx *cli.Context + config *config.Config + nodesConfig *sharding.NodesSetup + shardCoordinator sharding.Coordinator + keyGen crypto.KeyGenerator + privKey crypto.PrivateKey + log *logger.Logger + initialBalancesSkPemFileName string + txSignSkName string + txSignSkIndexName string } // NewCryptoComponentsFactoryArgs initializes the arguments necessary for creating the crypto components func NewCryptoComponentsFactoryArgs( - ctx *cli.Context, - config *config.Config, - nodesConfig *sharding.NodesSetup, - shardCoordinator sharding.Coordinator, - keyGen crypto.KeyGenerator, - privKey crypto.PrivateKey, - log *logger.Logger, - initialBalancesSkPemFileName string, - txSignSkName string, - txSignSkIndexName string, + ctx *cli.Context, + config *config.Config, + nodesConfig *sharding.NodesSetup, + shardCoordinator sharding.Coordinator, + keyGen crypto.KeyGenerator, + privKey crypto.PrivateKey, + log *logger.Logger, + initialBalancesSkPemFileName string, + txSignSkName string, + txSignSkIndexName string, ) *cryptoComponentsFactoryArgs { - return &cryptoComponentsFactoryArgs{ - ctx: ctx, - config: config, - nodesConfig: nodesConfig, - shardCoordinator: shardCoordinator, - keyGen: keyGen, - privKey: privKey, - log: log, - initialBalancesSkPemFileName: initialBalancesSkPemFileName, - txSignSkName: txSignSkName, - txSignSkIndexName: txSignSkIndexName, - } + return &cryptoComponentsFactoryArgs{ + ctx: ctx, + config: config, + nodesConfig: nodesConfig, + shardCoordinator: shardCoordinator, + keyGen: keyGen, + privKey: privKey, + log: log, + initialBalancesSkPemFileName: initialBalancesSkPemFileName, + txSignSkName: txSignSkName, + txSignSkIndexName: txSignSkIndexName, + } } // CryptoComponentsFactory creates the crypto components func CryptoComponentsFactory(args *cryptoComponentsFactoryArgs) (*Crypto, error) { - initialPubKeys := args.nodesConfig.InitialNodesPubKeys() - txSingleSigner := &singlesig.SchnorrSigner{} - singleSigner, err := createSingleSigner(args.config) - if err != nil { - return nil, errors.New("could not create singleSigner: " + err.Error()) - } - - multisigHasher, err := getMultisigHasherFromConfig(args.config) - if err != nil { - return nil, errors.New("could not create multisig hasher: " + err.Error()) - } - - currentShardNodesPubKeys, err := args.nodesConfig.InitialNodesPubKeysForShard(args.shardCoordinator.SelfId()) - if err != nil { - return nil, errors.New("could not start creation of multiSigner: " + err.Error()) - } - - multiSigner, err := createMultiSigner(args.config, multisigHasher, currentShardNodesPubKeys, args.privKey, args.keyGen) - if err != nil { - return nil, err - } - - initialBalancesSkPemFileName := 
args.ctx.GlobalString(args.initialBalancesSkPemFileName) - txSignKeyGen, txSignPrivKey, txSignPubKey, err := GetSigningParams( - args.ctx, - args.log, - args.txSignSkName, - args.txSignSkIndexName, - initialBalancesSkPemFileName, - kyber.NewBlakeSHA256Ed25519()) - if err != nil { - return nil, err - } - args.log.Info("Starting with tx sign public key: " + GetPkEncoded(txSignPubKey)) - - return &Crypto{ - TxSingleSigner: txSingleSigner, - SingleSigner: singleSigner, - MultiSigner: multiSigner, - TxSignKeyGen: txSignKeyGen, - TxSignPrivKey: txSignPrivKey, - TxSignPubKey: txSignPubKey, - InitialPubKeys: initialPubKeys, - }, nil + initialPubKeys := args.nodesConfig.InitialNodesPubKeys() + txSingleSigner := &singlesig.SchnorrSigner{} + singleSigner, err := createSingleSigner(args.config) + if err != nil { + return nil, errors.New("could not create singleSigner: " + err.Error()) + } + + multisigHasher, err := getMultisigHasherFromConfig(args.config) + if err != nil { + return nil, errors.New("could not create multisig hasher: " + err.Error()) + } + + currentShardNodesPubKeys, err := args.nodesConfig.InitialNodesPubKeysForShard(args.shardCoordinator.SelfId()) + if err != nil { + return nil, errors.New("could not start creation of multiSigner: " + err.Error()) + } + + multiSigner, err := createMultiSigner(args.config, multisigHasher, currentShardNodesPubKeys, args.privKey, args.keyGen) + if err != nil { + return nil, err + } + + initialBalancesSkPemFileName := args.ctx.GlobalString(args.initialBalancesSkPemFileName) + txSignKeyGen, txSignPrivKey, txSignPubKey, err := GetSigningParams( + args.ctx, + args.log, + args.txSignSkName, + args.txSignSkIndexName, + initialBalancesSkPemFileName, + kyber.NewBlakeSHA256Ed25519()) + if err != nil { + return nil, err + } + args.log.Info("Starting with tx sign public key: " + GetPkEncoded(txSignPubKey)) + + return &Crypto{ + TxSingleSigner: txSingleSigner, + SingleSigner: singleSigner, + MultiSigner: multiSigner, + TxSignKeyGen: txSignKeyGen, + TxSignPrivKey: txSignPrivKey, + TxSignPubKey: txSignPubKey, + InitialPubKeys: initialPubKeys, + }, nil } // NetworkComponentsFactory creates the network components func NetworkComponentsFactory(p2pConfig *config.P2PConfig, log *logger.Logger, core *Core) (*Network, error) { - var randReader io.Reader - if p2pConfig.Node.Seed != "" { - randReader = NewSeedRandReader(core.Hasher.Compute(p2pConfig.Node.Seed)) - } else { - randReader = rand.Reader - } - - netMessenger, err := createNetMessenger(p2pConfig, log, randReader) - if err != nil { - return nil, err - } - - return &Network{ - NetMessenger: netMessenger, - }, nil + var randReader io.Reader + if p2pConfig.Node.Seed != "" { + randReader = NewSeedRandReader(core.Hasher.Compute(p2pConfig.Node.Seed)) + } else { + randReader = rand.Reader + } + + netMessenger, err := createNetMessenger(p2pConfig, log, randReader) + if err != nil { + return nil, err + } + + return &Network{ + NetMessenger: netMessenger, + }, nil } type processComponentsFactoryArgs struct { - genesisConfig *sharding.Genesis - economicsConfig *config.EconomicsConfig - nodesConfig *sharding.NodesSetup - syncer ntp.SyncTimer - shardCoordinator sharding.Coordinator - nodesCoordinator sharding.NodesCoordinator - data *Data - core *Core - crypto *Crypto - state *State - network *Network - coreServiceContainer serviceContainer.Core + genesisConfig *sharding.Genesis + economicsConfig *config.EconomicsConfig + nodesConfig *sharding.NodesSetup + syncer ntp.SyncTimer + shardCoordinator sharding.Coordinator + 
nodesCoordinator sharding.NodesCoordinator + data *Data + core *Core + crypto *Crypto + state *State + network *Network + coreServiceContainer serviceContainer.Core } // NewProcessComponentsFactoryArgs initializes the arguments necessary for creating the process components func NewProcessComponentsFactoryArgs( - genesisConfig *sharding.Genesis, - economicsConfig *config.EconomicsConfig, - nodesConfig *sharding.NodesSetup, - syncer ntp.SyncTimer, - shardCoordinator sharding.Coordinator, - nodesCoordinator sharding.NodesCoordinator, - data *Data, - core *Core, - crypto *Crypto, - state *State, - network *Network, - coreServiceContainer serviceContainer.Core, + genesisConfig *sharding.Genesis, + economicsConfig *config.EconomicsConfig, + nodesConfig *sharding.NodesSetup, + syncer ntp.SyncTimer, + shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, + data *Data, + core *Core, + crypto *Crypto, + state *State, + network *Network, + coreServiceContainer serviceContainer.Core, ) *processComponentsFactoryArgs { - return &processComponentsFactoryArgs{ - genesisConfig: genesisConfig, - economicsConfig: economicsConfig, - nodesConfig: nodesConfig, - syncer: syncer, - shardCoordinator: shardCoordinator, - nodesCoordinator: nodesCoordinator, - data: data, - core: core, - crypto: crypto, - state: state, - network: network, - coreServiceContainer: coreServiceContainer, - } + return &processComponentsFactoryArgs{ + genesisConfig: genesisConfig, + economicsConfig: economicsConfig, + nodesConfig: nodesConfig, + syncer: syncer, + shardCoordinator: shardCoordinator, + nodesCoordinator: nodesCoordinator, + data: data, + core: core, + crypto: crypto, + state: state, + network: network, + coreServiceContainer: coreServiceContainer, + } } // ProcessComponentsFactory creates the process components func ProcessComponentsFactory(args *processComponentsFactoryArgs) (*Process, error) { - interceptorContainerFactory, resolversContainerFactory, err := newInterceptorAndResolverContainerFactory( - args.shardCoordinator, args.nodesCoordinator, args.data, args.core, args.crypto, args.state, args.network) - if err != nil { - return nil, err - } - - //TODO refactor all these factory calls - interceptorsContainer, err := interceptorContainerFactory.Create() - if err != nil { - return nil, err - } - - resolversContainer, err := resolversContainerFactory.Create() - if err != nil { - return nil, err - } - - resolversFinder, err := containers.NewResolversFinder(resolversContainer, args.shardCoordinator) - if err != nil { - return nil, err - } - - rounder, err := round.NewRound( - time.Unix(args.nodesConfig.StartTime, 0), - args.syncer.CurrentTime(), - time.Millisecond*time.Duration(args.nodesConfig.RoundDuration), - args.syncer) - if err != nil { - return nil, err - } - - forkDetector, err := processSync.NewBasicForkDetector(rounder) - if err != nil { - return nil, err - } - - shardsGenesisBlocks, err := generateGenesisHeadersAndApplyInitialBalances( - args.core, - args.state, - args.shardCoordinator, - args.nodesConfig, - args.genesisConfig, - ) - if err != nil { - return nil, err - } - - err = prepareGenesisBlock(args, shardsGenesisBlocks) - if err != nil { - return nil, err - } - - blockProcessor, blockTracker, err := newBlockProcessorAndTracker( - resolversFinder, - args.shardCoordinator, - args.nodesCoordinator, - args.economicsConfig, - args.data, - args.core, - args.state, - forkDetector, - shardsGenesisBlocks, - args.nodesConfig, - args.coreServiceContainer, - ) - - if err != nil { - return 
nil, err - } - - return &Process{ - InterceptorsContainer: interceptorsContainer, - ResolversFinder: resolversFinder, - Rounder: rounder, - ForkDetector: forkDetector, - BlockProcessor: blockProcessor, - BlockTracker: blockTracker, - }, nil + interceptorContainerFactory, resolversContainerFactory, err := newInterceptorAndResolverContainerFactory( + args.shardCoordinator, args.nodesCoordinator, args.data, args.core, args.crypto, args.state, args.network) + if err != nil { + return nil, err + } + + //TODO refactor all these factory calls + interceptorsContainer, err := interceptorContainerFactory.Create() + if err != nil { + return nil, err + } + + resolversContainer, err := resolversContainerFactory.Create() + if err != nil { + return nil, err + } + + resolversFinder, err := containers.NewResolversFinder(resolversContainer, args.shardCoordinator) + if err != nil { + return nil, err + } + + rounder, err := round.NewRound( + time.Unix(args.nodesConfig.StartTime, 0), + args.syncer.CurrentTime(), + time.Millisecond*time.Duration(args.nodesConfig.RoundDuration), + args.syncer) + if err != nil { + return nil, err + } + + forkDetector, err := processSync.NewBasicForkDetector(rounder) + if err != nil { + return nil, err + } + + shardsGenesisBlocks, err := generateGenesisHeadersAndApplyInitialBalances( + args.core, + args.state, + args.shardCoordinator, + args.nodesConfig, + args.genesisConfig, + ) + if err != nil { + return nil, err + } + + err = prepareGenesisBlock(args, shardsGenesisBlocks) + if err != nil { + return nil, err + } + + blockProcessor, blockTracker, err := newBlockProcessorAndTracker( + resolversFinder, + args.shardCoordinator, + args.nodesCoordinator, + args.economicsConfig, + args.data, + args.core, + args.state, + forkDetector, + shardsGenesisBlocks, + args.nodesConfig, + args.coreServiceContainer, + ) + + if err != nil { + return nil, err + } + + return &Process{ + InterceptorsContainer: interceptorsContainer, + ResolversFinder: resolversFinder, + Rounder: rounder, + ForkDetector: forkDetector, + BlockProcessor: blockProcessor, + BlockTracker: blockTracker, + }, nil } func prepareGenesisBlock(args *processComponentsFactoryArgs, shardsGenesisBlocks map[uint32]data.HeaderHandler) error { - genesisBlock, ok := shardsGenesisBlocks[args.shardCoordinator.SelfId()] - if !ok { - return errors.New("genesis block does not exists") - } - - genesisBlockHash, err := core.CalculateHash(args.core.Marshalizer, args.core.Hasher, genesisBlock) - if err != nil { - return err - } - - err = args.data.Blkc.SetGenesisHeader(genesisBlock) - if err != nil { - return err - } - - args.data.Blkc.SetGenesisHeaderHash(genesisBlockHash) - - marshalizedBlock, err := args.core.Marshalizer.Marshal(genesisBlock) - if err != nil { - return err - } - - if args.shardCoordinator.SelfId() == sharding.MetachainShardId { - errNotCritical := args.data.Store.Put(dataRetriever.MetaBlockUnit, genesisBlockHash, marshalizedBlock) - log.LogIfError(errNotCritical) - - } else { - errNotCritical := args.data.Store.Put(dataRetriever.BlockHeaderUnit, genesisBlockHash, marshalizedBlock) - log.LogIfError(errNotCritical) - } - - return nil + genesisBlock, ok := shardsGenesisBlocks[args.shardCoordinator.SelfId()] + if !ok { + return errors.New("genesis block does not exists") + } + + genesisBlockHash, err := core.CalculateHash(args.core.Marshalizer, args.core.Hasher, genesisBlock) + if err != nil { + return err + } + + err = args.data.Blkc.SetGenesisHeader(genesisBlock) + if err != nil { + return err + } + + 
args.data.Blkc.SetGenesisHeaderHash(genesisBlockHash) + + marshalizedBlock, err := args.core.Marshalizer.Marshal(genesisBlock) + if err != nil { + return err + } + + if args.shardCoordinator.SelfId() == sharding.MetachainShardId { + errNotCritical := args.data.Store.Put(dataRetriever.MetaBlockUnit, genesisBlockHash, marshalizedBlock) + log.LogIfError(errNotCritical) + + } else { + errNotCritical := args.data.Store.Put(dataRetriever.BlockHeaderUnit, genesisBlockHash, marshalizedBlock) + log.LogIfError(errNotCritical) + } + + return nil } type seedRandReader struct { - index int - seed []byte + index int + seed []byte } // NewSeedRandReader will return a new instance of a seed-based reader func NewSeedRandReader(seed []byte) *seedRandReader { - return &seedRandReader{seed: seed, index: 0} + return &seedRandReader{seed: seed, index: 0} } func (srr *seedRandReader) Read(p []byte) (n int, err error) { - if srr.seed == nil { - return 0, errors.New("nil seed") - } - if len(srr.seed) == 0 { - return 0, errors.New("empty seed") - } - if p == nil { - return 0, errors.New("nil buffer") - } - if len(p) == 0 { - return 0, errors.New("empty buffer") - } - - for i := 0; i < len(p); i++ { - p[i] = srr.seed[srr.index] - - srr.index++ - srr.index = srr.index % len(srr.seed) - } - - return len(p), nil + if srr.seed == nil { + return 0, errors.New("nil seed") + } + if len(srr.seed) == 0 { + return 0, errors.New("empty seed") + } + if p == nil { + return 0, errors.New("nil buffer") + } + if len(p) == 0 { + return 0, errors.New("empty buffer") + } + + for i := 0; i < len(p); i++ { + p[i] = srr.seed[srr.index] + + srr.index++ + srr.index = srr.index % len(srr.seed) + } + + return len(p), nil } // CreateStatusHandlerPresenter will return an instance of PresenterStatusHandler func CreateStatusHandlerPresenter() view.Presenter { - presenterStatusHandlerFactory := factoryViews.NewPresenterFactory() + presenterStatusHandlerFactory := factoryViews.NewPresenterFactory() - return presenterStatusHandlerFactory.Create() + return presenterStatusHandlerFactory.Create() } // CreateViews will start an termui console and will return an object if cannot create and start termuiConsole func CreateViews(presenter view.Presenter) ([]factoryViews.Viewer, error) { - viewsFactory, err := factoryViews.NewViewsFactory(presenter) - if err != nil { - return nil, err - } - - views, err := viewsFactory.Create() - if err != nil { - return nil, err - } - - for _, v := range views { - err = v.Start() - if err != nil { - return nil, err - } - } - - return views, nil + viewsFactory, err := factoryViews.NewViewsFactory(presenter) + if err != nil { + return nil, err + } + + views, err := viewsFactory.Create() + if err != nil { + return nil, err + } + + for _, v := range views { + err = v.Start() + if err != nil { + return nil, err + } + } + + return views, nil } func getHasherFromConfig(cfg *config.Config) (hashing.Hasher, error) { - switch cfg.Hasher.Type { - case "sha256": - return sha256.Sha256{}, nil - case "blake2b": - return blake2b.Blake2b{}, nil - } - - return nil, errors.New("no hasher provided in config file") + switch cfg.Hasher.Type { + case "sha256": + return sha256.Sha256{}, nil + case "blake2b": + return blake2b.Blake2b{}, nil + } + + return nil, errors.New("no hasher provided in config file") } func getMarshalizerFromConfig(cfg *config.Config) (marshal.Marshalizer, error) { - switch cfg.Marshalizer.Type { - case "json": - return &marshal.JsonMarshalizer{}, nil - } + switch cfg.Marshalizer.Type { + case "json": + return 
&marshal.JsonMarshalizer{}, nil + } - return nil, errors.New("no marshalizer provided in config file") + return nil, errors.New("no marshalizer provided in config file") } func getTrie( - cfg config.StorageConfig, - marshalizer marshal.Marshalizer, - hasher hashing.Hasher, - uniqueID string, + cfg config.StorageConfig, + marshalizer marshal.Marshalizer, + hasher hashing.Hasher, + uniqueID string, ) (data.Trie, error) { - accountsTrieStorage, err := storageUnit.NewStorageUnitFromConf( - getCacherFromConfig(cfg.Cache), - getDBFromConfig(cfg.DB, uniqueID), - getBloomFromConfig(cfg.Bloom), - ) - if err != nil { - return nil, errors.New("error creating accountsTrieStorage: " + err.Error()) - } + accountsTrieStorage, err := storageUnit.NewStorageUnitFromConf( + getCacherFromConfig(cfg.Cache), + getDBFromConfig(cfg.DB, uniqueID), + getBloomFromConfig(cfg.Bloom), + ) + if err != nil { + return nil, errors.New("error creating accountsTrieStorage: " + err.Error()) + } - return trie.NewTrie(accountsTrieStorage, marshalizer, hasher) + return trie.NewTrie(accountsTrieStorage, marshalizer, hasher) } func createBlockChainFromConfig(config *config.Config, coordinator sharding.Coordinator, ash core.AppStatusHandler) (data.ChainHandler, error) { - badBlockCache, err := storageUnit.NewCache( - storageUnit.CacheType(config.BadBlocksCache.Type), - config.BadBlocksCache.Size, - config.BadBlocksCache.Shards) - if err != nil { - return nil, err - } - - if coordinator == nil { - return nil, state.ErrNilShardCoordinator - } - - if coordinator.SelfId() < coordinator.NumberOfShards() { - blockChain, err := blockchain.NewBlockChain(badBlockCache) - if err != nil { - return nil, err - } - - err = blockChain.SetAppStatusHandler(ash) - if err != nil { - return nil, err - } - - return blockChain, nil - } - if coordinator.SelfId() == sharding.MetachainShardId { - blockChain, err := blockchain.NewMetaChain(badBlockCache) - if err != nil { - return nil, err - } - - err = blockChain.SetAppStatusHandler(ash) - if err != nil { - return nil, err - } - - return blockChain, nil - } - return nil, errors.New("can not create blockchain") + badBlockCache, err := storageUnit.NewCache( + storageUnit.CacheType(config.BadBlocksCache.Type), + config.BadBlocksCache.Size, + config.BadBlocksCache.Shards) + if err != nil { + return nil, err + } + + if coordinator == nil { + return nil, state.ErrNilShardCoordinator + } + + if coordinator.SelfId() < coordinator.NumberOfShards() { + blockChain, err := blockchain.NewBlockChain(badBlockCache) + if err != nil { + return nil, err + } + + err = blockChain.SetAppStatusHandler(ash) + if err != nil { + return nil, err + } + + return blockChain, nil + } + if coordinator.SelfId() == sharding.MetachainShardId { + blockChain, err := blockchain.NewMetaChain(badBlockCache) + if err != nil { + return nil, err + } + + err = blockChain.SetAppStatusHandler(ash) + if err != nil { + return nil, err + } + + return blockChain, nil + } + return nil, errors.New("can not create blockchain") } func createDataStoreFromConfig( - config *config.Config, - shardCoordinator sharding.Coordinator, - uniqueID string, + config *config.Config, + shardCoordinator sharding.Coordinator, + uniqueID string, ) (dataRetriever.StorageService, error) { - if shardCoordinator.SelfId() < shardCoordinator.NumberOfShards() { - return createShardDataStoreFromConfig(config, shardCoordinator, uniqueID) - } - if shardCoordinator.SelfId() == sharding.MetachainShardId { - return createMetaChainDataStoreFromConfig(config, shardCoordinator, uniqueID) - } 
- return nil, errors.New("can not create data store") + if shardCoordinator.SelfId() < shardCoordinator.NumberOfShards() { + return createShardDataStoreFromConfig(config, shardCoordinator, uniqueID) + } + if shardCoordinator.SelfId() == sharding.MetachainShardId { + return createMetaChainDataStoreFromConfig(config, shardCoordinator, uniqueID) + } + return nil, errors.New("can not create data store") } func createShardDataStoreFromConfig( - config *config.Config, - shardCoordinator sharding.Coordinator, - uniqueID string, + config *config.Config, + shardCoordinator sharding.Coordinator, + uniqueID string, ) (dataRetriever.StorageService, error) { - var headerUnit *storageUnit.Unit - var peerBlockUnit *storageUnit.Unit - var miniBlockUnit *storageUnit.Unit - var txUnit *storageUnit.Unit - var metachainHeaderUnit *storageUnit.Unit - var unsignedTxUnit *storageUnit.Unit - var rewardTxUnit *storageUnit.Unit - var metaHdrHashNonceUnit *storageUnit.Unit - var shardHdrHashNonceUnit *storageUnit.Unit - var err error - - defer func() { - // cleanup - if err != nil { - if headerUnit != nil { - _ = headerUnit.DestroyUnit() - } - if peerBlockUnit != nil { - _ = peerBlockUnit.DestroyUnit() - } - if miniBlockUnit != nil { - _ = miniBlockUnit.DestroyUnit() - } - if txUnit != nil { - _ = txUnit.DestroyUnit() - } - if unsignedTxUnit != nil { - _ = unsignedTxUnit.DestroyUnit() - } - if rewardTxUnit != nil { - _ = rewardTxUnit.DestroyUnit() - } - if metachainHeaderUnit != nil { - _ = metachainHeaderUnit.DestroyUnit() - } - if metaHdrHashNonceUnit != nil { - _ = metaHdrHashNonceUnit.DestroyUnit() - } - if shardHdrHashNonceUnit != nil { - _ = shardHdrHashNonceUnit.DestroyUnit() - } - } - }() - - txUnit, err = storageUnit.NewStorageUnitFromConf( - getCacherFromConfig(config.TxStorage.Cache), - getDBFromConfig(config.TxStorage.DB, uniqueID), - getBloomFromConfig(config.TxStorage.Bloom)) - if err != nil { - return nil, err - } - - unsignedTxUnit, err = storageUnit.NewStorageUnitFromConf( - getCacherFromConfig(config.UnsignedTransactionStorage.Cache), - getDBFromConfig(config.UnsignedTransactionStorage.DB, uniqueID), - getBloomFromConfig(config.UnsignedTransactionStorage.Bloom)) - if err != nil { - return nil, err - } - - rewardTxUnit, err = storageUnit.NewStorageUnitFromConf( - getCacherFromConfig(config.RewardTxStorage.Cache), - getDBFromConfig(config.RewardTxStorage.DB, uniqueID), - getBloomFromConfig(config.RewardTxStorage.Bloom)) - if err != nil { - return nil, err - } - - miniBlockUnit, err = storageUnit.NewStorageUnitFromConf( - getCacherFromConfig(config.MiniBlocksStorage.Cache), - getDBFromConfig(config.MiniBlocksStorage.DB, uniqueID), - getBloomFromConfig(config.MiniBlocksStorage.Bloom)) - if err != nil { - return nil, err - } - - peerBlockUnit, err = storageUnit.NewStorageUnitFromConf( - getCacherFromConfig(config.PeerBlockBodyStorage.Cache), - getDBFromConfig(config.PeerBlockBodyStorage.DB, uniqueID), - getBloomFromConfig(config.PeerBlockBodyStorage.Bloom)) - if err != nil { - return nil, err - } - - headerUnit, err = storageUnit.NewStorageUnitFromConf( - getCacherFromConfig(config.BlockHeaderStorage.Cache), - getDBFromConfig(config.BlockHeaderStorage.DB, uniqueID), - getBloomFromConfig(config.BlockHeaderStorage.Bloom)) - if err != nil { - return nil, err - } - - metachainHeaderUnit, err = storageUnit.NewStorageUnitFromConf( - getCacherFromConfig(config.MetaBlockStorage.Cache), - getDBFromConfig(config.MetaBlockStorage.DB, uniqueID), - getBloomFromConfig(config.MetaBlockStorage.Bloom)) - if err != nil { 
- return nil, err - } - - metaHdrHashNonceUnit, err = storageUnit.NewStorageUnitFromConf( - getCacherFromConfig(config.MetaHdrNonceHashStorage.Cache), - getDBFromConfig(config.MetaHdrNonceHashStorage.DB, uniqueID), - getBloomFromConfig(config.MetaHdrNonceHashStorage.Bloom), - ) - if err != nil { - return nil, err - } - - shardHdrHashNonceUnit, err = storageUnit.NewShardedStorageUnitFromConf( - getCacherFromConfig(config.ShardHdrNonceHashStorage.Cache), - getDBFromConfig(config.ShardHdrNonceHashStorage.DB, uniqueID), - getBloomFromConfig(config.ShardHdrNonceHashStorage.Bloom), - shardCoordinator.SelfId(), - ) - if err != nil { - return nil, err - } - - store := dataRetriever.NewChainStorer() - store.AddStorer(dataRetriever.TransactionUnit, txUnit) - store.AddStorer(dataRetriever.MiniBlockUnit, miniBlockUnit) - store.AddStorer(dataRetriever.PeerChangesUnit, peerBlockUnit) - store.AddStorer(dataRetriever.BlockHeaderUnit, headerUnit) - store.AddStorer(dataRetriever.MetaBlockUnit, metachainHeaderUnit) - store.AddStorer(dataRetriever.UnsignedTransactionUnit, unsignedTxUnit) - store.AddStorer(dataRetriever.RewardTransactionUnit, rewardTxUnit) - store.AddStorer(dataRetriever.MetaHdrNonceHashDataUnit, metaHdrHashNonceUnit) - hdrNonceHashDataUnit := dataRetriever.ShardHdrNonceHashDataUnit + dataRetriever.UnitType(shardCoordinator.SelfId()) - store.AddStorer(hdrNonceHashDataUnit, shardHdrHashNonceUnit) - - return store, err + var headerUnit *storageUnit.Unit + var peerBlockUnit *storageUnit.Unit + var miniBlockUnit *storageUnit.Unit + var txUnit *storageUnit.Unit + var metachainHeaderUnit *storageUnit.Unit + var unsignedTxUnit *storageUnit.Unit + var rewardTxUnit *storageUnit.Unit + var metaHdrHashNonceUnit *storageUnit.Unit + var shardHdrHashNonceUnit *storageUnit.Unit + var err error + + defer func() { + // cleanup + if err != nil { + if headerUnit != nil { + _ = headerUnit.DestroyUnit() + } + if peerBlockUnit != nil { + _ = peerBlockUnit.DestroyUnit() + } + if miniBlockUnit != nil { + _ = miniBlockUnit.DestroyUnit() + } + if txUnit != nil { + _ = txUnit.DestroyUnit() + } + if unsignedTxUnit != nil { + _ = unsignedTxUnit.DestroyUnit() + } + if rewardTxUnit != nil { + _ = rewardTxUnit.DestroyUnit() + } + if metachainHeaderUnit != nil { + _ = metachainHeaderUnit.DestroyUnit() + } + if metaHdrHashNonceUnit != nil { + _ = metaHdrHashNonceUnit.DestroyUnit() + } + if shardHdrHashNonceUnit != nil { + _ = shardHdrHashNonceUnit.DestroyUnit() + } + } + }() + + txUnit, err = storageUnit.NewStorageUnitFromConf( + getCacherFromConfig(config.TxStorage.Cache), + getDBFromConfig(config.TxStorage.DB, uniqueID), + getBloomFromConfig(config.TxStorage.Bloom)) + if err != nil { + return nil, err + } + + unsignedTxUnit, err = storageUnit.NewStorageUnitFromConf( + getCacherFromConfig(config.UnsignedTransactionStorage.Cache), + getDBFromConfig(config.UnsignedTransactionStorage.DB, uniqueID), + getBloomFromConfig(config.UnsignedTransactionStorage.Bloom)) + if err != nil { + return nil, err + } + + rewardTxUnit, err = storageUnit.NewStorageUnitFromConf( + getCacherFromConfig(config.RewardTxStorage.Cache), + getDBFromConfig(config.RewardTxStorage.DB, uniqueID), + getBloomFromConfig(config.RewardTxStorage.Bloom)) + if err != nil { + return nil, err + } + + miniBlockUnit, err = storageUnit.NewStorageUnitFromConf( + getCacherFromConfig(config.MiniBlocksStorage.Cache), + getDBFromConfig(config.MiniBlocksStorage.DB, uniqueID), + getBloomFromConfig(config.MiniBlocksStorage.Bloom)) + if err != nil { + return nil, err + } + + 
peerBlockUnit, err = storageUnit.NewStorageUnitFromConf( + getCacherFromConfig(config.PeerBlockBodyStorage.Cache), + getDBFromConfig(config.PeerBlockBodyStorage.DB, uniqueID), + getBloomFromConfig(config.PeerBlockBodyStorage.Bloom)) + if err != nil { + return nil, err + } + + headerUnit, err = storageUnit.NewStorageUnitFromConf( + getCacherFromConfig(config.BlockHeaderStorage.Cache), + getDBFromConfig(config.BlockHeaderStorage.DB, uniqueID), + getBloomFromConfig(config.BlockHeaderStorage.Bloom)) + if err != nil { + return nil, err + } + + metachainHeaderUnit, err = storageUnit.NewStorageUnitFromConf( + getCacherFromConfig(config.MetaBlockStorage.Cache), + getDBFromConfig(config.MetaBlockStorage.DB, uniqueID), + getBloomFromConfig(config.MetaBlockStorage.Bloom)) + if err != nil { + return nil, err + } + + metaHdrHashNonceUnit, err = storageUnit.NewStorageUnitFromConf( + getCacherFromConfig(config.MetaHdrNonceHashStorage.Cache), + getDBFromConfig(config.MetaHdrNonceHashStorage.DB, uniqueID), + getBloomFromConfig(config.MetaHdrNonceHashStorage.Bloom), + ) + if err != nil { + return nil, err + } + + shardHdrHashNonceUnit, err = storageUnit.NewShardedStorageUnitFromConf( + getCacherFromConfig(config.ShardHdrNonceHashStorage.Cache), + getDBFromConfig(config.ShardHdrNonceHashStorage.DB, uniqueID), + getBloomFromConfig(config.ShardHdrNonceHashStorage.Bloom), + shardCoordinator.SelfId(), + ) + if err != nil { + return nil, err + } + + store := dataRetriever.NewChainStorer() + store.AddStorer(dataRetriever.TransactionUnit, txUnit) + store.AddStorer(dataRetriever.MiniBlockUnit, miniBlockUnit) + store.AddStorer(dataRetriever.PeerChangesUnit, peerBlockUnit) + store.AddStorer(dataRetriever.BlockHeaderUnit, headerUnit) + store.AddStorer(dataRetriever.MetaBlockUnit, metachainHeaderUnit) + store.AddStorer(dataRetriever.UnsignedTransactionUnit, unsignedTxUnit) + store.AddStorer(dataRetriever.RewardTransactionUnit, rewardTxUnit) + store.AddStorer(dataRetriever.MetaHdrNonceHashDataUnit, metaHdrHashNonceUnit) + hdrNonceHashDataUnit := dataRetriever.ShardHdrNonceHashDataUnit + dataRetriever.UnitType(shardCoordinator.SelfId()) + store.AddStorer(hdrNonceHashDataUnit, shardHdrHashNonceUnit) + + return store, err } func createMetaChainDataStoreFromConfig( - config *config.Config, - shardCoordinator sharding.Coordinator, - uniqueID string, + config *config.Config, + shardCoordinator sharding.Coordinator, + uniqueID string, ) (dataRetriever.StorageService, error) { - var peerDataUnit, shardDataUnit, metaBlockUnit, headerUnit, metaHdrHashNonceUnit *storageUnit.Unit - var shardHdrHashNonceUnits []*storageUnit.Unit - var err error - - defer func() { - // cleanup - if err != nil { - if peerDataUnit != nil { - _ = peerDataUnit.DestroyUnit() - } - if shardDataUnit != nil { - _ = shardDataUnit.DestroyUnit() - } - if metaBlockUnit != nil { - _ = metaBlockUnit.DestroyUnit() - } - if headerUnit != nil { - _ = headerUnit.DestroyUnit() - } - if metaHdrHashNonceUnit != nil { - _ = metaHdrHashNonceUnit.DestroyUnit() - } - if shardHdrHashNonceUnits != nil { - for i := uint32(0); i < shardCoordinator.NumberOfShards(); i++ { - _ = shardHdrHashNonceUnits[i].DestroyUnit() - } - } - } - }() - - metaBlockUnit, err = storageUnit.NewStorageUnitFromConf( - getCacherFromConfig(config.MetaBlockStorage.Cache), - getDBFromConfig(config.MetaBlockStorage.DB, uniqueID), - getBloomFromConfig(config.MetaBlockStorage.Bloom)) - if err != nil { - return nil, err - } - - shardDataUnit, err = storageUnit.NewStorageUnitFromConf( - 
getCacherFromConfig(config.ShardDataStorage.Cache), - getDBFromConfig(config.ShardDataStorage.DB, uniqueID), - getBloomFromConfig(config.ShardDataStorage.Bloom)) - if err != nil { - return nil, err - } - - peerDataUnit, err = storageUnit.NewStorageUnitFromConf( - getCacherFromConfig(config.PeerDataStorage.Cache), - getDBFromConfig(config.PeerDataStorage.DB, uniqueID), - getBloomFromConfig(config.PeerDataStorage.Bloom)) - if err != nil { - return nil, err - } - - headerUnit, err = storageUnit.NewStorageUnitFromConf( - getCacherFromConfig(config.BlockHeaderStorage.Cache), - getDBFromConfig(config.BlockHeaderStorage.DB, uniqueID), - getBloomFromConfig(config.BlockHeaderStorage.Bloom)) - if err != nil { - return nil, err - } - - metaHdrHashNonceUnit, err = storageUnit.NewStorageUnitFromConf( - getCacherFromConfig(config.MetaHdrNonceHashStorage.Cache), - getDBFromConfig(config.MetaHdrNonceHashStorage.DB, uniqueID), - getBloomFromConfig(config.MetaHdrNonceHashStorage.Bloom), - ) - if err != nil { - return nil, err - } - - shardHdrHashNonceUnits = make([]*storageUnit.Unit, shardCoordinator.NumberOfShards()) - for i := uint32(0); i < shardCoordinator.NumberOfShards(); i++ { - shardHdrHashNonceUnits[i], err = storageUnit.NewShardedStorageUnitFromConf( - getCacherFromConfig(config.ShardHdrNonceHashStorage.Cache), - getDBFromConfig(config.ShardHdrNonceHashStorage.DB, uniqueID), - getBloomFromConfig(config.ShardHdrNonceHashStorage.Bloom), - i, - ) - if err != nil { - return nil, err - } - } - - store := dataRetriever.NewChainStorer() - store.AddStorer(dataRetriever.MetaBlockUnit, metaBlockUnit) - store.AddStorer(dataRetriever.MetaShardDataUnit, shardDataUnit) - store.AddStorer(dataRetriever.MetaPeerDataUnit, peerDataUnit) - store.AddStorer(dataRetriever.BlockHeaderUnit, headerUnit) - store.AddStorer(dataRetriever.MetaHdrNonceHashDataUnit, metaHdrHashNonceUnit) - for i := uint32(0); i < shardCoordinator.NumberOfShards(); i++ { - hdrNonceHashDataUnit := dataRetriever.ShardHdrNonceHashDataUnit + dataRetriever.UnitType(i) - store.AddStorer(hdrNonceHashDataUnit, shardHdrHashNonceUnits[i]) - } - - return store, err + var peerDataUnit, shardDataUnit, metaBlockUnit, headerUnit, metaHdrHashNonceUnit *storageUnit.Unit + var shardHdrHashNonceUnits []*storageUnit.Unit + var err error + + defer func() { + // cleanup + if err != nil { + if peerDataUnit != nil { + _ = peerDataUnit.DestroyUnit() + } + if shardDataUnit != nil { + _ = shardDataUnit.DestroyUnit() + } + if metaBlockUnit != nil { + _ = metaBlockUnit.DestroyUnit() + } + if headerUnit != nil { + _ = headerUnit.DestroyUnit() + } + if metaHdrHashNonceUnit != nil { + _ = metaHdrHashNonceUnit.DestroyUnit() + } + if shardHdrHashNonceUnits != nil { + for i := uint32(0); i < shardCoordinator.NumberOfShards(); i++ { + _ = shardHdrHashNonceUnits[i].DestroyUnit() + } + } + } + }() + + metaBlockUnit, err = storageUnit.NewStorageUnitFromConf( + getCacherFromConfig(config.MetaBlockStorage.Cache), + getDBFromConfig(config.MetaBlockStorage.DB, uniqueID), + getBloomFromConfig(config.MetaBlockStorage.Bloom)) + if err != nil { + return nil, err + } + + shardDataUnit, err = storageUnit.NewStorageUnitFromConf( + getCacherFromConfig(config.ShardDataStorage.Cache), + getDBFromConfig(config.ShardDataStorage.DB, uniqueID), + getBloomFromConfig(config.ShardDataStorage.Bloom)) + if err != nil { + return nil, err + } + + peerDataUnit, err = storageUnit.NewStorageUnitFromConf( + getCacherFromConfig(config.PeerDataStorage.Cache), + getDBFromConfig(config.PeerDataStorage.DB, uniqueID), 
+ getBloomFromConfig(config.PeerDataStorage.Bloom)) + if err != nil { + return nil, err + } + + headerUnit, err = storageUnit.NewStorageUnitFromConf( + getCacherFromConfig(config.BlockHeaderStorage.Cache), + getDBFromConfig(config.BlockHeaderStorage.DB, uniqueID), + getBloomFromConfig(config.BlockHeaderStorage.Bloom)) + if err != nil { + return nil, err + } + + metaHdrHashNonceUnit, err = storageUnit.NewStorageUnitFromConf( + getCacherFromConfig(config.MetaHdrNonceHashStorage.Cache), + getDBFromConfig(config.MetaHdrNonceHashStorage.DB, uniqueID), + getBloomFromConfig(config.MetaHdrNonceHashStorage.Bloom), + ) + if err != nil { + return nil, err + } + + shardHdrHashNonceUnits = make([]*storageUnit.Unit, shardCoordinator.NumberOfShards()) + for i := uint32(0); i < shardCoordinator.NumberOfShards(); i++ { + shardHdrHashNonceUnits[i], err = storageUnit.NewShardedStorageUnitFromConf( + getCacherFromConfig(config.ShardHdrNonceHashStorage.Cache), + getDBFromConfig(config.ShardHdrNonceHashStorage.DB, uniqueID), + getBloomFromConfig(config.ShardHdrNonceHashStorage.Bloom), + i, + ) + if err != nil { + return nil, err + } + } + + store := dataRetriever.NewChainStorer() + store.AddStorer(dataRetriever.MetaBlockUnit, metaBlockUnit) + store.AddStorer(dataRetriever.MetaShardDataUnit, shardDataUnit) + store.AddStorer(dataRetriever.MetaPeerDataUnit, peerDataUnit) + store.AddStorer(dataRetriever.BlockHeaderUnit, headerUnit) + store.AddStorer(dataRetriever.MetaHdrNonceHashDataUnit, metaHdrHashNonceUnit) + for i := uint32(0); i < shardCoordinator.NumberOfShards(); i++ { + hdrNonceHashDataUnit := dataRetriever.ShardHdrNonceHashDataUnit + dataRetriever.UnitType(i) + store.AddStorer(hdrNonceHashDataUnit, shardHdrHashNonceUnits[i]) + } + + return store, err } func createShardDataPoolFromConfig( - config *config.Config, - uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter, + config *config.Config, + uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter, ) (dataRetriever.PoolsHolder, error) { - log.Info("creatingShardDataPool from config") - - txPool, err := shardedData.NewShardedData(getCacherFromConfig(config.TxDataPool)) - if err != nil { - log.Info("error creating txpool") - return nil, err - } - - uTxPool, err := shardedData.NewShardedData(getCacherFromConfig(config.UnsignedTransactionDataPool)) - if err != nil { - log.Info("error creating smart contract result pool") - return nil, err - } - - rewardTxPool, err := shardedData.NewShardedData(getCacherFromConfig(config.RewardTransactionDataPool)) - if err != nil { - log.Info("error creating reward transaction pool") - return nil, err - } - - cacherCfg := getCacherFromConfig(config.BlockHeaderDataPool) - hdrPool, err := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - if err != nil { - log.Info("error creating hdrpool") - return nil, err - } - - cacherCfg = getCacherFromConfig(config.MetaBlockBodyDataPool) - metaBlockBody, err := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - if err != nil { - log.Info("error creating metaBlockBody") - return nil, err - } - - cacherCfg = getCacherFromConfig(config.BlockHeaderNoncesDataPool) - hdrNoncesCacher, err := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - if err != nil { - log.Info("error creating hdrNoncesCacher") - return nil, err - } - hdrNonces, err := dataPool.NewNonceSyncMapCacher(hdrNoncesCacher, uint64ByteSliceConverter) - if err != nil { - log.Info("error creating hdrNonces") - return nil, err - } - - cacherCfg = 
getCacherFromConfig(config.TxBlockBodyDataPool) - txBlockBody, err := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - if err != nil { - log.Info("error creating txBlockBody") - return nil, err - } - - cacherCfg = getCacherFromConfig(config.PeerBlockBodyDataPool) - peerChangeBlockBody, err := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - if err != nil { - log.Info("error creating peerChangeBlockBody") - return nil, err - } - - return dataPool.NewShardedDataPool( - txPool, - uTxPool, - rewardTxPool, - hdrPool, - hdrNonces, - txBlockBody, - peerChangeBlockBody, - metaBlockBody, - ) + log.Info("creatingShardDataPool from config") + + txPool, err := shardedData.NewShardedData(getCacherFromConfig(config.TxDataPool)) + if err != nil { + log.Info("error creating txpool") + return nil, err + } + + uTxPool, err := shardedData.NewShardedData(getCacherFromConfig(config.UnsignedTransactionDataPool)) + if err != nil { + log.Info("error creating smart contract result pool") + return nil, err + } + + rewardTxPool, err := shardedData.NewShardedData(getCacherFromConfig(config.RewardTransactionDataPool)) + if err != nil { + log.Info("error creating reward transaction pool") + return nil, err + } + + cacherCfg := getCacherFromConfig(config.BlockHeaderDataPool) + hdrPool, err := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) + if err != nil { + log.Info("error creating hdrpool") + return nil, err + } + + cacherCfg = getCacherFromConfig(config.MetaBlockBodyDataPool) + metaBlockBody, err := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) + if err != nil { + log.Info("error creating metaBlockBody") + return nil, err + } + + cacherCfg = getCacherFromConfig(config.BlockHeaderNoncesDataPool) + hdrNoncesCacher, err := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) + if err != nil { + log.Info("error creating hdrNoncesCacher") + return nil, err + } + hdrNonces, err := dataPool.NewNonceSyncMapCacher(hdrNoncesCacher, uint64ByteSliceConverter) + if err != nil { + log.Info("error creating hdrNonces") + return nil, err + } + + cacherCfg = getCacherFromConfig(config.TxBlockBodyDataPool) + txBlockBody, err := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) + if err != nil { + log.Info("error creating txBlockBody") + return nil, err + } + + cacherCfg = getCacherFromConfig(config.PeerBlockBodyDataPool) + peerChangeBlockBody, err := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) + if err != nil { + log.Info("error creating peerChangeBlockBody") + return nil, err + } + + return dataPool.NewShardedDataPool( + txPool, + uTxPool, + rewardTxPool, + hdrPool, + hdrNonces, + txBlockBody, + peerChangeBlockBody, + metaBlockBody, + ) } func createMetaDataPoolFromConfig( - config *config.Config, - uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter, + config *config.Config, + uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter, ) (dataRetriever.MetaPoolsHolder, error) { - cacherCfg := getCacherFromConfig(config.MetaBlockBodyDataPool) - metaBlockBody, err := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - if err != nil { - log.Info("error creating metaBlockBody") - return nil, err - } - - miniBlockHashes, err := shardedData.NewShardedData(getCacherFromConfig(config.MiniBlockHeaderHashesDataPool)) - if err != nil { - log.Info("error creating miniBlockHashes") - return nil, err - } - - cacherCfg = 
getCacherFromConfig(config.ShardHeadersDataPool) - shardHeaders, err := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - if err != nil { - log.Info("error creating shardHeaders") - return nil, err - } - - headersNoncesCacher, err := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - if err != nil { - log.Info("error creating shard headers nonces pool") - return nil, err - } - headersNonces, err := dataPool.NewNonceSyncMapCacher(headersNoncesCacher, uint64ByteSliceConverter) - if err != nil { - log.Info("error creating shard headers nonces pool") - return nil, err - } - - return dataPool.NewMetaDataPool(metaBlockBody, miniBlockHashes, shardHeaders, headersNonces) + cacherCfg := getCacherFromConfig(config.MetaBlockBodyDataPool) + metaBlockBody, err := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) + if err != nil { + log.Info("error creating metaBlockBody") + return nil, err + } + + miniBlockHashes, err := shardedData.NewShardedData(getCacherFromConfig(config.MiniBlockHeaderHashesDataPool)) + if err != nil { + log.Info("error creating miniBlockHashes") + return nil, err + } + + cacherCfg = getCacherFromConfig(config.ShardHeadersDataPool) + shardHeaders, err := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) + if err != nil { + log.Info("error creating shardHeaders") + return nil, err + } + + headersNoncesCacher, err := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) + if err != nil { + log.Info("error creating shard headers nonces pool") + return nil, err + } + headersNonces, err := dataPool.NewNonceSyncMapCacher(headersNoncesCacher, uint64ByteSliceConverter) + if err != nil { + log.Info("error creating shard headers nonces pool") + return nil, err + } + + return dataPool.NewMetaDataPool(metaBlockBody, miniBlockHashes, shardHeaders, headersNonces) } func createSingleSigner(config *config.Config) (crypto.SingleSigner, error) { - switch config.Consensus.Type { - case BlsConsensusType: - return &singlesig.BlsSingleSigner{}, nil - case BnConsensusType: - return &singlesig.SchnorrSigner{}, nil - } - - return nil, errors.New("no consensus type provided in config file") + switch config.Consensus.Type { + case BlsConsensusType: + return &singlesig.BlsSingleSigner{}, nil + case BnConsensusType: + return &singlesig.SchnorrSigner{}, nil + } + + return nil, errors.New("no consensus type provided in config file") } func getMultisigHasherFromConfig(cfg *config.Config) (hashing.Hasher, error) { - if cfg.Consensus.Type == BlsConsensusType && cfg.MultisigHasher.Type != "blake2b" { - return nil, errors.New("wrong multisig hasher provided for bls consensus type") - } - - switch cfg.MultisigHasher.Type { - case "sha256": - return sha256.Sha256{}, nil - case "blake2b": - if cfg.Consensus.Type == BlsConsensusType { - return blake2b.Blake2b{HashSize: BlsHashSize}, nil - } - return blake2b.Blake2b{}, nil - } - - return nil, errors.New("no multisig hasher provided in config file") + if cfg.Consensus.Type == BlsConsensusType && cfg.MultisigHasher.Type != "blake2b" { + return nil, errors.New("wrong multisig hasher provided for bls consensus type") + } + + switch cfg.MultisigHasher.Type { + case "sha256": + return sha256.Sha256{}, nil + case "blake2b": + if cfg.Consensus.Type == BlsConsensusType { + return blake2b.Blake2b{HashSize: BlsHashSize}, nil + } + return blake2b.Blake2b{}, nil + } + + return nil, errors.New("no multisig hasher provided in config file") } func createMultiSigner( - config 
*config.Config, - hasher hashing.Hasher, - pubKeys []string, - privateKey crypto.PrivateKey, - keyGen crypto.KeyGenerator, + config *config.Config, + hasher hashing.Hasher, + pubKeys []string, + privateKey crypto.PrivateKey, + keyGen crypto.KeyGenerator, ) (crypto.MultiSigner, error) { - switch config.Consensus.Type { - case BlsConsensusType: - blsSigner := &blsMultiSig.KyberMultiSignerBLS{} - return multisig.NewBLSMultisig(blsSigner, hasher, pubKeys, privateKey, keyGen, uint16(0)) - case BnConsensusType: - return multisig.NewBelNevMultisig(hasher, pubKeys, privateKey, keyGen, uint16(0)) - } + switch config.Consensus.Type { + case BlsConsensusType: + blsSigner := &blsMultiSig.KyberMultiSignerBLS{} + return multisig.NewBLSMultisig(blsSigner, hasher, pubKeys, privateKey, keyGen, uint16(0)) + case BnConsensusType: + return multisig.NewBelNevMultisig(hasher, pubKeys, privateKey, keyGen, uint16(0)) + } - return nil, errors.New("no consensus type provided in config file") + return nil, errors.New("no consensus type provided in config file") } func createNetMessenger( - p2pConfig *config.P2PConfig, - log *logger.Logger, - randReader io.Reader, + p2pConfig *config.P2PConfig, + log *logger.Logger, + randReader io.Reader, ) (p2p.Messenger, error) { - if p2pConfig.Node.Port < 0 { - return nil, errors.New("cannot start node on port < 0") - } + if p2pConfig.Node.Port < 0 { + return nil, errors.New("cannot start node on port < 0") + } - pDiscoveryFactory := factoryP2P.NewPeerDiscovererCreator(*p2pConfig) - pDiscoverer, err := pDiscoveryFactory.CreatePeerDiscoverer() + pDiscoveryFactory := factoryP2P.NewPeerDiscovererCreator(*p2pConfig) + pDiscoverer, err := pDiscoveryFactory.CreatePeerDiscoverer() - if err != nil { - return nil, err - } + if err != nil { + return nil, err + } - log.Info(fmt.Sprintf("Starting with peer discovery: %s", pDiscoverer.Name())) + log.Info(fmt.Sprintf("Starting with peer discovery: %s", pDiscoverer.Name())) - prvKey, _ := ecdsa.GenerateKey(btcec.S256(), randReader) - sk := (*libp2pCrypto.Secp256k1PrivateKey)(prvKey) + prvKey, _ := ecdsa.GenerateKey(btcec.S256(), randReader) + sk := (*libp2pCrypto.Secp256k1PrivateKey)(prvKey) - nm, err := libp2p.NewNetworkMessenger( - context.Background(), - p2pConfig.Node.Port, - sk, - nil, - loadBalancer.NewOutgoingChannelLoadBalancer(), - pDiscoverer, - libp2p.ListenAddrWithIp4AndTcp, - ) - if err != nil { - return nil, err - } + nm, err := libp2p.NewNetworkMessenger( + context.Background(), + p2pConfig.Node.Port, + sk, + nil, + loadBalancer.NewOutgoingChannelLoadBalancer(), + pDiscoverer, + libp2p.ListenAddrWithIp4AndTcp, + ) + if err != nil { + return nil, err + } - return nm, nil + return nm, nil } func newInterceptorAndResolverContainerFactory( - shardCoordinator sharding.Coordinator, - nodesCoordinator sharding.NodesCoordinator, - data *Data, - core *Core, - crypto *Crypto, - state *State, - network *Network, + shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, + data *Data, + core *Core, + crypto *Crypto, + state *State, + network *Network, ) (process.InterceptorsContainerFactory, dataRetriever.ResolversContainerFactory, error) { - if shardCoordinator.SelfId() < shardCoordinator.NumberOfShards() { - return newShardInterceptorAndResolverContainerFactory( - shardCoordinator, - nodesCoordinator, - data, - core, - crypto, - state, - network, - ) - } - if shardCoordinator.SelfId() == sharding.MetachainShardId { - return newMetaInterceptorAndResolverContainerFactory( - shardCoordinator, - nodesCoordinator, - 
data, - core, - crypto, - network, - ) - } - - return nil, nil, errors.New("could not create interceptor and resolver container factory") + if shardCoordinator.SelfId() < shardCoordinator.NumberOfShards() { + return newShardInterceptorAndResolverContainerFactory( + shardCoordinator, + nodesCoordinator, + data, + core, + crypto, + state, + network, + ) + } + if shardCoordinator.SelfId() == sharding.MetachainShardId { + return newMetaInterceptorAndResolverContainerFactory( + shardCoordinator, + nodesCoordinator, + data, + core, + crypto, + network, + ) + } + + return nil, nil, errors.New("could not create interceptor and resolver container factory") } func newShardInterceptorAndResolverContainerFactory( - shardCoordinator sharding.Coordinator, - nodesCoordinator sharding.NodesCoordinator, - data *Data, - core *Core, - crypto *Crypto, - state *State, - network *Network, + shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, + data *Data, + core *Core, + crypto *Crypto, + state *State, + network *Network, ) (process.InterceptorsContainerFactory, dataRetriever.ResolversContainerFactory, error) { - //TODO add a real chronology validator and remove null chronology validator - interceptorContainerFactory, err := shard.NewInterceptorsContainerFactory( - shardCoordinator, - nodesCoordinator, - network.NetMessenger, - data.Store, - core.Marshalizer, - core.Hasher, - crypto.TxSignKeyGen, - crypto.TxSingleSigner, - crypto.MultiSigner, - data.Datapool, - state.AddressConverter, - ) - if err != nil { - return nil, nil, err - } - - dataPacker, err := partitioning.NewSizeDataPacker(core.Marshalizer) - if err != nil { - return nil, nil, err - } - - resolversContainerFactory, err := shardfactoryDataRetriever.NewResolversContainerFactory( - shardCoordinator, - network.NetMessenger, - data.Store, - core.Marshalizer, - data.Datapool, - core.Uint64ByteSliceConverter, - dataPacker, - ) - if err != nil { - return nil, nil, err - } - - return interceptorContainerFactory, resolversContainerFactory, nil + //TODO add a real chronology validator and remove null chronology validator + interceptorContainerFactory, err := shard.NewInterceptorsContainerFactory( + shardCoordinator, + nodesCoordinator, + network.NetMessenger, + data.Store, + core.Marshalizer, + core.Hasher, + crypto.TxSignKeyGen, + crypto.TxSingleSigner, + crypto.MultiSigner, + data.Datapool, + state.AddressConverter, + ) + if err != nil { + return nil, nil, err + } + + dataPacker, err := partitioning.NewSizeDataPacker(core.Marshalizer) + if err != nil { + return nil, nil, err + } + + resolversContainerFactory, err := shardfactoryDataRetriever.NewResolversContainerFactory( + shardCoordinator, + network.NetMessenger, + data.Store, + core.Marshalizer, + data.Datapool, + core.Uint64ByteSliceConverter, + dataPacker, + ) + if err != nil { + return nil, nil, err + } + + return interceptorContainerFactory, resolversContainerFactory, nil } func newMetaInterceptorAndResolverContainerFactory( - shardCoordinator sharding.Coordinator, - nodesCoordinator sharding.NodesCoordinator, - data *Data, - core *Core, - crypto *Crypto, - network *Network, + shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, + data *Data, + core *Core, + crypto *Crypto, + network *Network, ) (process.InterceptorsContainerFactory, dataRetriever.ResolversContainerFactory, error) { - //TODO add a real chronology validator and remove null chronology validator - interceptorContainerFactory, err := 
metachain.NewInterceptorsContainerFactory( - shardCoordinator, - nodesCoordinator, - network.NetMessenger, - data.Store, - core.Marshalizer, - core.Hasher, - crypto.MultiSigner, - data.MetaDatapool, - ) - if err != nil { - return nil, nil, err - } - resolversContainerFactory, err := metafactoryDataRetriever.NewResolversContainerFactory( - shardCoordinator, - network.NetMessenger, - data.Store, - core.Marshalizer, - data.MetaDatapool, - core.Uint64ByteSliceConverter, - ) - if err != nil { - return nil, nil, err - } - return interceptorContainerFactory, resolversContainerFactory, nil + //TODO add a real chronology validator and remove null chronology validator + interceptorContainerFactory, err := metachain.NewInterceptorsContainerFactory( + shardCoordinator, + nodesCoordinator, + network.NetMessenger, + data.Store, + core.Marshalizer, + core.Hasher, + crypto.MultiSigner, + data.MetaDatapool, + ) + if err != nil { + return nil, nil, err + } + resolversContainerFactory, err := metafactoryDataRetriever.NewResolversContainerFactory( + shardCoordinator, + network.NetMessenger, + data.Store, + core.Marshalizer, + data.MetaDatapool, + core.Uint64ByteSliceConverter, + ) + if err != nil { + return nil, nil, err + } + return interceptorContainerFactory, resolversContainerFactory, nil } func generateGenesisHeadersAndApplyInitialBalances( - coreComponents *Core, - stateComponents *State, - shardCoordinator sharding.Coordinator, - nodesSetup *sharding.NodesSetup, - genesisConfig *sharding.Genesis, + coreComponents *Core, + stateComponents *State, + shardCoordinator sharding.Coordinator, + nodesSetup *sharding.NodesSetup, + genesisConfig *sharding.Genesis, ) (map[uint32]data.HeaderHandler, error) { - //TODO change this rudimentary startup for metachain nodes - // Talk between Adrian, Robert and Iulian, did not want it to be discarded: - // -------------------------------------------------------------------- - // Adrian: "This looks like a workaround as the metchain should not deal with individual accounts, but shards data. - // What I was thinking was that the genesis on metachain (or pre-genesis block) is the nodes allocation to shards, - // with 0 state root for every shard, as there is no balance yet. - // Then the shards start operating as they get the initial node allocation, maybe we can do consensus on the - // genesis as well, I think this would be actually good as then everything is signed and agreed upon. - // The genesis shard blocks need to be then just the state root, I think we already have that in genesis, - // so shard nodes can go ahead with individually creating the block, but then run consensus on this. - // Then this block is sent to metachain who updates the state root of every shard and creates the metablock for - // the genesis of each of the shards (this is actually the same thing that would happen at new epoch start)." 
- - shardsGenesisBlocks := make(map[uint32]data.HeaderHandler) - - for shardId := uint32(0); shardId < shardCoordinator.NumberOfShards(); shardId++ { - isCurrentShard := shardId == shardCoordinator.SelfId() - if isCurrentShard { - continue - } - - newShardCoordinator, account, err := createInMemoryShardCoordinatorAndAccount( - coreComponents, - shardCoordinator.NumberOfShards(), - shardId, - ) - if err != nil { - return nil, err - } - - genesisBlock, err := createGenesisBlockAndApplyInitialBalances( - account, - newShardCoordinator, - stateComponents.AddressConverter, - genesisConfig, - uint64(nodesSetup.StartTime), - ) - if err != nil { - return nil, err - } - - shardsGenesisBlocks[shardId] = genesisBlock - } - - genesisBlockForCurrentShard, err := createGenesisBlockAndApplyInitialBalances( - stateComponents.AccountsAdapter, - shardCoordinator, - stateComponents.AddressConverter, - genesisConfig, - uint64(nodesSetup.StartTime), - ) - if err != nil { - return nil, err - } - - shardsGenesisBlocks[shardCoordinator.SelfId()] = genesisBlockForCurrentShard - - genesisBlock, err := genesis.CreateMetaGenesisBlock( - uint64(nodesSetup.StartTime), - nodesSetup.InitialNodesPubKeys(), - ) - - if err != nil { - return nil, err - } - - shardsGenesisBlocks[sharding.MetachainShardId] = genesisBlock - - return shardsGenesisBlocks, nil + //TODO change this rudimentary startup for metachain nodes + // Talk between Adrian, Robert and Iulian, did not want it to be discarded: + // -------------------------------------------------------------------- + // Adrian: "This looks like a workaround as the metchain should not deal with individual accounts, but shards data. + // What I was thinking was that the genesis on metachain (or pre-genesis block) is the nodes allocation to shards, + // with 0 state root for every shard, as there is no balance yet. + // Then the shards start operating as they get the initial node allocation, maybe we can do consensus on the + // genesis as well, I think this would be actually good as then everything is signed and agreed upon. + // The genesis shard blocks need to be then just the state root, I think we already have that in genesis, + // so shard nodes can go ahead with individually creating the block, but then run consensus on this. + // Then this block is sent to metachain who updates the state root of every shard and creates the metablock for + // the genesis of each of the shards (this is actually the same thing that would happen at new epoch start)." 
+ + shardsGenesisBlocks := make(map[uint32]data.HeaderHandler) + + for shardId := uint32(0); shardId < shardCoordinator.NumberOfShards(); shardId++ { + isCurrentShard := shardId == shardCoordinator.SelfId() + if isCurrentShard { + continue + } + + newShardCoordinator, account, err := createInMemoryShardCoordinatorAndAccount( + coreComponents, + shardCoordinator.NumberOfShards(), + shardId, + ) + if err != nil { + return nil, err + } + + genesisBlock, err := createGenesisBlockAndApplyInitialBalances( + account, + newShardCoordinator, + stateComponents.AddressConverter, + genesisConfig, + uint64(nodesSetup.StartTime), + ) + if err != nil { + return nil, err + } + + shardsGenesisBlocks[shardId] = genesisBlock + } + + genesisBlockForCurrentShard, err := createGenesisBlockAndApplyInitialBalances( + stateComponents.AccountsAdapter, + shardCoordinator, + stateComponents.AddressConverter, + genesisConfig, + uint64(nodesSetup.StartTime), + ) + if err != nil { + return nil, err + } + + shardsGenesisBlocks[shardCoordinator.SelfId()] = genesisBlockForCurrentShard + + genesisBlock, err := genesis.CreateMetaGenesisBlock( + uint64(nodesSetup.StartTime), + nodesSetup.InitialNodesPubKeys(), + ) + + if err != nil { + return nil, err + } + + shardsGenesisBlocks[sharding.MetachainShardId] = genesisBlock + + return shardsGenesisBlocks, nil } func createGenesisBlockAndApplyInitialBalances( - accounts state.AccountsAdapter, - shardCoordinator sharding.Coordinator, - addressConverter state.AddressConverter, - genesisConfig *sharding.Genesis, - startTime uint64, + accounts state.AccountsAdapter, + shardCoordinator sharding.Coordinator, + addressConverter state.AddressConverter, + genesisConfig *sharding.Genesis, + startTime uint64, ) (data.HeaderHandler, error) { - initialBalances, err := genesisConfig.InitialNodesBalances(shardCoordinator, addressConverter) - if err != nil { - return nil, err - } - - return genesis.CreateShardGenesisBlockFromInitialBalances( - accounts, - shardCoordinator, - addressConverter, - initialBalances, - startTime, - ) + initialBalances, err := genesisConfig.InitialNodesBalances(shardCoordinator, addressConverter) + if err != nil { + return nil, err + } + + return genesis.CreateShardGenesisBlockFromInitialBalances( + accounts, + shardCoordinator, + addressConverter, + initialBalances, + startTime, + ) } func createInMemoryShardCoordinatorAndAccount( - coreComponents *Core, - numOfShards uint32, - shardId uint32, + coreComponents *Core, + numOfShards uint32, + shardId uint32, ) (sharding.Coordinator, state.AccountsAdapter, error) { - newShardCoordinator, err := sharding.NewMultiShardCoordinator(numOfShards, shardId) - if err != nil { - return nil, nil, err - } + newShardCoordinator, err := sharding.NewMultiShardCoordinator(numOfShards, shardId) + if err != nil { + return nil, nil, err + } - accountFactory, err := factoryState.NewAccountFactoryCreator(factoryState.UserAccount) - if err != nil { - return nil, nil, err - } + accountFactory, err := factoryState.NewAccountFactoryCreator(factoryState.UserAccount) + if err != nil { + return nil, nil, err + } - accounts := generateInMemoryAccountsAdapter( - accountFactory, - coreComponents.Hasher, - coreComponents.Marshalizer, - ) + accounts := generateInMemoryAccountsAdapter( + accountFactory, + coreComponents.Hasher, + coreComponents.Marshalizer, + ) - return newShardCoordinator, accounts, nil + return newShardCoordinator, accounts, nil } func newBlockProcessorAndTracker( - resolversFinder dataRetriever.ResolversFinder, - shardCoordinator 
sharding.Coordinator, - nodesCoordinator sharding.NodesCoordinator, - economicsConfig *config.EconomicsConfig, - data *Data, - core *Core, - state *State, - forkDetector process.ForkDetector, - shardsGenesisBlocks map[uint32]data.HeaderHandler, - nodesConfig *sharding.NodesSetup, - coreServiceContainer serviceContainer.Core, + resolversFinder dataRetriever.ResolversFinder, + shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, + economicsConfig *config.EconomicsConfig, + data *Data, + core *Core, + state *State, + forkDetector process.ForkDetector, + shardsGenesisBlocks map[uint32]data.HeaderHandler, + nodesConfig *sharding.NodesSetup, + coreServiceContainer serviceContainer.Core, ) (process.BlockProcessor, process.BlocksTracker, error) { - if economicsConfig.CommunityAddress == "" || economicsConfig.BurnAddress == "" { - return nil, nil, errors.New("rewards configuration missing") - } - - communityAddress, err := hex.DecodeString(economicsConfig.CommunityAddress) - if err != nil { - return nil, nil, err - } - - burnAddress, err := hex.DecodeString(economicsConfig.BurnAddress) - if err != nil { - return nil, nil, err - } - - specialAddressHolder, err := address.NewSpecialAddressHolder( - communityAddress, - burnAddress, - state.AddressConverter, - shardCoordinator) - if err != nil { - return nil, nil, err - } - - // TODO: remove nodesConfig as no longer needed with nodes coordinator available - if shardCoordinator.SelfId() < shardCoordinator.NumberOfShards() { - return newShardBlockProcessorAndTracker( - resolversFinder, - shardCoordinator, - nodesCoordinator, - specialAddressHolder, - data, - core, - state, - forkDetector, - shardsGenesisBlocks, - nodesConfig, - coreServiceContainer, - ) - } - if shardCoordinator.SelfId() == sharding.MetachainShardId { - return newMetaBlockProcessorAndTracker( - resolversFinder, - shardCoordinator, - nodesCoordinator, - specialAddressHolder, - data, - core, - state, - forkDetector, - shardsGenesisBlocks, - coreServiceContainer, - ) - } - - return nil, nil, errors.New("could not create block processor and tracker") + if economicsConfig.CommunityAddress == "" || economicsConfig.BurnAddress == "" { + return nil, nil, errors.New("rewards configuration missing") + } + + communityAddress, err := hex.DecodeString(economicsConfig.CommunityAddress) + if err != nil { + return nil, nil, err + } + + burnAddress, err := hex.DecodeString(economicsConfig.BurnAddress) + if err != nil { + return nil, nil, err + } + + specialAddressHolder, err := address.NewSpecialAddressHolder( + communityAddress, + burnAddress, + state.AddressConverter, + shardCoordinator) + if err != nil { + return nil, nil, err + } + + // TODO: remove nodesConfig as no longer needed with nodes coordinator available + if shardCoordinator.SelfId() < shardCoordinator.NumberOfShards() { + return newShardBlockProcessorAndTracker( + resolversFinder, + shardCoordinator, + nodesCoordinator, + specialAddressHolder, + data, + core, + state, + forkDetector, + shardsGenesisBlocks, + nodesConfig, + coreServiceContainer, + ) + } + if shardCoordinator.SelfId() == sharding.MetachainShardId { + return newMetaBlockProcessorAndTracker( + resolversFinder, + shardCoordinator, + nodesCoordinator, + specialAddressHolder, + data, + core, + state, + forkDetector, + shardsGenesisBlocks, + coreServiceContainer, + ) + } + + return nil, nil, errors.New("could not create block processor and tracker") } func newShardBlockProcessorAndTracker( - resolversFinder dataRetriever.ResolversFinder, - 
shardCoordinator sharding.Coordinator, - nodesCoordinator sharding.NodesCoordinator, - specialAddressHandler process.SpecialAddressHandler, - data *Data, - core *Core, - state *State, - forkDetector process.ForkDetector, - shardsGenesisBlocks map[uint32]data.HeaderHandler, - nodesConfig *sharding.NodesSetup, - coreServiceContainer serviceContainer.Core, + resolversFinder dataRetriever.ResolversFinder, + shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, + specialAddressHandler process.SpecialAddressHandler, + data *Data, + core *Core, + state *State, + forkDetector process.ForkDetector, + shardsGenesisBlocks map[uint32]data.HeaderHandler, + nodesConfig *sharding.NodesSetup, + coreServiceContainer serviceContainer.Core, ) (process.BlockProcessor, process.BlocksTracker, error) { - argsParser, err := smartContract.NewAtArgumentParser() - if err != nil { - return nil, nil, err - } - - vmFactory, err := shard.NewVMContainerFactory(state.AccountsAdapter, state.AddressConverter) - if err != nil { - return nil, nil, err - } - - vmContainer, err := vmFactory.Create() - if err != nil { - return nil, nil, err - } - - interimProcFactory, err := shard.NewIntermediateProcessorsContainerFactory( - shardCoordinator, - core.Marshalizer, - core.Hasher, - state.AddressConverter, - specialAddressHandler, - data.Store, - data.Datapool, - ) - if err != nil { - return nil, nil, err - } - - interimProcContainer, err := interimProcFactory.Create() - if err != nil { - return nil, nil, err - } - - scForwarder, err := interimProcContainer.Get(dataBlock.SmartContractResultBlock) - if err != nil { - return nil, nil, err - } - - rewardsTxInterim, err := interimProcContainer.Get(dataBlock.RewardsBlock) - if err != nil { - return nil, nil, err - } - - rewardsTxHandler, ok := rewardsTxInterim.(process.TransactionFeeHandler) - if !ok { - return nil, nil, process.ErrWrongTypeAssertion - } - - scProcessor, err := smartContract.NewSmartContractProcessor( - vmContainer, - argsParser, - core.Hasher, - core.Marshalizer, - state.AccountsAdapter, - vmFactory.VMAccountsDB(), - state.AddressConverter, - shardCoordinator, - scForwarder, - rewardsTxHandler, - ) - if err != nil { - return nil, nil, err - } - - requestHandler, err := requestHandlers.NewShardResolverRequestHandler( - resolversFinder, - factory.TransactionTopic, - factory.UnsignedTransactionTopic, - factory.RewardsTransactionTopic, - factory.MiniBlocksTopic, - factory.MetachainBlocksTopic, - MaxTxsToRequest, - ) - if err != nil { - return nil, nil, err - } - - rewardsTxProcessor, err := rewardTransaction.NewRewardTxProcessor( - state.AccountsAdapter, - state.AddressConverter, - shardCoordinator, - rewardsTxInterim, - ) - if err != nil { - return nil, nil, err - } - - txTypeHandler, err := coordinator.NewTxTypeHandler(state.AddressConverter, shardCoordinator, state.AccountsAdapter) - if err != nil { - return nil, nil, err - } - - transactionProcessor, err := transaction.NewTxProcessor( - state.AccountsAdapter, - core.Hasher, - state.AddressConverter, - core.Marshalizer, - shardCoordinator, - scProcessor, - rewardsTxHandler, - txTypeHandler, - ) - if err != nil { - return nil, nil, errors.New("could not create transaction processor: " + err.Error()) - } - - blockTracker, err := track.NewShardBlockTracker( - data.Datapool, - core.Marshalizer, - shardCoordinator, - data.Store, - ) - if err != nil { - return nil, nil, err - } - - preProcFactory, err := shard.NewPreProcessorsContainerFactory( - shardCoordinator, - data.Store, - core.Marshalizer, 
- core.Hasher, - data.Datapool, - state.AddressConverter, - state.AccountsAdapter, - requestHandler, - transactionProcessor, - scProcessor, - scProcessor, - rewardsTxProcessor, - ) - if err != nil { - return nil, nil, err - } - - preProcContainer, err := preProcFactory.Create() - if err != nil { - return nil, nil, err - } - - txCoordinator, err := coordinator.NewTransactionCoordinator( - shardCoordinator, - state.AccountsAdapter, - data.Datapool, - requestHandler, - preProcContainer, - interimProcContainer, - ) - if err != nil { - return nil, nil, err - } - - blockProcessor, err := block.NewShardProcessor( - coreServiceContainer, - data.Datapool, - data.Store, - core.Hasher, - core.Marshalizer, - state.AccountsAdapter, - shardCoordinator, - nodesCoordinator, - specialAddressHandler, - forkDetector, - blockTracker, - shardsGenesisBlocks, - requestHandler, - txCoordinator, - core.Uint64ByteSliceConverter, - ) - if err != nil { - return nil, nil, errors.New("could not create block processor: " + err.Error()) - } - - err = blockProcessor.SetAppStatusHandler(core.StatusHandler) - if err != nil { - return nil, nil, err - } - - return blockProcessor, blockTracker, nil + argsParser, err := smartContract.NewAtArgumentParser() + if err != nil { + return nil, nil, err + } + + vmFactory, err := shard.NewVMContainerFactory(state.AccountsAdapter, state.AddressConverter) + if err != nil { + return nil, nil, err + } + + vmContainer, err := vmFactory.Create() + if err != nil { + return nil, nil, err + } + + interimProcFactory, err := shard.NewIntermediateProcessorsContainerFactory( + shardCoordinator, + core.Marshalizer, + core.Hasher, + state.AddressConverter, + specialAddressHandler, + data.Store, + data.Datapool, + ) + if err != nil { + return nil, nil, err + } + + interimProcContainer, err := interimProcFactory.Create() + if err != nil { + return nil, nil, err + } + + scForwarder, err := interimProcContainer.Get(dataBlock.SmartContractResultBlock) + if err != nil { + return nil, nil, err + } + + rewardsTxInterim, err := interimProcContainer.Get(dataBlock.RewardsBlock) + if err != nil { + return nil, nil, err + } + + rewardsTxHandler, ok := rewardsTxInterim.(process.TransactionFeeHandler) + if !ok { + return nil, nil, process.ErrWrongTypeAssertion + } + + scProcessor, err := smartContract.NewSmartContractProcessor( + vmContainer, + argsParser, + core.Hasher, + core.Marshalizer, + state.AccountsAdapter, + vmFactory.VMAccountsDB(), + state.AddressConverter, + shardCoordinator, + scForwarder, + rewardsTxHandler, + ) + if err != nil { + return nil, nil, err + } + + requestHandler, err := requestHandlers.NewShardResolverRequestHandler( + resolversFinder, + factory.TransactionTopic, + factory.UnsignedTransactionTopic, + factory.RewardsTransactionTopic, + factory.MiniBlocksTopic, + factory.MetachainBlocksTopic, + MaxTxsToRequest, + ) + if err != nil { + return nil, nil, err + } + + rewardsTxProcessor, err := rewardTransaction.NewRewardTxProcessor( + state.AccountsAdapter, + state.AddressConverter, + shardCoordinator, + rewardsTxInterim, + ) + if err != nil { + return nil, nil, err + } + + txTypeHandler, err := coordinator.NewTxTypeHandler(state.AddressConverter, shardCoordinator, state.AccountsAdapter) + if err != nil { + return nil, nil, err + } + + transactionProcessor, err := transaction.NewTxProcessor( + state.AccountsAdapter, + core.Hasher, + state.AddressConverter, + core.Marshalizer, + shardCoordinator, + scProcessor, + rewardsTxHandler, + txTypeHandler, + ) + if err != nil { + return nil, nil, 
errors.New("could not create transaction processor: " + err.Error()) + } + + blockTracker, err := track.NewShardBlockTracker( + data.Datapool, + core.Marshalizer, + shardCoordinator, + data.Store, + ) + if err != nil { + return nil, nil, err + } + + preProcFactory, err := shard.NewPreProcessorsContainerFactory( + shardCoordinator, + data.Store, + core.Marshalizer, + core.Hasher, + data.Datapool, + state.AddressConverter, + state.AccountsAdapter, + requestHandler, + transactionProcessor, + scProcessor, + scProcessor, + rewardsTxProcessor, + ) + if err != nil { + return nil, nil, err + } + + preProcContainer, err := preProcFactory.Create() + if err != nil { + return nil, nil, err + } + + txCoordinator, err := coordinator.NewTransactionCoordinator( + shardCoordinator, + state.AccountsAdapter, + data.Datapool, + requestHandler, + preProcContainer, + interimProcContainer, + ) + if err != nil { + return nil, nil, err + } + + blockProcessor, err := block.NewShardProcessor( + coreServiceContainer, + data.Datapool, + data.Store, + core.Hasher, + core.Marshalizer, + state.AccountsAdapter, + shardCoordinator, + nodesCoordinator, + specialAddressHandler, + forkDetector, + blockTracker, + shardsGenesisBlocks, + requestHandler, + txCoordinator, + core.Uint64ByteSliceConverter, + ) + if err != nil { + return nil, nil, errors.New("could not create block processor: " + err.Error()) + } + + err = blockProcessor.SetAppStatusHandler(core.StatusHandler) + if err != nil { + return nil, nil, err + } + + return blockProcessor, blockTracker, nil } func newMetaBlockProcessorAndTracker( - resolversFinder dataRetriever.ResolversFinder, - shardCoordinator sharding.Coordinator, - nodesCoordinator sharding.NodesCoordinator, - specialAddressHandler process.SpecialAddressHandler, - data *Data, - core *Core, - state *State, - forkDetector process.ForkDetector, - shardsGenesisBlocks map[uint32]data.HeaderHandler, - coreServiceContainer serviceContainer.Core, + resolversFinder dataRetriever.ResolversFinder, + shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, + specialAddressHandler process.SpecialAddressHandler, + data *Data, + core *Core, + state *State, + forkDetector process.ForkDetector, + shardsGenesisBlocks map[uint32]data.HeaderHandler, + coreServiceContainer serviceContainer.Core, ) (process.BlockProcessor, process.BlocksTracker, error) { - requestHandler, err := requestHandlers.NewMetaResolverRequestHandler( - resolversFinder, - factory.ShardHeadersForMetachainTopic, - ) - - if err != nil { - return nil, nil, err - } - - blockTracker, err := track.NewMetaBlockTracker() - if err != nil { - return nil, nil, err - } - - metaProcessor, err := block.NewMetaProcessor( - coreServiceContainer, - state.AccountsAdapter, - data.MetaDatapool, - forkDetector, - shardCoordinator, - nodesCoordinator, - specialAddressHandler, - core.Hasher, - core.Marshalizer, - data.Store, - shardsGenesisBlocks, - requestHandler, - core.Uint64ByteSliceConverter, - ) - if err != nil { - return nil, nil, errors.New("could not create block processor: " + err.Error()) - } - - err = metaProcessor.SetAppStatusHandler(core.StatusHandler) - if err != nil { - return nil, nil, err - } - - return metaProcessor, blockTracker, nil + requestHandler, err := requestHandlers.NewMetaResolverRequestHandler( + resolversFinder, + factory.ShardHeadersForMetachainTopic, + ) + + if err != nil { + return nil, nil, err + } + + blockTracker, err := track.NewMetaBlockTracker() + if err != nil { + return nil, nil, err + } + + metaProcessor, 
err := block.NewMetaProcessor( + coreServiceContainer, + state.AccountsAdapter, + data.MetaDatapool, + forkDetector, + shardCoordinator, + nodesCoordinator, + specialAddressHandler, + core.Hasher, + core.Marshalizer, + data.Store, + shardsGenesisBlocks, + requestHandler, + core.Uint64ByteSliceConverter, + ) + if err != nil { + return nil, nil, errors.New("could not create block processor: " + err.Error()) + } + + err = metaProcessor.SetAppStatusHandler(core.StatusHandler) + if err != nil { + return nil, nil, err + } + + return metaProcessor, blockTracker, nil } func getCacherFromConfig(cfg config.CacheConfig) storageUnit.CacheConfig { - return storageUnit.CacheConfig{ - Size: cfg.Size, - Type: storageUnit.CacheType(cfg.Type), - Shards: cfg.Shards, - } + return storageUnit.CacheConfig{ + Size: cfg.Size, + Type: storageUnit.CacheType(cfg.Type), + Shards: cfg.Shards, + } } func getDBFromConfig(cfg config.DBConfig, uniquePath string) storageUnit.DBConfig { - return storageUnit.DBConfig{ - FilePath: filepath.Join(uniquePath, cfg.FilePath), - Type: storageUnit.DBType(cfg.Type), - MaxBatchSize: cfg.MaxBatchSize, - BatchDelaySeconds: cfg.BatchDelaySeconds, - MaxOpenFiles: cfg.MaxOpenFiles, - } + return storageUnit.DBConfig{ + FilePath: filepath.Join(uniquePath, cfg.FilePath), + Type: storageUnit.DBType(cfg.Type), + MaxBatchSize: cfg.MaxBatchSize, + BatchDelaySeconds: cfg.BatchDelaySeconds, + MaxOpenFiles: cfg.MaxOpenFiles, + } } func getBloomFromConfig(cfg config.BloomFilterConfig) storageUnit.BloomConfig { - var hashFuncs []storageUnit.HasherType - if cfg.HashFunc != nil { - hashFuncs = make([]storageUnit.HasherType, 0) - for _, hf := range cfg.HashFunc { - hashFuncs = append(hashFuncs, storageUnit.HasherType(hf)) - } - } - - return storageUnit.BloomConfig{ - Size: cfg.Size, - HashFunc: hashFuncs, - } + var hashFuncs []storageUnit.HasherType + if cfg.HashFunc != nil { + hashFuncs = make([]storageUnit.HasherType, 0) + for _, hf := range cfg.HashFunc { + hashFuncs = append(hashFuncs, storageUnit.HasherType(hf)) + } + } + + return storageUnit.BloomConfig{ + Size: cfg.Size, + HashFunc: hashFuncs, + } } func generateInMemoryAccountsAdapter( - accountFactory state.AccountFactory, - hasher hashing.Hasher, - marshalizer marshal.Marshalizer, + accountFactory state.AccountFactory, + hasher hashing.Hasher, + marshalizer marshal.Marshalizer, ) state.AccountsAdapter { - tr, _ := trie.NewTrie(createMemUnit(), marshalizer, hasher) - adb, _ := state.NewAccountsDB(tr, sha256.Sha256{}, marshalizer, accountFactory) + tr, _ := trie.NewTrie(createMemUnit(), marshalizer, hasher) + adb, _ := state.NewAccountsDB(tr, sha256.Sha256{}, marshalizer, accountFactory) - return adb + return adb } func createMemUnit() storage.Storer { - cache, _ := storageUnit.NewCache(storageUnit.LRUCache, 10, 1) - persist, _ := memorydb.New() - unit, _ := storageUnit.NewStorageUnit(cache, persist) - return unit + cache, _ := storageUnit.NewCache(storageUnit.LRUCache, 10, 1) + persist, _ := memorydb.New() + unit, _ := storageUnit.NewStorageUnit(cache, persist) + return unit } // GetSigningParams returns a key generator, a private key, and a public key func GetSigningParams( - ctx *cli.Context, - log *logger.Logger, - skName string, - skIndexName string, - skPemFileName string, - suite crypto.Suite, + ctx *cli.Context, + log *logger.Logger, + skName string, + skIndexName string, + skPemFileName string, + suite crypto.Suite, ) (keyGen crypto.KeyGenerator, privKey crypto.PrivateKey, pubKey crypto.PublicKey, err error) { - sk, err := getSk(ctx, 
log, skName, skIndexName, skPemFileName) - if err != nil { - return nil, nil, nil, err - } + sk, err := getSk(ctx, log, skName, skIndexName, skPemFileName) + if err != nil { + return nil, nil, nil, err + } - keyGen = signing.NewKeyGenerator(suite) + keyGen = signing.NewKeyGenerator(suite) - privKey, err = keyGen.PrivateKeyFromByteArray(sk) - if err != nil { - return nil, nil, nil, err - } + privKey, err = keyGen.PrivateKeyFromByteArray(sk) + if err != nil { + return nil, nil, nil, err + } - pubKey = privKey.GeneratePublic() + pubKey = privKey.GeneratePublic() - return keyGen, privKey, pubKey, err + return keyGen, privKey, pubKey, err } // GetPkEncoded returns the encoded public key func GetPkEncoded(pubKey crypto.PublicKey) string { - pk, err := pubKey.ToByteArray() - if err != nil { - return err.Error() - } + pk, err := pubKey.ToByteArray() + if err != nil { + return err.Error() + } - return encodeAddress(pk) + return encodeAddress(pk) } func encodeAddress(address []byte) string { - return hex.EncodeToString(address) + return hex.EncodeToString(address) } func decodeAddress(address string) ([]byte, error) { - return hex.DecodeString(address) + return hex.DecodeString(address) } func getSk( - ctx *cli.Context, - log *logger.Logger, - skName string, - skIndexName string, - skPemFileName string, + ctx *cli.Context, + log *logger.Logger, + skName string, + skIndexName string, + skPemFileName string, ) ([]byte, error) { - //if flag is defined, it shall overwrite what was read from pem file - if ctx.GlobalIsSet(skName) { - encodedSk := []byte(ctx.GlobalString(skName)) - return decodeAddress(string(encodedSk)) - } + //if flag is defined, it shall overwrite what was read from pem file + if ctx.GlobalIsSet(skName) { + encodedSk := []byte(ctx.GlobalString(skName)) + return decodeAddress(string(encodedSk)) + } - skIndex := ctx.GlobalInt(skIndexName) - encodedSk, err := core.LoadSkFromPemFile(skPemFileName, log, skIndex) - if err != nil { - return nil, err - } + skIndex := ctx.GlobalInt(skIndexName) + encodedSk, err := core.LoadSkFromPemFile(skPemFileName, log, skIndex) + if err != nil { + return nil, err + } - return decodeAddress(string(encodedSk)) + return decodeAddress(string(encodedSk)) } diff --git a/config/config.go b/config/config.go index 60a45ef4d75..c10f42ec108 100644 --- a/config/config.go +++ b/config/config.go @@ -4,192 +4,192 @@ import "time" // CacheConfig will map the json cache configuration type CacheConfig struct { - Size uint32 `json:"size"` - Type string `json:"type"` - Shards uint32 `json:"shards"` + Size uint32 `json:"size"` + Type string `json:"type"` + Shards uint32 `json:"shards"` } // DBConfig will map the json db configuration type DBConfig struct { - FilePath string `json:"file"` - Type string `json:"type"` - BatchDelaySeconds int `json:"batchDelaySeconds"` - MaxBatchSize int `json:"maxBatchSize"` - MaxOpenFiles int `json:"maxOpenFiles"` + FilePath string `json:"file"` + Type string `json:"type"` + BatchDelaySeconds int `json:"batchDelaySeconds"` + MaxBatchSize int `json:"maxBatchSize"` + MaxOpenFiles int `json:"maxOpenFiles"` } // BloomFilterConfig will map the json bloom filter configuration type BloomFilterConfig struct { - Size uint `json:"size"` - HashFunc []string `json:"hashFunc"` + Size uint `json:"size"` + HashFunc []string `json:"hashFunc"` } // StorageConfig will map the json storage unit configuration type StorageConfig struct { - Cache CacheConfig `json:"cache"` - DB DBConfig `json:"db"` - Bloom BloomFilterConfig `json:"bloom"` + Cache CacheConfig 
`json:"cache"` + DB DBConfig `json:"db"` + Bloom BloomFilterConfig `json:"bloom"` } // LoggerConfig will map the json logger configuration type LoggerConfig struct { - Path string `json:"path"` - StackTraceDepth int `json:"stackTraceDepth"` + Path string `json:"path"` + StackTraceDepth int `json:"stackTraceDepth"` } // AddressConfig will map the json address configuration type AddressConfig struct { - Length int `json:"length"` - Prefix string `json:"prefix"` + Length int `json:"length"` + Prefix string `json:"prefix"` } // TypeConfig will map the json string type configuration type TypeConfig struct { - Type string `json:"type"` + Type string `json:"type"` } // NTPConfig will hold the configuration for NTP queries type NTPConfig struct { - Host string - Port int - Timeout time.Duration - Version int + Host string + Port int + Timeout time.Duration + Version int } // EconomicsConfig will hold the reward configuration type EconomicsConfig struct { - CommunityAddress string - BurnAddress string + CommunityAddress string + BurnAddress string } // Config will hold the entire application configuration parameters type Config struct { - MiniBlocksStorage StorageConfig - PeerBlockBodyStorage StorageConfig - BlockHeaderStorage StorageConfig - TxStorage StorageConfig - UnsignedTransactionStorage StorageConfig - RewardTxStorage StorageConfig - ShardHdrNonceHashStorage StorageConfig - MetaHdrNonceHashStorage StorageConfig - - ShardDataStorage StorageConfig - MetaBlockStorage StorageConfig - PeerDataStorage StorageConfig - - AccountsTrieStorage StorageConfig - BadBlocksCache CacheConfig - - TxBlockBodyDataPool CacheConfig - StateBlockBodyDataPool CacheConfig - PeerBlockBodyDataPool CacheConfig - BlockHeaderDataPool CacheConfig - BlockHeaderNoncesDataPool CacheConfig - TxDataPool CacheConfig - UnsignedTransactionDataPool CacheConfig - RewardTransactionDataPool CacheConfig - MetaBlockBodyDataPool CacheConfig - - MiniBlockHeaderHashesDataPool CacheConfig - ShardHeadersDataPool CacheConfig - MetaHeaderNoncesDataPool CacheConfig - - Logger LoggerConfig - Address AddressConfig - Hasher TypeConfig - MultisigHasher TypeConfig - Marshalizer TypeConfig - - ResourceStats ResourceStatsConfig - Heartbeat HeartbeatConfig - GeneralSettings GeneralSettingsConfig - Consensus TypeConfig - Explorer ExplorerConfig - - NTPConfig NTPConfig - EconomicsConfig EconomicsConfig + MiniBlocksStorage StorageConfig + PeerBlockBodyStorage StorageConfig + BlockHeaderStorage StorageConfig + TxStorage StorageConfig + UnsignedTransactionStorage StorageConfig + RewardTxStorage StorageConfig + ShardHdrNonceHashStorage StorageConfig + MetaHdrNonceHashStorage StorageConfig + + ShardDataStorage StorageConfig + MetaBlockStorage StorageConfig + PeerDataStorage StorageConfig + + AccountsTrieStorage StorageConfig + BadBlocksCache CacheConfig + + TxBlockBodyDataPool CacheConfig + StateBlockBodyDataPool CacheConfig + PeerBlockBodyDataPool CacheConfig + BlockHeaderDataPool CacheConfig + BlockHeaderNoncesDataPool CacheConfig + TxDataPool CacheConfig + UnsignedTransactionDataPool CacheConfig + RewardTransactionDataPool CacheConfig + MetaBlockBodyDataPool CacheConfig + + MiniBlockHeaderHashesDataPool CacheConfig + ShardHeadersDataPool CacheConfig + MetaHeaderNoncesDataPool CacheConfig + + Logger LoggerConfig + Address AddressConfig + Hasher TypeConfig + MultisigHasher TypeConfig + Marshalizer TypeConfig + + ResourceStats ResourceStatsConfig + Heartbeat HeartbeatConfig + GeneralSettings GeneralSettingsConfig + Consensus TypeConfig + Explorer 
ExplorerConfig + + NTPConfig NTPConfig + EconomicsConfig EconomicsConfig } // NodeConfig will hold basic p2p settings type NodeConfig struct { - Port int - Seed string + Port int + Seed string } // MdnsPeerDiscoveryConfig will hold the mdns discovery config settings type MdnsPeerDiscoveryConfig struct { - Enabled bool - RefreshIntervalInSec int - ServiceTag string + Enabled bool + RefreshIntervalInSec int + ServiceTag string } // KadDhtPeerDiscoveryConfig will hold the kad-dht discovery config settings type KadDhtPeerDiscoveryConfig struct { - Enabled bool - RefreshIntervalInSec int - RandezVous string - InitialPeerList []string + Enabled bool + RefreshIntervalInSec int + RandezVous string + InitialPeerList []string } // P2PConfig will hold all the P2P settings type P2PConfig struct { - Node NodeConfig - MdnsPeerDiscovery MdnsPeerDiscoveryConfig - KadDhtPeerDiscovery KadDhtPeerDiscoveryConfig + Node NodeConfig + MdnsPeerDiscovery MdnsPeerDiscoveryConfig + KadDhtPeerDiscovery KadDhtPeerDiscoveryConfig } // ResourceStatsConfig will hold all resource stats settings type ResourceStatsConfig struct { - Enabled bool - RefreshIntervalInSec int + Enabled bool + RefreshIntervalInSec int } // HeartbeatConfig will hold all heartbeat settings type HeartbeatConfig struct { - Enabled bool - MinTimeToWaitBetweenBroadcastsInSec int - MaxTimeToWaitBetweenBroadcastsInSec int - DurationInSecToConsiderUnresponsive int + Enabled bool + MinTimeToWaitBetweenBroadcastsInSec int + MaxTimeToWaitBetweenBroadcastsInSec int + DurationInSecToConsiderUnresponsive int } // GeneralSettingsConfig will hold the general settings for a node type GeneralSettingsConfig struct { - DestinationShardAsObserver string - NetworkID string - StatusPollingIntervalSec int - NodeDisplayName string + DestinationShardAsObserver string + NetworkID string + StatusPollingIntervalSec int + NodeDisplayName string } // ExplorerConfig will hold the configuration for the explorer indexer type ExplorerConfig struct { - Enabled bool - IndexerURL string + Enabled bool + IndexerURL string } // ServersConfig will hold all the confidential settings for servers type ServersConfig struct { - ElasticSearch ElasticSearchConfig - Prometheus PrometheusConfig + ElasticSearch ElasticSearchConfig + Prometheus PrometheusConfig } // PrometheusConfig will hold configuration for prometheus, such as the join URL type PrometheusConfig struct { - PrometheusBaseURL string - JoinRoute string - StatusRoute string + PrometheusBaseURL string + JoinRoute string + StatusRoute string } // ElasticSearchConfig will hold the configuration for the elastic search type ElasticSearchConfig struct { - Username string - Password string + Username string + Password string } // FacadeConfig will hold different configuration option that will be passed to the main ElrondFacade type FacadeConfig struct { - RestApiPort string - PprofEnabled bool - Prometheus bool - PrometheusJoinURL string - PrometheusJobName string + RestApiPort string + PprofEnabled bool + Prometheus bool + PrometheusJoinURL string + PrometheusJobName string } diff --git a/consensus/mock/blockProcessorMock.go b/consensus/mock/blockProcessorMock.go index d703d363073..02e28afe9f5 100644 --- a/consensus/mock/blockProcessorMock.go +++ b/consensus/mock/blockProcessorMock.go @@ -1,70 +1,70 @@ package mock import ( - "math/big" - "time" + "math/big" + "time" - "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data" ) // BlockProcessorMock mocks the implementation for a blockProcessor type 
BlockProcessorMock struct { - ProcessBlockCalled func(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler, haveTime func() time.Duration) error - CommitBlockCalled func(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler) error - RevertAccountStateCalled func() - CreateGenesisBlockCalled func(balances map[string]*big.Int) (data.HeaderHandler, error) - CreateBlockCalled func(round uint64, haveTime func() bool) (data.BodyHandler, error) - RestoreBlockIntoPoolsCalled func(header data.HeaderHandler, body data.BodyHandler) error - SetOnRequestTransactionCalled func(f func(destShardID uint32, txHash []byte)) - CreateBlockHeaderCalled func(body data.BodyHandler, round uint64, haveTime func() bool) (data.HeaderHandler, error) - MarshalizedDataToBroadcastCalled func(header data.HeaderHandler, body data.BodyHandler) (map[uint32][]byte, map[string][][]byte, error) - DecodeBlockBodyCalled func(dta []byte) data.BodyHandler - DecodeBlockHeaderCalled func(dta []byte) data.HeaderHandler - AddLastNotarizedHdrCalled func(shardId uint32, processedHdr data.HeaderHandler) + ProcessBlockCalled func(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler, haveTime func() time.Duration) error + CommitBlockCalled func(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler) error + RevertAccountStateCalled func() + CreateGenesisBlockCalled func(balances map[string]*big.Int) (data.HeaderHandler, error) + CreateBlockCalled func(round uint64, haveTime func() bool) (data.BodyHandler, error) + RestoreBlockIntoPoolsCalled func(header data.HeaderHandler, body data.BodyHandler) error + SetOnRequestTransactionCalled func(f func(destShardID uint32, txHash []byte)) + CreateBlockHeaderCalled func(body data.BodyHandler, round uint64, haveTime func() bool) (data.HeaderHandler, error) + MarshalizedDataToBroadcastCalled func(header data.HeaderHandler, body data.BodyHandler) (map[uint32][]byte, map[string][][]byte, error) + DecodeBlockBodyCalled func(dta []byte) data.BodyHandler + DecodeBlockHeaderCalled func(dta []byte) data.HeaderHandler + AddLastNotarizedHdrCalled func(shardId uint32, processedHdr data.HeaderHandler) } // ProcessBlock mocks pocessing a block func (blProcMock *BlockProcessorMock) ProcessBlock(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler, haveTime func() time.Duration) error { - return blProcMock.ProcessBlockCalled(blockChain, header, body, haveTime) + return blProcMock.ProcessBlockCalled(blockChain, header, body, haveTime) } // CommitBlock mocks the commit of a block func (blProcMock *BlockProcessorMock) CommitBlock(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler) error { - return blProcMock.CommitBlockCalled(blockChain, header, body) + return blProcMock.CommitBlockCalled(blockChain, header, body) } // RevertAccountState mocks revert of the accounts state func (blProcMock *BlockProcessorMock) RevertAccountState() { - blProcMock.RevertAccountStateCalled() + blProcMock.RevertAccountStateCalled() } // CreateBlockBody mocks the creation of a transaction block body func (blProcMock *BlockProcessorMock) CreateBlockBody(round uint64, haveTime func() bool) (data.BodyHandler, error) { - return blProcMock.CreateBlockCalled(round, haveTime) + return blProcMock.CreateBlockCalled(round, haveTime) } func (blProcMock *BlockProcessorMock) RestoreBlockIntoPools(header data.HeaderHandler, body data.BodyHandler) error { - return 
blProcMock.RestoreBlockIntoPoolsCalled(header, body) + return blProcMock.RestoreBlockIntoPoolsCalled(header, body) } func (blProcMock BlockProcessorMock) CreateBlockHeader(body data.BodyHandler, round uint64, haveTime func() bool) (data.HeaderHandler, error) { - return blProcMock.CreateBlockHeaderCalled(body, round, haveTime) + return blProcMock.CreateBlockHeaderCalled(body, round, haveTime) } func (blProcMock BlockProcessorMock) MarshalizedDataToBroadcast(header data.HeaderHandler, body data.BodyHandler) (map[uint32][]byte, map[string][][]byte, error) { - return blProcMock.MarshalizedDataToBroadcastCalled(header, body) + return blProcMock.MarshalizedDataToBroadcastCalled(header, body) } func (blProcMock BlockProcessorMock) DecodeBlockBody(dta []byte) data.BodyHandler { - return blProcMock.DecodeBlockBodyCalled(dta) + return blProcMock.DecodeBlockBodyCalled(dta) } func (blProcMock BlockProcessorMock) DecodeBlockHeader(dta []byte) data.HeaderHandler { - return blProcMock.DecodeBlockHeaderCalled(dta) + return blProcMock.DecodeBlockHeaderCalled(dta) } func (blProcMock BlockProcessorMock) AddLastNotarizedHdr(shardId uint32, processedHdr data.HeaderHandler) { - blProcMock.AddLastNotarizedHdrCalled(shardId, processedHdr) + blProcMock.AddLastNotarizedHdrCalled(shardId, processedHdr) } func (blProcMock BlockProcessorMock) SetConsensusRewardAddresses(consensusRewardAddresses []string, round uint64) { @@ -72,8 +72,8 @@ func (blProcMock BlockProcessorMock) SetConsensusRewardAddresses(consensusReward // IsInterfaceNil returns true if there is no value under the interface func (blProcMock *BlockProcessorMock) IsInterfaceNil() bool { - if blProcMock == nil { - return true - } - return false + if blProcMock == nil { + return true + } + return false } diff --git a/data/address/specialAddresses.go b/data/address/specialAddresses.go index 7e01117c570..33cc044bf24 100644 --- a/data/address/specialAddresses.go +++ b/data/address/specialAddresses.go @@ -1,110 +1,110 @@ package address import ( - "github.com/ElrondNetwork/elrond-go/data" - "github.com/ElrondNetwork/elrond-go/data/state" - "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/sharding" ) type specialAddresses struct { - elrond []byte - consensusRewardAddresses []string - burnAddress []byte - - epoch uint32 - round uint64 - adrConv state.AddressConverter - shardCoordinator sharding.Coordinator + elrond []byte + consensusRewardAddresses []string + burnAddress []byte + + epoch uint32 + round uint64 + adrConv state.AddressConverter + shardCoordinator sharding.Coordinator } // NewSpecialAddressHolder creates a special address holder func NewSpecialAddressHolder( - elrond []byte, - burnAddress []byte, - adrConv state.AddressConverter, - shardCoordinator sharding.Coordinator, + elrond []byte, + burnAddress []byte, + adrConv state.AddressConverter, + shardCoordinator sharding.Coordinator, ) (*specialAddresses, error) { - if elrond == nil { - return nil, data.ErrNilElrondAddress - } - if burnAddress == nil { - return nil, data.ErrNilBurnAddress - } - if adrConv == nil { - return nil, data.ErrNilAddressConverter - } - if shardCoordinator == nil { - return nil, data.ErrNilShardCoordinator - } - - sp := &specialAddresses{ - elrond: elrond, - burnAddress: burnAddress, - adrConv: adrConv, - shardCoordinator: shardCoordinator, - } - - return sp, nil + if elrond == nil { + return nil, data.ErrNilElrondAddress + } + if burnAddress == nil { 
+ return nil, data.ErrNilBurnAddress + } + if adrConv == nil { + return nil, data.ErrNilAddressConverter + } + if shardCoordinator == nil { + return nil, data.ErrNilShardCoordinator + } + + sp := &specialAddresses{ + elrond: elrond, + burnAddress: burnAddress, + adrConv: adrConv, + shardCoordinator: shardCoordinator, + } + + return sp, nil } // SetElrondCommunityAddress sets elrond address func (sp *specialAddresses) SetElrondCommunityAddress(elrond []byte) { - sp.elrond = elrond + sp.elrond = elrond } // ElrondCommunityAddress provides elrond address func (sp *specialAddresses) ElrondCommunityAddress() []byte { - return sp.elrond + return sp.elrond } // BurnAddress provides burn address func (sp *specialAddresses) BurnAddress() []byte { - return sp.burnAddress + return sp.burnAddress } // SetConsensusData sets the consensus rewards addresses for the round func (sp *specialAddresses) SetConsensusData(consensusRewardAddresses []string, round uint64, epoch uint32) { - sp.consensusRewardAddresses = consensusRewardAddresses - sp.round = round - sp.epoch = epoch + sp.consensusRewardAddresses = consensusRewardAddresses + sp.round = round + sp.epoch = epoch } // LeaderAddress provides leader address func (sp *specialAddresses) LeaderAddress() []byte { - if len(sp.consensusRewardAddresses) == 0 { - return nil - } + if len(sp.consensusRewardAddresses) == 0 { + return nil + } - return []byte(sp.consensusRewardAddresses[0]) + return []byte(sp.consensusRewardAddresses[0]) } // ConsensusRewardAddresses provides the consensus reward addresses func (sp *specialAddresses) ConsensusRewardAddresses() []string { - return sp.consensusRewardAddresses + return sp.consensusRewardAddresses } func (sp *specialAddresses) Round() uint64 { - return sp.round + return sp.round } func (sp *specialAddresses) Epoch() uint32 { - return sp.epoch + return sp.epoch } // ShardIdForAddress calculates shard id for address func (sp *specialAddresses) ShardIdForAddress(pubKey []byte) (uint32, error) { - convAdr, err := sp.adrConv.CreateAddressFromPublicKeyBytes(pubKey) - if err != nil { - return 0, err - } + convAdr, err := sp.adrConv.CreateAddressFromPublicKeyBytes(pubKey) + if err != nil { + return 0, err + } - return sp.shardCoordinator.ComputeId(convAdr), nil + return sp.shardCoordinator.ComputeId(convAdr), nil } // IsInterfaceNil returns true if there is no value under the interface func (sp *specialAddresses) IsInterfaceNil() bool { - if sp == nil { - return true - } - return false + if sp == nil { + return true + } + return false } diff --git a/dataRetriever/factory/shard/resolversContainerFactory.go b/dataRetriever/factory/shard/resolversContainerFactory.go index 0c77183dbdc..8c24880ec80 100644 --- a/dataRetriever/factory/shard/resolversContainerFactory.go +++ b/dataRetriever/factory/shard/resolversContainerFactory.go @@ -1,542 +1,542 @@ package shard import ( - "github.com/ElrondNetwork/elrond-go/core/random" - "github.com/ElrondNetwork/elrond-go/data/typeConverters" - "github.com/ElrondNetwork/elrond-go/dataRetriever" - "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/containers" - "github.com/ElrondNetwork/elrond-go/dataRetriever/resolvers" - "github.com/ElrondNetwork/elrond-go/dataRetriever/resolvers/topicResolverSender" - "github.com/ElrondNetwork/elrond-go/marshal" - "github.com/ElrondNetwork/elrond-go/process/factory" - "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/core/random" + "github.com/ElrondNetwork/elrond-go/data/typeConverters" + 
"github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/containers" + "github.com/ElrondNetwork/elrond-go/dataRetriever/resolvers" + "github.com/ElrondNetwork/elrond-go/dataRetriever/resolvers/topicResolverSender" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/process/factory" + "github.com/ElrondNetwork/elrond-go/sharding" ) const emptyExcludePeersOnTopic = "" type resolversContainerFactory struct { - shardCoordinator sharding.Coordinator - messenger dataRetriever.TopicMessageHandler - store dataRetriever.StorageService - marshalizer marshal.Marshalizer - dataPools dataRetriever.PoolsHolder - uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter - intRandomizer dataRetriever.IntRandomizer - dataPacker dataRetriever.DataPacker + shardCoordinator sharding.Coordinator + messenger dataRetriever.TopicMessageHandler + store dataRetriever.StorageService + marshalizer marshal.Marshalizer + dataPools dataRetriever.PoolsHolder + uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter + intRandomizer dataRetriever.IntRandomizer + dataPacker dataRetriever.DataPacker } // NewResolversContainerFactory creates a new container filled with topic resolvers func NewResolversContainerFactory( - shardCoordinator sharding.Coordinator, - messenger dataRetriever.TopicMessageHandler, - store dataRetriever.StorageService, - marshalizer marshal.Marshalizer, - dataPools dataRetriever.PoolsHolder, - uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter, - dataPacker dataRetriever.DataPacker, + shardCoordinator sharding.Coordinator, + messenger dataRetriever.TopicMessageHandler, + store dataRetriever.StorageService, + marshalizer marshal.Marshalizer, + dataPools dataRetriever.PoolsHolder, + uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter, + dataPacker dataRetriever.DataPacker, ) (*resolversContainerFactory, error) { - if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { - return nil, dataRetriever.ErrNilShardCoordinator - } - if messenger == nil || messenger.IsInterfaceNil() { - return nil, dataRetriever.ErrNilMessenger - } - if store == nil || store.IsInterfaceNil() { - return nil, dataRetriever.ErrNilTxStorage - } - if marshalizer == nil || marshalizer.IsInterfaceNil() { - return nil, dataRetriever.ErrNilMarshalizer - } - if dataPools == nil || dataPools.IsInterfaceNil() { - return nil, dataRetriever.ErrNilDataPoolHolder - } - if uint64ByteSliceConverter == nil || uint64ByteSliceConverter.IsInterfaceNil() { - return nil, dataRetriever.ErrNilUint64ByteSliceConverter - } - if dataPacker == nil || dataPacker.IsInterfaceNil() { - return nil, dataRetriever.ErrNilDataPacker - } - - return &resolversContainerFactory{ - shardCoordinator: shardCoordinator, - messenger: messenger, - store: store, - marshalizer: marshalizer, - dataPools: dataPools, - uint64ByteSliceConverter: uint64ByteSliceConverter, - intRandomizer: &random.ConcurrentSafeIntRandomizer{}, - dataPacker: dataPacker, - }, nil + if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { + return nil, dataRetriever.ErrNilShardCoordinator + } + if messenger == nil || messenger.IsInterfaceNil() { + return nil, dataRetriever.ErrNilMessenger + } + if store == nil || store.IsInterfaceNil() { + return nil, dataRetriever.ErrNilTxStorage + } + if marshalizer == nil || marshalizer.IsInterfaceNil() { + return nil, dataRetriever.ErrNilMarshalizer + } + if dataPools == nil || dataPools.IsInterfaceNil() { + return nil, 
dataRetriever.ErrNilDataPoolHolder + } + if uint64ByteSliceConverter == nil || uint64ByteSliceConverter.IsInterfaceNil() { + return nil, dataRetriever.ErrNilUint64ByteSliceConverter + } + if dataPacker == nil || dataPacker.IsInterfaceNil() { + return nil, dataRetriever.ErrNilDataPacker + } + + return &resolversContainerFactory{ + shardCoordinator: shardCoordinator, + messenger: messenger, + store: store, + marshalizer: marshalizer, + dataPools: dataPools, + uint64ByteSliceConverter: uint64ByteSliceConverter, + intRandomizer: &random.ConcurrentSafeIntRandomizer{}, + dataPacker: dataPacker, + }, nil } // Create returns an interceptor container that will hold all interceptors in the system func (rcf *resolversContainerFactory) Create() (dataRetriever.ResolversContainer, error) { - container := containers.NewResolversContainer() - - keys, resolverSlice, err := rcf.generateTxResolvers( - factory.TransactionTopic, - dataRetriever.TransactionUnit, - rcf.dataPools.Transactions(), - ) - if err != nil { - return nil, err - } - err = container.AddMultiple(keys, resolverSlice) - if err != nil { - return nil, err - } - - keys, resolverSlice, err = rcf.generateTxResolvers( - factory.UnsignedTransactionTopic, - dataRetriever.UnsignedTransactionUnit, - rcf.dataPools.UnsignedTransactions(), - ) - if err != nil { - return nil, err - } - err = container.AddMultiple(keys, resolverSlice) - if err != nil { - return nil, err - } - - keys, resolverSlice, err = rcf.generateTxResolvers( - factory.RewardsTransactionTopic, - dataRetriever.RewardTransactionUnit, - rcf.dataPools.RewardTransactions(), - ) - if err != nil { - return nil, err - } - err = container.AddMultiple(keys, resolverSlice) - if err != nil { - return nil, err - } - - keys, resolverSlice, err = rcf.generateHdrResolver() - if err != nil { - return nil, err - } - err = container.AddMultiple(keys, resolverSlice) - if err != nil { - return nil, err - } - - keys, resolverSlice, err = rcf.generateMiniBlocksResolvers() - if err != nil { - return nil, err - } - err = container.AddMultiple(keys, resolverSlice) - if err != nil { - return nil, err - } - - keys, resolverSlice, err = rcf.generatePeerChBlockBodyResolver() - if err != nil { - return nil, err - } - err = container.AddMultiple(keys, resolverSlice) - if err != nil { - return nil, err - } - - keys, resolverSlice, err = rcf.generateMetachainShardHeaderResolver() - if err != nil { - return nil, err - } - err = container.AddMultiple(keys, resolverSlice) - if err != nil { - return nil, err - } - - keys, resolverSlice, err = rcf.generateMetablockHeaderResolver() - if err != nil { - return nil, err - } - err = container.AddMultiple(keys, resolverSlice) - if err != nil { - return nil, err - } - - return container, nil + container := containers.NewResolversContainer() + + keys, resolverSlice, err := rcf.generateTxResolvers( + factory.TransactionTopic, + dataRetriever.TransactionUnit, + rcf.dataPools.Transactions(), + ) + if err != nil { + return nil, err + } + err = container.AddMultiple(keys, resolverSlice) + if err != nil { + return nil, err + } + + keys, resolverSlice, err = rcf.generateTxResolvers( + factory.UnsignedTransactionTopic, + dataRetriever.UnsignedTransactionUnit, + rcf.dataPools.UnsignedTransactions(), + ) + if err != nil { + return nil, err + } + err = container.AddMultiple(keys, resolverSlice) + if err != nil { + return nil, err + } + + keys, resolverSlice, err = rcf.generateTxResolvers( + factory.RewardsTransactionTopic, + dataRetriever.RewardTransactionUnit, + 
rcf.dataPools.RewardTransactions(), + ) + if err != nil { + return nil, err + } + err = container.AddMultiple(keys, resolverSlice) + if err != nil { + return nil, err + } + + keys, resolverSlice, err = rcf.generateHdrResolver() + if err != nil { + return nil, err + } + err = container.AddMultiple(keys, resolverSlice) + if err != nil { + return nil, err + } + + keys, resolverSlice, err = rcf.generateMiniBlocksResolvers() + if err != nil { + return nil, err + } + err = container.AddMultiple(keys, resolverSlice) + if err != nil { + return nil, err + } + + keys, resolverSlice, err = rcf.generatePeerChBlockBodyResolver() + if err != nil { + return nil, err + } + err = container.AddMultiple(keys, resolverSlice) + if err != nil { + return nil, err + } + + keys, resolverSlice, err = rcf.generateMetachainShardHeaderResolver() + if err != nil { + return nil, err + } + err = container.AddMultiple(keys, resolverSlice) + if err != nil { + return nil, err + } + + keys, resolverSlice, err = rcf.generateMetablockHeaderResolver() + if err != nil { + return nil, err + } + err = container.AddMultiple(keys, resolverSlice) + if err != nil { + return nil, err + } + + return container, nil } func (rcf *resolversContainerFactory) createTopicAndAssignHandler( - topicName string, - resolver dataRetriever.Resolver, - createChannel bool, + topicName string, + resolver dataRetriever.Resolver, + createChannel bool, ) (dataRetriever.Resolver, error) { - err := rcf.messenger.CreateTopic(topicName, createChannel) - if err != nil { - return nil, err - } + err := rcf.messenger.CreateTopic(topicName, createChannel) + if err != nil { + return nil, err + } - return resolver, rcf.messenger.RegisterMessageProcessor(topicName, resolver) + return resolver, rcf.messenger.RegisterMessageProcessor(topicName, resolver) } //------- Tx resolvers func (rcf *resolversContainerFactory) generateTxResolvers( - topic string, - unit dataRetriever.UnitType, - dataPool dataRetriever.ShardedDataCacherNotifier, + topic string, + unit dataRetriever.UnitType, + dataPool dataRetriever.ShardedDataCacherNotifier, ) ([]string, []dataRetriever.Resolver, error) { - shardC := rcf.shardCoordinator + shardC := rcf.shardCoordinator - noOfShards := shardC.NumberOfShards() + noOfShards := shardC.NumberOfShards() - keys := make([]string, noOfShards) - resolverSlice := make([]dataRetriever.Resolver, noOfShards) + keys := make([]string, noOfShards) + resolverSlice := make([]dataRetriever.Resolver, noOfShards) - for idx := uint32(0); idx < noOfShards; idx++ { - identifierTx := topic + shardC.CommunicationIdentifier(idx) - excludePeersFromTopic := topic + shardC.CommunicationIdentifier(shardC.SelfId()) + for idx := uint32(0); idx < noOfShards; idx++ { + identifierTx := topic + shardC.CommunicationIdentifier(idx) + excludePeersFromTopic := topic + shardC.CommunicationIdentifier(shardC.SelfId()) - resolver, err := rcf.createTxResolver(identifierTx, excludePeersFromTopic, unit, dataPool) - if err != nil { - return nil, nil, err - } + resolver, err := rcf.createTxResolver(identifierTx, excludePeersFromTopic, unit, dataPool) + if err != nil { + return nil, nil, err + } - resolverSlice[idx] = resolver - keys[idx] = identifierTx - } + resolverSlice[idx] = resolver + keys[idx] = identifierTx + } - return keys, resolverSlice, nil + return keys, resolverSlice, nil } func (rcf *resolversContainerFactory) createTxResolver( - topic string, - excludedTopic string, - unit dataRetriever.UnitType, - dataPool dataRetriever.ShardedDataCacherNotifier, + topic string, + excludedTopic 
string, + unit dataRetriever.UnitType, + dataPool dataRetriever.ShardedDataCacherNotifier, ) (dataRetriever.Resolver, error) { - txStorer := rcf.store.GetStorer(unit) - - peerListCreator, err := topicResolverSender.NewDiffPeerListCreator(rcf.messenger, topic, excludedTopic) - if err != nil { - return nil, err - } - - //TODO instantiate topic sender resolver with the shard IDs for which this resolver is supposed to serve the data - // this will improve the serving of transactions as the searching will be done only on 2 sharded data units - resolverSender, err := topicResolverSender.NewTopicResolverSender( - rcf.messenger, - topic, - peerListCreator, - rcf.marshalizer, - rcf.intRandomizer, - uint32(0), - ) - if err != nil { - return nil, err - } - - resolver, err := resolvers.NewTxResolver( - resolverSender, - dataPool, - txStorer, - rcf.marshalizer, - rcf.dataPacker, - ) - if err != nil { - return nil, err - } - - //add on the request topic - return rcf.createTopicAndAssignHandler( - topic+resolverSender.TopicRequestSuffix(), - resolver, - false) + txStorer := rcf.store.GetStorer(unit) + + peerListCreator, err := topicResolverSender.NewDiffPeerListCreator(rcf.messenger, topic, excludedTopic) + if err != nil { + return nil, err + } + + //TODO instantiate topic sender resolver with the shard IDs for which this resolver is supposed to serve the data + // this will improve the serving of transactions as the searching will be done only on 2 sharded data units + resolverSender, err := topicResolverSender.NewTopicResolverSender( + rcf.messenger, + topic, + peerListCreator, + rcf.marshalizer, + rcf.intRandomizer, + uint32(0), + ) + if err != nil { + return nil, err + } + + resolver, err := resolvers.NewTxResolver( + resolverSender, + dataPool, + txStorer, + rcf.marshalizer, + rcf.dataPacker, + ) + if err != nil { + return nil, err + } + + //add on the request topic + return rcf.createTopicAndAssignHandler( + topic+resolverSender.TopicRequestSuffix(), + resolver, + false) } //------- Hdr resolver func (rcf *resolversContainerFactory) generateHdrResolver() ([]string, []dataRetriever.Resolver, error) { - shardC := rcf.shardCoordinator - - //only one intrashard header topic - identifierHdr := factory.HeadersTopic + shardC.CommunicationIdentifier(shardC.SelfId()) - - peerListCreator, err := topicResolverSender.NewDiffPeerListCreator(rcf.messenger, identifierHdr, emptyExcludePeersOnTopic) - if err != nil { - return nil, nil, err - } - - hdrStorer := rcf.store.GetStorer(dataRetriever.BlockHeaderUnit) - resolverSender, err := topicResolverSender.NewTopicResolverSender( - rcf.messenger, - identifierHdr, - peerListCreator, - rcf.marshalizer, - rcf.intRandomizer, - shardC.SelfId(), - ) - if err != nil { - return nil, nil, err - } - - hdrNonceHashDataUnit := dataRetriever.ShardHdrNonceHashDataUnit + dataRetriever.UnitType(shardC.SelfId()) - hdrNonceStore := rcf.store.GetStorer(hdrNonceHashDataUnit) - resolver, err := resolvers.NewHeaderResolver( - resolverSender, - rcf.dataPools.Headers(), - rcf.dataPools.HeadersNonces(), - hdrStorer, - hdrNonceStore, - rcf.marshalizer, - rcf.uint64ByteSliceConverter, - ) - if err != nil { - return nil, nil, err - } - //add on the request topic - _, err = rcf.createTopicAndAssignHandler( - identifierHdr+resolverSender.TopicRequestSuffix(), - resolver, - false) - if err != nil { - return nil, nil, err - } - - err = rcf.createTopicHeadersForMetachain() - if err != nil { - return nil, nil, err - } - - return []string{identifierHdr}, []dataRetriever.Resolver{resolver}, nil + 
shardC := rcf.shardCoordinator + + //only one intrashard header topic + identifierHdr := factory.HeadersTopic + shardC.CommunicationIdentifier(shardC.SelfId()) + + peerListCreator, err := topicResolverSender.NewDiffPeerListCreator(rcf.messenger, identifierHdr, emptyExcludePeersOnTopic) + if err != nil { + return nil, nil, err + } + + hdrStorer := rcf.store.GetStorer(dataRetriever.BlockHeaderUnit) + resolverSender, err := topicResolverSender.NewTopicResolverSender( + rcf.messenger, + identifierHdr, + peerListCreator, + rcf.marshalizer, + rcf.intRandomizer, + shardC.SelfId(), + ) + if err != nil { + return nil, nil, err + } + + hdrNonceHashDataUnit := dataRetriever.ShardHdrNonceHashDataUnit + dataRetriever.UnitType(shardC.SelfId()) + hdrNonceStore := rcf.store.GetStorer(hdrNonceHashDataUnit) + resolver, err := resolvers.NewHeaderResolver( + resolverSender, + rcf.dataPools.Headers(), + rcf.dataPools.HeadersNonces(), + hdrStorer, + hdrNonceStore, + rcf.marshalizer, + rcf.uint64ByteSliceConverter, + ) + if err != nil { + return nil, nil, err + } + //add on the request topic + _, err = rcf.createTopicAndAssignHandler( + identifierHdr+resolverSender.TopicRequestSuffix(), + resolver, + false) + if err != nil { + return nil, nil, err + } + + err = rcf.createTopicHeadersForMetachain() + if err != nil { + return nil, nil, err + } + + return []string{identifierHdr}, []dataRetriever.Resolver{resolver}, nil } func (rcf *resolversContainerFactory) createTopicHeadersForMetachain() error { - shardC := rcf.shardCoordinator - identifierHdr := factory.ShardHeadersForMetachainTopic + shardC.CommunicationIdentifier(sharding.MetachainShardId) + shardC := rcf.shardCoordinator + identifierHdr := factory.ShardHeadersForMetachainTopic + shardC.CommunicationIdentifier(sharding.MetachainShardId) - return rcf.messenger.CreateTopic(identifierHdr, true) + return rcf.messenger.CreateTopic(identifierHdr, true) } //------- MiniBlocks resolvers func (rcf *resolversContainerFactory) generateMiniBlocksResolvers() ([]string, []dataRetriever.Resolver, error) { - shardC := rcf.shardCoordinator - noOfShards := shardC.NumberOfShards() - keys := make([]string, noOfShards) - resolverSlice := make([]dataRetriever.Resolver, noOfShards) + shardC := rcf.shardCoordinator + noOfShards := shardC.NumberOfShards() + keys := make([]string, noOfShards) + resolverSlice := make([]dataRetriever.Resolver, noOfShards) - for idx := uint32(0); idx < noOfShards; idx++ { - identifierMiniBlocks := factory.MiniBlocksTopic + shardC.CommunicationIdentifier(idx) - excludePeersFromTopic := factory.MiniBlocksTopic + shardC.CommunicationIdentifier(shardC.SelfId()) + for idx := uint32(0); idx < noOfShards; idx++ { + identifierMiniBlocks := factory.MiniBlocksTopic + shardC.CommunicationIdentifier(idx) + excludePeersFromTopic := factory.MiniBlocksTopic + shardC.CommunicationIdentifier(shardC.SelfId()) - resolver, err := rcf.createMiniBlocksResolver(identifierMiniBlocks, excludePeersFromTopic) - if err != nil { - return nil, nil, err - } + resolver, err := rcf.createMiniBlocksResolver(identifierMiniBlocks, excludePeersFromTopic) + if err != nil { + return nil, nil, err + } - resolverSlice[idx] = resolver - keys[idx] = identifierMiniBlocks - } + resolverSlice[idx] = resolver + keys[idx] = identifierMiniBlocks + } - return keys, resolverSlice, nil + return keys, resolverSlice, nil } func (rcf *resolversContainerFactory) createMiniBlocksResolver(topic string, excludedTopic string) (dataRetriever.Resolver, error) { - miniBlocksStorer := 
rcf.store.GetStorer(dataRetriever.MiniBlockUnit) - - peerListCreator, err := topicResolverSender.NewDiffPeerListCreator(rcf.messenger, topic, excludedTopic) - if err != nil { - return nil, err - } - - resolverSender, err := topicResolverSender.NewTopicResolverSender( - rcf.messenger, - topic, - peerListCreator, - rcf.marshalizer, - rcf.intRandomizer, - uint32(0), - ) - if err != nil { - return nil, err - } - - txBlkResolver, err := resolvers.NewGenericBlockBodyResolver( - resolverSender, - rcf.dataPools.MiniBlocks(), - miniBlocksStorer, - rcf.marshalizer, - ) - if err != nil { - return nil, err - } - - //add on the request topic - return rcf.createTopicAndAssignHandler( - topic+resolverSender.TopicRequestSuffix(), - txBlkResolver, - false) + miniBlocksStorer := rcf.store.GetStorer(dataRetriever.MiniBlockUnit) + + peerListCreator, err := topicResolverSender.NewDiffPeerListCreator(rcf.messenger, topic, excludedTopic) + if err != nil { + return nil, err + } + + resolverSender, err := topicResolverSender.NewTopicResolverSender( + rcf.messenger, + topic, + peerListCreator, + rcf.marshalizer, + rcf.intRandomizer, + uint32(0), + ) + if err != nil { + return nil, err + } + + txBlkResolver, err := resolvers.NewGenericBlockBodyResolver( + resolverSender, + rcf.dataPools.MiniBlocks(), + miniBlocksStorer, + rcf.marshalizer, + ) + if err != nil { + return nil, err + } + + //add on the request topic + return rcf.createTopicAndAssignHandler( + topic+resolverSender.TopicRequestSuffix(), + txBlkResolver, + false) } //------- PeerChBlocks resolvers func (rcf *resolversContainerFactory) generatePeerChBlockBodyResolver() ([]string, []dataRetriever.Resolver, error) { - shardC := rcf.shardCoordinator - - //only one intrashard peer change blocks topic - identifierPeerCh := factory.PeerChBodyTopic + shardC.CommunicationIdentifier(shardC.SelfId()) - peerBlockBodyStorer := rcf.store.GetStorer(dataRetriever.PeerChangesUnit) - - peerListCreator, err := topicResolverSender.NewDiffPeerListCreator(rcf.messenger, identifierPeerCh, emptyExcludePeersOnTopic) - if err != nil { - return nil, nil, err - } - - resolverSender, err := topicResolverSender.NewTopicResolverSender( - rcf.messenger, - identifierPeerCh, - peerListCreator, - rcf.marshalizer, - rcf.intRandomizer, - shardC.SelfId(), - ) - if err != nil { - return nil, nil, err - } - - resolver, err := resolvers.NewGenericBlockBodyResolver( - resolverSender, - rcf.dataPools.MiniBlocks(), - peerBlockBodyStorer, - rcf.marshalizer, - ) - if err != nil { - return nil, nil, err - } - //add on the request topic - _, err = rcf.createTopicAndAssignHandler( - identifierPeerCh+resolverSender.TopicRequestSuffix(), - resolver, - false) - if err != nil { - return nil, nil, err - } - - return []string{identifierPeerCh}, []dataRetriever.Resolver{resolver}, nil + shardC := rcf.shardCoordinator + + //only one intrashard peer change blocks topic + identifierPeerCh := factory.PeerChBodyTopic + shardC.CommunicationIdentifier(shardC.SelfId()) + peerBlockBodyStorer := rcf.store.GetStorer(dataRetriever.PeerChangesUnit) + + peerListCreator, err := topicResolverSender.NewDiffPeerListCreator(rcf.messenger, identifierPeerCh, emptyExcludePeersOnTopic) + if err != nil { + return nil, nil, err + } + + resolverSender, err := topicResolverSender.NewTopicResolverSender( + rcf.messenger, + identifierPeerCh, + peerListCreator, + rcf.marshalizer, + rcf.intRandomizer, + shardC.SelfId(), + ) + if err != nil { + return nil, nil, err + } + + resolver, err := resolvers.NewGenericBlockBodyResolver( + 
resolverSender, + rcf.dataPools.MiniBlocks(), + peerBlockBodyStorer, + rcf.marshalizer, + ) + if err != nil { + return nil, nil, err + } + //add on the request topic + _, err = rcf.createTopicAndAssignHandler( + identifierPeerCh+resolverSender.TopicRequestSuffix(), + resolver, + false) + if err != nil { + return nil, nil, err + } + + return []string{identifierPeerCh}, []dataRetriever.Resolver{resolver}, nil } //------- MetachainShardHeaderResolvers func (rcf *resolversContainerFactory) generateMetachainShardHeaderResolver() ([]string, []dataRetriever.Resolver, error) { - shardC := rcf.shardCoordinator - - //only one metachain header topic - //example: shardHeadersForMetachain_0_META - identifierHdr := factory.ShardHeadersForMetachainTopic + shardC.CommunicationIdentifier(sharding.MetachainShardId) - peerListCreator, err := topicResolverSender.NewDiffPeerListCreator(rcf.messenger, identifierHdr, emptyExcludePeersOnTopic) - if err != nil { - return nil, nil, err - } - - hdrStorer := rcf.store.GetStorer(dataRetriever.BlockHeaderUnit) - resolverSender, err := topicResolverSender.NewTopicResolverSender( - rcf.messenger, - identifierHdr, - peerListCreator, - rcf.marshalizer, - rcf.intRandomizer, - shardC.SelfId(), - ) - if err != nil { - return nil, nil, err - } - - hdrNonceHashDataUnit := dataRetriever.ShardHdrNonceHashDataUnit + dataRetriever.UnitType(shardC.SelfId()) - hdrNonceStore := rcf.store.GetStorer(hdrNonceHashDataUnit) - resolver, err := resolvers.NewHeaderResolver( - resolverSender, - rcf.dataPools.Headers(), - rcf.dataPools.HeadersNonces(), - hdrStorer, - hdrNonceStore, - rcf.marshalizer, - rcf.uint64ByteSliceConverter, - ) - if err != nil { - return nil, nil, err - } - - //add on the request topic - _, err = rcf.createTopicAndAssignHandler( - identifierHdr+resolverSender.TopicRequestSuffix(), - resolver, - false) - if err != nil { - return nil, nil, err - } - - return []string{identifierHdr}, []dataRetriever.Resolver{resolver}, nil + shardC := rcf.shardCoordinator + + //only one metachain header topic + //example: shardHeadersForMetachain_0_META + identifierHdr := factory.ShardHeadersForMetachainTopic + shardC.CommunicationIdentifier(sharding.MetachainShardId) + peerListCreator, err := topicResolverSender.NewDiffPeerListCreator(rcf.messenger, identifierHdr, emptyExcludePeersOnTopic) + if err != nil { + return nil, nil, err + } + + hdrStorer := rcf.store.GetStorer(dataRetriever.BlockHeaderUnit) + resolverSender, err := topicResolverSender.NewTopicResolverSender( + rcf.messenger, + identifierHdr, + peerListCreator, + rcf.marshalizer, + rcf.intRandomizer, + shardC.SelfId(), + ) + if err != nil { + return nil, nil, err + } + + hdrNonceHashDataUnit := dataRetriever.ShardHdrNonceHashDataUnit + dataRetriever.UnitType(shardC.SelfId()) + hdrNonceStore := rcf.store.GetStorer(hdrNonceHashDataUnit) + resolver, err := resolvers.NewHeaderResolver( + resolverSender, + rcf.dataPools.Headers(), + rcf.dataPools.HeadersNonces(), + hdrStorer, + hdrNonceStore, + rcf.marshalizer, + rcf.uint64ByteSliceConverter, + ) + if err != nil { + return nil, nil, err + } + + //add on the request topic + _, err = rcf.createTopicAndAssignHandler( + identifierHdr+resolverSender.TopicRequestSuffix(), + resolver, + false) + if err != nil { + return nil, nil, err + } + + return []string{identifierHdr}, []dataRetriever.Resolver{resolver}, nil } //------- MetaBlockHeaderResolvers func (rcf *resolversContainerFactory) generateMetablockHeaderResolver() ([]string, []dataRetriever.Resolver, error) { - shardC := 
rcf.shardCoordinator - - //only one metachain header block topic - //this is: metachainBlocks - identifierHdr := factory.MetachainBlocksTopic - hdrStorer := rcf.store.GetStorer(dataRetriever.MetaBlockUnit) - - metaAndCrtShardTopic := factory.ShardHeadersForMetachainTopic + shardC.CommunicationIdentifier(sharding.MetachainShardId) - excludedPeersOnTopic := factory.TransactionTopic + shardC.CommunicationIdentifier(shardC.SelfId()) - - peerListCreator, err := topicResolverSender.NewDiffPeerListCreator(rcf.messenger, metaAndCrtShardTopic, excludedPeersOnTopic) - if err != nil { - return nil, nil, err - } - - resolverSender, err := topicResolverSender.NewTopicResolverSender( - rcf.messenger, - identifierHdr, - peerListCreator, - rcf.marshalizer, - rcf.intRandomizer, - sharding.MetachainShardId, - ) - if err != nil { - return nil, nil, err - } - - hdrNonceStore := rcf.store.GetStorer(dataRetriever.MetaHdrNonceHashDataUnit) - resolver, err := resolvers.NewHeaderResolver( - resolverSender, - rcf.dataPools.MetaBlocks(), - rcf.dataPools.HeadersNonces(), - hdrStorer, - hdrNonceStore, - rcf.marshalizer, - rcf.uint64ByteSliceConverter, - ) - if err != nil { - return nil, nil, err - } - - //add on the request topic - _, err = rcf.createTopicAndAssignHandler( - identifierHdr+resolverSender.TopicRequestSuffix(), - resolver, - false) - if err != nil { - return nil, nil, err - } - - return []string{identifierHdr}, []dataRetriever.Resolver{resolver}, nil + shardC := rcf.shardCoordinator + + //only one metachain header block topic + //this is: metachainBlocks + identifierHdr := factory.MetachainBlocksTopic + hdrStorer := rcf.store.GetStorer(dataRetriever.MetaBlockUnit) + + metaAndCrtShardTopic := factory.ShardHeadersForMetachainTopic + shardC.CommunicationIdentifier(sharding.MetachainShardId) + excludedPeersOnTopic := factory.TransactionTopic + shardC.CommunicationIdentifier(shardC.SelfId()) + + peerListCreator, err := topicResolverSender.NewDiffPeerListCreator(rcf.messenger, metaAndCrtShardTopic, excludedPeersOnTopic) + if err != nil { + return nil, nil, err + } + + resolverSender, err := topicResolverSender.NewTopicResolverSender( + rcf.messenger, + identifierHdr, + peerListCreator, + rcf.marshalizer, + rcf.intRandomizer, + sharding.MetachainShardId, + ) + if err != nil { + return nil, nil, err + } + + hdrNonceStore := rcf.store.GetStorer(dataRetriever.MetaHdrNonceHashDataUnit) + resolver, err := resolvers.NewHeaderResolver( + resolverSender, + rcf.dataPools.MetaBlocks(), + rcf.dataPools.HeadersNonces(), + hdrStorer, + hdrNonceStore, + rcf.marshalizer, + rcf.uint64ByteSliceConverter, + ) + if err != nil { + return nil, nil, err + } + + //add on the request topic + _, err = rcf.createTopicAndAssignHandler( + identifierHdr+resolverSender.TopicRequestSuffix(), + resolver, + false) + if err != nil { + return nil, nil, err + } + + return []string{identifierHdr}, []dataRetriever.Resolver{resolver}, nil } // IsInterfaceNil returns true if there is no value under the interface func (rcf *resolversContainerFactory) IsInterfaceNil() bool { - if rcf == nil { - return true - } - return false + if rcf == nil { + return true + } + return false } diff --git a/dataRetriever/factory/shard/resolversContainerFactory_test.go b/dataRetriever/factory/shard/resolversContainerFactory_test.go index c07453d1a55..aee73e043d8 100644 --- a/dataRetriever/factory/shard/resolversContainerFactory_test.go +++ b/dataRetriever/factory/shard/resolversContainerFactory_test.go @@ -1,431 +1,431 @@ package shard_test import ( - "errors" - 
"strings" - "testing" - - "github.com/ElrondNetwork/elrond-go/dataRetriever" - "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/shard" - "github.com/ElrondNetwork/elrond-go/dataRetriever/mock" - "github.com/ElrondNetwork/elrond-go/p2p" - "github.com/ElrondNetwork/elrond-go/process/factory" - "github.com/ElrondNetwork/elrond-go/storage" - "github.com/stretchr/testify/assert" + "errors" + "strings" + "testing" + + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/shard" + "github.com/ElrondNetwork/elrond-go/dataRetriever/mock" + "github.com/ElrondNetwork/elrond-go/p2p" + "github.com/ElrondNetwork/elrond-go/process/factory" + "github.com/ElrondNetwork/elrond-go/storage" + "github.com/stretchr/testify/assert" ) var errExpected = errors.New("expected error") func createStubTopicMessageHandler(matchStrToErrOnCreate string, matchStrToErrOnRegister string) dataRetriever.TopicMessageHandler { - tmhs := mock.NewTopicMessageHandlerStub() + tmhs := mock.NewTopicMessageHandlerStub() - tmhs.CreateTopicCalled = func(name string, createChannelForTopic bool) error { - if matchStrToErrOnCreate == "" { - return nil - } + tmhs.CreateTopicCalled = func(name string, createChannelForTopic bool) error { + if matchStrToErrOnCreate == "" { + return nil + } - if strings.Contains(name, matchStrToErrOnCreate) { - return errExpected - } + if strings.Contains(name, matchStrToErrOnCreate) { + return errExpected + } - return nil - } + return nil + } - tmhs.RegisterMessageProcessorCalled = func(topic string, handler p2p.MessageProcessor) error { - if matchStrToErrOnRegister == "" { - return nil - } + tmhs.RegisterMessageProcessorCalled = func(topic string, handler p2p.MessageProcessor) error { + if matchStrToErrOnRegister == "" { + return nil + } - if strings.Contains(topic, matchStrToErrOnRegister) { - return errExpected - } + if strings.Contains(topic, matchStrToErrOnRegister) { + return errExpected + } - return nil - } + return nil + } - return tmhs + return tmhs } func createDataPools() dataRetriever.PoolsHolder { - pools := &mock.PoolsHolderStub{} - pools.TransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{} - } - pools.HeadersCalled = func() storage.Cacher { - return &mock.CacherStub{} - } - pools.HeadersNoncesCalled = func() dataRetriever.Uint64SyncMapCacher { - return &mock.Uint64SyncMapCacherStub{} - } - pools.MiniBlocksCalled = func() storage.Cacher { - return &mock.CacherStub{} - } - pools.PeerChangesBlocksCalled = func() storage.Cacher { - return &mock.CacherStub{} - } - pools.MetaBlocksCalled = func() storage.Cacher { - return &mock.CacherStub{} - } - pools.UnsignedTransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{} - } - pools.RewardTransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{} - } - - return pools + pools := &mock.PoolsHolderStub{} + pools.TransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{} + } + pools.HeadersCalled = func() storage.Cacher { + return &mock.CacherStub{} + } + pools.HeadersNoncesCalled = func() dataRetriever.Uint64SyncMapCacher { + return &mock.Uint64SyncMapCacherStub{} + } + pools.MiniBlocksCalled = func() storage.Cacher { + return &mock.CacherStub{} + } + pools.PeerChangesBlocksCalled = func() storage.Cacher { + return &mock.CacherStub{} + } + pools.MetaBlocksCalled = func() storage.Cacher { + return &mock.CacherStub{} 
+ } + pools.UnsignedTransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{} + } + pools.RewardTransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{} + } + + return pools } func createStore() dataRetriever.StorageService { - return &mock.ChainStorerMock{ - GetStorerCalled: func(unitType dataRetriever.UnitType) storage.Storer { - return &mock.StorerStub{} - }, - } + return &mock.ChainStorerMock{ + GetStorerCalled: func(unitType dataRetriever.UnitType) storage.Storer { + return &mock.StorerStub{} + }, + } } //------- NewResolversContainerFactory func TestNewResolversContainerFactory_NilShardCoordinatorShouldErr(t *testing.T) { - t.Parallel() - - rcf, err := shard.NewResolversContainerFactory( - nil, - createStubTopicMessageHandler("", ""), - createStore(), - &mock.MarshalizerMock{}, - createDataPools(), - &mock.Uint64ByteSliceConverterMock{}, - &mock.DataPackerStub{}, - ) - - assert.Nil(t, rcf) - assert.Equal(t, dataRetriever.ErrNilShardCoordinator, err) + t.Parallel() + + rcf, err := shard.NewResolversContainerFactory( + nil, + createStubTopicMessageHandler("", ""), + createStore(), + &mock.MarshalizerMock{}, + createDataPools(), + &mock.Uint64ByteSliceConverterMock{}, + &mock.DataPackerStub{}, + ) + + assert.Nil(t, rcf) + assert.Equal(t, dataRetriever.ErrNilShardCoordinator, err) } func TestNewResolversContainerFactory_NilMessengerShouldErr(t *testing.T) { - t.Parallel() - - rcf, err := shard.NewResolversContainerFactory( - mock.NewOneShardCoordinatorMock(), - nil, - createStore(), - &mock.MarshalizerMock{}, - createDataPools(), - &mock.Uint64ByteSliceConverterMock{}, - &mock.DataPackerStub{}, - ) - - assert.Nil(t, rcf) - assert.Equal(t, dataRetriever.ErrNilMessenger, err) + t.Parallel() + + rcf, err := shard.NewResolversContainerFactory( + mock.NewOneShardCoordinatorMock(), + nil, + createStore(), + &mock.MarshalizerMock{}, + createDataPools(), + &mock.Uint64ByteSliceConverterMock{}, + &mock.DataPackerStub{}, + ) + + assert.Nil(t, rcf) + assert.Equal(t, dataRetriever.ErrNilMessenger, err) } func TestNewResolversContainerFactory_NilBlockchainShouldErr(t *testing.T) { - t.Parallel() - - rcf, err := shard.NewResolversContainerFactory( - mock.NewOneShardCoordinatorMock(), - createStubTopicMessageHandler("", ""), - nil, - &mock.MarshalizerMock{}, - createDataPools(), - &mock.Uint64ByteSliceConverterMock{}, - &mock.DataPackerStub{}, - ) - - assert.Nil(t, rcf) - assert.Equal(t, dataRetriever.ErrNilTxStorage, err) + t.Parallel() + + rcf, err := shard.NewResolversContainerFactory( + mock.NewOneShardCoordinatorMock(), + createStubTopicMessageHandler("", ""), + nil, + &mock.MarshalizerMock{}, + createDataPools(), + &mock.Uint64ByteSliceConverterMock{}, + &mock.DataPackerStub{}, + ) + + assert.Nil(t, rcf) + assert.Equal(t, dataRetriever.ErrNilTxStorage, err) } func TestNewResolversContainerFactory_NilMarshalizerShouldErr(t *testing.T) { - t.Parallel() - - rcf, err := shard.NewResolversContainerFactory( - mock.NewOneShardCoordinatorMock(), - createStubTopicMessageHandler("", ""), - createStore(), - nil, - createDataPools(), - &mock.Uint64ByteSliceConverterMock{}, - &mock.DataPackerStub{}, - ) - - assert.Nil(t, rcf) - assert.Equal(t, dataRetriever.ErrNilMarshalizer, err) + t.Parallel() + + rcf, err := shard.NewResolversContainerFactory( + mock.NewOneShardCoordinatorMock(), + createStubTopicMessageHandler("", ""), + createStore(), + nil, + createDataPools(), + &mock.Uint64ByteSliceConverterMock{}, + 
&mock.DataPackerStub{}, + ) + + assert.Nil(t, rcf) + assert.Equal(t, dataRetriever.ErrNilMarshalizer, err) } func TestNewResolversContainerFactory_NilDataPoolShouldErr(t *testing.T) { - t.Parallel() - - rcf, err := shard.NewResolversContainerFactory( - mock.NewOneShardCoordinatorMock(), - createStubTopicMessageHandler("", ""), - createStore(), - &mock.MarshalizerMock{}, - nil, - &mock.Uint64ByteSliceConverterMock{}, - &mock.DataPackerStub{}, - ) - - assert.Nil(t, rcf) - assert.Equal(t, dataRetriever.ErrNilDataPoolHolder, err) + t.Parallel() + + rcf, err := shard.NewResolversContainerFactory( + mock.NewOneShardCoordinatorMock(), + createStubTopicMessageHandler("", ""), + createStore(), + &mock.MarshalizerMock{}, + nil, + &mock.Uint64ByteSliceConverterMock{}, + &mock.DataPackerStub{}, + ) + + assert.Nil(t, rcf) + assert.Equal(t, dataRetriever.ErrNilDataPoolHolder, err) } func TestNewResolversContainerFactory_NilUint64SliceConverterShouldErr(t *testing.T) { - t.Parallel() - - rcf, err := shard.NewResolversContainerFactory( - mock.NewOneShardCoordinatorMock(), - createStubTopicMessageHandler("", ""), - createStore(), - &mock.MarshalizerMock{}, - createDataPools(), - nil, - &mock.DataPackerStub{}, - ) - - assert.Nil(t, rcf) - assert.Equal(t, dataRetriever.ErrNilUint64ByteSliceConverter, err) + t.Parallel() + + rcf, err := shard.NewResolversContainerFactory( + mock.NewOneShardCoordinatorMock(), + createStubTopicMessageHandler("", ""), + createStore(), + &mock.MarshalizerMock{}, + createDataPools(), + nil, + &mock.DataPackerStub{}, + ) + + assert.Nil(t, rcf) + assert.Equal(t, dataRetriever.ErrNilUint64ByteSliceConverter, err) } func TestNewResolversContainerFactory_NilSliceSplitterShouldErr(t *testing.T) { - t.Parallel() - - rcf, err := shard.NewResolversContainerFactory( - mock.NewOneShardCoordinatorMock(), - createStubTopicMessageHandler("", ""), - createStore(), - &mock.MarshalizerMock{}, - createDataPools(), - &mock.Uint64ByteSliceConverterMock{}, - nil, - ) - - assert.Nil(t, rcf) - assert.Equal(t, dataRetriever.ErrNilDataPacker, err) + t.Parallel() + + rcf, err := shard.NewResolversContainerFactory( + mock.NewOneShardCoordinatorMock(), + createStubTopicMessageHandler("", ""), + createStore(), + &mock.MarshalizerMock{}, + createDataPools(), + &mock.Uint64ByteSliceConverterMock{}, + nil, + ) + + assert.Nil(t, rcf) + assert.Equal(t, dataRetriever.ErrNilDataPacker, err) } func TestNewResolversContainerFactory_ShouldWork(t *testing.T) { - t.Parallel() - - rcf, err := shard.NewResolversContainerFactory( - mock.NewOneShardCoordinatorMock(), - createStubTopicMessageHandler("", ""), - createStore(), - &mock.MarshalizerMock{}, - createDataPools(), - &mock.Uint64ByteSliceConverterMock{}, - &mock.DataPackerStub{}, - ) - - assert.NotNil(t, rcf) - assert.Nil(t, err) + t.Parallel() + + rcf, err := shard.NewResolversContainerFactory( + mock.NewOneShardCoordinatorMock(), + createStubTopicMessageHandler("", ""), + createStore(), + &mock.MarshalizerMock{}, + createDataPools(), + &mock.Uint64ByteSliceConverterMock{}, + &mock.DataPackerStub{}, + ) + + assert.NotNil(t, rcf) + assert.Nil(t, err) } //------- Create func TestResolversContainerFactory_CreateTopicCreationTxFailsShouldErr(t *testing.T) { - t.Parallel() - - rcf, _ := shard.NewResolversContainerFactory( - mock.NewOneShardCoordinatorMock(), - createStubTopicMessageHandler(factory.TransactionTopic, ""), - createStore(), - &mock.MarshalizerMock{}, - createDataPools(), - &mock.Uint64ByteSliceConverterMock{}, - &mock.DataPackerStub{}, - ) - - container, err 
:= rcf.Create() - - assert.Nil(t, container) - assert.Equal(t, errExpected, err) + t.Parallel() + + rcf, _ := shard.NewResolversContainerFactory( + mock.NewOneShardCoordinatorMock(), + createStubTopicMessageHandler(factory.TransactionTopic, ""), + createStore(), + &mock.MarshalizerMock{}, + createDataPools(), + &mock.Uint64ByteSliceConverterMock{}, + &mock.DataPackerStub{}, + ) + + container, err := rcf.Create() + + assert.Nil(t, container) + assert.Equal(t, errExpected, err) } func TestResolversContainerFactory_CreateTopicCreationHdrFailsShouldErr(t *testing.T) { - t.Parallel() - - rcf, _ := shard.NewResolversContainerFactory( - mock.NewOneShardCoordinatorMock(), - createStubTopicMessageHandler(factory.HeadersTopic, ""), - createStore(), - &mock.MarshalizerMock{}, - createDataPools(), - &mock.Uint64ByteSliceConverterMock{}, - &mock.DataPackerStub{}, - ) - - container, err := rcf.Create() - - assert.Nil(t, container) - assert.Equal(t, errExpected, err) + t.Parallel() + + rcf, _ := shard.NewResolversContainerFactory( + mock.NewOneShardCoordinatorMock(), + createStubTopicMessageHandler(factory.HeadersTopic, ""), + createStore(), + &mock.MarshalizerMock{}, + createDataPools(), + &mock.Uint64ByteSliceConverterMock{}, + &mock.DataPackerStub{}, + ) + + container, err := rcf.Create() + + assert.Nil(t, container) + assert.Equal(t, errExpected, err) } func TestResolversContainerFactory_CreateTopicCreationMiniBlocksFailsShouldErr(t *testing.T) { - t.Parallel() - - rcf, _ := shard.NewResolversContainerFactory( - mock.NewOneShardCoordinatorMock(), - createStubTopicMessageHandler(factory.MiniBlocksTopic, ""), - createStore(), - &mock.MarshalizerMock{}, - createDataPools(), - &mock.Uint64ByteSliceConverterMock{}, - &mock.DataPackerStub{}, - ) - - container, err := rcf.Create() - - assert.Nil(t, container) - assert.Equal(t, errExpected, err) + t.Parallel() + + rcf, _ := shard.NewResolversContainerFactory( + mock.NewOneShardCoordinatorMock(), + createStubTopicMessageHandler(factory.MiniBlocksTopic, ""), + createStore(), + &mock.MarshalizerMock{}, + createDataPools(), + &mock.Uint64ByteSliceConverterMock{}, + &mock.DataPackerStub{}, + ) + + container, err := rcf.Create() + + assert.Nil(t, container) + assert.Equal(t, errExpected, err) } func TestResolversContainerFactory_CreateTopicCreationPeerChBlocksFailsShouldErr(t *testing.T) { - t.Parallel() - - rcf, _ := shard.NewResolversContainerFactory( - mock.NewOneShardCoordinatorMock(), - createStubTopicMessageHandler(factory.PeerChBodyTopic, ""), - createStore(), - &mock.MarshalizerMock{}, - createDataPools(), - &mock.Uint64ByteSliceConverterMock{}, - &mock.DataPackerStub{}, - ) - - container, err := rcf.Create() - - assert.Nil(t, container) - assert.Equal(t, errExpected, err) + t.Parallel() + + rcf, _ := shard.NewResolversContainerFactory( + mock.NewOneShardCoordinatorMock(), + createStubTopicMessageHandler(factory.PeerChBodyTopic, ""), + createStore(), + &mock.MarshalizerMock{}, + createDataPools(), + &mock.Uint64ByteSliceConverterMock{}, + &mock.DataPackerStub{}, + ) + + container, err := rcf.Create() + + assert.Nil(t, container) + assert.Equal(t, errExpected, err) } func TestResolversContainerFactory_CreateRegisterTxFailsShouldErr(t *testing.T) { - t.Parallel() - - rcf, _ := shard.NewResolversContainerFactory( - mock.NewOneShardCoordinatorMock(), - createStubTopicMessageHandler("", factory.TransactionTopic), - createStore(), - &mock.MarshalizerMock{}, - createDataPools(), - &mock.Uint64ByteSliceConverterMock{}, - &mock.DataPackerStub{}, - ) - - container, 
err := rcf.Create() - - assert.Nil(t, container) - assert.Equal(t, errExpected, err) + t.Parallel() + + rcf, _ := shard.NewResolversContainerFactory( + mock.NewOneShardCoordinatorMock(), + createStubTopicMessageHandler("", factory.TransactionTopic), + createStore(), + &mock.MarshalizerMock{}, + createDataPools(), + &mock.Uint64ByteSliceConverterMock{}, + &mock.DataPackerStub{}, + ) + + container, err := rcf.Create() + + assert.Nil(t, container) + assert.Equal(t, errExpected, err) } func TestResolversContainerFactory_CreateRegisterHdrFailsShouldErr(t *testing.T) { - t.Parallel() - - rcf, _ := shard.NewResolversContainerFactory( - mock.NewOneShardCoordinatorMock(), - createStubTopicMessageHandler("", factory.HeadersTopic), - createStore(), - &mock.MarshalizerMock{}, - createDataPools(), - &mock.Uint64ByteSliceConverterMock{}, - &mock.DataPackerStub{}, - ) - - container, err := rcf.Create() - - assert.Nil(t, container) - assert.Equal(t, errExpected, err) + t.Parallel() + + rcf, _ := shard.NewResolversContainerFactory( + mock.NewOneShardCoordinatorMock(), + createStubTopicMessageHandler("", factory.HeadersTopic), + createStore(), + &mock.MarshalizerMock{}, + createDataPools(), + &mock.Uint64ByteSliceConverterMock{}, + &mock.DataPackerStub{}, + ) + + container, err := rcf.Create() + + assert.Nil(t, container) + assert.Equal(t, errExpected, err) } func TestResolversContainerFactory_CreateRegisterMiniBlocksFailsShouldErr(t *testing.T) { - t.Parallel() - - rcf, _ := shard.NewResolversContainerFactory( - mock.NewOneShardCoordinatorMock(), - createStubTopicMessageHandler("", factory.MiniBlocksTopic), - createStore(), - &mock.MarshalizerMock{}, - createDataPools(), - &mock.Uint64ByteSliceConverterMock{}, - &mock.DataPackerStub{}, - ) - - container, err := rcf.Create() - - assert.Nil(t, container) - assert.Equal(t, errExpected, err) + t.Parallel() + + rcf, _ := shard.NewResolversContainerFactory( + mock.NewOneShardCoordinatorMock(), + createStubTopicMessageHandler("", factory.MiniBlocksTopic), + createStore(), + &mock.MarshalizerMock{}, + createDataPools(), + &mock.Uint64ByteSliceConverterMock{}, + &mock.DataPackerStub{}, + ) + + container, err := rcf.Create() + + assert.Nil(t, container) + assert.Equal(t, errExpected, err) } func TestResolversContainerFactory_CreateRegisterPeerChBlocksFailsShouldErr(t *testing.T) { - t.Parallel() - - rcf, _ := shard.NewResolversContainerFactory( - mock.NewOneShardCoordinatorMock(), - createStubTopicMessageHandler("", factory.PeerChBodyTopic), - createStore(), - &mock.MarshalizerMock{}, - createDataPools(), - &mock.Uint64ByteSliceConverterMock{}, - &mock.DataPackerStub{}, - ) - - container, err := rcf.Create() - - assert.Nil(t, container) - assert.Equal(t, errExpected, err) + t.Parallel() + + rcf, _ := shard.NewResolversContainerFactory( + mock.NewOneShardCoordinatorMock(), + createStubTopicMessageHandler("", factory.PeerChBodyTopic), + createStore(), + &mock.MarshalizerMock{}, + createDataPools(), + &mock.Uint64ByteSliceConverterMock{}, + &mock.DataPackerStub{}, + ) + + container, err := rcf.Create() + + assert.Nil(t, container) + assert.Equal(t, errExpected, err) } func TestResolversContainerFactory_CreateShouldWork(t *testing.T) { - t.Parallel() - - rcf, _ := shard.NewResolversContainerFactory( - mock.NewOneShardCoordinatorMock(), - createStubTopicMessageHandler("", ""), - createStore(), - &mock.MarshalizerMock{}, - createDataPools(), - &mock.Uint64ByteSliceConverterMock{}, - &mock.DataPackerStub{}, - ) - - container, err := rcf.Create() - - assert.NotNil(t, 
container) - assert.Nil(t, err) + t.Parallel() + + rcf, _ := shard.NewResolversContainerFactory( + mock.NewOneShardCoordinatorMock(), + createStubTopicMessageHandler("", ""), + createStore(), + &mock.MarshalizerMock{}, + createDataPools(), + &mock.Uint64ByteSliceConverterMock{}, + &mock.DataPackerStub{}, + ) + + container, err := rcf.Create() + + assert.NotNil(t, container) + assert.Nil(t, err) } func TestResolversContainerFactory_With4ShardsShouldWork(t *testing.T) { - t.Parallel() - - noOfShards := 4 - - shardCoordinator := mock.NewMultipleShardsCoordinatorMock() - shardCoordinator.SetNoShards(uint32(noOfShards)) - shardCoordinator.CurrentShard = 1 - - rcf, _ := shard.NewResolversContainerFactory( - shardCoordinator, - createStubTopicMessageHandler("", ""), - createStore(), - &mock.MarshalizerMock{}, - createDataPools(), - &mock.Uint64ByteSliceConverterMock{}, - &mock.DataPackerStub{}, - ) - - container, _ := rcf.Create() - - numResolverSCRs := noOfShards - numResolverTxs := noOfShards - numResolverRewardTxs := noOfShards - numResolverHeaders := 1 - numResolverMiniBlocks := noOfShards - numResolverPeerChanges := 1 - numResolverMetachainShardHeaders := 1 - numResolverMetaBlockHeaders := 1 - totalResolvers := numResolverTxs + numResolverHeaders + numResolverMiniBlocks + numResolverPeerChanges + - numResolverMetachainShardHeaders + numResolverMetaBlockHeaders + numResolverSCRs + numResolverRewardTxs - - assert.Equal(t, totalResolvers, container.Len()) + t.Parallel() + + noOfShards := 4 + + shardCoordinator := mock.NewMultipleShardsCoordinatorMock() + shardCoordinator.SetNoShards(uint32(noOfShards)) + shardCoordinator.CurrentShard = 1 + + rcf, _ := shard.NewResolversContainerFactory( + shardCoordinator, + createStubTopicMessageHandler("", ""), + createStore(), + &mock.MarshalizerMock{}, + createDataPools(), + &mock.Uint64ByteSliceConverterMock{}, + &mock.DataPackerStub{}, + ) + + container, _ := rcf.Create() + + numResolverSCRs := noOfShards + numResolverTxs := noOfShards + numResolverRewardTxs := noOfShards + numResolverHeaders := 1 + numResolverMiniBlocks := noOfShards + numResolverPeerChanges := 1 + numResolverMetachainShardHeaders := 1 + numResolverMetaBlockHeaders := 1 + totalResolvers := numResolverTxs + numResolverHeaders + numResolverMiniBlocks + numResolverPeerChanges + + numResolverMetachainShardHeaders + numResolverMetaBlockHeaders + numResolverSCRs + numResolverRewardTxs + + assert.Equal(t, totalResolvers, container.Len()) } diff --git a/integrationTests/mock/blockProcessorMock.go b/integrationTests/mock/blockProcessorMock.go index a4508db6a0c..67338af603c 100644 --- a/integrationTests/mock/blockProcessorMock.go +++ b/integrationTests/mock/blockProcessorMock.go @@ -1,108 +1,108 @@ package mock import ( - "time" + "time" - "github.com/ElrondNetwork/elrond-go/data" - "github.com/ElrondNetwork/elrond-go/data/block" - "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/marshal" ) // BlockProcessorMock mocks the implementation for a blockProcessor type BlockProcessorMock struct { - NrCommitBlockCalled uint32 - Marshalizer marshal.Marshalizer - ProcessBlockCalled func(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler, haveTime func() time.Duration) error - CommitBlockCalled func(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler) error - RevertAccountStateCalled func() - CreateBlockCalled func(round uint64, 
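// Worked count for TestResolversContainerFactory_With4ShardsShouldWork above:
// with noOfShards = 4 the expected container size is
// 4 (txs) + 4 (SCRs) + 4 (reward txs) + 4 (mini blocks)
// + 1 (headers) + 1 (peer changes) + 1 (metachain shard headers)
// + 1 (meta block headers) = 20 resolvers, the value checked against container.Len().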
haveTime func() bool) (data.BodyHandler, error) - RestoreBlockIntoPoolsCalled func(header data.HeaderHandler, body data.BodyHandler) error - CreateBlockHeaderCalled func(body data.BodyHandler, round uint64, haveTime func() bool) (data.HeaderHandler, error) - MarshalizedDataToBroadcastCalled func(header data.HeaderHandler, body data.BodyHandler) (map[uint32][]byte, map[string][][]byte, error) - DecodeBlockBodyCalled func(dta []byte) data.BodyHandler - DecodeBlockHeaderCalled func(dta []byte) data.HeaderHandler - AddLastNotarizedHdrCalled func(shardId uint32, processedHdr data.HeaderHandler) - SetConsensusRewardAddressesCalled func([]string) + NrCommitBlockCalled uint32 + Marshalizer marshal.Marshalizer + ProcessBlockCalled func(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler, haveTime func() time.Duration) error + CommitBlockCalled func(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler) error + RevertAccountStateCalled func() + CreateBlockCalled func(round uint64, haveTime func() bool) (data.BodyHandler, error) + RestoreBlockIntoPoolsCalled func(header data.HeaderHandler, body data.BodyHandler) error + CreateBlockHeaderCalled func(body data.BodyHandler, round uint64, haveTime func() bool) (data.HeaderHandler, error) + MarshalizedDataToBroadcastCalled func(header data.HeaderHandler, body data.BodyHandler) (map[uint32][]byte, map[string][][]byte, error) + DecodeBlockBodyCalled func(dta []byte) data.BodyHandler + DecodeBlockHeaderCalled func(dta []byte) data.HeaderHandler + AddLastNotarizedHdrCalled func(shardId uint32, processedHdr data.HeaderHandler) + SetConsensusRewardAddressesCalled func([]string) } // ProcessBlock mocks pocessing a block func (blProcMock *BlockProcessorMock) ProcessBlock(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler, haveTime func() time.Duration) error { - return blProcMock.ProcessBlockCalled(blockChain, header, body, haveTime) + return blProcMock.ProcessBlockCalled(blockChain, header, body, haveTime) } // CommitBlock mocks the commit of a block func (blProcMock *BlockProcessorMock) CommitBlock(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler) error { - return blProcMock.CommitBlockCalled(blockChain, header, body) + return blProcMock.CommitBlockCalled(blockChain, header, body) } // RevertAccountState mocks revert of the accounts state func (blProcMock *BlockProcessorMock) RevertAccountState() { - blProcMock.RevertAccountStateCalled() + blProcMock.RevertAccountStateCalled() } // CreateTxBlockBody mocks the creation of a transaction block body func (blProcMock *BlockProcessorMock) CreateBlockBody(round uint64, haveTime func() bool) (data.BodyHandler, error) { - return blProcMock.CreateBlockCalled(round, haveTime) + return blProcMock.CreateBlockCalled(round, haveTime) } func (blProcMock *BlockProcessorMock) RestoreBlockIntoPools(header data.HeaderHandler, body data.BodyHandler) error { - return blProcMock.RestoreBlockIntoPoolsCalled(header, body) + return blProcMock.RestoreBlockIntoPoolsCalled(header, body) } func (blProcMock BlockProcessorMock) CreateBlockHeader(body data.BodyHandler, round uint64, haveTime func() bool) (data.HeaderHandler, error) { - return blProcMock.CreateBlockHeaderCalled(body, round, haveTime) + return blProcMock.CreateBlockHeaderCalled(body, round, haveTime) } func (blProcMock BlockProcessorMock) MarshalizedDataToBroadcast(header data.HeaderHandler, body data.BodyHandler) (map[uint32][]byte, map[string][][]byte, error) { - 
return blProcMock.MarshalizedDataToBroadcastCalled(header, body) + return blProcMock.MarshalizedDataToBroadcastCalled(header, body) } // DecodeBlockBody method decodes block body from a given byte array func (blProcMock BlockProcessorMock) DecodeBlockBody(dta []byte) data.BodyHandler { - if dta == nil { - return nil - } + if dta == nil { + return nil + } - var body block.Body + var body block.Body - err := blProcMock.Marshalizer.Unmarshal(&body, dta) - if err != nil { - return nil - } + err := blProcMock.Marshalizer.Unmarshal(&body, dta) + if err != nil { + return nil + } - return body + return body } // DecodeBlockHeader method decodes block header from a given byte array func (blProcMock BlockProcessorMock) DecodeBlockHeader(dta []byte) data.HeaderHandler { - if dta == nil { - return nil - } + if dta == nil { + return nil + } - var header block.Header + var header block.Header - err := blProcMock.Marshalizer.Unmarshal(&header, dta) - if err != nil { - return nil - } + err := blProcMock.Marshalizer.Unmarshal(&header, dta) + if err != nil { + return nil + } - return &header + return &header } func (blProcMock BlockProcessorMock) AddLastNotarizedHdr(shardId uint32, processedHdr data.HeaderHandler) { - blProcMock.AddLastNotarizedHdrCalled(shardId, processedHdr) + blProcMock.AddLastNotarizedHdrCalled(shardId, processedHdr) } func (blProcMock BlockProcessorMock) SetConsensusRewardAddresses(consensusRewardAddresses []string, round uint64) { - if blProcMock.SetConsensusRewardAddressesCalled != nil { - blProcMock.SetConsensusRewardAddressesCalled(consensusRewardAddresses) - } + if blProcMock.SetConsensusRewardAddressesCalled != nil { + blProcMock.SetConsensusRewardAddressesCalled(consensusRewardAddresses) + } } // IsInterfaceNil returns true if there is no value under the interface func (blProcMock *BlockProcessorMock) IsInterfaceNil() bool { - if blProcMock == nil { - return true - } - return false + if blProcMock == nil { + return true + } + return false } diff --git a/integrationTests/mock/specialAddressHandlerMock.go b/integrationTests/mock/specialAddressHandlerMock.go index b00c01ca711..a9af9a371df 100644 --- a/integrationTests/mock/specialAddressHandlerMock.go +++ b/integrationTests/mock/specialAddressHandlerMock.go @@ -1,79 +1,79 @@ package mock import ( - "github.com/ElrondNetwork/elrond-go/data/state" - "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/sharding" ) type SpecialAddressHandlerMock struct { - ElrondCommunityAddressCalled func() []byte - LeaderAddressCalled func() []byte - BurnAddressCalled func() []byte - ShardIdForAddressCalled func([]byte) (uint32, error) - AdrConv state.AddressConverter - ShardCoordinator sharding.Coordinator - - addresses []string - epoch uint32 - round uint64 + ElrondCommunityAddressCalled func() []byte + LeaderAddressCalled func() []byte + BurnAddressCalled func() []byte + ShardIdForAddressCalled func([]byte) (uint32, error) + AdrConv state.AddressConverter + ShardCoordinator sharding.Coordinator + + addresses []string + epoch uint32 + round uint64 } func (sh *SpecialAddressHandlerMock) SetElrondCommunityAddress(elrond []byte) { } func (sh *SpecialAddressHandlerMock) SetConsensusData(consensusRewardAddresses []string, round uint64, epoch uint32) { - sh.addresses = consensusRewardAddresses + sh.addresses = consensusRewardAddresses } func (sh *SpecialAddressHandlerMock) ConsensusRewardAddresses() []string { - return sh.addresses + return sh.addresses } func (sh 
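// Usage sketch, assuming this mock package and its existing imports: the decode
// helpers above unmarshal with the mock's Marshalizer and return nil for a nil
// input or an unmarshal error, so a marshal/decode round trip looks like this
// (the function name and the marshalizer argument are illustrative).
func exampleDecodeHeaderRoundTrip(m marshal.Marshalizer) data.HeaderHandler {
	blProc := BlockProcessorMock{Marshalizer: m}

	hdr := &block.Header{Nonce: 1}
	buff, _ := m.Marshal(hdr)

	// returns a *block.Header with Nonce == 1, or nil if unmarshalling fails
	return blProc.DecodeBlockHeader(buff)
}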
*SpecialAddressHandlerMock) BurnAddress() []byte { - if sh.BurnAddressCalled == nil { - return []byte("burn0000000000000000000000000000") - } + if sh.BurnAddressCalled == nil { + return []byte("burn0000000000000000000000000000") + } - return sh.BurnAddressCalled() + return sh.BurnAddressCalled() } func (sh *SpecialAddressHandlerMock) ElrondCommunityAddress() []byte { - if sh.ElrondCommunityAddressCalled == nil { - return []byte("elrond00000000000000000000000000") - } + if sh.ElrondCommunityAddressCalled == nil { + return []byte("elrond00000000000000000000000000") + } - return sh.ElrondCommunityAddressCalled() + return sh.ElrondCommunityAddressCalled() } func (sh *SpecialAddressHandlerMock) LeaderAddress() []byte { - if sh.LeaderAddressCalled == nil { - return []byte("leader0000000000000000000000000000") - } + if sh.LeaderAddressCalled == nil { + return []byte("leader0000000000000000000000000000") + } - return sh.LeaderAddressCalled() + return sh.LeaderAddressCalled() } func (sh *SpecialAddressHandlerMock) Round() uint64 { - return sh.round + return sh.round } func (sh *SpecialAddressHandlerMock) Epoch() uint32 { - return sh.epoch + return sh.epoch } func (sh *SpecialAddressHandlerMock) ShardIdForAddress(addr []byte) (uint32, error) { - convAdr, err := sh.AdrConv.CreateAddressFromPublicKeyBytes(addr) - if err != nil { - return 0, err - } + convAdr, err := sh.AdrConv.CreateAddressFromPublicKeyBytes(addr) + if err != nil { + return 0, err + } - return sh.ShardCoordinator.ComputeId(convAdr), nil + return sh.ShardCoordinator.ComputeId(convAdr), nil } // IsInterfaceNil returns true if there is no value under the interface func (sh *SpecialAddressHandlerMock) IsInterfaceNil() bool { - if sh == nil { - return true - } - return false + if sh == nil { + return true + } + return false } diff --git a/integrationTests/multiShard/block/interceptedHeadersSigVerification_test.go b/integrationTests/multiShard/block/interceptedHeadersSigVerification_test.go index 8a607f4dfff..651638218fe 100644 --- a/integrationTests/multiShard/block/interceptedHeadersSigVerification_test.go +++ b/integrationTests/multiShard/block/interceptedHeadersSigVerification_test.go @@ -1,158 +1,158 @@ package block import ( - "context" - "fmt" - "testing" - "time" - - "github.com/ElrondNetwork/elrond-go/integrationTests" - "github.com/ElrondNetwork/elrond-go/sharding" - "github.com/stretchr/testify/assert" + "context" + "fmt" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go/integrationTests" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/stretchr/testify/assert" ) const broadcastDelay = 2 * time.Second func TestInterceptedShardBlockHeaderVerifiedWithCorrectConsensusGroup(t *testing.T) { - if testing.Short() { - t.Skip("this is not a short test") - } - - nodesPerShard := 4 - nbMetaNodes := 4 - nbShards := 1 - consensusGroupSize := 3 - - advertiser := integrationTests.CreateMessengerWithKadDht(context.Background(), "") - _ = advertiser.Bootstrap() - - seedAddress := integrationTests.GetConnectableAddress(advertiser) - - // create map of shard - testNodeProcessors for metachain and shard chain - nodesMap := integrationTests.CreateNodesWithNodesCoordinator( - nodesPerShard, - nbMetaNodes, - nbShards, - consensusGroupSize, - consensusGroupSize, - seedAddress, - ) - - for _, nodes := range nodesMap { - integrationTests.DisplayAndStartNodes(nodes) - } - - defer func() { - _ = advertiser.Close() - for _, nodes := range nodesMap { - for _, n := range nodes { - _ = n.Node.Stop() - } - } - }() - - fmt.Println("Shard 
node generating header and block body...") - - // one testNodeProcessor from shard proposes block signed by all other nodes in shard consensus - randomness := []byte("random seed") - round := uint64(1) - nonce := uint64(1) - - body, header, _, _ := integrationTests.ProposeBlockWithConsensusSignature(0, nodesMap, round, nonce, randomness) - - nodesMap[0][0].BroadcastBlock(body, header) - - time.Sleep(broadcastDelay) - - headerBytes, _ := integrationTests.TestMarshalizer.Marshal(header) - headerHash := integrationTests.TestHasher.Compute(string(headerBytes)) - - // all nodes in metachain have the block header in pool as interceptor validates it - for _, metaNode := range nodesMap[sharding.MetachainShardId] { - v, ok := metaNode.MetaDataPool.ShardHeaders().Get(headerHash) - assert.True(t, ok) - assert.Equal(t, header, v) - } - - // all nodes in shard have the block in pool as interceptor validates it - for _, shardNode := range nodesMap[0] { - v, ok := shardNode.ShardDataPool.Headers().Get(headerHash) - assert.True(t, ok) - assert.Equal(t, header, v) - } + if testing.Short() { + t.Skip("this is not a short test") + } + + nodesPerShard := 4 + nbMetaNodes := 4 + nbShards := 1 + consensusGroupSize := 3 + + advertiser := integrationTests.CreateMessengerWithKadDht(context.Background(), "") + _ = advertiser.Bootstrap() + + seedAddress := integrationTests.GetConnectableAddress(advertiser) + + // create map of shard - testNodeProcessors for metachain and shard chain + nodesMap := integrationTests.CreateNodesWithNodesCoordinator( + nodesPerShard, + nbMetaNodes, + nbShards, + consensusGroupSize, + consensusGroupSize, + seedAddress, + ) + + for _, nodes := range nodesMap { + integrationTests.DisplayAndStartNodes(nodes) + } + + defer func() { + _ = advertiser.Close() + for _, nodes := range nodesMap { + for _, n := range nodes { + _ = n.Node.Stop() + } + } + }() + + fmt.Println("Shard node generating header and block body...") + + // one testNodeProcessor from shard proposes block signed by all other nodes in shard consensus + randomness := []byte("random seed") + round := uint64(1) + nonce := uint64(1) + + body, header, _, _ := integrationTests.ProposeBlockWithConsensusSignature(0, nodesMap, round, nonce, randomness) + + nodesMap[0][0].BroadcastBlock(body, header) + + time.Sleep(broadcastDelay) + + headerBytes, _ := integrationTests.TestMarshalizer.Marshal(header) + headerHash := integrationTests.TestHasher.Compute(string(headerBytes)) + + // all nodes in metachain have the block header in pool as interceptor validates it + for _, metaNode := range nodesMap[sharding.MetachainShardId] { + v, ok := metaNode.MetaDataPool.ShardHeaders().Get(headerHash) + assert.True(t, ok) + assert.Equal(t, header, v) + } + + // all nodes in shard have the block in pool as interceptor validates it + for _, shardNode := range nodesMap[0] { + v, ok := shardNode.ShardDataPool.Headers().Get(headerHash) + assert.True(t, ok) + assert.Equal(t, header, v) + } } func TestInterceptedMetaBlockVerifiedWithCorrectConsensusGroup(t *testing.T) { - if testing.Short() { - t.Skip("this is not a short test") - } - - nodesPerShard := 4 - nbMetaNodes := 4 - nbShards := 1 - consensusGroupSize := 3 - - advertiser := integrationTests.CreateMessengerWithKadDht(context.Background(), "") - _ = advertiser.Bootstrap() - - seedAddress := integrationTests.GetConnectableAddress(advertiser) - - // create map of shard - testNodeProcessors for metachain and shard chain - nodesMap := integrationTests.CreateNodesWithNodesCoordinator( - nodesPerShard, - 
nbMetaNodes, - nbShards, - consensusGroupSize, - consensusGroupSize, - seedAddress, - ) - - for _, nodes := range nodesMap { - integrationTests.DisplayAndStartNodes(nodes) - } - - defer func() { - _ = advertiser.Close() - for _, nodes := range nodesMap { - for _, n := range nodes { - _ = n.Node.Stop() - } - } - }() - - fmt.Println("Metachain node Generating header and block body...") - - // one testNodeProcessor from shard proposes block signed by all other nodes in shard consensus - randomness := []byte("random seed") - round := uint64(1) - nonce := uint64(1) - - body, header, _, _ := integrationTests.ProposeBlockWithConsensusSignature( - sharding.MetachainShardId, - nodesMap, - round, - nonce, - randomness, - ) - - nodesMap[sharding.MetachainShardId][0].BroadcastBlock(body, header) - - time.Sleep(broadcastDelay) - - headerBytes, _ := integrationTests.TestMarshalizer.Marshal(header) - headerHash := integrationTests.TestHasher.Compute(string(headerBytes)) - - // all nodes in metachain do not have the block in pool as interceptor does not validate it with a wrong consensus - for _, metaNode := range nodesMap[sharding.MetachainShardId] { - v, ok := metaNode.MetaDataPool.MetaChainBlocks().Get(headerHash) - assert.True(t, ok) - assert.Equal(t, header, v) - } - - // all nodes in shard do not have the block in pool as interceptor does not validate it with a wrong consensus - for _, shardNode := range nodesMap[0] { - v, ok := shardNode.ShardDataPool.MetaBlocks().Get(headerHash) - assert.True(t, ok) - assert.Equal(t, header, v) - } + if testing.Short() { + t.Skip("this is not a short test") + } + + nodesPerShard := 4 + nbMetaNodes := 4 + nbShards := 1 + consensusGroupSize := 3 + + advertiser := integrationTests.CreateMessengerWithKadDht(context.Background(), "") + _ = advertiser.Bootstrap() + + seedAddress := integrationTests.GetConnectableAddress(advertiser) + + // create map of shard - testNodeProcessors for metachain and shard chain + nodesMap := integrationTests.CreateNodesWithNodesCoordinator( + nodesPerShard, + nbMetaNodes, + nbShards, + consensusGroupSize, + consensusGroupSize, + seedAddress, + ) + + for _, nodes := range nodesMap { + integrationTests.DisplayAndStartNodes(nodes) + } + + defer func() { + _ = advertiser.Close() + for _, nodes := range nodesMap { + for _, n := range nodes { + _ = n.Node.Stop() + } + } + }() + + fmt.Println("Metachain node Generating header and block body...") + + // one testNodeProcessor from shard proposes block signed by all other nodes in shard consensus + randomness := []byte("random seed") + round := uint64(1) + nonce := uint64(1) + + body, header, _, _ := integrationTests.ProposeBlockWithConsensusSignature( + sharding.MetachainShardId, + nodesMap, + round, + nonce, + randomness, + ) + + nodesMap[sharding.MetachainShardId][0].BroadcastBlock(body, header) + + time.Sleep(broadcastDelay) + + headerBytes, _ := integrationTests.TestMarshalizer.Marshal(header) + headerHash := integrationTests.TestHasher.Compute(string(headerBytes)) + + // all nodes in metachain do not have the block in pool as interceptor does not validate it with a wrong consensus + for _, metaNode := range nodesMap[sharding.MetachainShardId] { + v, ok := metaNode.MetaDataPool.MetaChainBlocks().Get(headerHash) + assert.True(t, ok) + assert.Equal(t, header, v) + } + + // all nodes in shard do not have the block in pool as interceptor does not validate it with a wrong consensus + for _, shardNode := range nodesMap[0] { + v, ok := shardNode.ShardDataPool.MetaBlocks().Get(headerHash) + 
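// Helper sketch (hypothetical, not an existing integrationTests function; it
// assumes the marshal, hashing and data packages are imported): the two
// interception tests above derive the pool key of a broadcast header by
// marshalling it and hashing the resulting bytes; factored out, the pattern is:
func computeHeaderHash(m marshal.Marshalizer, h hashing.Hasher, hdr data.HeaderHandler) []byte {
	buff, _ := m.Marshal(hdr)
	return h.Compute(string(buff))
}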
assert.True(t, ok) + assert.Equal(t, header, v) + } } diff --git a/integrationTests/multiShard/smartContract/testInitilalizer.go b/integrationTests/multiShard/smartContract/testInitilalizer.go index 3f7d9fb26cd..8b973ba8060 100644 --- a/integrationTests/multiShard/smartContract/testInitilalizer.go +++ b/integrationTests/multiShard/smartContract/testInitilalizer.go @@ -1,64 +1,64 @@ package smartContract import ( - "context" - "crypto/ecdsa" - "encoding/base64" - "encoding/hex" - "fmt" - "math/big" - "math/rand" - "strings" - "sync" - "sync/atomic" - "time" - - "github.com/ElrondNetwork/elrond-go/consensus" - "github.com/ElrondNetwork/elrond-go/consensus/spos/sposFactory" - "github.com/ElrondNetwork/elrond-go/core/partitioning" - "github.com/ElrondNetwork/elrond-go/crypto" - "github.com/ElrondNetwork/elrond-go/crypto/signing" - "github.com/ElrondNetwork/elrond-go/crypto/signing/kyber" - "github.com/ElrondNetwork/elrond-go/crypto/signing/kyber/singlesig" - "github.com/ElrondNetwork/elrond-go/data" - dataBlock "github.com/ElrondNetwork/elrond-go/data/block" - "github.com/ElrondNetwork/elrond-go/data/blockchain" - "github.com/ElrondNetwork/elrond-go/data/state" - "github.com/ElrondNetwork/elrond-go/data/state/addressConverters" - "github.com/ElrondNetwork/elrond-go/data/trie" - "github.com/ElrondNetwork/elrond-go/data/typeConverters/uint64ByteSlice" - "github.com/ElrondNetwork/elrond-go/dataRetriever" - "github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool" - "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/containers" - metafactoryDataRetriever "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/metachain" - factoryDataRetriever "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/shard" - "github.com/ElrondNetwork/elrond-go/dataRetriever/requestHandlers" - "github.com/ElrondNetwork/elrond-go/dataRetriever/shardedData" - "github.com/ElrondNetwork/elrond-go/hashing/sha256" - "github.com/ElrondNetwork/elrond-go/integrationTests/mock" - "github.com/ElrondNetwork/elrond-go/marshal" - "github.com/ElrondNetwork/elrond-go/node" - "github.com/ElrondNetwork/elrond-go/p2p" - "github.com/ElrondNetwork/elrond-go/p2p/libp2p" - "github.com/ElrondNetwork/elrond-go/p2p/libp2p/discovery" - "github.com/ElrondNetwork/elrond-go/p2p/loadBalancer" - "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/process/block" - "github.com/ElrondNetwork/elrond-go/process/coordinator" - "github.com/ElrondNetwork/elrond-go/process/factory" - metaProcess "github.com/ElrondNetwork/elrond-go/process/factory/metachain" - "github.com/ElrondNetwork/elrond-go/process/factory/shard" - "github.com/ElrondNetwork/elrond-go/process/rewardTransaction" - "github.com/ElrondNetwork/elrond-go/process/smartContract" - "github.com/ElrondNetwork/elrond-go/process/smartContract/hooks" - "github.com/ElrondNetwork/elrond-go/process/transaction" - "github.com/ElrondNetwork/elrond-go/sharding" - "github.com/ElrondNetwork/elrond-go/storage" - "github.com/ElrondNetwork/elrond-go/storage/memorydb" - "github.com/ElrondNetwork/elrond-go/storage/storageUnit" - vmcommon "github.com/ElrondNetwork/elrond-vm-common" - "github.com/btcsuite/btcd/btcec" - libp2pCrypto "github.com/libp2p/go-libp2p-core/crypto" + "context" + "crypto/ecdsa" + "encoding/base64" + "encoding/hex" + "fmt" + "math/big" + "math/rand" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/ElrondNetwork/elrond-go/consensus" + "github.com/ElrondNetwork/elrond-go/consensus/spos/sposFactory" + 
"github.com/ElrondNetwork/elrond-go/core/partitioning" + "github.com/ElrondNetwork/elrond-go/crypto" + "github.com/ElrondNetwork/elrond-go/crypto/signing" + "github.com/ElrondNetwork/elrond-go/crypto/signing/kyber" + "github.com/ElrondNetwork/elrond-go/crypto/signing/kyber/singlesig" + "github.com/ElrondNetwork/elrond-go/data" + dataBlock "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/blockchain" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/data/state/addressConverters" + "github.com/ElrondNetwork/elrond-go/data/trie" + "github.com/ElrondNetwork/elrond-go/data/typeConverters/uint64ByteSlice" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool" + "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/containers" + metafactoryDataRetriever "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/metachain" + factoryDataRetriever "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/shard" + "github.com/ElrondNetwork/elrond-go/dataRetriever/requestHandlers" + "github.com/ElrondNetwork/elrond-go/dataRetriever/shardedData" + "github.com/ElrondNetwork/elrond-go/hashing/sha256" + "github.com/ElrondNetwork/elrond-go/integrationTests/mock" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/node" + "github.com/ElrondNetwork/elrond-go/p2p" + "github.com/ElrondNetwork/elrond-go/p2p/libp2p" + "github.com/ElrondNetwork/elrond-go/p2p/libp2p/discovery" + "github.com/ElrondNetwork/elrond-go/p2p/loadBalancer" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/block" + "github.com/ElrondNetwork/elrond-go/process/coordinator" + "github.com/ElrondNetwork/elrond-go/process/factory" + metaProcess "github.com/ElrondNetwork/elrond-go/process/factory/metachain" + "github.com/ElrondNetwork/elrond-go/process/factory/shard" + "github.com/ElrondNetwork/elrond-go/process/rewardTransaction" + "github.com/ElrondNetwork/elrond-go/process/smartContract" + "github.com/ElrondNetwork/elrond-go/process/smartContract/hooks" + "github.com/ElrondNetwork/elrond-go/process/transaction" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/storage" + "github.com/ElrondNetwork/elrond-go/storage/memorydb" + "github.com/ElrondNetwork/elrond-go/storage/storageUnit" + vmcommon "github.com/ElrondNetwork/elrond-vm-common" + "github.com/btcsuite/btcd/btcec" + libp2pCrypto "github.com/libp2p/go-libp2p-core/crypto" ) var r *rand.Rand @@ -72,835 +72,835 @@ var addrConv, _ = addressConverters.NewPlainAddressConverter(32, "0x") var opGas = int64(1) func init() { - r = rand.New(rand.NewSource(time.Now().UnixNano())) + r = rand.New(rand.NewSource(time.Now().UnixNano())) } type testNode struct { - node *node.Node - messenger p2p.Messenger - shardId uint32 - accntState state.AccountsAdapter - blkc data.ChainHandler - store dataRetriever.StorageService - blkProcessor process.BlockProcessor - txProcessor process.TransactionProcessor - txCoordinator process.TransactionCoordinator - scrForwarder process.IntermediateTransactionHandler - broadcastMessenger consensus.BroadcastMessenger - sk crypto.PrivateKey - pk crypto.PublicKey - dPool dataRetriever.PoolsHolder - resFinder dataRetriever.ResolversFinder - headersRecv int32 - miniblocksRecv int32 - mutHeaders sync.Mutex - headersHashes [][]byte - headers []data.HeaderHandler - mutMiniblocks sync.Mutex - miniblocksHashes [][]byte - miniblocks 
[]*dataBlock.MiniBlock - metachainHdrRecv int32 - txsRecv int32 + node *node.Node + messenger p2p.Messenger + shardId uint32 + accntState state.AccountsAdapter + blkc data.ChainHandler + store dataRetriever.StorageService + blkProcessor process.BlockProcessor + txProcessor process.TransactionProcessor + txCoordinator process.TransactionCoordinator + scrForwarder process.IntermediateTransactionHandler + broadcastMessenger consensus.BroadcastMessenger + sk crypto.PrivateKey + pk crypto.PublicKey + dPool dataRetriever.PoolsHolder + resFinder dataRetriever.ResolversFinder + headersRecv int32 + miniblocksRecv int32 + mutHeaders sync.Mutex + headersHashes [][]byte + headers []data.HeaderHandler + mutMiniblocks sync.Mutex + miniblocksHashes [][]byte + miniblocks []*dataBlock.MiniBlock + metachainHdrRecv int32 + txsRecv int32 } type keyPair struct { - sk crypto.PrivateKey - pk crypto.PublicKey + sk crypto.PrivateKey + pk crypto.PublicKey } type cryptoParams struct { - keyGen crypto.KeyGenerator - keys map[uint32][]*keyPair - singleSigner crypto.SingleSigner + keyGen crypto.KeyGenerator + keys map[uint32][]*keyPair + singleSigner crypto.SingleSigner } func genValidatorsFromPubKeys(pubKeysMap map[uint32][]string) map[uint32][]sharding.Validator { - validatorsMap := make(map[uint32][]sharding.Validator) - - for shardId, shardNodesPks := range pubKeysMap { - shardValidators := make([]sharding.Validator, 0) - for i := 0; i < len(shardNodesPks); i++ { - v, _ := sharding.NewValidator(big.NewInt(0), 1, []byte(shardNodesPks[i]), []byte(shardNodesPks[i])) - shardValidators = append(shardValidators, v) - } - validatorsMap[shardId] = shardValidators - } - - return validatorsMap + validatorsMap := make(map[uint32][]sharding.Validator) + + for shardId, shardNodesPks := range pubKeysMap { + shardValidators := make([]sharding.Validator, 0) + for i := 0; i < len(shardNodesPks); i++ { + v, _ := sharding.NewValidator(big.NewInt(0), 1, []byte(shardNodesPks[i]), []byte(shardNodesPks[i])) + shardValidators = append(shardValidators, v) + } + validatorsMap[shardId] = shardValidators + } + + return validatorsMap } func createCryptoParams(nodesPerShard int, nbMetaNodes int, nbShards int) *cryptoParams { - suite := kyber.NewBlakeSHA256Ed25519() - singleSigner := &singlesig.SchnorrSigner{} - keyGen := signing.NewKeyGenerator(suite) - - keysMap := make(map[uint32][]*keyPair) - keyPairs := make([]*keyPair, nodesPerShard) - for shardId := 0; shardId < nbShards; shardId++ { - for n := 0; n < nodesPerShard; n++ { - kp := &keyPair{} - kp.sk, kp.pk = keyGen.GeneratePair() - keyPairs[n] = kp - } - keysMap[uint32(shardId)] = keyPairs - } - - keyPairs = make([]*keyPair, nbMetaNodes) - for n := 0; n < nbMetaNodes; n++ { - kp := &keyPair{} - kp.sk, kp.pk = keyGen.GeneratePair() - keyPairs[n] = kp - } - keysMap[sharding.MetachainShardId] = keyPairs - - params := &cryptoParams{ - keys: keysMap, - keyGen: keyGen, - singleSigner: singleSigner, - } - - return params + suite := kyber.NewBlakeSHA256Ed25519() + singleSigner := &singlesig.SchnorrSigner{} + keyGen := signing.NewKeyGenerator(suite) + + keysMap := make(map[uint32][]*keyPair) + keyPairs := make([]*keyPair, nodesPerShard) + for shardId := 0; shardId < nbShards; shardId++ { + for n := 0; n < nodesPerShard; n++ { + kp := &keyPair{} + kp.sk, kp.pk = keyGen.GeneratePair() + keyPairs[n] = kp + } + keysMap[uint32(shardId)] = keyPairs + } + + keyPairs = make([]*keyPair, nbMetaNodes) + for n := 0; n < nbMetaNodes; n++ { + kp := &keyPair{} + kp.sk, kp.pk = keyGen.GeneratePair() + keyPairs[n] 
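// Variant sketch (hypothetical helper, not in this file): generating one shard's
// key pairs into a freshly allocated slice keeps each keysMap entry backed by
// its own array, instead of reusing a single slice across shard iterations as
// createCryptoParams above does.
func generateShardKeyPairs(keyGen crypto.KeyGenerator, nodesPerShard int) []*keyPair {
	shardPairs := make([]*keyPair, nodesPerShard)
	for n := 0; n < nodesPerShard; n++ {
		kp := &keyPair{}
		kp.sk, kp.pk = keyGen.GeneratePair()
		shardPairs[n] = kp
	}
	return shardPairs
}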
= kp + } + keysMap[sharding.MetachainShardId] = keyPairs + + params := &cryptoParams{ + keys: keysMap, + keyGen: keyGen, + singleSigner: singleSigner, + } + + return params } func pubKeysMapFromKeysMap(keyPairMap map[uint32][]*keyPair) map[uint32][]string { - keysMap := make(map[uint32][]string, 0) - - for shardId, pairList := range keyPairMap { - shardKeys := make([]string, len(pairList)) - for i, pair := range pairList { - bytes, _ := pair.pk.ToByteArray() - shardKeys[i] = string(bytes) - } - keysMap[shardId] = shardKeys - } - - return keysMap + keysMap := make(map[uint32][]string, 0) + + for shardId, pairList := range keyPairMap { + shardKeys := make([]string, len(pairList)) + for i, pair := range pairList { + bytes, _ := pair.pk.ToByteArray() + shardKeys[i] = string(bytes) + } + keysMap[shardId] = shardKeys + } + + return keysMap } func createTestShardChain() *blockchain.BlockChain { - cfgCache := storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache} - badBlockCache, _ := storageUnit.NewCache(cfgCache.Type, cfgCache.Size, cfgCache.Shards) - blockChain, _ := blockchain.NewBlockChain( - badBlockCache, - ) - blockChain.GenesisHeader = &dataBlock.Header{} - genesisHeaderM, _ := testMarshalizer.Marshal(blockChain.GenesisHeader) + cfgCache := storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache} + badBlockCache, _ := storageUnit.NewCache(cfgCache.Type, cfgCache.Size, cfgCache.Shards) + blockChain, _ := blockchain.NewBlockChain( + badBlockCache, + ) + blockChain.GenesisHeader = &dataBlock.Header{} + genesisHeaderM, _ := testMarshalizer.Marshal(blockChain.GenesisHeader) - blockChain.SetGenesisHeaderHash(testHasher.Compute(string(genesisHeaderM))) + blockChain.SetGenesisHeaderHash(testHasher.Compute(string(genesisHeaderM))) - return blockChain + return blockChain } func createMemUnit() storage.Storer { - cache, _ := storageUnit.NewCache(storageUnit.LRUCache, 10, 1) - persist, _ := memorydb.New() + cache, _ := storageUnit.NewCache(storageUnit.LRUCache, 10, 1) + persist, _ := memorydb.New() - unit, _ := storageUnit.NewStorageUnit(cache, persist) - return unit + unit, _ := storageUnit.NewStorageUnit(cache, persist) + return unit } func createTestShardStore(numOfShards uint32) dataRetriever.StorageService { - store := dataRetriever.NewChainStorer() - store.AddStorer(dataRetriever.TransactionUnit, createMemUnit()) - store.AddStorer(dataRetriever.MiniBlockUnit, createMemUnit()) - store.AddStorer(dataRetriever.MetaBlockUnit, createMemUnit()) - store.AddStorer(dataRetriever.PeerChangesUnit, createMemUnit()) - store.AddStorer(dataRetriever.BlockHeaderUnit, createMemUnit()) - store.AddStorer(dataRetriever.UnsignedTransactionUnit, createMemUnit()) - store.AddStorer(dataRetriever.RewardTransactionUnit, createMemUnit()) - store.AddStorer(dataRetriever.MetaHdrNonceHashDataUnit, createMemUnit()) - - for i := uint32(0); i < numOfShards; i++ { - hdrNonceHashDataUnit := dataRetriever.ShardHdrNonceHashDataUnit + dataRetriever.UnitType(i) - store.AddStorer(hdrNonceHashDataUnit, createMemUnit()) - } - - return store + store := dataRetriever.NewChainStorer() + store.AddStorer(dataRetriever.TransactionUnit, createMemUnit()) + store.AddStorer(dataRetriever.MiniBlockUnit, createMemUnit()) + store.AddStorer(dataRetriever.MetaBlockUnit, createMemUnit()) + store.AddStorer(dataRetriever.PeerChangesUnit, createMemUnit()) + store.AddStorer(dataRetriever.BlockHeaderUnit, createMemUnit()) + store.AddStorer(dataRetriever.UnsignedTransactionUnit, createMemUnit()) + 
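// Pipeline sketch, assuming the helpers declared above in this file: the crypto
// params, the public-key map and the per-shard validator map are meant to be
// chained; the resulting map is the kind of input a nodes-coordinator-based
// setup (such as CreateNodesWithNodesCoordinator in the integration tests) works from.
func exampleValidatorsMap() map[uint32][]sharding.Validator {
	params := createCryptoParams(4, 4, 1) // 4 nodes per shard, 4 meta nodes, 1 shard
	pubKeysMap := pubKeysMapFromKeysMap(params.keys)
	return genValidatorsFromPubKeys(pubKeysMap)
}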
store.AddStorer(dataRetriever.RewardTransactionUnit, createMemUnit()) + store.AddStorer(dataRetriever.MetaHdrNonceHashDataUnit, createMemUnit()) + + for i := uint32(0); i < numOfShards; i++ { + hdrNonceHashDataUnit := dataRetriever.ShardHdrNonceHashDataUnit + dataRetriever.UnitType(i) + store.AddStorer(hdrNonceHashDataUnit, createMemUnit()) + } + + return store } func createTestShardDataPool() dataRetriever.PoolsHolder { - txPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache}) - uTxPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache}) - rewardsTxPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache}) - cacherCfg := storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache} - hdrPool, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - - cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache} - hdrNoncesCacher, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - hdrNonces, _ := dataPool.NewNonceSyncMapCacher(hdrNoncesCacher, uint64ByteSlice.NewBigEndianConverter()) - - cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache} - txBlockBody, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - - cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache} - peerChangeBlockBody, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - - cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache} - metaBlocks, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - - dPool, _ := dataPool.NewShardedDataPool( - txPool, - uTxPool, - rewardsTxPool, - hdrPool, - hdrNonces, - txBlockBody, - peerChangeBlockBody, - metaBlocks, - ) - - return dPool + txPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache}) + uTxPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache}) + rewardsTxPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache}) + cacherCfg := storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache} + hdrPool, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) + + cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache} + hdrNoncesCacher, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) + hdrNonces, _ := dataPool.NewNonceSyncMapCacher(hdrNoncesCacher, uint64ByteSlice.NewBigEndianConverter()) + + cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache} + txBlockBody, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) + + cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache} + peerChangeBlockBody, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) + + cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache} + metaBlocks, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) + + dPool, _ := dataPool.NewShardedDataPool( + txPool, + uTxPool, + rewardsTxPool, + hdrPool, + hdrNonces, + txBlockBody, + peerChangeBlockBody, + metaBlocks, + ) + + return dPool } func createAccountsDB() *state.AccountsDB { - hasher := sha256.Sha256{} - store := createMemUnit() - - tr, _ := trie.NewTrie(store, testMarshalizer, 
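// Worked count for createTestShardStore above: eight fixed units are registered
// (transactions, mini blocks, meta blocks, peer changes, block headers,
// unsigned transactions, reward transactions, meta header nonce/hash) plus one
// ShardHdrNonceHashDataUnit per shard, so numOfShards = 4 yields 12 storers.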
hasher) - adb, _ := state.NewAccountsDB(tr, sha256.Sha256{}, testMarshalizer, &mock.AccountsFactoryStub{ - CreateAccountCalled: func(address state.AddressContainer, tracker state.AccountTracker) (wrapper state.AccountHandler, e error) { - return state.NewAccount(address, tracker) - }, - }) - return adb + hasher := sha256.Sha256{} + store := createMemUnit() + + tr, _ := trie.NewTrie(store, testMarshalizer, hasher) + adb, _ := state.NewAccountsDB(tr, sha256.Sha256{}, testMarshalizer, &mock.AccountsFactoryStub{ + CreateAccountCalled: func(address state.AddressContainer, tracker state.AccountTracker) (wrapper state.AccountHandler, e error) { + return state.NewAccount(address, tracker) + }, + }) + return adb } func createNetNode( - dPool dataRetriever.PoolsHolder, - accntAdapter state.AccountsAdapter, - shardCoordinator sharding.Coordinator, - nodesCoordinator sharding.NodesCoordinator, - targetShardId uint32, - initialAddr string, - params *cryptoParams, - keysIndex int, + dPool dataRetriever.PoolsHolder, + accntAdapter state.AccountsAdapter, + shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, + targetShardId uint32, + initialAddr string, + params *cryptoParams, + keysIndex int, ) ( - *node.Node, - p2p.Messenger, - dataRetriever.ResolversFinder, - process.BlockProcessor, - process.TransactionProcessor, - process.TransactionCoordinator, - process.IntermediateTransactionHandler, - data.ChainHandler, - dataRetriever.StorageService) { - - messenger := createMessengerWithKadDht(context.Background(), initialAddr) - keyPair := params.keys[targetShardId][keysIndex] - pkBuff, _ := keyPair.pk.ToByteArray() - fmt.Printf("pk: %s\n", hex.EncodeToString(pkBuff)) - - blkc := createTestShardChain() - store := createTestShardStore(shardCoordinator.NumberOfShards()) - uint64Converter := uint64ByteSlice.NewBigEndianConverter() - dataPacker, _ := partitioning.NewSizeDataPacker(testMarshalizer) - - interceptorContainerFactory, _ := shard.NewInterceptorsContainerFactory( - shardCoordinator, - nodesCoordinator, - messenger, - store, - testMarshalizer, - testHasher, - params.keyGen, - params.singleSigner, - testMultiSig, - dPool, - testAddressConverter, - ) - interceptorsContainer, err := interceptorContainerFactory.Create() - if err != nil { - fmt.Println(err.Error()) - } - - resolversContainerFactory, _ := factoryDataRetriever.NewResolversContainerFactory( - shardCoordinator, - messenger, - store, - testMarshalizer, - dPool, - uint64Converter, - dataPacker, - ) - resolversContainer, _ := resolversContainerFactory.Create() - resolversFinder, _ := containers.NewResolversFinder(resolversContainer, shardCoordinator) - requestHandler, _ := requestHandlers.NewShardResolverRequestHandler( - resolversFinder, - factory.TransactionTopic, - factory.UnsignedTransactionTopic, - factory.RewardsTransactionTopic, - factory.MiniBlocksTopic, - factory.MetachainBlocksTopic, - 100, - ) - - interimProcFactory, _ := shard.NewIntermediateProcessorsContainerFactory( - shardCoordinator, - testMarshalizer, - testHasher, - testAddressConverter, - &mock.SpecialAddressHandlerMock{ - ShardCoordinator: shardCoordinator, - AdrConv: testAddressConverter, - }, - store, - dPool, - ) - interimProcContainer, _ := interimProcFactory.Create() - scForwarder, _ := interimProcContainer.Get(dataBlock.SmartContractResultBlock) - rewardsInter, _ := interimProcContainer.Get(dataBlock.RewardsBlock) - rewardsHandler, _ := rewardsInter.(process.TransactionFeeHandler) - rewardProcessor, _ := 
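// Call sketch (illustrative arguments; shardCoordinator, nodesCoordinator and the
// seed address are assumed to be built elsewhere in the test setup): createNetNode
// returns the node together with the components the tests exercise directly.
func exampleCreateNetNode(
	shardCoordinator sharding.Coordinator,
	nodesCoordinator sharding.NodesCoordinator,
	seedAddress string,
) {
	params := createCryptoParams(4, 4, 1)
	n, messenger, resFinder, blkProc, txProc, txCoord, scrForwarder, blkc, store := createNetNode(
		createTestShardDataPool(),
		createAccountsDB(),
		shardCoordinator,
		nodesCoordinator,
		0,           // targetShardId
		seedAddress, // initial connectable address
		params,
		0, // keysIndex into params.keys[targetShardId]
	)
	_, _, _, _, _, _, _, _, _ = n, messenger, resFinder, blkProc, txProc, txCoord, scrForwarder, blkc, store
}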
rewardTransaction.NewRewardTxProcessor( - accntAdapter, - addrConv, - shardCoordinator, - rewardsInter, - ) - vm, blockChainHook := createVMAndBlockchainHook(accntAdapter) - vmContainer := &mock.VMContainerMock{ - GetCalled: func(key []byte) (handler vmcommon.VMExecutionHandler, e error) { - return vm, nil - }} - argsParser, _ := smartContract.NewAtArgumentParser() - scProcessor, _ := smartContract.NewSmartContractProcessor( - vmContainer, - argsParser, - testHasher, - testMarshalizer, - accntAdapter, - blockChainHook, - addrConv, - shardCoordinator, - scForwarder, - rewardsHandler, - ) - - txTypeHandler, _ := coordinator.NewTxTypeHandler(addrConv, shardCoordinator, accntAdapter) - - txProcessor, _ := transaction.NewTxProcessor( - accntAdapter, - testHasher, - testAddressConverter, - testMarshalizer, - shardCoordinator, - scProcessor, - rewardsHandler, - txTypeHandler, - ) - - fact, _ := shard.NewPreProcessorsContainerFactory( - shardCoordinator, - store, - testMarshalizer, - testHasher, - dPool, - testAddressConverter, - accntAdapter, - requestHandler, - txProcessor, - scProcessor, - scProcessor, - rewardProcessor, - ) - container, _ := fact.Create() - - tc, _ := coordinator.NewTransactionCoordinator( - shardCoordinator, - accntAdapter, - dPool, - requestHandler, - container, - interimProcContainer, - ) - - genesisBlocks := createGenesisBlocks(shardCoordinator) - blockProcessor, _ := block.NewShardProcessor( - &mock.ServiceContainerMock{}, - dPool, - store, - testHasher, - testMarshalizer, - accntAdapter, - shardCoordinator, - nodesCoordinator, - &mock.SpecialAddressHandlerMock{ - ShardCoordinator: shardCoordinator, - AdrConv: testAddressConverter, - }, - &mock.ForkDetectorMock{ - AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeader data.HeaderHandler, finalHeaderHash []byte) error { - return nil - }, - GetHighestFinalBlockNonceCalled: func() uint64 { - return 0 - }, - ProbableHighestNonceCalled: func() uint64 { - return 0 - }, - }, - &mock.BlocksTrackerMock{ - AddBlockCalled: func(headerHandler data.HeaderHandler) { - }, - RemoveNotarisedBlocksCalled: func(headerHandler data.HeaderHandler) error { - return nil - }, - UnnotarisedBlocksCalled: func() []data.HeaderHandler { - return make([]data.HeaderHandler, 0) - }, - }, - genesisBlocks, - requestHandler, - tc, - uint64Converter, - ) - - _ = blkc.SetGenesisHeader(genesisBlocks[shardCoordinator.SelfId()]) - - n, err := node.NewNode( - node.WithMessenger(messenger), - node.WithMarshalizer(testMarshalizer), - node.WithHasher(testHasher), - node.WithDataPool(dPool), - node.WithAddressConverter(testAddressConverter), - node.WithAccountsAdapter(accntAdapter), - node.WithKeyGen(params.keyGen), - node.WithShardCoordinator(shardCoordinator), - node.WithBlockChain(blkc), - node.WithUint64ByteSliceConverter(uint64Converter), - node.WithMultiSigner(testMultiSig), - node.WithSingleSigner(params.singleSigner), - node.WithTxSignPrivKey(keyPair.sk), - node.WithTxSignPubKey(keyPair.pk), - node.WithInterceptorsContainer(interceptorsContainer), - node.WithResolversFinder(resolversFinder), - node.WithBlockProcessor(blockProcessor), - node.WithDataStore(store), - node.WithSyncer(&mock.SyncTimerMock{}), - ) - - if err != nil { - fmt.Println(err.Error()) - } - - return n, messenger, resolversFinder, blockProcessor, txProcessor, tc, scForwarder, blkc, store + *node.Node, + p2p.Messenger, + dataRetriever.ResolversFinder, + process.BlockProcessor, + process.TransactionProcessor, + process.TransactionCoordinator, 
+ process.IntermediateTransactionHandler, + data.ChainHandler, + dataRetriever.StorageService) { + + messenger := createMessengerWithKadDht(context.Background(), initialAddr) + keyPair := params.keys[targetShardId][keysIndex] + pkBuff, _ := keyPair.pk.ToByteArray() + fmt.Printf("pk: %s\n", hex.EncodeToString(pkBuff)) + + blkc := createTestShardChain() + store := createTestShardStore(shardCoordinator.NumberOfShards()) + uint64Converter := uint64ByteSlice.NewBigEndianConverter() + dataPacker, _ := partitioning.NewSizeDataPacker(testMarshalizer) + + interceptorContainerFactory, _ := shard.NewInterceptorsContainerFactory( + shardCoordinator, + nodesCoordinator, + messenger, + store, + testMarshalizer, + testHasher, + params.keyGen, + params.singleSigner, + testMultiSig, + dPool, + testAddressConverter, + ) + interceptorsContainer, err := interceptorContainerFactory.Create() + if err != nil { + fmt.Println(err.Error()) + } + + resolversContainerFactory, _ := factoryDataRetriever.NewResolversContainerFactory( + shardCoordinator, + messenger, + store, + testMarshalizer, + dPool, + uint64Converter, + dataPacker, + ) + resolversContainer, _ := resolversContainerFactory.Create() + resolversFinder, _ := containers.NewResolversFinder(resolversContainer, shardCoordinator) + requestHandler, _ := requestHandlers.NewShardResolverRequestHandler( + resolversFinder, + factory.TransactionTopic, + factory.UnsignedTransactionTopic, + factory.RewardsTransactionTopic, + factory.MiniBlocksTopic, + factory.MetachainBlocksTopic, + 100, + ) + + interimProcFactory, _ := shard.NewIntermediateProcessorsContainerFactory( + shardCoordinator, + testMarshalizer, + testHasher, + testAddressConverter, + &mock.SpecialAddressHandlerMock{ + ShardCoordinator: shardCoordinator, + AdrConv: testAddressConverter, + }, + store, + dPool, + ) + interimProcContainer, _ := interimProcFactory.Create() + scForwarder, _ := interimProcContainer.Get(dataBlock.SmartContractResultBlock) + rewardsInter, _ := interimProcContainer.Get(dataBlock.RewardsBlock) + rewardsHandler, _ := rewardsInter.(process.TransactionFeeHandler) + rewardProcessor, _ := rewardTransaction.NewRewardTxProcessor( + accntAdapter, + addrConv, + shardCoordinator, + rewardsInter, + ) + vm, blockChainHook := createVMAndBlockchainHook(accntAdapter) + vmContainer := &mock.VMContainerMock{ + GetCalled: func(key []byte) (handler vmcommon.VMExecutionHandler, e error) { + return vm, nil + }} + argsParser, _ := smartContract.NewAtArgumentParser() + scProcessor, _ := smartContract.NewSmartContractProcessor( + vmContainer, + argsParser, + testHasher, + testMarshalizer, + accntAdapter, + blockChainHook, + addrConv, + shardCoordinator, + scForwarder, + rewardsHandler, + ) + + txTypeHandler, _ := coordinator.NewTxTypeHandler(addrConv, shardCoordinator, accntAdapter) + + txProcessor, _ := transaction.NewTxProcessor( + accntAdapter, + testHasher, + testAddressConverter, + testMarshalizer, + shardCoordinator, + scProcessor, + rewardsHandler, + txTypeHandler, + ) + + fact, _ := shard.NewPreProcessorsContainerFactory( + shardCoordinator, + store, + testMarshalizer, + testHasher, + dPool, + testAddressConverter, + accntAdapter, + requestHandler, + txProcessor, + scProcessor, + scProcessor, + rewardProcessor, + ) + container, _ := fact.Create() + + tc, _ := coordinator.NewTransactionCoordinator( + shardCoordinator, + accntAdapter, + dPool, + requestHandler, + container, + interimProcContainer, + ) + + genesisBlocks := createGenesisBlocks(shardCoordinator) + blockProcessor, _ := 
block.NewShardProcessor( + &mock.ServiceContainerMock{}, + dPool, + store, + testHasher, + testMarshalizer, + accntAdapter, + shardCoordinator, + nodesCoordinator, + &mock.SpecialAddressHandlerMock{ + ShardCoordinator: shardCoordinator, + AdrConv: testAddressConverter, + }, + &mock.ForkDetectorMock{ + AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeader data.HeaderHandler, finalHeaderHash []byte) error { + return nil + }, + GetHighestFinalBlockNonceCalled: func() uint64 { + return 0 + }, + ProbableHighestNonceCalled: func() uint64 { + return 0 + }, + }, + &mock.BlocksTrackerMock{ + AddBlockCalled: func(headerHandler data.HeaderHandler) { + }, + RemoveNotarisedBlocksCalled: func(headerHandler data.HeaderHandler) error { + return nil + }, + UnnotarisedBlocksCalled: func() []data.HeaderHandler { + return make([]data.HeaderHandler, 0) + }, + }, + genesisBlocks, + requestHandler, + tc, + uint64Converter, + ) + + _ = blkc.SetGenesisHeader(genesisBlocks[shardCoordinator.SelfId()]) + + n, err := node.NewNode( + node.WithMessenger(messenger), + node.WithMarshalizer(testMarshalizer), + node.WithHasher(testHasher), + node.WithDataPool(dPool), + node.WithAddressConverter(testAddressConverter), + node.WithAccountsAdapter(accntAdapter), + node.WithKeyGen(params.keyGen), + node.WithShardCoordinator(shardCoordinator), + node.WithBlockChain(blkc), + node.WithUint64ByteSliceConverter(uint64Converter), + node.WithMultiSigner(testMultiSig), + node.WithSingleSigner(params.singleSigner), + node.WithTxSignPrivKey(keyPair.sk), + node.WithTxSignPubKey(keyPair.pk), + node.WithInterceptorsContainer(interceptorsContainer), + node.WithResolversFinder(resolversFinder), + node.WithBlockProcessor(blockProcessor), + node.WithDataStore(store), + node.WithSyncer(&mock.SyncTimerMock{}), + ) + + if err != nil { + fmt.Println(err.Error()) + } + + return n, messenger, resolversFinder, blockProcessor, txProcessor, tc, scForwarder, blkc, store } func createMessengerWithKadDht(ctx context.Context, initialAddr string) p2p.Messenger { - prvKey, _ := ecdsa.GenerateKey(btcec.S256(), r) - sk := (*libp2pCrypto.Secp256k1PrivateKey)(prvKey) - - libP2PMes, err := libp2p.NewNetworkMessengerOnFreePort( - ctx, - sk, - nil, - loadBalancer.NewOutgoingChannelLoadBalancer(), - discovery.NewKadDhtPeerDiscoverer(time.Second, "test", []string{initialAddr}), - ) - if err != nil { - fmt.Println(err.Error()) - } - - return libP2PMes + prvKey, _ := ecdsa.GenerateKey(btcec.S256(), r) + sk := (*libp2pCrypto.Secp256k1PrivateKey)(prvKey) + + libP2PMes, err := libp2p.NewNetworkMessengerOnFreePort( + ctx, + sk, + nil, + loadBalancer.NewOutgoingChannelLoadBalancer(), + discovery.NewKadDhtPeerDiscoverer(time.Second, "test", []string{initialAddr}), + ) + if err != nil { + fmt.Println(err.Error()) + } + + return libP2PMes } func getConnectableAddress(mes p2p.Messenger) string { - for _, addr := range mes.Addresses() { - if strings.Contains(addr, "circuit") || strings.Contains(addr, "169.254") { - continue - } - return addr - } - return "" + for _, addr := range mes.Addresses() { + if strings.Contains(addr, "circuit") || strings.Contains(addr, "169.254") { + continue + } + return addr + } + return "" } func displayAndStartNodes(nodes map[uint32][]*testNode) { - for _, nodeList := range nodes { - for _, n := range nodeList { - skBuff, _ := n.sk.ToByteArray() - pkBuff, _ := n.pk.ToByteArray() - - fmt.Printf("Shard ID: %v, sk: %s, pk: %s\n", - n.shardId, - hex.EncodeToString(skBuff), - hex.EncodeToString(pkBuff), - 
) - _ = n.node.Start() - _ = n.node.P2PBootstrap() - } - } + for _, nodeList := range nodes { + for _, n := range nodeList { + skBuff, _ := n.sk.ToByteArray() + pkBuff, _ := n.pk.ToByteArray() + + fmt.Printf("Shard ID: %v, sk: %s, pk: %s\n", + n.shardId, + hex.EncodeToString(skBuff), + hex.EncodeToString(pkBuff), + ) + _ = n.node.Start() + _ = n.node.P2PBootstrap() + } + } } func createNodes( - numOfShards int, - nodesPerShard int, - serviceID string, + numOfShards int, + nodesPerShard int, + serviceID string, ) map[uint32][]*testNode { - //first node generated will have is pk belonging to firstSkShardId - numMetaChainNodes := 1 - nodes := make(map[uint32][]*testNode) - cp := createCryptoParams(nodesPerShard, numMetaChainNodes, numOfShards) - keysMap := pubKeysMapFromKeysMap(cp.keys) - validatorsMap := genValidatorsFromPubKeys(keysMap) - - for shardId := 0; shardId < numOfShards; shardId++ { - shardNodes := make([]*testNode, nodesPerShard) - - for j := 0; j < nodesPerShard; j++ { - testNode := &testNode{ - dPool: createTestShardDataPool(), - shardId: uint32(shardId), - } - - shardCoordinator, _ := sharding.NewMultiShardCoordinator(uint32(numOfShards), uint32(shardId)) - nodesCoordinator, _ := sharding.NewIndexHashedNodesCoordinator( - 1, - 1, - testHasher, - uint32(shardId), - uint32(numOfShards), - validatorsMap, - ) - - accntAdapter := createAccountsDB() - n, mes, resFinder, blkProcessor, txProcessor, transactionCoordinator, scrForwarder, blkc, store := createNetNode( - testNode.dPool, - accntAdapter, - shardCoordinator, - nodesCoordinator, - testNode.shardId, - serviceID, - cp, - j, - ) - _ = n.CreateShardedStores() - - KeyPair := cp.keys[uint32(shardId)][j] - testNode.node = n - testNode.sk = KeyPair.sk - testNode.messenger = mes - testNode.pk = KeyPair.pk - testNode.resFinder = resFinder - testNode.accntState = accntAdapter - testNode.blkProcessor = blkProcessor - testNode.txProcessor = txProcessor - testNode.scrForwarder = scrForwarder - testNode.blkc = blkc - testNode.store = store - testNode.txCoordinator = transactionCoordinator - testNode.dPool.Headers().RegisterHandler(func(key []byte) { - atomic.AddInt32(&testNode.headersRecv, 1) - testNode.mutHeaders.Lock() - testNode.headersHashes = append(testNode.headersHashes, key) - header, _ := testNode.dPool.Headers().Peek(key) - testNode.headers = append(testNode.headers, header.(data.HeaderHandler)) - testNode.mutHeaders.Unlock() - }) - testNode.dPool.MiniBlocks().RegisterHandler(func(key []byte) { - atomic.AddInt32(&testNode.miniblocksRecv, 1) - testNode.mutMiniblocks.Lock() - testNode.miniblocksHashes = append(testNode.miniblocksHashes, key) - miniblock, _ := testNode.dPool.MiniBlocks().Peek(key) - testNode.miniblocks = append(testNode.miniblocks, miniblock.(*dataBlock.MiniBlock)) - testNode.mutMiniblocks.Unlock() - }) - testNode.dPool.MetaBlocks().RegisterHandler(func(key []byte) { - fmt.Printf("Got metachain header: %v\n", base64.StdEncoding.EncodeToString(key)) - atomic.AddInt32(&testNode.metachainHdrRecv, 1) - }) - testNode.dPool.Transactions().RegisterHandler(func(key []byte) { - atomic.AddInt32(&testNode.txsRecv, 1) - }) - testNode.broadcastMessenger, _ = sposFactory.GetBroadcastMessenger( - testMarshalizer, - mes, - shardCoordinator, - KeyPair.sk, - &singlesig.SchnorrSigner{}, - ) - - shardNodes[j] = testNode - } - - nodes[uint32(shardId)] = shardNodes - } - - metaNodes := make([]*testNode, numMetaChainNodes) - for i := 0; i < numMetaChainNodes; i++ { - shardCoordinatorMeta, _ := 
sharding.NewMultiShardCoordinator(uint32(numOfShards), sharding.MetachainShardId) - nodesCoordinator, _ := sharding.NewIndexHashedNodesCoordinator( - 1, - 1, - testHasher, - sharding.MetachainShardId, - uint32(numOfShards), - validatorsMap, - ) - - metaNodes[i] = createMetaNetNode( - createTestMetaDataPool(), - createAccountsDB(), - shardCoordinatorMeta, - nodesCoordinator, - serviceID, - cp, - i, - ) - } - - nodes[sharding.MetachainShardId] = metaNodes - - return nodes + //first node generated will have is pk belonging to firstSkShardId + numMetaChainNodes := 1 + nodes := make(map[uint32][]*testNode) + cp := createCryptoParams(nodesPerShard, numMetaChainNodes, numOfShards) + keysMap := pubKeysMapFromKeysMap(cp.keys) + validatorsMap := genValidatorsFromPubKeys(keysMap) + + for shardId := 0; shardId < numOfShards; shardId++ { + shardNodes := make([]*testNode, nodesPerShard) + + for j := 0; j < nodesPerShard; j++ { + testNode := &testNode{ + dPool: createTestShardDataPool(), + shardId: uint32(shardId), + } + + shardCoordinator, _ := sharding.NewMultiShardCoordinator(uint32(numOfShards), uint32(shardId)) + nodesCoordinator, _ := sharding.NewIndexHashedNodesCoordinator( + 1, + 1, + testHasher, + uint32(shardId), + uint32(numOfShards), + validatorsMap, + ) + + accntAdapter := createAccountsDB() + n, mes, resFinder, blkProcessor, txProcessor, transactionCoordinator, scrForwarder, blkc, store := createNetNode( + testNode.dPool, + accntAdapter, + shardCoordinator, + nodesCoordinator, + testNode.shardId, + serviceID, + cp, + j, + ) + _ = n.CreateShardedStores() + + KeyPair := cp.keys[uint32(shardId)][j] + testNode.node = n + testNode.sk = KeyPair.sk + testNode.messenger = mes + testNode.pk = KeyPair.pk + testNode.resFinder = resFinder + testNode.accntState = accntAdapter + testNode.blkProcessor = blkProcessor + testNode.txProcessor = txProcessor + testNode.scrForwarder = scrForwarder + testNode.blkc = blkc + testNode.store = store + testNode.txCoordinator = transactionCoordinator + testNode.dPool.Headers().RegisterHandler(func(key []byte) { + atomic.AddInt32(&testNode.headersRecv, 1) + testNode.mutHeaders.Lock() + testNode.headersHashes = append(testNode.headersHashes, key) + header, _ := testNode.dPool.Headers().Peek(key) + testNode.headers = append(testNode.headers, header.(data.HeaderHandler)) + testNode.mutHeaders.Unlock() + }) + testNode.dPool.MiniBlocks().RegisterHandler(func(key []byte) { + atomic.AddInt32(&testNode.miniblocksRecv, 1) + testNode.mutMiniblocks.Lock() + testNode.miniblocksHashes = append(testNode.miniblocksHashes, key) + miniblock, _ := testNode.dPool.MiniBlocks().Peek(key) + testNode.miniblocks = append(testNode.miniblocks, miniblock.(*dataBlock.MiniBlock)) + testNode.mutMiniblocks.Unlock() + }) + testNode.dPool.MetaBlocks().RegisterHandler(func(key []byte) { + fmt.Printf("Got metachain header: %v\n", base64.StdEncoding.EncodeToString(key)) + atomic.AddInt32(&testNode.metachainHdrRecv, 1) + }) + testNode.dPool.Transactions().RegisterHandler(func(key []byte) { + atomic.AddInt32(&testNode.txsRecv, 1) + }) + testNode.broadcastMessenger, _ = sposFactory.GetBroadcastMessenger( + testMarshalizer, + mes, + shardCoordinator, + KeyPair.sk, + &singlesig.SchnorrSigner{}, + ) + + shardNodes[j] = testNode + } + + nodes[uint32(shardId)] = shardNodes + } + + metaNodes := make([]*testNode, numMetaChainNodes) + for i := 0; i < numMetaChainNodes; i++ { + shardCoordinatorMeta, _ := sharding.NewMultiShardCoordinator(uint32(numOfShards), sharding.MetachainShardId) + nodesCoordinator, _ := 
sharding.NewIndexHashedNodesCoordinator( + 1, + 1, + testHasher, + sharding.MetachainShardId, + uint32(numOfShards), + validatorsMap, + ) + + metaNodes[i] = createMetaNetNode( + createTestMetaDataPool(), + createAccountsDB(), + shardCoordinatorMeta, + nodesCoordinator, + serviceID, + cp, + i, + ) + } + + nodes[sharding.MetachainShardId] = metaNodes + + return nodes } func createTestMetaChain() data.ChainHandler { - cfgCache := storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache} - badBlockCache, _ := storageUnit.NewCache(cfgCache.Type, cfgCache.Size, cfgCache.Shards) - metaChain, _ := blockchain.NewMetaChain( - badBlockCache, - ) - metaChain.GenesisBlock = &dataBlock.MetaBlock{} - - return metaChain + cfgCache := storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache} + badBlockCache, _ := storageUnit.NewCache(cfgCache.Type, cfgCache.Size, cfgCache.Shards) + metaChain, _ := blockchain.NewMetaChain( + badBlockCache, + ) + metaChain.GenesisBlock = &dataBlock.MetaBlock{} + + return metaChain } func createTestMetaStore(coordinator sharding.Coordinator) dataRetriever.StorageService { - store := dataRetriever.NewChainStorer() - store.AddStorer(dataRetriever.MetaBlockUnit, createMemUnit()) - store.AddStorer(dataRetriever.MetaHdrNonceHashDataUnit, createMemUnit()) - store.AddStorer(dataRetriever.BlockHeaderUnit, createMemUnit()) - for i := uint32(0); i < coordinator.NumberOfShards(); i++ { - store.AddStorer(dataRetriever.ShardHdrNonceHashDataUnit+dataRetriever.UnitType(i), createMemUnit()) - } - - return store + store := dataRetriever.NewChainStorer() + store.AddStorer(dataRetriever.MetaBlockUnit, createMemUnit()) + store.AddStorer(dataRetriever.MetaHdrNonceHashDataUnit, createMemUnit()) + store.AddStorer(dataRetriever.BlockHeaderUnit, createMemUnit()) + for i := uint32(0); i < coordinator.NumberOfShards(); i++ { + store.AddStorer(dataRetriever.ShardHdrNonceHashDataUnit+dataRetriever.UnitType(i), createMemUnit()) + } + + return store } func createTestMetaDataPool() dataRetriever.MetaPoolsHolder { - cacherCfg := storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache} - metaBlocks, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) + cacherCfg := storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache} + metaBlocks, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - cacherCfg = storageUnit.CacheConfig{Size: 10000, Type: storageUnit.LRUCache} - miniblockHashes, _ := shardedData.NewShardedData(cacherCfg) + cacherCfg = storageUnit.CacheConfig{Size: 10000, Type: storageUnit.LRUCache} + miniblockHashes, _ := shardedData.NewShardedData(cacherCfg) - cacherCfg = storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache} - shardHeaders, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) + cacherCfg = storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache} + shardHeaders, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - headersNoncesCacher, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - headersNonces, _ := dataPool.NewNonceSyncMapCacher(headersNoncesCacher, uint64ByteSlice.NewBigEndianConverter()) + headersNoncesCacher, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) + headersNonces, _ := dataPool.NewNonceSyncMapCacher(headersNoncesCacher, uint64ByteSlice.NewBigEndianConverter()) - dPool, _ := dataPool.NewMetaDataPool( - metaBlocks, - miniblockHashes, - shardHeaders, - headersNonces, - ) + dPool, _ := 
dataPool.NewMetaDataPool( + metaBlocks, + miniblockHashes, + shardHeaders, + headersNonces, + ) - return dPool + return dPool } func createMetaNetNode( - dPool dataRetriever.MetaPoolsHolder, - accntAdapter state.AccountsAdapter, - shardCoordinator sharding.Coordinator, - nodesCoordinator sharding.NodesCoordinator, - initialAddr string, - params *cryptoParams, - keysIndex int, + dPool dataRetriever.MetaPoolsHolder, + accntAdapter state.AccountsAdapter, + shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, + initialAddr string, + params *cryptoParams, + keysIndex int, ) *testNode { - tn := testNode{} - - tn.messenger = createMessengerWithKadDht(context.Background(), initialAddr) - keyPair := params.keys[sharding.MetachainShardId][keysIndex] - pkBuff, _ := keyPair.pk.ToByteArray() - fmt.Printf("Found pk: %s\n", hex.EncodeToString(pkBuff)) - - tn.blkc = createTestMetaChain() - store := createTestMetaStore(shardCoordinator) - uint64Converter := uint64ByteSlice.NewBigEndianConverter() - - interceptorContainerFactory, _ := metaProcess.NewInterceptorsContainerFactory( - shardCoordinator, - nodesCoordinator, - tn.messenger, - store, - testMarshalizer, - testHasher, - testMultiSig, - dPool, - ) - interceptorsContainer, err := interceptorContainerFactory.Create() - if err != nil { - fmt.Println(err.Error()) - } - - resolversContainerFactory, _ := metafactoryDataRetriever.NewResolversContainerFactory( - shardCoordinator, - tn.messenger, - store, - testMarshalizer, - dPool, - uint64Converter, - ) - resolversContainer, _ := resolversContainerFactory.Create() - resolvers, _ := containers.NewResolversFinder(resolversContainer, shardCoordinator) - - requestHandler, _ := requestHandlers.NewMetaResolverRequestHandler(resolvers, factory.ShardHeadersForMetachainTopic) - - genesisBlocks := createGenesisBlocks(shardCoordinator) - blkProc, _ := block.NewMetaProcessor( - &mock.ServiceContainerMock{}, - accntAdapter, - dPool, - &mock.ForkDetectorMock{ - AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeader data.HeaderHandler, finalHeaderHash []byte) error { - return nil - }, - GetHighestFinalBlockNonceCalled: func() uint64 { - return 0 - }, - ProbableHighestNonceCalled: func() uint64 { - return 0 - }, - }, - shardCoordinator, - nodesCoordinator, - &mock.SpecialAddressHandlerMock{ - ShardCoordinator: shardCoordinator, - AdrConv: testAddressConverter, - }, - testHasher, - testMarshalizer, - store, - genesisBlocks, - requestHandler, - uint64Converter, - ) - - _ = tn.blkc.SetGenesisHeader(genesisBlocks[sharding.MetachainShardId]) - - tn.blkProcessor = blkProc - - tn.broadcastMessenger, _ = sposFactory.GetBroadcastMessenger( - testMarshalizer, - tn.messenger, - shardCoordinator, - keyPair.sk, - params.singleSigner, - ) - - n, err := node.NewNode( - node.WithMessenger(tn.messenger), - node.WithMarshalizer(testMarshalizer), - node.WithHasher(testHasher), - node.WithMetaDataPool(dPool), - node.WithAddressConverter(testAddressConverter), - node.WithAccountsAdapter(accntAdapter), - node.WithKeyGen(params.keyGen), - node.WithShardCoordinator(shardCoordinator), - node.WithBlockChain(tn.blkc), - node.WithUint64ByteSliceConverter(uint64Converter), - node.WithMultiSigner(testMultiSig), - node.WithSingleSigner(params.singleSigner), - node.WithPrivKey(keyPair.sk), - node.WithPubKey(keyPair.pk), - node.WithInterceptorsContainer(interceptorsContainer), - node.WithResolversFinder(resolvers), - node.WithBlockProcessor(tn.blkProcessor), - 
node.WithDataStore(store), - node.WithSyncer(&mock.SyncTimerMock{}), - ) - if err != nil { - fmt.Println(err.Error()) - return nil - } - - tn.node = n - tn.sk = keyPair.sk - tn.pk = keyPair.pk - tn.accntState = accntAdapter - tn.shardId = sharding.MetachainShardId - - dPool.MetaChainBlocks().RegisterHandler(func(key []byte) { - atomic.AddInt32(&tn.metachainHdrRecv, 1) - }) - dPool.ShardHeaders().RegisterHandler(func(key []byte) { - atomic.AddInt32(&tn.headersRecv, 1) - tn.mutHeaders.Lock() - metaHeader, _ := dPool.ShardHeaders().Peek(key) - tn.headers = append(tn.headers, metaHeader.(data.HeaderHandler)) - tn.mutHeaders.Unlock() - }) - - return &tn + tn := testNode{} + + tn.messenger = createMessengerWithKadDht(context.Background(), initialAddr) + keyPair := params.keys[sharding.MetachainShardId][keysIndex] + pkBuff, _ := keyPair.pk.ToByteArray() + fmt.Printf("Found pk: %s\n", hex.EncodeToString(pkBuff)) + + tn.blkc = createTestMetaChain() + store := createTestMetaStore(shardCoordinator) + uint64Converter := uint64ByteSlice.NewBigEndianConverter() + + interceptorContainerFactory, _ := metaProcess.NewInterceptorsContainerFactory( + shardCoordinator, + nodesCoordinator, + tn.messenger, + store, + testMarshalizer, + testHasher, + testMultiSig, + dPool, + ) + interceptorsContainer, err := interceptorContainerFactory.Create() + if err != nil { + fmt.Println(err.Error()) + } + + resolversContainerFactory, _ := metafactoryDataRetriever.NewResolversContainerFactory( + shardCoordinator, + tn.messenger, + store, + testMarshalizer, + dPool, + uint64Converter, + ) + resolversContainer, _ := resolversContainerFactory.Create() + resolvers, _ := containers.NewResolversFinder(resolversContainer, shardCoordinator) + + requestHandler, _ := requestHandlers.NewMetaResolverRequestHandler(resolvers, factory.ShardHeadersForMetachainTopic) + + genesisBlocks := createGenesisBlocks(shardCoordinator) + blkProc, _ := block.NewMetaProcessor( + &mock.ServiceContainerMock{}, + accntAdapter, + dPool, + &mock.ForkDetectorMock{ + AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeader data.HeaderHandler, finalHeaderHash []byte) error { + return nil + }, + GetHighestFinalBlockNonceCalled: func() uint64 { + return 0 + }, + ProbableHighestNonceCalled: func() uint64 { + return 0 + }, + }, + shardCoordinator, + nodesCoordinator, + &mock.SpecialAddressHandlerMock{ + ShardCoordinator: shardCoordinator, + AdrConv: testAddressConverter, + }, + testHasher, + testMarshalizer, + store, + genesisBlocks, + requestHandler, + uint64Converter, + ) + + _ = tn.blkc.SetGenesisHeader(genesisBlocks[sharding.MetachainShardId]) + + tn.blkProcessor = blkProc + + tn.broadcastMessenger, _ = sposFactory.GetBroadcastMessenger( + testMarshalizer, + tn.messenger, + shardCoordinator, + keyPair.sk, + params.singleSigner, + ) + + n, err := node.NewNode( + node.WithMessenger(tn.messenger), + node.WithMarshalizer(testMarshalizer), + node.WithHasher(testHasher), + node.WithMetaDataPool(dPool), + node.WithAddressConverter(testAddressConverter), + node.WithAccountsAdapter(accntAdapter), + node.WithKeyGen(params.keyGen), + node.WithShardCoordinator(shardCoordinator), + node.WithBlockChain(tn.blkc), + node.WithUint64ByteSliceConverter(uint64Converter), + node.WithMultiSigner(testMultiSig), + node.WithSingleSigner(params.singleSigner), + node.WithPrivKey(keyPair.sk), + node.WithPubKey(keyPair.pk), + node.WithInterceptorsContainer(interceptorsContainer), + node.WithResolversFinder(resolvers), + 
node.WithBlockProcessor(tn.blkProcessor), + node.WithDataStore(store), + node.WithSyncer(&mock.SyncTimerMock{}), + ) + if err != nil { + fmt.Println(err.Error()) + return nil + } + + tn.node = n + tn.sk = keyPair.sk + tn.pk = keyPair.pk + tn.accntState = accntAdapter + tn.shardId = sharding.MetachainShardId + + dPool.MetaChainBlocks().RegisterHandler(func(key []byte) { + atomic.AddInt32(&tn.metachainHdrRecv, 1) + }) + dPool.ShardHeaders().RegisterHandler(func(key []byte) { + atomic.AddInt32(&tn.headersRecv, 1) + tn.mutHeaders.Lock() + metaHeader, _ := dPool.ShardHeaders().Peek(key) + tn.headers = append(tn.headers, metaHeader.(data.HeaderHandler)) + tn.mutHeaders.Unlock() + }) + + return &tn } func createGenesisBlocks(shardCoordinator sharding.Coordinator) map[uint32]data.HeaderHandler { - genesisBlocks := make(map[uint32]data.HeaderHandler) - for shardId := uint32(0); shardId < shardCoordinator.NumberOfShards(); shardId++ { - genesisBlocks[shardId] = createGenesisBlock(shardId) - } + genesisBlocks := make(map[uint32]data.HeaderHandler) + for shardId := uint32(0); shardId < shardCoordinator.NumberOfShards(); shardId++ { + genesisBlocks[shardId] = createGenesisBlock(shardId) + } - genesisBlocks[sharding.MetachainShardId] = createGenesisMetaBlock() + genesisBlocks[sharding.MetachainShardId] = createGenesisMetaBlock() - return genesisBlocks + return genesisBlocks } func createGenesisBlock(shardId uint32) *dataBlock.Header { - return &dataBlock.Header{ - Nonce: 0, - Round: 0, - Signature: rootHash, - RandSeed: rootHash, - PrevRandSeed: rootHash, - ShardId: shardId, - PubKeysBitmap: rootHash, - RootHash: rootHash, - PrevHash: rootHash, - } + return &dataBlock.Header{ + Nonce: 0, + Round: 0, + Signature: rootHash, + RandSeed: rootHash, + PrevRandSeed: rootHash, + ShardId: shardId, + PubKeysBitmap: rootHash, + RootHash: rootHash, + PrevHash: rootHash, + } } func createGenesisMetaBlock() *dataBlock.MetaBlock { - return &dataBlock.MetaBlock{ - Nonce: 0, - Round: 0, - Signature: rootHash, - RandSeed: rootHash, - PrevRandSeed: rootHash, - PubKeysBitmap: rootHash, - RootHash: rootHash, - PrevHash: rootHash, - } + return &dataBlock.MetaBlock{ + Nonce: 0, + Round: 0, + Signature: rootHash, + RandSeed: rootHash, + PrevRandSeed: rootHash, + PubKeysBitmap: rootHash, + RootHash: rootHash, + PrevHash: rootHash, + } } func createMintingForSenders( - nodes []*testNode, - senderShard uint32, - sendersPublicKeys [][]byte, - value *big.Int, + nodes []*testNode, + senderShard uint32, + sendersPublicKeys [][]byte, + value *big.Int, ) { - for _, n := range nodes { - //only sender shard nodes will be minted - if n.shardId != senderShard { - continue - } + for _, n := range nodes { + //only sender shard nodes will be minted + if n.shardId != senderShard { + continue + } - for _, pk := range sendersPublicKeys { - adr, _ := testAddressConverter.CreateAddressFromPublicKeyBytes(pk) - account, _ := n.accntState.GetAccountWithJournal(adr) - _ = account.(*state.Account).SetBalanceWithJournal(value) - } + for _, pk := range sendersPublicKeys { + adr, _ := testAddressConverter.CreateAddressFromPublicKeyBytes(pk) + account, _ := n.accntState.GetAccountWithJournal(adr) + _ = account.(*state.Account).SetBalanceWithJournal(value) + } - _, _ = n.accntState.Commit() - } + _, _ = n.accntState.Commit() + } } func createVMAndBlockchainHook(accnts state.AccountsAdapter) (vmcommon.VMExecutionHandler, *hooks.VMAccountsDB) { - blockChainHook, _ := hooks.NewVMAccountsDB(accnts, addrConv) - vm, _ := 
mock.NewOneSCExecutorMockVM(blockChainHook, testHasher) - vm.GasForOperation = uint64(opGas) + blockChainHook, _ := hooks.NewVMAccountsDB(accnts, addrConv) + vm, _ := mock.NewOneSCExecutorMockVM(blockChainHook, testHasher) + vm.GasForOperation = uint64(opGas) - return vm, blockChainHook + return vm, blockChainHook } diff --git a/integrationTests/multiShard/transaction/interceptedResolvedBulkTx_test.go b/integrationTests/multiShard/transaction/interceptedResolvedBulkTx_test.go index ec3afcd6772..7e2b1cbacf2 100644 --- a/integrationTests/multiShard/transaction/interceptedResolvedBulkTx_test.go +++ b/integrationTests/multiShard/transaction/interceptedResolvedBulkTx_test.go @@ -1,21 +1,21 @@ package transaction import ( - "context" - "encoding/hex" - "fmt" - "math/big" - "sync" - "sync/atomic" - "testing" - "time" - - "github.com/ElrondNetwork/elrond-go/dataRetriever" - "github.com/ElrondNetwork/elrond-go/dataRetriever/resolvers" - "github.com/ElrondNetwork/elrond-go/integrationTests" - "github.com/ElrondNetwork/elrond-go/process/factory" - "github.com/ElrondNetwork/elrond-go/sharding" - "github.com/stretchr/testify/assert" + "context" + "encoding/hex" + "fmt" + "math/big" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/dataRetriever/resolvers" + "github.com/ElrondNetwork/elrond-go/integrationTests" + "github.com/ElrondNetwork/elrond-go/process/factory" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/stretchr/testify/assert" ) // TestNode_InterceptorBulkTxsSentFromSameShardShouldRemainInSenderShard tests what happens when @@ -23,66 +23,66 @@ import ( // Node 0 is part of the shard 0 and its public key is mapped also in shard 0. // Transactions should spread only in shard 0. func TestNode_InterceptorBulkTxsSentFromSameShardShouldRemainInSenderShard(t *testing.T) { - if testing.Short() { - t.Skip("this is not a short test") - } - - numOfShards := 6 - nodesPerShard := 3 - numMetachainNodes := 0 - - advertiser := integrationTests.CreateMessengerWithKadDht(context.Background(), "") - _ = advertiser.Bootstrap() - - nodes := integrationTests.CreateNodes( - numOfShards, - nodesPerShard, - numMetachainNodes, - integrationTests.GetConnectableAddress(advertiser), - ) - integrationTests.CreateAccountForNodes(nodes) - integrationTests.DisplayAndStartNodes(nodes) - - defer func() { - _ = advertiser.Close() - for _, n := range nodes { - _ = n.Node.Stop() - } - }() - - txToSend := 100 - - generateCoordinator, _ := sharding.NewMultiShardCoordinator(uint32(numOfShards), 5) - - fmt.Println("Generating and broadcasting transactions...") - _, pkInShardFive, _ := integrationTests.GenerateSkAndPkInShard(generateCoordinator, 5) - pkBytes, _ := pkInShardFive.ToByteArray() - addrInShardFive := hex.EncodeToString(pkBytes) - _ = nodes[0].Node.GenerateAndSendBulkTransactions(addrInShardFive, big.NewInt(1), uint64(txToSend)) - time.Sleep(time.Second * 10) - - //since there is a slight chance that some transactions get lost (peer to slow, queue full, validators throttling...) 
- //we should get the max transactions received - maxTxReceived := int32(0) - for _, n := range nodes { - txRecv := atomic.LoadInt32(&n.CounterTxRecv) - - if txRecv > maxTxReceived { - maxTxReceived = txRecv - } - } - - assert.True(t, maxTxReceived > 0) - - //only sender shard (all 3 nodes from shard 0) have the transactions - for _, n := range nodes { - if n.ShardCoordinator.SelfId() == 0 { - assert.Equal(t, maxTxReceived, atomic.LoadInt32(&n.CounterTxRecv)) - continue - } - - assert.Equal(t, int32(0), atomic.LoadInt32(&n.CounterTxRecv)) - } + if testing.Short() { + t.Skip("this is not a short test") + } + + numOfShards := 6 + nodesPerShard := 3 + numMetachainNodes := 0 + + advertiser := integrationTests.CreateMessengerWithKadDht(context.Background(), "") + _ = advertiser.Bootstrap() + + nodes := integrationTests.CreateNodes( + numOfShards, + nodesPerShard, + numMetachainNodes, + integrationTests.GetConnectableAddress(advertiser), + ) + integrationTests.CreateAccountForNodes(nodes) + integrationTests.DisplayAndStartNodes(nodes) + + defer func() { + _ = advertiser.Close() + for _, n := range nodes { + _ = n.Node.Stop() + } + }() + + txToSend := 100 + + generateCoordinator, _ := sharding.NewMultiShardCoordinator(uint32(numOfShards), 5) + + fmt.Println("Generating and broadcasting transactions...") + _, pkInShardFive, _ := integrationTests.GenerateSkAndPkInShard(generateCoordinator, 5) + pkBytes, _ := pkInShardFive.ToByteArray() + addrInShardFive := hex.EncodeToString(pkBytes) + _ = nodes[0].Node.GenerateAndSendBulkTransactions(addrInShardFive, big.NewInt(1), uint64(txToSend)) + time.Sleep(time.Second * 10) + + //since there is a slight chance that some transactions get lost (peer to slow, queue full, validators throttling...) + //we should get the max transactions received + maxTxReceived := int32(0) + for _, n := range nodes { + txRecv := atomic.LoadInt32(&n.CounterTxRecv) + + if txRecv > maxTxReceived { + maxTxReceived = txRecv + } + } + + assert.True(t, maxTxReceived > 0) + + //only sender shard (all 3 nodes from shard 0) have the transactions + for _, n := range nodes { + if n.ShardCoordinator.SelfId() == 0 { + assert.Equal(t, maxTxReceived, atomic.LoadInt32(&n.CounterTxRecv)) + continue + } + + assert.Equal(t, int32(0), atomic.LoadInt32(&n.CounterTxRecv)) + } } // TestNode_InterceptorBulkTxsSentFromOtherShardShouldBeRoutedInSenderShard tests what happens when @@ -90,74 +90,74 @@ func TestNode_InterceptorBulkTxsSentFromSameShardShouldRemainInSenderShard(t *te // Node 0 is part of the shard 0 and its public key is mapped in shard 4. // Transactions should spread only in shard 4. 
func TestNode_InterceptorBulkTxsSentFromOtherShardShouldBeRoutedInSenderShard(t *testing.T) { - if testing.Short() { - t.Skip("this is not a short test") - } - - numOfShards := 6 - nodesPerShard := 3 - numMetachainNodes := 0 - firstSkInShard := uint32(4) - - advertiser := integrationTests.CreateMessengerWithKadDht(context.Background(), "") - _ = advertiser.Bootstrap() - - nodes := integrationTests.CreateNodes( - numOfShards, - nodesPerShard, - numMetachainNodes, - integrationTests.GetConnectableAddress(advertiser), - ) - nodes[0] = integrationTests.NewTestProcessorNode(uint32(numOfShards), 0, firstSkInShard, integrationTests.GetConnectableAddress(advertiser)) - integrationTests.CreateAccountForNodes(nodes) - integrationTests.DisplayAndStartNodes(nodes) - - defer func() { - _ = advertiser.Close() - for _, n := range nodes { - _ = n.Node.Stop() - } - }() - - txToSend := 100 - - generateCoordinator, _ := sharding.NewMultiShardCoordinator(uint32(numOfShards), 5) - - _, pkInShardFive, _ := integrationTests.GenerateSkAndPkInShard(generateCoordinator, 5) - pkBytes, _ := pkInShardFive.ToByteArray() - addrInShardFive := hex.EncodeToString(pkBytes) - - _ = nodes[0].Node.GenerateAndSendBulkTransactions(addrInShardFive, big.NewInt(1), uint64(txToSend)) - - //display, can be removed - for i := 0; i < 10; i++ { - time.Sleep(time.Second) - - fmt.Println(integrationTests.MakeDisplayTable(nodes)) - } - - //since there is a slight chance that some transactions get lost (peer to slow, queue full...) - //we should get the max transactions received - maxTxReceived := int32(0) - for _, n := range nodes { - txRecv := atomic.LoadInt32(&n.CounterTxRecv) - - if txRecv > maxTxReceived { - maxTxReceived = txRecv - } - } - - assert.True(t, maxTxReceived > 0) - - //only sender shard (all 3 nodes from shard firstSkInShard) has the transactions - for _, n := range nodes { - if n.ShardCoordinator.SelfId() == firstSkInShard { - assert.Equal(t, atomic.LoadInt32(&n.CounterTxRecv), maxTxReceived) - continue - } - - assert.Equal(t, atomic.LoadInt32(&n.CounterTxRecv), int32(0)) - } + if testing.Short() { + t.Skip("this is not a short test") + } + + numOfShards := 6 + nodesPerShard := 3 + numMetachainNodes := 0 + firstSkInShard := uint32(4) + + advertiser := integrationTests.CreateMessengerWithKadDht(context.Background(), "") + _ = advertiser.Bootstrap() + + nodes := integrationTests.CreateNodes( + numOfShards, + nodesPerShard, + numMetachainNodes, + integrationTests.GetConnectableAddress(advertiser), + ) + nodes[0] = integrationTests.NewTestProcessorNode(uint32(numOfShards), 0, firstSkInShard, integrationTests.GetConnectableAddress(advertiser)) + integrationTests.CreateAccountForNodes(nodes) + integrationTests.DisplayAndStartNodes(nodes) + + defer func() { + _ = advertiser.Close() + for _, n := range nodes { + _ = n.Node.Stop() + } + }() + + txToSend := 100 + + generateCoordinator, _ := sharding.NewMultiShardCoordinator(uint32(numOfShards), 5) + + _, pkInShardFive, _ := integrationTests.GenerateSkAndPkInShard(generateCoordinator, 5) + pkBytes, _ := pkInShardFive.ToByteArray() + addrInShardFive := hex.EncodeToString(pkBytes) + + _ = nodes[0].Node.GenerateAndSendBulkTransactions(addrInShardFive, big.NewInt(1), uint64(txToSend)) + + //display, can be removed + for i := 0; i < 10; i++ { + time.Sleep(time.Second) + + fmt.Println(integrationTests.MakeDisplayTable(nodes)) + } + + //since there is a slight chance that some transactions get lost (peer to slow, queue full...) 
+ //we should get the max transactions received + maxTxReceived := int32(0) + for _, n := range nodes { + txRecv := atomic.LoadInt32(&n.CounterTxRecv) + + if txRecv > maxTxReceived { + maxTxReceived = txRecv + } + } + + assert.True(t, maxTxReceived > 0) + + //only sender shard (all 3 nodes from shard firstSkInShard) has the transactions + for _, n := range nodes { + if n.ShardCoordinator.SelfId() == firstSkInShard { + assert.Equal(t, atomic.LoadInt32(&n.CounterTxRecv), maxTxReceived) + continue + } + + assert.Equal(t, atomic.LoadInt32(&n.CounterTxRecv), int32(0)) + } } // TestNode_InterceptorBulkTxsSentFromOtherShardShouldBeRoutedInSenderShardAndRequestShouldWork tests what happens when @@ -169,183 +169,183 @@ func TestNode_InterceptorBulkTxsSentFromOtherShardShouldBeRoutedInSenderShard(t // Transactions requested by another shard (2 for example) will not store the received transactions // (interceptors will filter them out) func TestNode_InterceptorBulkTxsSentFromOtherShardShouldBeRoutedInSenderShardAndRequestShouldWork(t *testing.T) { - if testing.Short() { - t.Skip("this is not a short test") - } - - numOfShards := 6 - nodesPerShard := 3 - numMetachainNodes := 0 - firstSkInShard := uint32(4) - - advertiser := integrationTests.CreateMessengerWithKadDht(context.Background(), "") - _ = advertiser.Bootstrap() - - nodes := integrationTests.CreateNodes( - numOfShards, - nodesPerShard, - numMetachainNodes, - integrationTests.GetConnectableAddress(advertiser), - ) - nodes[0] = integrationTests.NewTestProcessorNode(uint32(numOfShards), 0, firstSkInShard, integrationTests.GetConnectableAddress(advertiser)) - integrationTests.CreateAccountForNodes(nodes) - integrationTests.DisplayAndStartNodes(nodes) - - defer func() { - _ = advertiser.Close() - for _, n := range nodes { - _ = n.Node.Stop() - } - }() - - txToSend := 100 - - shardRequester := uint32(5) - randomShard := uint32(2) - - generateCoordinator, _ := sharding.NewMultiShardCoordinator(uint32(numOfShards), shardRequester) - - _, pkInShardFive, _ := integrationTests.GenerateSkAndPkInShard(generateCoordinator, 5) - pkBytes, _ := pkInShardFive.ToByteArray() - addrInShardFive := hex.EncodeToString(pkBytes) - - mutGeneratedTxHashes := sync.Mutex{} - generatedTxHashes := make([][]byte, 0) - //wire a new hook for generated txs on a node in sender shard to populate tx hashes generated - for _, n := range nodes { - if n.ShardCoordinator.SelfId() == firstSkInShard { - n.ShardDataPool.Transactions().RegisterHandler(func(key []byte) { - mutGeneratedTxHashes.Lock() - generatedTxHashes = append(generatedTxHashes, key) - mutGeneratedTxHashes.Unlock() - }) - } - } - - _ = nodes[0].Node.GenerateAndSendBulkTransactions(addrInShardFive, big.NewInt(1), uint64(txToSend)) - - fmt.Println("Waiting for senders to fetch generated transactions...") - time.Sleep(time.Second * 10) - - fmt.Println("Request transactions by destination shard nodes...") - //periodically compute and request missing transactions - for i := 0; i < 10; i++ { - integrationTests.ComputeAndRequestMissingTransactions(nodes, generatedTxHashes, firstSkInShard, shardRequester, randomShard) - time.Sleep(time.Second) - - fmt.Println(integrationTests.MakeDisplayTable(nodes)) - } - - //since there is a slight chance that some transactions get lost (peer to slow, queue full...) 
- //we should get the max transactions received - maxTxReceived := int32(0) - for _, n := range nodes { - txRecv := atomic.LoadInt32(&n.CounterTxRecv) - - if txRecv > maxTxReceived { - maxTxReceived = txRecv - } - } - - assert.True(t, maxTxReceived > 0) - - //only sender and destination shards have the transactions - for _, n := range nodes { - isSenderOrDestinationShard := n.ShardCoordinator.SelfId() == firstSkInShard || n.ShardCoordinator.SelfId() == shardRequester - - if isSenderOrDestinationShard { - assert.Equal(t, atomic.LoadInt32(&n.CounterTxRecv), maxTxReceived) - continue - } - - assert.Equal(t, atomic.LoadInt32(&n.CounterTxRecv), int32(0)) - } + if testing.Short() { + t.Skip("this is not a short test") + } + + numOfShards := 6 + nodesPerShard := 3 + numMetachainNodes := 0 + firstSkInShard := uint32(4) + + advertiser := integrationTests.CreateMessengerWithKadDht(context.Background(), "") + _ = advertiser.Bootstrap() + + nodes := integrationTests.CreateNodes( + numOfShards, + nodesPerShard, + numMetachainNodes, + integrationTests.GetConnectableAddress(advertiser), + ) + nodes[0] = integrationTests.NewTestProcessorNode(uint32(numOfShards), 0, firstSkInShard, integrationTests.GetConnectableAddress(advertiser)) + integrationTests.CreateAccountForNodes(nodes) + integrationTests.DisplayAndStartNodes(nodes) + + defer func() { + _ = advertiser.Close() + for _, n := range nodes { + _ = n.Node.Stop() + } + }() + + txToSend := 100 + + shardRequester := uint32(5) + randomShard := uint32(2) + + generateCoordinator, _ := sharding.NewMultiShardCoordinator(uint32(numOfShards), shardRequester) + + _, pkInShardFive, _ := integrationTests.GenerateSkAndPkInShard(generateCoordinator, 5) + pkBytes, _ := pkInShardFive.ToByteArray() + addrInShardFive := hex.EncodeToString(pkBytes) + + mutGeneratedTxHashes := sync.Mutex{} + generatedTxHashes := make([][]byte, 0) + //wire a new hook for generated txs on a node in sender shard to populate tx hashes generated + for _, n := range nodes { + if n.ShardCoordinator.SelfId() == firstSkInShard { + n.ShardDataPool.Transactions().RegisterHandler(func(key []byte) { + mutGeneratedTxHashes.Lock() + generatedTxHashes = append(generatedTxHashes, key) + mutGeneratedTxHashes.Unlock() + }) + } + } + + _ = nodes[0].Node.GenerateAndSendBulkTransactions(addrInShardFive, big.NewInt(1), uint64(txToSend)) + + fmt.Println("Waiting for senders to fetch generated transactions...") + time.Sleep(time.Second * 10) + + fmt.Println("Request transactions by destination shard nodes...") + //periodically compute and request missing transactions + for i := 0; i < 10; i++ { + integrationTests.ComputeAndRequestMissingTransactions(nodes, generatedTxHashes, firstSkInShard, shardRequester, randomShard) + time.Sleep(time.Second) + + fmt.Println(integrationTests.MakeDisplayTable(nodes)) + } + + //since there is a slight chance that some transactions get lost (peer to slow, queue full...) 
+ //we should get the max transactions received + maxTxReceived := int32(0) + for _, n := range nodes { + txRecv := atomic.LoadInt32(&n.CounterTxRecv) + + if txRecv > maxTxReceived { + maxTxReceived = txRecv + } + } + + assert.True(t, maxTxReceived > 0) + + //only sender and destination shards have the transactions + for _, n := range nodes { + isSenderOrDestinationShard := n.ShardCoordinator.SelfId() == firstSkInShard || n.ShardCoordinator.SelfId() == shardRequester + + if isSenderOrDestinationShard { + assert.Equal(t, atomic.LoadInt32(&n.CounterTxRecv), maxTxReceived) + continue + } + + assert.Equal(t, atomic.LoadInt32(&n.CounterTxRecv), int32(0)) + } } func TestNode_InMultiShardEnvRequestTxsShouldRequireOnlyFromTheOtherShard(t *testing.T) { - if testing.Short() { - t.Skip("this is not a short test") - } - - advertiser := integrationTests.CreateMessengerWithKadDht(context.Background(), "") - _ = advertiser.Bootstrap() - - nodes := make([]*integrationTests.TestProcessorNode, 0) - maxShards := 2 - nodesPerShard := 2 - txGenerated := 10 - - defer func() { - _ = advertiser.Close() - for _, n := range nodes { - _ = n.Node.Stop() - } - }() - - //shard 0, requesters - recvTxs := make(map[int]map[string]struct{}) - mutRecvTxs := sync.Mutex{} - for i := 0; i < nodesPerShard; i++ { - dPool := integrationTests.CreateRequesterDataPool(t, recvTxs, &mutRecvTxs, i, uint32(maxShards)) - - tn := integrationTests.NewTestProcessorNodeWithCustomDataPool( - uint32(maxShards), - 0, - 0, - integrationTests.GetConnectableAddress(advertiser), - dPool, - ) - - nodes = append(nodes, tn) - } - - var txHashesGenerated [][]byte - var dPool dataRetriever.PoolsHolder - shardCoordinator, _ := sharding.NewMultiShardCoordinator(uint32(maxShards), 0) - dPool, txHashesGenerated = integrationTests.CreateResolversDataPool(t, txGenerated, 0, 1, shardCoordinator) - //shard 1, resolvers, same data pool, does not matter - for i := 0; i < nodesPerShard; i++ { - tn := integrationTests.NewTestProcessorNodeWithCustomDataPool( - uint32(maxShards), - 1, - 1, - integrationTests.GetConnectableAddress(advertiser), - dPool, - ) - - atomic.StoreInt32(&tn.CounterTxRecv, int32(txGenerated)) - - nodes = append(nodes, tn) - } - - integrationTests.DisplayAndStartNodes(nodes) - fmt.Println("Delaying for node bootstrap and topic announcement...") - time.Sleep(time.Second * 5) - - fmt.Println(integrationTests.MakeDisplayTable(nodes)) - - fmt.Println("Request nodes start asking the data...") - reqShardCoordinator, _ := sharding.NewMultiShardCoordinator(uint32(maxShards), 0) - for i := 0; i < nodesPerShard; i++ { - resolver, _ := nodes[i].ResolverFinder.Get(factory.TransactionTopic + reqShardCoordinator.CommunicationIdentifier(1)) - txResolver, ok := resolver.(*resolvers.TxResolver) - assert.True(t, ok) - - _ = txResolver.RequestDataFromHashArray(txHashesGenerated) - } - - time.Sleep(time.Second * 5) - mutRecvTxs.Lock() - defer mutRecvTxs.Unlock() - for i := 0; i < nodesPerShard; i++ { - mapTx := recvTxs[i] - assert.NotNil(t, mapTx) - - txsReceived := len(recvTxs[i]) - assert.Equal(t, txGenerated, txsReceived) - - atomic.StoreInt32(&nodes[i].CounterTxRecv, int32(txsReceived)) - } - - fmt.Println(integrationTests.MakeDisplayTable(nodes)) + if testing.Short() { + t.Skip("this is not a short test") + } + + advertiser := integrationTests.CreateMessengerWithKadDht(context.Background(), "") + _ = advertiser.Bootstrap() + + nodes := make([]*integrationTests.TestProcessorNode, 0) + maxShards := 2 + nodesPerShard := 2 + txGenerated := 10 + + defer func() { + 
_ = advertiser.Close() + for _, n := range nodes { + _ = n.Node.Stop() + } + }() + + //shard 0, requesters + recvTxs := make(map[int]map[string]struct{}) + mutRecvTxs := sync.Mutex{} + for i := 0; i < nodesPerShard; i++ { + dPool := integrationTests.CreateRequesterDataPool(t, recvTxs, &mutRecvTxs, i, uint32(maxShards)) + + tn := integrationTests.NewTestProcessorNodeWithCustomDataPool( + uint32(maxShards), + 0, + 0, + integrationTests.GetConnectableAddress(advertiser), + dPool, + ) + + nodes = append(nodes, tn) + } + + var txHashesGenerated [][]byte + var dPool dataRetriever.PoolsHolder + shardCoordinator, _ := sharding.NewMultiShardCoordinator(uint32(maxShards), 0) + dPool, txHashesGenerated = integrationTests.CreateResolversDataPool(t, txGenerated, 0, 1, shardCoordinator) + //shard 1, resolvers, same data pool, does not matter + for i := 0; i < nodesPerShard; i++ { + tn := integrationTests.NewTestProcessorNodeWithCustomDataPool( + uint32(maxShards), + 1, + 1, + integrationTests.GetConnectableAddress(advertiser), + dPool, + ) + + atomic.StoreInt32(&tn.CounterTxRecv, int32(txGenerated)) + + nodes = append(nodes, tn) + } + + integrationTests.DisplayAndStartNodes(nodes) + fmt.Println("Delaying for node bootstrap and topic announcement...") + time.Sleep(time.Second * 5) + + fmt.Println(integrationTests.MakeDisplayTable(nodes)) + + fmt.Println("Request nodes start asking the data...") + reqShardCoordinator, _ := sharding.NewMultiShardCoordinator(uint32(maxShards), 0) + for i := 0; i < nodesPerShard; i++ { + resolver, _ := nodes[i].ResolverFinder.Get(factory.TransactionTopic + reqShardCoordinator.CommunicationIdentifier(1)) + txResolver, ok := resolver.(*resolvers.TxResolver) + assert.True(t, ok) + + _ = txResolver.RequestDataFromHashArray(txHashesGenerated) + } + + time.Sleep(time.Second * 5) + mutRecvTxs.Lock() + defer mutRecvTxs.Unlock() + for i := 0; i < nodesPerShard; i++ { + mapTx := recvTxs[i] + assert.NotNil(t, mapTx) + + txsReceived := len(recvTxs[i]) + assert.Equal(t, txGenerated, txsReceived) + + atomic.StoreInt32(&nodes[i].CounterTxRecv, int32(txsReceived)) + } + + fmt.Println(integrationTests.MakeDisplayTable(nodes)) } diff --git a/integrationTests/singleShard/transaction/interceptedResolvedTx_test.go b/integrationTests/singleShard/transaction/interceptedResolvedTx_test.go index 962589ef327..64e527f794c 100644 --- a/integrationTests/singleShard/transaction/interceptedResolvedTx_test.go +++ b/integrationTests/singleShard/transaction/interceptedResolvedTx_test.go @@ -1,182 +1,182 @@ package transaction import ( - "fmt" - "github.com/ElrondNetwork/elrond-go/data/rewardTx" - "math/big" - "reflect" - "testing" - "time" - - "github.com/ElrondNetwork/elrond-go/crypto/signing/kyber/singlesig" - "github.com/ElrondNetwork/elrond-go/data/transaction" - "github.com/ElrondNetwork/elrond-go/integrationTests" - "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/process/factory" - "github.com/stretchr/testify/assert" + "fmt" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" + "math/big" + "reflect" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go/crypto/signing/kyber/singlesig" + "github.com/ElrondNetwork/elrond-go/data/transaction" + "github.com/ElrondNetwork/elrond-go/integrationTests" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/factory" + "github.com/stretchr/testify/assert" ) func TestNode_RequestInterceptTransactionWithMessenger(t *testing.T) { - if testing.Short() { - t.Skip("this is not a short 
test") - } - - var nrOfShards uint32 = 1 - var shardID uint32 = 0 - var txSignPrivKeyShardId uint32 = 0 - requesterNodeAddr := "0" - resolverNodeAddr := "1" - - fmt.Println("Requester: ") - nRequester := integrationTests.NewTestProcessorNode(nrOfShards, shardID, txSignPrivKeyShardId, requesterNodeAddr) - - fmt.Println("Resolver:") - nResolver := integrationTests.NewTestProcessorNode(nrOfShards, shardID, txSignPrivKeyShardId, resolverNodeAddr) - _ = nRequester.Node.Start() - _ = nResolver.Node.Start() - defer func() { - _ = nRequester.Node.Stop() - _ = nResolver.Node.Stop() - }() - - //connect messengers together - time.Sleep(time.Second) - err := nRequester.Messenger.ConnectToPeer(integrationTests.GetConnectableAddress(nResolver.Messenger)) - assert.Nil(t, err) - - time.Sleep(time.Second) - - buffPk1, _ := nRequester.OwnAccount.SkTxSign.GeneratePublic().ToByteArray() - - //Step 1. Generate a signed transaction - tx := transaction.Transaction{ - Nonce: 0, - Value: big.NewInt(0), - RcvAddr: integrationTests.TestHasher.Compute("receiver"), - SndAddr: buffPk1, - Data: "tx notarized data", - } - - txBuff, _ := integrationTests.TestMarshalizer.Marshal(&tx) - signer := &singlesig.SchnorrSigner{} - tx.Signature, _ = signer.Sign(nRequester.OwnAccount.SkTxSign, txBuff) - signedTxBuff, _ := integrationTests.TestMarshalizer.Marshal(&tx) - - fmt.Printf("Transaction: %v\n%v\n", tx, string(signedTxBuff)) - - chanDone := make(chan bool) - - txHash := integrationTests.TestHasher.Compute(string(signedTxBuff)) - - //step 2. wire up a received handler for requester - nRequester.ShardDataPool.Transactions().RegisterHandler(func(key []byte) { - txStored, _ := nRequester.ShardDataPool.Transactions().ShardDataStore( - process.ShardCacherIdentifier(nRequester.ShardCoordinator.SelfId(), nRequester.ShardCoordinator.SelfId()), - ).Get(key) - - if reflect.DeepEqual(txStored, &tx) && tx.Signature != nil { - chanDone <- true - } - - assert.Equal(t, txStored, &tx) - assert.Equal(t, txHash, key) - }) - - //Step 3. add the transaction in resolver pool - nResolver.ShardDataPool.Transactions().AddData( - txHash, - &tx, - process.ShardCacherIdentifier(nRequester.ShardCoordinator.SelfId(), nRequester.ShardCoordinator.SelfId()), - ) - - //Step 4. request tx - txResolver, _ := nRequester.ResolverFinder.IntraShardResolver(factory.TransactionTopic) - err = txResolver.RequestDataFromHash(txHash) - assert.Nil(t, err) - - select { - case <-chanDone: - case <-time.After(time.Second * 3): - assert.Fail(t, "timeout") - } + if testing.Short() { + t.Skip("this is not a short test") + } + + var nrOfShards uint32 = 1 + var shardID uint32 = 0 + var txSignPrivKeyShardId uint32 = 0 + requesterNodeAddr := "0" + resolverNodeAddr := "1" + + fmt.Println("Requester: ") + nRequester := integrationTests.NewTestProcessorNode(nrOfShards, shardID, txSignPrivKeyShardId, requesterNodeAddr) + + fmt.Println("Resolver:") + nResolver := integrationTests.NewTestProcessorNode(nrOfShards, shardID, txSignPrivKeyShardId, resolverNodeAddr) + _ = nRequester.Node.Start() + _ = nResolver.Node.Start() + defer func() { + _ = nRequester.Node.Stop() + _ = nResolver.Node.Stop() + }() + + //connect messengers together + time.Sleep(time.Second) + err := nRequester.Messenger.ConnectToPeer(integrationTests.GetConnectableAddress(nResolver.Messenger)) + assert.Nil(t, err) + + time.Sleep(time.Second) + + buffPk1, _ := nRequester.OwnAccount.SkTxSign.GeneratePublic().ToByteArray() + + //Step 1. 
Generate a signed transaction + tx := transaction.Transaction{ + Nonce: 0, + Value: big.NewInt(0), + RcvAddr: integrationTests.TestHasher.Compute("receiver"), + SndAddr: buffPk1, + Data: "tx notarized data", + } + + txBuff, _ := integrationTests.TestMarshalizer.Marshal(&tx) + signer := &singlesig.SchnorrSigner{} + tx.Signature, _ = signer.Sign(nRequester.OwnAccount.SkTxSign, txBuff) + signedTxBuff, _ := integrationTests.TestMarshalizer.Marshal(&tx) + + fmt.Printf("Transaction: %v\n%v\n", tx, string(signedTxBuff)) + + chanDone := make(chan bool) + + txHash := integrationTests.TestHasher.Compute(string(signedTxBuff)) + + //step 2. wire up a received handler for requester + nRequester.ShardDataPool.Transactions().RegisterHandler(func(key []byte) { + txStored, _ := nRequester.ShardDataPool.Transactions().ShardDataStore( + process.ShardCacherIdentifier(nRequester.ShardCoordinator.SelfId(), nRequester.ShardCoordinator.SelfId()), + ).Get(key) + + if reflect.DeepEqual(txStored, &tx) && tx.Signature != nil { + chanDone <- true + } + + assert.Equal(t, txStored, &tx) + assert.Equal(t, txHash, key) + }) + + //Step 3. add the transaction in resolver pool + nResolver.ShardDataPool.Transactions().AddData( + txHash, + &tx, + process.ShardCacherIdentifier(nRequester.ShardCoordinator.SelfId(), nRequester.ShardCoordinator.SelfId()), + ) + + //Step 4. request tx + txResolver, _ := nRequester.ResolverFinder.IntraShardResolver(factory.TransactionTopic) + err = txResolver.RequestDataFromHash(txHash) + assert.Nil(t, err) + + select { + case <-chanDone: + case <-time.After(time.Second * 3): + assert.Fail(t, "timeout") + } } func TestNode_RequestInterceptRewardTransactionWithMessenger(t *testing.T) { - if testing.Short() { - t.Skip("this is not a short test") - } - - var nrOfShards uint32 = 1 - var shardID uint32 = 0 - var txSignPrivKeyShardId uint32 = 0 - requesterNodeAddr := "0" - resolverNodeAddr := "1" - - fmt.Println("Requester: ") - nRequester := integrationTests.NewTestProcessorNode(nrOfShards, shardID, txSignPrivKeyShardId, requesterNodeAddr) - - fmt.Println("Resolver:") - nResolver := integrationTests.NewTestProcessorNode(nrOfShards, shardID, txSignPrivKeyShardId, resolverNodeAddr) - _ = nRequester.Node.Start() - _ = nResolver.Node.Start() - defer func() { - _ = nRequester.Node.Stop() - _ = nResolver.Node.Stop() - }() - - //connect messengers together - time.Sleep(time.Second) - err := nRequester.Messenger.ConnectToPeer(integrationTests.GetConnectableAddress(nResolver.Messenger)) - assert.Nil(t, err) - - time.Sleep(time.Second) - - //Step 1. Generate a signed transaction - tx := rewardTx.RewardTx{ - Value: big.NewInt(0), - RcvAddr: integrationTests.TestHasher.Compute("receiver"), - Round: 0, - Epoch: 0, - ShardId: 0, - } - - marshaledTxBuff, _ := integrationTests.TestMarshalizer.Marshal(&tx) - - fmt.Printf("Transaction: %v\n%v\n", tx, string(marshaledTxBuff)) - - chanDone := make(chan bool) - - txHash := integrationTests.TestHasher.Compute(string(marshaledTxBuff)) - - //step 2. wire up a received handler for requester - nRequester.ShardDataPool.RewardTransactions().RegisterHandler(func(key []byte) { - rewardTxStored, _ := nRequester.ShardDataPool.RewardTransactions().ShardDataStore( - process.ShardCacherIdentifier(nRequester.ShardCoordinator.SelfId(), nRequester.ShardCoordinator.SelfId()), - ).Get(key) - - if reflect.DeepEqual(rewardTxStored, &tx) { - chanDone <- true - } - - assert.Equal(t, rewardTxStored, &tx) - assert.Equal(t, txHash, key) - }) - - //Step 3. 
add the transaction in resolver pool - nResolver.ShardDataPool.RewardTransactions().AddData( - txHash, - &tx, - process.ShardCacherIdentifier(nRequester.ShardCoordinator.SelfId(), nRequester.ShardCoordinator.SelfId()), - ) - - //Step 4. request tx - rewardTxResolver, _ := nRequester.ResolverFinder.IntraShardResolver(factory.RewardsTransactionTopic) - err = rewardTxResolver.RequestDataFromHash(txHash) - assert.Nil(t, err) - - select { - case <-chanDone: - case <-time.After(time.Second * 3): - assert.Fail(t, "timeout") - } + if testing.Short() { + t.Skip("this is not a short test") + } + + var nrOfShards uint32 = 1 + var shardID uint32 = 0 + var txSignPrivKeyShardId uint32 = 0 + requesterNodeAddr := "0" + resolverNodeAddr := "1" + + fmt.Println("Requester: ") + nRequester := integrationTests.NewTestProcessorNode(nrOfShards, shardID, txSignPrivKeyShardId, requesterNodeAddr) + + fmt.Println("Resolver:") + nResolver := integrationTests.NewTestProcessorNode(nrOfShards, shardID, txSignPrivKeyShardId, resolverNodeAddr) + _ = nRequester.Node.Start() + _ = nResolver.Node.Start() + defer func() { + _ = nRequester.Node.Stop() + _ = nResolver.Node.Stop() + }() + + //connect messengers together + time.Sleep(time.Second) + err := nRequester.Messenger.ConnectToPeer(integrationTests.GetConnectableAddress(nResolver.Messenger)) + assert.Nil(t, err) + + time.Sleep(time.Second) + + //Step 1. Generate a signed transaction + tx := rewardTx.RewardTx{ + Value: big.NewInt(0), + RcvAddr: integrationTests.TestHasher.Compute("receiver"), + Round: 0, + Epoch: 0, + ShardId: 0, + } + + marshaledTxBuff, _ := integrationTests.TestMarshalizer.Marshal(&tx) + + fmt.Printf("Transaction: %v\n%v\n", tx, string(marshaledTxBuff)) + + chanDone := make(chan bool) + + txHash := integrationTests.TestHasher.Compute(string(marshaledTxBuff)) + + //step 2. wire up a received handler for requester + nRequester.ShardDataPool.RewardTransactions().RegisterHandler(func(key []byte) { + rewardTxStored, _ := nRequester.ShardDataPool.RewardTransactions().ShardDataStore( + process.ShardCacherIdentifier(nRequester.ShardCoordinator.SelfId(), nRequester.ShardCoordinator.SelfId()), + ).Get(key) + + if reflect.DeepEqual(rewardTxStored, &tx) { + chanDone <- true + } + + assert.Equal(t, rewardTxStored, &tx) + assert.Equal(t, txHash, key) + }) + + //Step 3. add the transaction in resolver pool + nResolver.ShardDataPool.RewardTransactions().AddData( + txHash, + &tx, + process.ShardCacherIdentifier(nRequester.ShardCoordinator.SelfId(), nRequester.ShardCoordinator.SelfId()), + ) + + //Step 4. 
request tx + rewardTxResolver, _ := nRequester.ResolverFinder.IntraShardResolver(factory.RewardsTransactionTopic) + err = rewardTxResolver.RequestDataFromHash(txHash) + assert.Nil(t, err) + + select { + case <-chanDone: + case <-time.After(time.Second * 3): + assert.Fail(t, "timeout") + } } diff --git a/integrationTests/testInitializer.go b/integrationTests/testInitializer.go index 1ff9e0c5b1b..a9da172b806 100644 --- a/integrationTests/testInitializer.go +++ b/integrationTests/testInitializer.go @@ -1,57 +1,57 @@ package integrationTests import ( - "context" - "crypto/ecdsa" - "crypto/rand" - "encoding/base64" - "encoding/binary" - "encoding/hex" - "fmt" - "math/big" - "strings" - "sync" - "sync/atomic" - "testing" - "time" - - "github.com/ElrondNetwork/elrond-go/crypto" - "github.com/ElrondNetwork/elrond-go/crypto/signing" - "github.com/ElrondNetwork/elrond-go/crypto/signing/kyber" - "github.com/ElrondNetwork/elrond-go/crypto/signing/kyber/singlesig" - "github.com/ElrondNetwork/elrond-go/data" - dataBlock "github.com/ElrondNetwork/elrond-go/data/block" - "github.com/ElrondNetwork/elrond-go/data/blockchain" - "github.com/ElrondNetwork/elrond-go/data/state" - "github.com/ElrondNetwork/elrond-go/data/state/factory" - "github.com/ElrondNetwork/elrond-go/data/transaction" - "github.com/ElrondNetwork/elrond-go/data/trie" - "github.com/ElrondNetwork/elrond-go/data/typeConverters/uint64ByteSlice" - "github.com/ElrondNetwork/elrond-go/dataRetriever" - "github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool" - "github.com/ElrondNetwork/elrond-go/dataRetriever/shardedData" - "github.com/ElrondNetwork/elrond-go/display" - "github.com/ElrondNetwork/elrond-go/hashing/sha256" - "github.com/ElrondNetwork/elrond-go/integrationTests/mock" - "github.com/ElrondNetwork/elrond-go/node" - "github.com/ElrondNetwork/elrond-go/p2p" - "github.com/ElrondNetwork/elrond-go/p2p/libp2p" - "github.com/ElrondNetwork/elrond-go/p2p/libp2p/discovery" - "github.com/ElrondNetwork/elrond-go/p2p/loadBalancer" - "github.com/ElrondNetwork/elrond-go/process" - procFactory "github.com/ElrondNetwork/elrond-go/process/factory" - "github.com/ElrondNetwork/elrond-go/process/smartContract/hooks" - txProc "github.com/ElrondNetwork/elrond-go/process/transaction" - "github.com/ElrondNetwork/elrond-go/sharding" - "github.com/ElrondNetwork/elrond-go/storage" - "github.com/ElrondNetwork/elrond-go/storage/memorydb" - "github.com/ElrondNetwork/elrond-go/storage/storageUnit" - vmcommon "github.com/ElrondNetwork/elrond-vm-common" - "github.com/ElrondNetwork/elrond-vm/iele/elrond/node/endpoint" - "github.com/btcsuite/btcd/btcec" - libp2pCrypto "github.com/libp2p/go-libp2p-core/crypto" - "github.com/pkg/errors" - "github.com/stretchr/testify/assert" + "context" + "crypto/ecdsa" + "crypto/rand" + "encoding/base64" + "encoding/binary" + "encoding/hex" + "fmt" + "math/big" + "strings" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go/crypto" + "github.com/ElrondNetwork/elrond-go/crypto/signing" + "github.com/ElrondNetwork/elrond-go/crypto/signing/kyber" + "github.com/ElrondNetwork/elrond-go/crypto/signing/kyber/singlesig" + "github.com/ElrondNetwork/elrond-go/data" + dataBlock "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/blockchain" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/data/state/factory" + "github.com/ElrondNetwork/elrond-go/data/transaction" + "github.com/ElrondNetwork/elrond-go/data/trie" + 
"github.com/ElrondNetwork/elrond-go/data/typeConverters/uint64ByteSlice" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool" + "github.com/ElrondNetwork/elrond-go/dataRetriever/shardedData" + "github.com/ElrondNetwork/elrond-go/display" + "github.com/ElrondNetwork/elrond-go/hashing/sha256" + "github.com/ElrondNetwork/elrond-go/integrationTests/mock" + "github.com/ElrondNetwork/elrond-go/node" + "github.com/ElrondNetwork/elrond-go/p2p" + "github.com/ElrondNetwork/elrond-go/p2p/libp2p" + "github.com/ElrondNetwork/elrond-go/p2p/libp2p/discovery" + "github.com/ElrondNetwork/elrond-go/p2p/loadBalancer" + "github.com/ElrondNetwork/elrond-go/process" + procFactory "github.com/ElrondNetwork/elrond-go/process/factory" + "github.com/ElrondNetwork/elrond-go/process/smartContract/hooks" + txProc "github.com/ElrondNetwork/elrond-go/process/transaction" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/storage" + "github.com/ElrondNetwork/elrond-go/storage/memorydb" + "github.com/ElrondNetwork/elrond-go/storage/storageUnit" + vmcommon "github.com/ElrondNetwork/elrond-vm-common" + "github.com/ElrondNetwork/elrond-vm/iele/elrond/node/endpoint" + "github.com/btcsuite/btcd/btcec" + libp2pCrypto "github.com/libp2p/go-libp2p-core/crypto" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" ) var stepDelay = time.Second @@ -59,1165 +59,1165 @@ var p2pBootstrapStepDelay = 5 * time.Second // GetConnectableAddress returns a non circuit, non windows default connectable address for provided messenger func GetConnectableAddress(mes p2p.Messenger) string { - for _, addr := range mes.Addresses() { - if strings.Contains(addr, "circuit") || strings.Contains(addr, "169.254") { - continue - } - return addr - } - return "" + for _, addr := range mes.Addresses() { + if strings.Contains(addr, "circuit") || strings.Contains(addr, "169.254") { + continue + } + return addr + } + return "" } // CreateMessengerWithKadDht creates a new libp2p messenger with kad-dht peer discovery func CreateMessengerWithKadDht(ctx context.Context, initialAddr string) p2p.Messenger { - prvKey, _ := ecdsa.GenerateKey(btcec.S256(), rand.Reader) - sk := (*libp2pCrypto.Secp256k1PrivateKey)(prvKey) + prvKey, _ := ecdsa.GenerateKey(btcec.S256(), rand.Reader) + sk := (*libp2pCrypto.Secp256k1PrivateKey)(prvKey) - libP2PMes, err := libp2p.NewNetworkMessengerOnFreePort( - ctx, - sk, - nil, - loadBalancer.NewOutgoingChannelLoadBalancer(), - discovery.NewKadDhtPeerDiscoverer(stepDelay, "test", []string{initialAddr}), - ) - if err != nil { - fmt.Println(err.Error()) - } + libP2PMes, err := libp2p.NewNetworkMessengerOnFreePort( + ctx, + sk, + nil, + loadBalancer.NewOutgoingChannelLoadBalancer(), + discovery.NewKadDhtPeerDiscoverer(stepDelay, "test", []string{initialAddr}), + ) + if err != nil { + fmt.Println(err.Error()) + } - return libP2PMes + return libP2PMes } // CreateTestShardDataPool creates a test data pool for shard nodes func CreateTestShardDataPool(txPool dataRetriever.ShardedDataCacherNotifier, nbShards uint32) dataRetriever.PoolsHolder { - if txPool == nil { - txPool, _ = shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: nbShards}) - } + if txPool == nil { + txPool, _ = shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: nbShards}) + } - uTxPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, 
Shards: nbShards}) - rewardsTxPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 300, Type: storageUnit.LRUCache, Shards: nbShards}) - cacherCfg := storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache, Shards: nbShards} - hdrPool, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) + uTxPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: nbShards}) + rewardsTxPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 300, Type: storageUnit.LRUCache, Shards: nbShards}) + cacherCfg := storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache, Shards: nbShards} + hdrPool, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: nbShards} - hdrNoncesCacher, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - hdrNonces, _ := dataPool.NewNonceSyncMapCacher(hdrNoncesCacher, uint64ByteSlice.NewBigEndianConverter()) + cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: nbShards} + hdrNoncesCacher, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) + hdrNonces, _ := dataPool.NewNonceSyncMapCacher(hdrNoncesCacher, uint64ByteSlice.NewBigEndianConverter()) - cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: nbShards} - txBlockBody, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) + cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: nbShards} + txBlockBody, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: nbShards} - peerChangeBlockBody, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) + cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: nbShards} + peerChangeBlockBody, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: nbShards} - metaBlocks, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) + cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: nbShards} + metaBlocks, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - dPool, _ := dataPool.NewShardedDataPool( - txPool, - uTxPool, - rewardsTxPool, - hdrPool, - hdrNonces, - txBlockBody, - peerChangeBlockBody, - metaBlocks, - ) + dPool, _ := dataPool.NewShardedDataPool( + txPool, + uTxPool, + rewardsTxPool, + hdrPool, + hdrNonces, + txBlockBody, + peerChangeBlockBody, + metaBlocks, + ) - return dPool + return dPool } // CreateTestMetaDataPool creates a test data pool for meta nodes func CreateTestMetaDataPool() dataRetriever.MetaPoolsHolder { - cacherCfg := storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache} - metaBlocks, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) + cacherCfg := storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache} + metaBlocks, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - cacherCfg = storageUnit.CacheConfig{Size: 10000, Type: storageUnit.LRUCache} - miniblockHashes, _ := shardedData.NewShardedData(cacherCfg) + cacherCfg = storageUnit.CacheConfig{Size: 10000, Type: 
storageUnit.LRUCache} + miniblockHashes, _ := shardedData.NewShardedData(cacherCfg) - cacherCfg = storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache} - shardHeaders, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) + cacherCfg = storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache} + shardHeaders, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - shardHeadersNoncesCacher, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - shardHeadersNonces, _ := dataPool.NewNonceSyncMapCacher(shardHeadersNoncesCacher, uint64ByteSlice.NewBigEndianConverter()) + shardHeadersNoncesCacher, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) + shardHeadersNonces, _ := dataPool.NewNonceSyncMapCacher(shardHeadersNoncesCacher, uint64ByteSlice.NewBigEndianConverter()) - dPool, _ := dataPool.NewMetaDataPool( - metaBlocks, - miniblockHashes, - shardHeaders, - shardHeadersNonces, - ) + dPool, _ := dataPool.NewMetaDataPool( + metaBlocks, + miniblockHashes, + shardHeaders, + shardHeadersNonces, + ) - return dPool + return dPool } // CreateMemUnit returns an in-memory storer implementation (the vast majority of tests do not require effective // disk I/O) func CreateMemUnit() storage.Storer { - cache, _ := storageUnit.NewCache(storageUnit.LRUCache, 10, 1) - persist, _ := memorydb.NewlruDB(100000) - unit, _ := storageUnit.NewStorageUnit(cache, persist) + cache, _ := storageUnit.NewCache(storageUnit.LRUCache, 10, 1) + persist, _ := memorydb.NewlruDB(100000) + unit, _ := storageUnit.NewStorageUnit(cache, persist) - return unit + return unit } // CreateShardStore creates a storage service for shard nodes func CreateShardStore(numOfShards uint32) dataRetriever.StorageService { - store := dataRetriever.NewChainStorer() - store.AddStorer(dataRetriever.TransactionUnit, CreateMemUnit()) - store.AddStorer(dataRetriever.MiniBlockUnit, CreateMemUnit()) - store.AddStorer(dataRetriever.MetaBlockUnit, CreateMemUnit()) - store.AddStorer(dataRetriever.PeerChangesUnit, CreateMemUnit()) - store.AddStorer(dataRetriever.BlockHeaderUnit, CreateMemUnit()) - store.AddStorer(dataRetriever.UnsignedTransactionUnit, CreateMemUnit()) - store.AddStorer(dataRetriever.RewardTransactionUnit, CreateMemUnit()) - store.AddStorer(dataRetriever.MetaHdrNonceHashDataUnit, CreateMemUnit()) + store := dataRetriever.NewChainStorer() + store.AddStorer(dataRetriever.TransactionUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.MiniBlockUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.MetaBlockUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.PeerChangesUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.BlockHeaderUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.UnsignedTransactionUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.RewardTransactionUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.MetaHdrNonceHashDataUnit, CreateMemUnit()) - for i := uint32(0); i < numOfShards; i++ { - hdrNonceHashDataUnit := dataRetriever.ShardHdrNonceHashDataUnit + dataRetriever.UnitType(i) - store.AddStorer(hdrNonceHashDataUnit, CreateMemUnit()) - } + for i := uint32(0); i < numOfShards; i++ { + hdrNonceHashDataUnit := dataRetriever.ShardHdrNonceHashDataUnit + dataRetriever.UnitType(i) + store.AddStorer(hdrNonceHashDataUnit, CreateMemUnit()) + } - return store + return store } // CreateMetaStore creates a storage service for meta nodes func CreateMetaStore(coordinator sharding.Coordinator) dataRetriever.StorageService { - 
store := dataRetriever.NewChainStorer() - store.AddStorer(dataRetriever.MetaBlockUnit, CreateMemUnit()) - store.AddStorer(dataRetriever.MetaHdrNonceHashDataUnit, CreateMemUnit()) - store.AddStorer(dataRetriever.BlockHeaderUnit, CreateMemUnit()) - for i := uint32(0); i < coordinator.NumberOfShards(); i++ { - store.AddStorer(dataRetriever.ShardHdrNonceHashDataUnit+dataRetriever.UnitType(i), CreateMemUnit()) - } + store := dataRetriever.NewChainStorer() + store.AddStorer(dataRetriever.MetaBlockUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.MetaHdrNonceHashDataUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.BlockHeaderUnit, CreateMemUnit()) + for i := uint32(0); i < coordinator.NumberOfShards(); i++ { + store.AddStorer(dataRetriever.ShardHdrNonceHashDataUnit+dataRetriever.UnitType(i), CreateMemUnit()) + } - return store + return store } // CreateAccountsDB creates an account state with a valid trie implementation but with a memory storage func CreateAccountsDB(accountType factory.Type) (*state.AccountsDB, data.Trie, storage.Storer) { - hasher := sha256.Sha256{} - store := CreateMemUnit() + hasher := sha256.Sha256{} + store := CreateMemUnit() - tr, _ := trie.NewTrie(store, TestMarshalizer, hasher) - accountFactory, _ := factory.NewAccountFactoryCreator(accountType) - adb, _ := state.NewAccountsDB(tr, sha256.Sha256{}, TestMarshalizer, accountFactory) + tr, _ := trie.NewTrie(store, TestMarshalizer, hasher) + accountFactory, _ := factory.NewAccountFactoryCreator(accountType) + adb, _ := state.NewAccountsDB(tr, sha256.Sha256{}, TestMarshalizer, accountFactory) - return adb, tr, store + return adb, tr, store } // CreateShardChain creates a blockchain implementation used by the shard nodes func CreateShardChain() *blockchain.BlockChain { - cfgCache := storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache} - badBlockCache, _ := storageUnit.NewCache(cfgCache.Type, cfgCache.Size, cfgCache.Shards) - blockChain, _ := blockchain.NewBlockChain( - badBlockCache, - ) - blockChain.GenesisHeader = &dataBlock.Header{} - genesisHeaderM, _ := TestMarshalizer.Marshal(blockChain.GenesisHeader) + cfgCache := storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache} + badBlockCache, _ := storageUnit.NewCache(cfgCache.Type, cfgCache.Size, cfgCache.Shards) + blockChain, _ := blockchain.NewBlockChain( + badBlockCache, + ) + blockChain.GenesisHeader = &dataBlock.Header{} + genesisHeaderM, _ := TestMarshalizer.Marshal(blockChain.GenesisHeader) - blockChain.SetGenesisHeaderHash(TestHasher.Compute(string(genesisHeaderM))) + blockChain.SetGenesisHeaderHash(TestHasher.Compute(string(genesisHeaderM))) - return blockChain + return blockChain } // CreateMetaChain creates a blockchain implementation used by the meta nodes func CreateMetaChain() data.ChainHandler { - cfgCache := storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache} - badBlockCache, _ := storageUnit.NewCache(cfgCache.Type, cfgCache.Size, cfgCache.Shards) - metaChain, _ := blockchain.NewMetaChain( - badBlockCache, - ) - metaChain.GenesisBlock = &dataBlock.MetaBlock{} + cfgCache := storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache} + badBlockCache, _ := storageUnit.NewCache(cfgCache.Type, cfgCache.Size, cfgCache.Shards) + metaChain, _ := blockchain.NewMetaChain( + badBlockCache, + ) + metaChain.GenesisBlock = &dataBlock.MetaBlock{} - return metaChain + return metaChain } // CreateGenesisBlocks creates empty genesis blocks for all known shards, including metachain func CreateGenesisBlocks(shardCoordinator 
sharding.Coordinator) map[uint32]data.HeaderHandler { - genesisBlocks := make(map[uint32]data.HeaderHandler) - for shardId := uint32(0); shardId < shardCoordinator.NumberOfShards(); shardId++ { - genesisBlocks[shardId] = CreateGenesisBlock(shardId) - } + genesisBlocks := make(map[uint32]data.HeaderHandler) + for shardId := uint32(0); shardId < shardCoordinator.NumberOfShards(); shardId++ { + genesisBlocks[shardId] = CreateGenesisBlock(shardId) + } - genesisBlocks[sharding.MetachainShardId] = CreateGenesisMetaBlock() + genesisBlocks[sharding.MetachainShardId] = CreateGenesisMetaBlock() - return genesisBlocks + return genesisBlocks } // CreateGenesisBlock creates a new mock shard genesis block func CreateGenesisBlock(shardId uint32) *dataBlock.Header { - rootHash := []byte("root hash") + rootHash := []byte("root hash") - return &dataBlock.Header{ - Nonce: 0, - Round: 0, - Signature: rootHash, - RandSeed: rootHash, - PrevRandSeed: rootHash, - ShardId: shardId, - PubKeysBitmap: rootHash, - RootHash: rootHash, - PrevHash: rootHash, - } + return &dataBlock.Header{ + Nonce: 0, + Round: 0, + Signature: rootHash, + RandSeed: rootHash, + PrevRandSeed: rootHash, + ShardId: shardId, + PubKeysBitmap: rootHash, + RootHash: rootHash, + PrevHash: rootHash, + } } // CreateGenesisMetaBlock creates a new mock meta genesis block func CreateGenesisMetaBlock() *dataBlock.MetaBlock { - rootHash := []byte("root hash") + rootHash := []byte("root hash") - return &dataBlock.MetaBlock{ - Nonce: 0, - Round: 0, - Signature: rootHash, - RandSeed: rootHash, - PrevRandSeed: rootHash, - PubKeysBitmap: rootHash, - RootHash: rootHash, - PrevHash: rootHash, - } + return &dataBlock.MetaBlock{ + Nonce: 0, + Round: 0, + Signature: rootHash, + RandSeed: rootHash, + PrevRandSeed: rootHash, + PubKeysBitmap: rootHash, + RootHash: rootHash, + PrevHash: rootHash, + } } // CreateIeleVMAndBlockchainHook creates a new instance of a iele VM func CreateIeleVMAndBlockchainHook(accnts state.AccountsAdapter) (vmcommon.VMExecutionHandler, *hooks.VMAccountsDB) { - blockChainHook, _ := hooks.NewVMAccountsDB(accnts, TestAddressConverter) - cryptoHook := hooks.NewVMCryptoHook() - vm := endpoint.NewElrondIeleVM(blockChainHook, cryptoHook, endpoint.ElrondTestnet) + blockChainHook, _ := hooks.NewVMAccountsDB(accnts, TestAddressConverter) + cryptoHook := hooks.NewVMCryptoHook() + vm := endpoint.NewElrondIeleVM(blockChainHook, cryptoHook, endpoint.ElrondTestnet) - return vm, blockChainHook + return vm, blockChainHook } // CreateAddressFromAddrBytes creates an address container object from address bytes provided func CreateAddressFromAddrBytes(addressBytes []byte) state.AddressContainer { - addr, _ := TestAddressConverter.CreateAddressFromPublicKeyBytes(addressBytes) - return addr + addr, _ := TestAddressConverter.CreateAddressFromPublicKeyBytes(addressBytes) + return addr } // CreateRandomAddress creates a random byte array with fixed size func CreateRandomAddress() state.AddressContainer { - addr, _ := TestAddressConverter.CreateAddressFromHex(CreateRandomHexString(64)) - return addr + addr, _ := TestAddressConverter.CreateAddressFromHex(CreateRandomHexString(64)) + return addr } // MintAddress will create an account (if it does not exists), update the balance with required value, // save the account and commit the trie. 
func MintAddress(accnts state.AccountsAdapter, addressBytes []byte, value *big.Int) { - accnt, _ := accnts.GetAccountWithJournal(CreateAddressFromAddrBytes(addressBytes)) - _ = accnt.(*state.Account).SetBalanceWithJournal(value) - _, _ = accnts.Commit() + accnt, _ := accnts.GetAccountWithJournal(CreateAddressFromAddrBytes(addressBytes)) + _ = accnt.(*state.Account).SetBalanceWithJournal(value) + _, _ = accnts.Commit() } // CreateAccount creates a new account and returns the address func CreateAccount(accnts state.AccountsAdapter, nonce uint64, balance *big.Int) state.AddressContainer { - address, _ := TestAddressConverter.CreateAddressFromHex(CreateRandomHexString(64)) - account, _ := accnts.GetAccountWithJournal(address) - _ = account.(*state.Account).SetNonceWithJournal(nonce) - _ = account.(*state.Account).SetBalanceWithJournal(balance) + address, _ := TestAddressConverter.CreateAddressFromHex(CreateRandomHexString(64)) + account, _ := accnts.GetAccountWithJournal(address) + _ = account.(*state.Account).SetNonceWithJournal(nonce) + _ = account.(*state.Account).SetBalanceWithJournal(balance) - return address + return address } // MakeDisplayTable will output a string containing counters for received transactions, headers, miniblocks and // meta headers for all provided test nodes func MakeDisplayTable(nodes []*TestProcessorNode) string { - header := []string{"pk", "shard ID", "txs", "miniblocks", "headers", "metachain headers"} - dataLines := make([]*display.LineData, len(nodes)) - - for idx, n := range nodes { - dataLines[idx] = display.NewLineData( - false, - []string{ - hex.EncodeToString(n.OwnAccount.PkTxSignBytes), - fmt.Sprintf("%d", n.ShardCoordinator.SelfId()), - fmt.Sprintf("%d", atomic.LoadInt32(&n.CounterTxRecv)), - fmt.Sprintf("%d", atomic.LoadInt32(&n.CounterMbRecv)), - fmt.Sprintf("%d", atomic.LoadInt32(&n.CounterHdrRecv)), - fmt.Sprintf("%d", atomic.LoadInt32(&n.CounterMetaRcv)), - }, - ) - } - table, _ := display.CreateTableString(header, dataLines) - - return table + header := []string{"pk", "shard ID", "txs", "miniblocks", "headers", "metachain headers"} + dataLines := make([]*display.LineData, len(nodes)) + + for idx, n := range nodes { + dataLines[idx] = display.NewLineData( + false, + []string{ + hex.EncodeToString(n.OwnAccount.PkTxSignBytes), + fmt.Sprintf("%d", n.ShardCoordinator.SelfId()), + fmt.Sprintf("%d", atomic.LoadInt32(&n.CounterTxRecv)), + fmt.Sprintf("%d", atomic.LoadInt32(&n.CounterMbRecv)), + fmt.Sprintf("%d", atomic.LoadInt32(&n.CounterHdrRecv)), + fmt.Sprintf("%d", atomic.LoadInt32(&n.CounterMetaRcv)), + }, + ) + } + table, _ := display.CreateTableString(header, dataLines) + + return table } // PrintShardAccount outputs on console a shard account data contained func PrintShardAccount(accnt *state.Account, tag string) { - str := fmt.Sprintf("%s Address: %s\n", tag, base64.StdEncoding.EncodeToString(accnt.AddressContainer().Bytes())) - str += fmt.Sprintf(" Nonce: %d\n", accnt.Nonce) - str += fmt.Sprintf(" Balance: %d\n", accnt.Balance.Uint64()) - str += fmt.Sprintf(" Code hash: %s\n", base64.StdEncoding.EncodeToString(accnt.CodeHash)) - str += fmt.Sprintf(" Root hash: %s\n", base64.StdEncoding.EncodeToString(accnt.RootHash)) + str := fmt.Sprintf("%s Address: %s\n", tag, base64.StdEncoding.EncodeToString(accnt.AddressContainer().Bytes())) + str += fmt.Sprintf(" Nonce: %d\n", accnt.Nonce) + str += fmt.Sprintf(" Balance: %d\n", accnt.Balance.Uint64()) + str += fmt.Sprintf(" Code hash: %s\n", base64.StdEncoding.EncodeToString(accnt.CodeHash)) + str += 
fmt.Sprintf(" Root hash: %s\n", base64.StdEncoding.EncodeToString(accnt.RootHash)) - fmt.Println(str) + fmt.Println(str) } // CreateRandomHexString returns a string encoded in hex with the given size func CreateRandomHexString(chars int) string { - if chars < 1 { - return "" - } + if chars < 1 { + return "" + } - buff := make([]byte, chars/2) - _, _ = rand.Reader.Read(buff) + buff := make([]byte, chars/2) + _, _ = rand.Reader.Read(buff) - return hex.EncodeToString(buff) + return hex.EncodeToString(buff) } // GenerateAddressJournalAccountAccountsDB returns an account, the accounts address, and the accounts database func GenerateAddressJournalAccountAccountsDB() (state.AddressContainer, state.AccountHandler, *state.AccountsDB) { - adr := CreateRandomAddress() - adb, _, _ := CreateAccountsDB(factory.UserAccount) - account, _ := state.NewAccount(adr, adb) + adr := CreateRandomAddress() + adb, _, _ := CreateAccountsDB(factory.UserAccount) + account, _ := state.NewAccount(adr, adb) - return adr, account, adb + return adr, account, adb } // AdbEmulateBalanceTxSafeExecution emulates a tx execution by altering the accounts // balance and nonce, and printing any encountered error func AdbEmulateBalanceTxSafeExecution(acntSrc, acntDest *state.Account, accounts state.AccountsAdapter, value *big.Int) { - snapshot := accounts.JournalLen() - err := AdbEmulateBalanceTxExecution(acntSrc, acntDest, value) + snapshot := accounts.JournalLen() + err := AdbEmulateBalanceTxExecution(acntSrc, acntDest, value) - if err != nil { - fmt.Printf("Error executing tx (value: %v), reverting...\n", value) - err = accounts.RevertToSnapshot(snapshot) + if err != nil { + fmt.Printf("Error executing tx (value: %v), reverting...\n", value) + err = accounts.RevertToSnapshot(snapshot) - if err != nil { - panic(err) - } - } + if err != nil { + panic(err) + } + } } // AdbEmulateBalanceTxExecution emulates a tx execution by altering the accounts // balance and nonce, and printing any encountered error func AdbEmulateBalanceTxExecution(acntSrc, acntDest *state.Account, value *big.Int) error { - srcVal := acntSrc.Balance - destVal := acntDest.Balance + srcVal := acntSrc.Balance + destVal := acntDest.Balance - if srcVal.Cmp(value) < 0 { - return errors.New("not enough funds") - } + if srcVal.Cmp(value) < 0 { + return errors.New("not enough funds") + } - err := acntSrc.SetBalanceWithJournal(srcVal.Sub(srcVal, value)) - if err != nil { - return err - } + err := acntSrc.SetBalanceWithJournal(srcVal.Sub(srcVal, value)) + if err != nil { + return err + } - err = acntDest.SetBalanceWithJournal(destVal.Add(destVal, value)) - if err != nil { - return err - } + err = acntDest.SetBalanceWithJournal(destVal.Add(destVal, value)) + if err != nil { + return err + } - err = acntSrc.SetNonceWithJournal(acntSrc.Nonce + 1) - if err != nil { - return err - } + err = acntSrc.SetNonceWithJournal(acntSrc.Nonce + 1) + if err != nil { + return err + } - return nil + return nil } // CreateSimpleTxProcessor returns a transaction processor func CreateSimpleTxProcessor(accnts state.AccountsAdapter) process.TransactionProcessor { - shardCoordinator := mock.NewMultiShardsCoordinatorMock(1) - txProcessor, _ := txProc.NewTxProcessor( - accnts, - TestHasher, - TestAddressConverter, - TestMarshalizer, - shardCoordinator, - &mock.SCProcessorMock{}, - &mock.UnsignedTxHandlerMock{}, - &mock.TxTypeHandlerMock{}, - ) + shardCoordinator := mock.NewMultiShardsCoordinatorMock(1) + txProcessor, _ := txProc.NewTxProcessor( + accnts, + TestHasher, + TestAddressConverter, + 
TestMarshalizer, + shardCoordinator, + &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, + ) - return txProcessor + return txProcessor } // CreateNewDefaultTrie returns a new trie with test hasher and marsahalizer func CreateNewDefaultTrie() data.Trie { - tr, _ := trie.NewTrie(CreateMemUnit(), TestMarshalizer, TestHasher) - return tr + tr, _ := trie.NewTrie(CreateMemUnit(), TestMarshalizer, TestHasher) + return tr } // GenerateRandomSlice returns a random byte slice with the given size func GenerateRandomSlice(size int) []byte { - buff := make([]byte, size) - _, _ = rand.Reader.Read(buff) + buff := make([]byte, size) + _, _ = rand.Reader.Read(buff) - return buff + return buff } // MintAllNodes will take each shard node (n) and will mint all nodes that have their pk managed by the iterating node n func MintAllNodes(nodes []*TestProcessorNode, value *big.Int) { - for idx, n := range nodes { - if n.ShardCoordinator.SelfId() == sharding.MetachainShardId { - continue - } + for idx, n := range nodes { + if n.ShardCoordinator.SelfId() == sharding.MetachainShardId { + continue + } - mintAddressesFromSameShard(nodes, idx, value) - } + mintAddressesFromSameShard(nodes, idx, value) + } } func mintAddressesFromSameShard(nodes []*TestProcessorNode, targetNodeIdx int, value *big.Int) { - targetNode := nodes[targetNodeIdx] + targetNode := nodes[targetNodeIdx] - for _, n := range nodes { - shardId := targetNode.ShardCoordinator.ComputeId(n.OwnAccount.Address) - if shardId != targetNode.ShardCoordinator.SelfId() { - continue - } + for _, n := range nodes { + shardId := targetNode.ShardCoordinator.ComputeId(n.OwnAccount.Address) + if shardId != targetNode.ShardCoordinator.SelfId() { + continue + } - n.OwnAccount.Balance = big.NewInt(0).Set(value) - MintAddress(targetNode.AccntState, n.OwnAccount.PkTxSignBytes, value) - } + n.OwnAccount.Balance = big.NewInt(0).Set(value) + MintAddress(targetNode.AccntState, n.OwnAccount.PkTxSignBytes, value) + } } // MintAllPlayers mints addresses for all players func MintAllPlayers(nodes []*TestProcessorNode, players []*TestWalletAccount, value *big.Int) { - shardCoordinator := nodes[0].ShardCoordinator + shardCoordinator := nodes[0].ShardCoordinator - for _, player := range players { - pShardId := shardCoordinator.ComputeId(player.Address) + for _, player := range players { + pShardId := shardCoordinator.ComputeId(player.Address) - for _, n := range nodes { - if pShardId != n.ShardCoordinator.SelfId() { - continue - } + for _, n := range nodes { + if pShardId != n.ShardCoordinator.SelfId() { + continue + } - MintAddress(n.AccntState, player.Address.Bytes(), value) - player.Balance = big.NewInt(0).Set(value) - } - } + MintAddress(n.AccntState, player.Address.Bytes(), value) + player.Balance = big.NewInt(0).Set(value) + } + } } // IncrementAndPrintRound increments the given variable, and prints the message for the beginning of the round func IncrementAndPrintRound(round uint64) uint64 { - round++ - fmt.Printf("#################################### ROUND %d BEGINS ####################################\n\n", round) + round++ + fmt.Printf("#################################### ROUND %d BEGINS ####################################\n\n", round) - return round + return round } // ProposeBlock proposes a block with SC txs for every shard func ProposeBlock(nodes []*TestProcessorNode, idxProposers []int, round uint64, nonce uint64) { - fmt.Println("All shards propose blocks...") - for idx, n := range nodes { - if !IsIntInSlice(idx, idxProposers) { - 
continue - } + fmt.Println("All shards propose blocks...") + for idx, n := range nodes { + if !IsIntInSlice(idx, idxProposers) { + continue + } - body, header, _ := n.ProposeBlock(round, nonce) - n.BroadcastBlock(body, header) - n.CommitBlock(body, header) - } + body, header, _ := n.ProposeBlock(round, nonce) + n.BroadcastBlock(body, header) + n.CommitBlock(body, header) + } - fmt.Println("Delaying for disseminating headers and miniblocks...") - time.Sleep(stepDelay) - fmt.Println(MakeDisplayTable(nodes)) + fmt.Println("Delaying for disseminating headers and miniblocks...") + time.Sleep(stepDelay) + fmt.Println(MakeDisplayTable(nodes)) } // SyncBlock synchronizes the proposed block in all the other shard nodes func SyncBlock( - t *testing.T, - nodes []*TestProcessorNode, - idxProposers []int, - round uint64, + t *testing.T, + nodes []*TestProcessorNode, + idxProposers []int, + round uint64, ) { - fmt.Println("All other shard nodes sync the proposed block...") - for idx, n := range nodes { - if IsIntInSlice(idx, idxProposers) { - continue - } + fmt.Println("All other shard nodes sync the proposed block...") + for idx, n := range nodes { + if IsIntInSlice(idx, idxProposers) { + continue + } - err := n.SyncNode(round) - if err != nil { - assert.Fail(t, err.Error()) - return - } - } + err := n.SyncNode(round) + if err != nil { + assert.Fail(t, err.Error()) + return + } + } - time.Sleep(stepDelay) - fmt.Println(MakeDisplayTable(nodes)) + time.Sleep(stepDelay) + fmt.Println(MakeDisplayTable(nodes)) } // IsIntInSlice returns true if idx is found on any position in the provided slice func IsIntInSlice(idx int, slice []int) bool { - for _, value := range slice { - if value == idx { - return true - } - } + for _, value := range slice { + if value == idx { + return true + } + } - return false + return false } // Uint32InSlice checks if a uint32 value is in a slice func Uint32InSlice(searched uint32, list []uint32) bool { - for _, val := range list { - if val == searched { - return true - } - } - return false + for _, val := range list { + if val == searched { + return true + } + } + return false } // CheckRootHashes checks the root hash of the proposer in every shard func CheckRootHashes(t *testing.T, nodes []*TestProcessorNode, idxProposers []int) { - for _, idx := range idxProposers { - checkRootHashInShard(t, nodes, idx) - } + for _, idx := range idxProposers { + checkRootHashInShard(t, nodes, idx) + } } func checkRootHashInShard(t *testing.T, nodes []*TestProcessorNode, idxProposer int) { - proposerNode := nodes[idxProposer] - proposerRootHash, _ := proposerNode.AccntState.RootHash() + proposerNode := nodes[idxProposer] + proposerRootHash, _ := proposerNode.AccntState.RootHash() - for i := 0; i < len(nodes); i++ { - n := nodes[i] + for i := 0; i < len(nodes); i++ { + n := nodes[i] - if n.ShardCoordinator.SelfId() != proposerNode.ShardCoordinator.SelfId() { - continue - } + if n.ShardCoordinator.SelfId() != proposerNode.ShardCoordinator.SelfId() { + continue + } - fmt.Printf("Testing roothash for node index %d, shard ID %d...\n", i, n.ShardCoordinator.SelfId()) - nodeRootHash, _ := n.AccntState.RootHash() - assert.Equal(t, proposerRootHash, nodeRootHash) - } + fmt.Printf("Testing roothash for node index %d, shard ID %d...\n", i, n.ShardCoordinator.SelfId()) + nodeRootHash, _ := n.AccntState.RootHash() + assert.Equal(t, proposerRootHash, nodeRootHash) + } } // CheckTxPresentAndRightNonce verifies that the nonce was updated correctly after the exec of bulk txs func CheckTxPresentAndRightNonce( - t 
*testing.T, - startingNonce uint64, - noOfTxs int, - txHashes [][]byte, - txs []data.TransactionHandler, - cache dataRetriever.ShardedDataCacherNotifier, - shardCoordinator sharding.Coordinator, + t *testing.T, + startingNonce uint64, + noOfTxs int, + txHashes [][]byte, + txs []data.TransactionHandler, + cache dataRetriever.ShardedDataCacherNotifier, + shardCoordinator sharding.Coordinator, ) { - if noOfTxs != len(txHashes) { - for i := startingNonce; i < startingNonce+uint64(noOfTxs); i++ { - found := false - - for _, txHandler := range txs { - nonce := extractUint64ValueFromTxHandler(txHandler) - if nonce == i { - found = true - break - } - } - - if !found { - fmt.Printf("unsigned tx with nonce %d is missing\n", i) - } - } - assert.Fail(t, fmt.Sprintf("should have been %d, got %d", noOfTxs, len(txHashes))) - - return - } - - bitmap := make([]bool, noOfTxs+int(startingNonce)) - //set for each nonce from found tx a true flag in bitmap - for i := 0; i < noOfTxs; i++ { - selfId := shardCoordinator.SelfId() - shardDataStore := cache.ShardDataStore(process.ShardCacherIdentifier(selfId, selfId)) - val, _ := shardDataStore.Get(txHashes[i]) - if val == nil { - continue - } - - nonce := extractUint64ValueFromTxHandler(val.(data.TransactionHandler)) - bitmap[nonce] = true - } - - //for the first startingNonce values, the bitmap should be false - //for the rest, true - for i := 0; i < noOfTxs+int(startingNonce); i++ { - if i < int(startingNonce) { - assert.False(t, bitmap[i]) - continue - } - - assert.True(t, bitmap[i]) - } + if noOfTxs != len(txHashes) { + for i := startingNonce; i < startingNonce+uint64(noOfTxs); i++ { + found := false + + for _, txHandler := range txs { + nonce := extractUint64ValueFromTxHandler(txHandler) + if nonce == i { + found = true + break + } + } + + if !found { + fmt.Printf("unsigned tx with nonce %d is missing\n", i) + } + } + assert.Fail(t, fmt.Sprintf("should have been %d, got %d", noOfTxs, len(txHashes))) + + return + } + + bitmap := make([]bool, noOfTxs+int(startingNonce)) + //set for each nonce from found tx a true flag in bitmap + for i := 0; i < noOfTxs; i++ { + selfId := shardCoordinator.SelfId() + shardDataStore := cache.ShardDataStore(process.ShardCacherIdentifier(selfId, selfId)) + val, _ := shardDataStore.Get(txHashes[i]) + if val == nil { + continue + } + + nonce := extractUint64ValueFromTxHandler(val.(data.TransactionHandler)) + bitmap[nonce] = true + } + + //for the first startingNonce values, the bitmap should be false + //for the rest, true + for i := 0; i < noOfTxs+int(startingNonce); i++ { + if i < int(startingNonce) { + assert.False(t, bitmap[i]) + continue + } + + assert.True(t, bitmap[i]) + } } func extractUint64ValueFromTxHandler(txHandler data.TransactionHandler) uint64 { - tx, ok := txHandler.(*transaction.Transaction) - if ok { - return tx.Nonce - } + tx, ok := txHandler.(*transaction.Transaction) + if ok { + return tx.Nonce + } - buff, _ := hex.DecodeString(txHandler.GetData()) - return binary.BigEndian.Uint64(buff) + buff, _ := hex.DecodeString(txHandler.GetData()) + return binary.BigEndian.Uint64(buff) } // CreateNodes creates multiple nodes in different shards func CreateNodes( - numOfShards int, - nodesPerShard int, - numMetaChainNodes int, - serviceID string, + numOfShards int, + nodesPerShard int, + numMetaChainNodes int, + serviceID string, ) []*TestProcessorNode { - //first node generated will have is pk belonging to firstSkShardId - nodes := make([]*TestProcessorNode, numOfShards*nodesPerShard+numMetaChainNodes) + //first node 
generated will have is pk belonging to firstSkShardId + nodes := make([]*TestProcessorNode, numOfShards*nodesPerShard+numMetaChainNodes) - idx := 0 - for shardId := uint32(0); shardId < uint32(numOfShards); shardId++ { - for j := 0; j < nodesPerShard; j++ { - n := NewTestProcessorNode(uint32(numOfShards), shardId, shardId, serviceID) + idx := 0 + for shardId := uint32(0); shardId < uint32(numOfShards); shardId++ { + for j := 0; j < nodesPerShard; j++ { + n := NewTestProcessorNode(uint32(numOfShards), shardId, shardId, serviceID) - nodes[idx] = n - idx++ - } - } + nodes[idx] = n + idx++ + } + } - for i := 0; i < numMetaChainNodes; i++ { - metaNode := NewTestProcessorNode(uint32(numOfShards), sharding.MetachainShardId, 0, serviceID) - idx = i + numOfShards*nodesPerShard - nodes[idx] = metaNode - } + for i := 0; i < numMetaChainNodes; i++ { + metaNode := NewTestProcessorNode(uint32(numOfShards), sharding.MetachainShardId, 0, serviceID) + idx = i + numOfShards*nodesPerShard + nodes[idx] = metaNode + } - return nodes + return nodes } // DisplayAndStartNodes prints each nodes shard ID, sk and pk, and then starts the node func DisplayAndStartNodes(nodes []*TestProcessorNode) { - for _, n := range nodes { - skBuff, _ := n.OwnAccount.SkTxSign.ToByteArray() - pkBuff, _ := n.OwnAccount.PkTxSign.ToByteArray() + for _, n := range nodes { + skBuff, _ := n.OwnAccount.SkTxSign.ToByteArray() + pkBuff, _ := n.OwnAccount.PkTxSign.ToByteArray() - fmt.Printf("Shard ID: %v, sk: %s, pk: %s\n", - n.ShardCoordinator.SelfId(), - hex.EncodeToString(skBuff), - hex.EncodeToString(pkBuff), - ) - _ = n.Node.Start() - _ = n.Node.P2PBootstrap() - } + fmt.Printf("Shard ID: %v, sk: %s, pk: %s\n", + n.ShardCoordinator.SelfId(), + hex.EncodeToString(skBuff), + hex.EncodeToString(pkBuff), + ) + _ = n.Node.Start() + _ = n.Node.P2PBootstrap() + } - fmt.Println("Delaying for node bootstrap and topic announcement...") - time.Sleep(p2pBootstrapStepDelay) + fmt.Println("Delaying for node bootstrap and topic announcement...") + time.Sleep(p2pBootstrapStepDelay) } // GenerateAndDisseminateTxs generates and sends multiple txs func GenerateAndDisseminateTxs( - n *TestProcessorNode, - senders []crypto.PrivateKey, - receiversPrivateKeys map[uint32][]crypto.PrivateKey, - valToTransfer *big.Int, - gasPrice uint64, - gasLimit uint64, + n *TestProcessorNode, + senders []crypto.PrivateKey, + receiversPrivateKeys map[uint32][]crypto.PrivateKey, + valToTransfer *big.Int, + gasPrice uint64, + gasLimit uint64, ) { - for i := 0; i < len(senders); i++ { - senderKey := senders[i] - incrementalNonce := make([]uint64, len(senders)) - for _, recvPrivateKeys := range receiversPrivateKeys { - receiverKey := recvPrivateKeys[i] - tx := generateTransferTx(incrementalNonce[i], senderKey, receiverKey, valToTransfer, gasPrice, gasLimit) - _, _ = n.SendTransaction(tx) - incrementalNonce[i]++ - } - } + for i := 0; i < len(senders); i++ { + senderKey := senders[i] + incrementalNonce := make([]uint64, len(senders)) + for _, recvPrivateKeys := range receiversPrivateKeys { + receiverKey := recvPrivateKeys[i] + tx := generateTransferTx(incrementalNonce[i], senderKey, receiverKey, valToTransfer, gasPrice, gasLimit) + _, _ = n.SendTransaction(tx) + incrementalNonce[i]++ + } + } } type txArgs struct { - nonce uint64 - value *big.Int - rcvAddr []byte - sndAddr []byte - data string - gasPrice int - gasLimit int + nonce uint64 + value *big.Int + rcvAddr []byte + sndAddr []byte + data string + gasPrice int + gasLimit int } func generateTransferTx( - nonce uint64, - sender 
crypto.PrivateKey, - receiver crypto.PrivateKey, - valToTransfer *big.Int, - gasPrice uint64, - gasLimit uint64, + nonce uint64, + sender crypto.PrivateKey, + receiver crypto.PrivateKey, + valToTransfer *big.Int, + gasPrice uint64, + gasLimit uint64, ) *transaction.Transaction { - tx := transaction.Transaction{ - Nonce: nonce, - Value: valToTransfer, - RcvAddr: skToPk(receiver), - SndAddr: skToPk(sender), - Data: "", - GasLimit: gasLimit, - GasPrice: gasPrice, - } - txBuff, _ := TestMarshalizer.Marshal(&tx) - signer := &singlesig.SchnorrSigner{} - tx.Signature, _ = signer.Sign(sender, txBuff) + tx := transaction.Transaction{ + Nonce: nonce, + Value: valToTransfer, + RcvAddr: skToPk(receiver), + SndAddr: skToPk(sender), + Data: "", + GasLimit: gasLimit, + GasPrice: gasPrice, + } + txBuff, _ := TestMarshalizer.Marshal(&tx) + signer := &singlesig.SchnorrSigner{} + tx.Signature, _ = signer.Sign(sender, txBuff) - return &tx + return &tx } func generateTx( - skSign crypto.PrivateKey, - signer crypto.SingleSigner, - args *txArgs, + skSign crypto.PrivateKey, + signer crypto.SingleSigner, + args *txArgs, ) *transaction.Transaction { - tx := &transaction.Transaction{ - Nonce: args.nonce, - Value: args.value, - RcvAddr: args.rcvAddr, - SndAddr: args.sndAddr, - GasPrice: uint64(args.gasPrice), - GasLimit: uint64(args.gasLimit), - Data: args.data, - } - txBuff, _ := TestMarshalizer.Marshal(tx) - tx.Signature, _ = signer.Sign(skSign, txBuff) + tx := &transaction.Transaction{ + Nonce: args.nonce, + Value: args.value, + RcvAddr: args.rcvAddr, + SndAddr: args.sndAddr, + GasPrice: uint64(args.gasPrice), + GasLimit: uint64(args.gasLimit), + Data: args.data, + } + txBuff, _ := TestMarshalizer.Marshal(tx) + tx.Signature, _ = signer.Sign(skSign, txBuff) - return tx + return tx } func skToPk(sk crypto.PrivateKey) []byte { - pkBuff, _ := sk.GeneratePublic().ToByteArray() - return pkBuff + pkBuff, _ := sk.GeneratePublic().ToByteArray() + return pkBuff } // TestPrivateKeyHasBalance checks if the private key has the expected balance func TestPrivateKeyHasBalance(t *testing.T, n *TestProcessorNode, sk crypto.PrivateKey, expectedBalance *big.Int) { - pkBuff, _ := sk.GeneratePublic().ToByteArray() - addr, _ := TestAddressConverter.CreateAddressFromPublicKeyBytes(pkBuff) - account, _ := n.AccntState.GetExistingAccount(addr) - assert.Equal(t, expectedBalance, account.(*state.Account).Balance) + pkBuff, _ := sk.GeneratePublic().ToByteArray() + addr, _ := TestAddressConverter.CreateAddressFromPublicKeyBytes(pkBuff) + account, _ := n.AccntState.GetExistingAccount(addr) + assert.Equal(t, expectedBalance, account.(*state.Account).Balance) } // GetMiniBlocksHashesFromShardIds returns miniblock hashes from body func GetMiniBlocksHashesFromShardIds(body dataBlock.Body, shardIds ...uint32) [][]byte { - hashes := make([][]byte, 0) + hashes := make([][]byte, 0) - for _, miniblock := range body { - for _, shardId := range shardIds { - if miniblock.ReceiverShardID == shardId { - buff, _ := TestMarshalizer.Marshal(miniblock) - hashes = append(hashes, TestHasher.Compute(string(buff))) - } - } - } + for _, miniblock := range body { + for _, shardId := range shardIds { + if miniblock.ReceiverShardID == shardId { + buff, _ := TestMarshalizer.Marshal(miniblock) + hashes = append(hashes, TestHasher.Compute(string(buff))) + } + } + } - return hashes + return hashes } // GenerateSkAndPkInShard generates and returns a private and a public key that reside in a given shard. 
// It also returns the key generator func GenerateSkAndPkInShard( - coordinator sharding.Coordinator, - shardId uint32, + coordinator sharding.Coordinator, + shardId uint32, ) (crypto.PrivateKey, crypto.PublicKey, crypto.KeyGenerator) { - suite := kyber.NewBlakeSHA256Ed25519() - keyGen := signing.NewKeyGenerator(suite) - sk, pk := keyGen.GeneratePair() + suite := kyber.NewBlakeSHA256Ed25519() + keyGen := signing.NewKeyGenerator(suite) + sk, pk := keyGen.GeneratePair() - for { - pkBytes, _ := pk.ToByteArray() - addr, _ := TestAddressConverter.CreateAddressFromPublicKeyBytes(pkBytes) - if coordinator.ComputeId(addr) == shardId { - break - } - sk, pk = keyGen.GeneratePair() - } + for { + pkBytes, _ := pk.ToByteArray() + addr, _ := TestAddressConverter.CreateAddressFromPublicKeyBytes(pkBytes) + if coordinator.ComputeId(addr) == shardId { + break + } + sk, pk = keyGen.GeneratePair() + } - return sk, pk, keyGen + return sk, pk, keyGen } // CreateMintingForSenders creates account with balances for every node in a given shard func CreateMintingForSenders( - nodes []*TestProcessorNode, - senderShard uint32, - sendersPrivateKeys []crypto.PrivateKey, - value *big.Int, + nodes []*TestProcessorNode, + senderShard uint32, + sendersPrivateKeys []crypto.PrivateKey, + value *big.Int, ) { - for _, n := range nodes { - //only sender shard nodes will be minted - if n.ShardCoordinator.SelfId() != senderShard { - continue - } + for _, n := range nodes { + //only sender shard nodes will be minted + if n.ShardCoordinator.SelfId() != senderShard { + continue + } - for _, sk := range sendersPrivateKeys { - pkBuff, _ := sk.GeneratePublic().ToByteArray() - adr, _ := TestAddressConverter.CreateAddressFromPublicKeyBytes(pkBuff) - account, _ := n.AccntState.GetAccountWithJournal(adr) - _ = account.(*state.Account).SetBalanceWithJournal(value) - } + for _, sk := range sendersPrivateKeys { + pkBuff, _ := sk.GeneratePublic().ToByteArray() + adr, _ := TestAddressConverter.CreateAddressFromPublicKeyBytes(pkBuff) + account, _ := n.AccntState.GetAccountWithJournal(adr) + _ = account.(*state.Account).SetBalanceWithJournal(value) + } - _, _ = n.AccntState.Commit() - } + _, _ = n.AccntState.Commit() + } } // ProposeBlockSignalsEmptyBlock proposes and broadcasts a block func ProposeBlockSignalsEmptyBlock( - node *TestProcessorNode, - round uint64, - nonce uint64, + node *TestProcessorNode, + round uint64, + nonce uint64, ) (data.HeaderHandler, data.BodyHandler, bool) { - fmt.Println("Proposing block without commit...") + fmt.Println("Proposing block without commit...") - body, header, txHashes := node.ProposeBlock(round, nonce) - node.BroadcastBlock(body, header) - isEmptyBlock := len(txHashes) == 0 + body, header, txHashes := node.ProposeBlock(round, nonce) + node.BroadcastBlock(body, header) + isEmptyBlock := len(txHashes) == 0 - fmt.Println("Delaying for disseminating headers and miniblocks...") - time.Sleep(stepDelay) + fmt.Println("Delaying for disseminating headers and miniblocks...") + time.Sleep(stepDelay) - return header, body, isEmptyBlock + return header, body, isEmptyBlock } // CreateAccountForNodes creates accounts for each node and commits the accounts state func CreateAccountForNodes(nodes []*TestProcessorNode) { - for i := 0; i < len(nodes); i++ { - CreateAccountForNode(nodes[i]) - } + for i := 0; i < len(nodes); i++ { + CreateAccountForNode(nodes[i]) + } } // CreateAccountForNode creates an account for the given node func CreateAccountForNode(node *TestProcessorNode) { - addr, _ := 
TestAddressConverter.CreateAddressFromPublicKeyBytes(node.OwnAccount.PkTxSignBytes) - _, _ = node.AccntState.GetAccountWithJournal(addr) - _, _ = node.AccntState.Commit() + addr, _ := TestAddressConverter.CreateAddressFromPublicKeyBytes(node.OwnAccount.PkTxSignBytes) + _, _ = node.AccntState.GetAccountWithJournal(addr) + _, _ = node.AccntState.Commit() } // ComputeAndRequestMissingTransactions computes missing transactions for each node, and requests them func ComputeAndRequestMissingTransactions( - nodes []*TestProcessorNode, - generatedTxHashes [][]byte, - shardResolver uint32, - shardRequesters ...uint32, + nodes []*TestProcessorNode, + generatedTxHashes [][]byte, + shardResolver uint32, + shardRequesters ...uint32, ) { - for _, n := range nodes { - if !Uint32InSlice(n.ShardCoordinator.SelfId(), shardRequesters) { - continue - } + for _, n := range nodes { + if !Uint32InSlice(n.ShardCoordinator.SelfId(), shardRequesters) { + continue + } - neededTxs := getMissingTxsForNode(n, generatedTxHashes) - requestMissingTransactions(n, shardResolver, neededTxs) - } + neededTxs := getMissingTxsForNode(n, generatedTxHashes) + requestMissingTransactions(n, shardResolver, neededTxs) + } } func ComputeAndRequestMissingRewardTxs( - nodes []*TestProcessorNode, - generatedDataHashes [][]byte, - shardResolver uint32, - shardRequesters ...uint32, + nodes []*TestProcessorNode, + generatedDataHashes [][]byte, + shardResolver uint32, + shardRequesters ...uint32, ) { - for _, n := range nodes { - if !Uint32InSlice(n.ShardCoordinator.SelfId(), shardRequesters) { - continue - } + for _, n := range nodes { + if !Uint32InSlice(n.ShardCoordinator.SelfId(), shardRequesters) { + continue + } - neededData := getMissingRewardTxsForNode(n, generatedDataHashes) - requestMissingRewardTxs(n, shardResolver, neededData) - } + neededData := getMissingRewardTxsForNode(n, generatedDataHashes) + requestMissingRewardTxs(n, shardResolver, neededData) + } } func getMissingTxsForNode(n *TestProcessorNode, generatedTxHashes [][]byte) [][]byte { - neededTxs := make([][]byte, 0) + neededTxs := make([][]byte, 0) - for i := 0; i < len(generatedTxHashes); i++ { - _, ok := n.ShardDataPool.Transactions().SearchFirstData(generatedTxHashes[i]) - if !ok { - //tx is still missing - neededTxs = append(neededTxs, generatedTxHashes[i]) - } - } + for i := 0; i < len(generatedTxHashes); i++ { + _, ok := n.ShardDataPool.Transactions().SearchFirstData(generatedTxHashes[i]) + if !ok { + //tx is still missing + neededTxs = append(neededTxs, generatedTxHashes[i]) + } + } - return neededTxs + return neededTxs } func getMissingRewardTxsForNode(n *TestProcessorNode, generatedTxHashes [][]byte) [][]byte { - neededTxs := make([][]byte, 0) + neededTxs := make([][]byte, 0) - for i := 0; i < len(generatedTxHashes); i++ { - _, ok := n.ShardDataPool.RewardTransactions().SearchFirstData(generatedTxHashes[i]) - if !ok { - //tx is still missing - neededTxs = append(neededTxs, generatedTxHashes[i]) - } - } + for i := 0; i < len(generatedTxHashes); i++ { + _, ok := n.ShardDataPool.RewardTransactions().SearchFirstData(generatedTxHashes[i]) + if !ok { + //tx is still missing + neededTxs = append(neededTxs, generatedTxHashes[i]) + } + } - return neededTxs + return neededTxs } func requestMissingTransactions(n *TestProcessorNode, shardResolver uint32, neededTxs [][]byte) { - txResolver, _ := n.ResolverFinder.CrossShardResolver(procFactory.TransactionTopic, shardResolver) + txResolver, _ := n.ResolverFinder.CrossShardResolver(procFactory.TransactionTopic, shardResolver) - 
for i := 0; i < len(neededTxs); i++ { - _ = txResolver.RequestDataFromHash(neededTxs[i]) - } + for i := 0; i < len(neededTxs); i++ { + _ = txResolver.RequestDataFromHash(neededTxs[i]) + } } func requestMissingRewardTxs(n *TestProcessorNode, shardResolver uint32, neededData [][]byte) { - dataResolver, _ := n.ResolverFinder.CrossShardResolver(procFactory.RewardsTransactionTopic, shardResolver) + dataResolver, _ := n.ResolverFinder.CrossShardResolver(procFactory.RewardsTransactionTopic, shardResolver) - for i := 0; i < len(neededData); i++ { - _ = dataResolver.RequestDataFromHash(neededData[i]) - } + for i := 0; i < len(neededData); i++ { + _ = dataResolver.RequestDataFromHash(neededData[i]) + } } // CreateRequesterDataPool creates a datapool with a mock txPool func CreateRequesterDataPool( - t *testing.T, - recvTxs map[int]map[string]struct{}, - mutRecvTxs *sync.Mutex, - nodeIndex int, - nbShards uint32, + t *testing.T, + recvTxs map[int]map[string]struct{}, + mutRecvTxs *sync.Mutex, + nodeIndex int, + nbShards uint32, ) dataRetriever.PoolsHolder { - //not allowed to request data from the same shard - return CreateTestShardDataPool( - &mock.ShardedDataStub{ - SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { - assert.Fail(t, "same-shard requesters should not be queried") - return nil, false - }, - ShardDataStoreCalled: func(cacheId string) (c storage.Cacher) { - assert.Fail(t, "same-shard requesters should not be queried") - return nil - }, - AddDataCalled: func(key []byte, data interface{}, cacheId string) { - mutRecvTxs.Lock() - defer mutRecvTxs.Unlock() - - txMap := recvTxs[nodeIndex] - if txMap == nil { - txMap = make(map[string]struct{}) - recvTxs[nodeIndex] = txMap - } - - txMap[string(key)] = struct{}{} - }, - RegisterHandlerCalled: func(i func(key []byte)) { - }, - }, - nbShards, - ) + //not allowed to request data from the same shard + return CreateTestShardDataPool( + &mock.ShardedDataStub{ + SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { + assert.Fail(t, "same-shard requesters should not be queried") + return nil, false + }, + ShardDataStoreCalled: func(cacheId string) (c storage.Cacher) { + assert.Fail(t, "same-shard requesters should not be queried") + return nil + }, + AddDataCalled: func(key []byte, data interface{}, cacheId string) { + mutRecvTxs.Lock() + defer mutRecvTxs.Unlock() + + txMap := recvTxs[nodeIndex] + if txMap == nil { + txMap = make(map[string]struct{}) + recvTxs[nodeIndex] = txMap + } + + txMap[string(key)] = struct{}{} + }, + RegisterHandlerCalled: func(i func(key []byte)) { + }, + }, + nbShards, + ) } // CreateResolversDataPool creates a datapool containing a given number of transactions func CreateResolversDataPool( - t *testing.T, - maxTxs int, - senderShardID uint32, - recvShardId uint32, - shardCoordinator sharding.Coordinator, + t *testing.T, + maxTxs int, + senderShardID uint32, + recvShardId uint32, + shardCoordinator sharding.Coordinator, ) (dataRetriever.PoolsHolder, [][]byte) { - txHashes := make([][]byte, maxTxs) + txHashes := make([][]byte, maxTxs) - txPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache}) + txPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache}) - for i := 0; i < maxTxs; i++ { - tx, txHash := generateValidTx(t, shardCoordinator, senderShardID, recvShardId) - cacherIdentifier := process.ShardCacherIdentifier(1, 0) - txPool.AddData(txHash, tx, cacherIdentifier) - txHashes[i] = txHash - } + for 
i := 0; i < maxTxs; i++ { + tx, txHash := generateValidTx(t, shardCoordinator, senderShardID, recvShardId) + cacherIdentifier := process.ShardCacherIdentifier(1, 0) + txPool.AddData(txHash, tx, cacherIdentifier) + txHashes[i] = txHash + } - return CreateTestShardDataPool(txPool, shardCoordinator.NumberOfShards()), txHashes + return CreateTestShardDataPool(txPool, shardCoordinator.NumberOfShards()), txHashes } func generateValidTx( - t *testing.T, - shardCoordinator sharding.Coordinator, - senderShardId uint32, - receiverShardId uint32, + t *testing.T, + shardCoordinator sharding.Coordinator, + senderShardId uint32, + receiverShardId uint32, ) (*transaction.Transaction, []byte) { - skSender, pkSender, _ := GenerateSkAndPkInShard(shardCoordinator, senderShardId) - pkSenderBuff, _ := pkSender.ToByteArray() + skSender, pkSender, _ := GenerateSkAndPkInShard(shardCoordinator, senderShardId) + pkSenderBuff, _ := pkSender.ToByteArray() - _, pkRecv, _ := GenerateSkAndPkInShard(shardCoordinator, receiverShardId) - pkRecvBuff, _ := pkRecv.ToByteArray() + _, pkRecv, _ := GenerateSkAndPkInShard(shardCoordinator, receiverShardId) + pkRecvBuff, _ := pkRecv.ToByteArray() - accnts, _, _ := CreateAccountsDB(factory.UserAccount) - addrSender, _ := TestAddressConverter.CreateAddressFromPublicKeyBytes(pkSenderBuff) - _, _ = accnts.GetAccountWithJournal(addrSender) - _, _ = accnts.Commit() + accnts, _, _ := CreateAccountsDB(factory.UserAccount) + addrSender, _ := TestAddressConverter.CreateAddressFromPublicKeyBytes(pkSenderBuff) + _, _ = accnts.GetAccountWithJournal(addrSender) + _, _ = accnts.Commit() - mockNode, _ := node.NewNode( - node.WithMarshalizer(TestMarshalizer), - node.WithHasher(TestHasher), - node.WithAddressConverter(TestAddressConverter), - node.WithKeyGen(signing.NewKeyGenerator(kyber.NewBlakeSHA256Ed25519())), - node.WithTxSingleSigner(&singlesig.SchnorrSigner{}), - node.WithTxSignPrivKey(skSender), - node.WithTxSignPubKey(pkSender), - node.WithAccountsAdapter(accnts), - ) + mockNode, _ := node.NewNode( + node.WithMarshalizer(TestMarshalizer), + node.WithHasher(TestHasher), + node.WithAddressConverter(TestAddressConverter), + node.WithKeyGen(signing.NewKeyGenerator(kyber.NewBlakeSHA256Ed25519())), + node.WithTxSingleSigner(&singlesig.SchnorrSigner{}), + node.WithTxSignPrivKey(skSender), + node.WithTxSignPubKey(pkSender), + node.WithAccountsAdapter(accnts), + ) - tx, err := mockNode.GenerateTransaction( - hex.EncodeToString(pkSenderBuff), - hex.EncodeToString(pkRecvBuff), - big.NewInt(1), - "", - ) - assert.Nil(t, err) + tx, err := mockNode.GenerateTransaction( + hex.EncodeToString(pkSenderBuff), + hex.EncodeToString(pkRecvBuff), + big.NewInt(1), + "", + ) + assert.Nil(t, err) - txBuff, _ := TestMarshalizer.Marshal(tx) - txHash := TestHasher.Compute(string(txBuff)) + txBuff, _ := TestMarshalizer.Marshal(tx) + txHash := TestHasher.Compute(string(txBuff)) - return tx, txHash + return tx, txHash } // GetNumTxsWithDst returns the total number of transactions that have a certain destination shard func GetNumTxsWithDst(dstShardId uint32, dataPool dataRetriever.PoolsHolder, nrShards uint32) int { - txPool := dataPool.Transactions() - if txPool == nil { - return 0 - } + txPool := dataPool.Transactions() + if txPool == nil { + return 0 + } - sumTxs := 0 + sumTxs := 0 - for i := uint32(0); i < nrShards; i++ { - strCache := process.ShardCacherIdentifier(i, dstShardId) - txStore := txPool.ShardDataStore(strCache) - if txStore == nil { - continue - } - sumTxs += txStore.Len() - } + for i := uint32(0); i < 
nrShards; i++ { + strCache := process.ShardCacherIdentifier(i, dstShardId) + txStore := txPool.ShardDataStore(strCache) + if txStore == nil { + continue + } + sumTxs += txStore.Len() + } - return sumTxs + return sumTxs } // ProposeAndSyncBlocks proposes and syncs blocks until all transaction pools are empty func ProposeAndSyncBlocks( - t *testing.T, - nodes []*TestProcessorNode, - idxProposers []int, - round uint64, - nonce uint64, + t *testing.T, + nodes []*TestProcessorNode, + idxProposers []int, + round uint64, + nonce uint64, ) (uint64, uint64) { - // if there are many transactions, they might not fit into the block body in only one round - for { - numTxsInPool := 0 - round, nonce = ProposeAndSyncOneBlock(t, nodes, idxProposers, round, nonce) + // if there are many transactions, they might not fit into the block body in only one round + for { + numTxsInPool := 0 + round, nonce = ProposeAndSyncOneBlock(t, nodes, idxProposers, round, nonce) - for _, idProposer := range idxProposers { - proposerNode := nodes[idProposer] - numTxsInPool = GetNumTxsWithDst( - proposerNode.ShardCoordinator.SelfId(), - proposerNode.ShardDataPool, - proposerNode.ShardCoordinator.NumberOfShards(), - ) + for _, idProposer := range idxProposers { + proposerNode := nodes[idProposer] + numTxsInPool = GetNumTxsWithDst( + proposerNode.ShardCoordinator.SelfId(), + proposerNode.ShardDataPool, + proposerNode.ShardCoordinator.NumberOfShards(), + ) - if numTxsInPool > 0 { - break - } - } + if numTxsInPool > 0 { + break + } + } - if numTxsInPool == 0 { - break - } - } + if numTxsInPool == 0 { + break + } + } - if nodes[0].ShardCoordinator.NumberOfShards() == 1 { - return round, nonce - } + if nodes[0].ShardCoordinator.NumberOfShards() == 1 { + return round, nonce + } - // cross shard smart contract call is first processed at sender shard, notarized by metachain, processed at - // shard with smart contract, smart contract result is notarized by metachain, then finally processed at the - // sender shard - numberToPropagateToEveryShard := 5 - for i := 0; i < numberToPropagateToEveryShard; i++ { - round, nonce = ProposeAndSyncOneBlock(t, nodes, idxProposers, round, nonce) - } + // cross shard smart contract call is first processed at sender shard, notarized by metachain, processed at + // shard with smart contract, smart contract result is notarized by metachain, then finally processed at the + // sender shard + numberToPropagateToEveryShard := 5 + for i := 0; i < numberToPropagateToEveryShard; i++ { + round, nonce = ProposeAndSyncOneBlock(t, nodes, idxProposers, round, nonce) + } - return round, nonce + return round, nonce } // ProposeAndSyncOneBlock proposes a block, syncs the block and then increments the round func ProposeAndSyncOneBlock( - t *testing.T, - nodes []*TestProcessorNode, - idxProposers []int, - round uint64, - nonce uint64, + t *testing.T, + nodes []*TestProcessorNode, + idxProposers []int, + round uint64, + nonce uint64, ) (uint64, uint64) { - ProposeBlock(nodes, idxProposers, round, nonce) - SyncBlock(t, nodes, idxProposers, round) - round = IncrementAndPrintRound(round) - nonce++ + ProposeBlock(nodes, idxProposers, round, nonce) + SyncBlock(t, nodes, idxProposers, round) + round = IncrementAndPrintRound(round) + nonce++ - return round, nonce + return round, nonce } // PubKeysMapFromKeysMap returns a map of public keys per shard from the key pairs per shard map. 
func PubKeysMapFromKeysMap(keyPairMap map[uint32][]*TestKeyPair) map[uint32][]string { - keysMap := make(map[uint32][]string, 0) + keysMap := make(map[uint32][]string, 0) - for shardId, pairList := range keyPairMap { - shardKeys := make([]string, len(pairList)) - for i, pair := range pairList { - b, _ := pair.Pk.ToByteArray() - shardKeys[i] = string(b) - } - keysMap[shardId] = shardKeys - } + for shardId, pairList := range keyPairMap { + shardKeys := make([]string, len(pairList)) + for i, pair := range pairList { + b, _ := pair.Pk.ToByteArray() + shardKeys[i] = string(b) + } + keysMap[shardId] = shardKeys + } - return keysMap + return keysMap } // GenValidatorsFromPubKeys generates a map of validators per shard out of public keys map func GenValidatorsFromPubKeys(pubKeysMap map[uint32][]string) map[uint32][]sharding.Validator { - validatorsMap := make(map[uint32][]sharding.Validator) + validatorsMap := make(map[uint32][]sharding.Validator) - for shardId, shardNodesPks := range pubKeysMap { - shardValidators := make([]sharding.Validator, 0) - for i := 0; i < len(shardNodesPks); i++ { - address := []byte(shardNodesPks[i][:32]) - v, _ := sharding.NewValidator(big.NewInt(0), 1, []byte(shardNodesPks[i]), address) - shardValidators = append(shardValidators, v) - } - validatorsMap[shardId] = shardValidators - } + for shardId, shardNodesPks := range pubKeysMap { + shardValidators := make([]sharding.Validator, 0) + for i := 0; i < len(shardNodesPks); i++ { + address := []byte(shardNodesPks[i][:32]) + v, _ := sharding.NewValidator(big.NewInt(0), 1, []byte(shardNodesPks[i]), address) + shardValidators = append(shardValidators, v) + } + validatorsMap[shardId] = shardValidators + } - return validatorsMap + return validatorsMap } // CreateCryptoParams generates the crypto parameters (key pairs, key generator and suite) for multiple nodes func CreateCryptoParams(nodesPerShard int, nbMetaNodes int, nbShards uint32) *CryptoParams { - suite := kyber.NewSuitePairingBn256() - singleSigner := &singlesig.SchnorrSigner{} - keyGen := signing.NewKeyGenerator(suite) - - keysMap := make(map[uint32][]*TestKeyPair) - keyPairs := make([]*TestKeyPair, nodesPerShard) - for shardId := uint32(0); shardId < nbShards; shardId++ { - for n := 0; n < nodesPerShard; n++ { - kp := &TestKeyPair{} - kp.Sk, kp.Pk = keyGen.GeneratePair() - keyPairs[n] = kp - } - keysMap[shardId] = keyPairs - } - - keyPairs = make([]*TestKeyPair, nbMetaNodes) - for n := 0; n < nbMetaNodes; n++ { - kp := &TestKeyPair{} - kp.Sk, kp.Pk = keyGen.GeneratePair() - keyPairs[n] = kp - } - keysMap[sharding.MetachainShardId] = keyPairs - - params := &CryptoParams{ - Keys: keysMap, - KeyGen: keyGen, - SingleSigner: singleSigner, - } - - return params + suite := kyber.NewSuitePairingBn256() + singleSigner := &singlesig.SchnorrSigner{} + keyGen := signing.NewKeyGenerator(suite) + + keysMap := make(map[uint32][]*TestKeyPair) + keyPairs := make([]*TestKeyPair, nodesPerShard) + for shardId := uint32(0); shardId < nbShards; shardId++ { + for n := 0; n < nodesPerShard; n++ { + kp := &TestKeyPair{} + kp.Sk, kp.Pk = keyGen.GeneratePair() + keyPairs[n] = kp + } + keysMap[shardId] = keyPairs + } + + keyPairs = make([]*TestKeyPair, nbMetaNodes) + for n := 0; n < nbMetaNodes; n++ { + kp := &TestKeyPair{} + kp.Sk, kp.Pk = keyGen.GeneratePair() + keyPairs[n] = kp + } + keysMap[sharding.MetachainShardId] = keyPairs + + params := &CryptoParams{ + Keys: keysMap, + KeyGen: keyGen, + SingleSigner: singleSigner, + } + + return params } diff --git 
a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index f742a7e7950..a8b6121a2e1 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -1,47 +1,47 @@ package integrationTests import ( - "context" - "encoding/hex" - "fmt" - "sync/atomic" - "time" - - "github.com/ElrondNetwork/elrond-go/consensus" - "github.com/ElrondNetwork/elrond-go/consensus/spos/sposFactory" - "github.com/ElrondNetwork/elrond-go/core" - "github.com/ElrondNetwork/elrond-go/core/partitioning" - "github.com/ElrondNetwork/elrond-go/crypto" - "github.com/ElrondNetwork/elrond-go/data" - dataBlock "github.com/ElrondNetwork/elrond-go/data/block" - "github.com/ElrondNetwork/elrond-go/data/state" - "github.com/ElrondNetwork/elrond-go/data/state/addressConverters" - dataTransaction "github.com/ElrondNetwork/elrond-go/data/transaction" - "github.com/ElrondNetwork/elrond-go/data/typeConverters/uint64ByteSlice" - "github.com/ElrondNetwork/elrond-go/dataRetriever" - "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/containers" - metafactoryDataRetriever "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/metachain" - factoryDataRetriever "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/shard" - "github.com/ElrondNetwork/elrond-go/dataRetriever/requestHandlers" - "github.com/ElrondNetwork/elrond-go/hashing/sha256" - "github.com/ElrondNetwork/elrond-go/integrationTests/mock" - "github.com/ElrondNetwork/elrond-go/marshal" - "github.com/ElrondNetwork/elrond-go/node" - "github.com/ElrondNetwork/elrond-go/node/external" - "github.com/ElrondNetwork/elrond-go/p2p" - "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/process/block" - "github.com/ElrondNetwork/elrond-go/process/coordinator" - "github.com/ElrondNetwork/elrond-go/process/factory" - metaProcess "github.com/ElrondNetwork/elrond-go/process/factory/metachain" - "github.com/ElrondNetwork/elrond-go/process/factory/shard" - "github.com/ElrondNetwork/elrond-go/process/rewardTransaction" - "github.com/ElrondNetwork/elrond-go/process/smartContract" - "github.com/ElrondNetwork/elrond-go/process/smartContract/hooks" - "github.com/ElrondNetwork/elrond-go/process/transaction" - "github.com/ElrondNetwork/elrond-go/sharding" - vmcommon "github.com/ElrondNetwork/elrond-vm-common" - "github.com/pkg/errors" + "context" + "encoding/hex" + "fmt" + "sync/atomic" + "time" + + "github.com/ElrondNetwork/elrond-go/consensus" + "github.com/ElrondNetwork/elrond-go/consensus/spos/sposFactory" + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/core/partitioning" + "github.com/ElrondNetwork/elrond-go/crypto" + "github.com/ElrondNetwork/elrond-go/data" + dataBlock "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/data/state/addressConverters" + dataTransaction "github.com/ElrondNetwork/elrond-go/data/transaction" + "github.com/ElrondNetwork/elrond-go/data/typeConverters/uint64ByteSlice" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/containers" + metafactoryDataRetriever "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/metachain" + factoryDataRetriever "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/shard" + "github.com/ElrondNetwork/elrond-go/dataRetriever/requestHandlers" + "github.com/ElrondNetwork/elrond-go/hashing/sha256" + "github.com/ElrondNetwork/elrond-go/integrationTests/mock" 
+ "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/node" + "github.com/ElrondNetwork/elrond-go/node/external" + "github.com/ElrondNetwork/elrond-go/p2p" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/block" + "github.com/ElrondNetwork/elrond-go/process/coordinator" + "github.com/ElrondNetwork/elrond-go/process/factory" + metaProcess "github.com/ElrondNetwork/elrond-go/process/factory/metachain" + "github.com/ElrondNetwork/elrond-go/process/factory/shard" + "github.com/ElrondNetwork/elrond-go/process/rewardTransaction" + "github.com/ElrondNetwork/elrond-go/process/smartContract" + "github.com/ElrondNetwork/elrond-go/process/smartContract/hooks" + "github.com/ElrondNetwork/elrond-go/process/transaction" + "github.com/ElrondNetwork/elrond-go/sharding" + vmcommon "github.com/ElrondNetwork/elrond-vm-common" + "github.com/pkg/errors" ) // TestHasher represents a Sha256 hasher @@ -61,769 +61,769 @@ var TestUint64Converter = uint64ByteSlice.NewBigEndianConverter() // TestKeyPair holds a pair of private/public Keys type TestKeyPair struct { - Sk crypto.PrivateKey - Pk crypto.PublicKey + Sk crypto.PrivateKey + Pk crypto.PublicKey } //CryptoParams holds crypto parametres type CryptoParams struct { - KeyGen crypto.KeyGenerator - Keys map[uint32][]*TestKeyPair - SingleSigner crypto.SingleSigner + KeyGen crypto.KeyGenerator + Keys map[uint32][]*TestKeyPair + SingleSigner crypto.SingleSigner } // TestProcessorNode represents a container type of class used in integration tests // with all its fields exported type TestProcessorNode struct { - ShardCoordinator sharding.Coordinator - NodesCoordinator sharding.NodesCoordinator - SpecialAddressHandler process.SpecialAddressHandler - Messenger p2p.Messenger - - OwnAccount *TestWalletAccount - NodeKeys *TestKeyPair - - ShardDataPool dataRetriever.PoolsHolder - MetaDataPool dataRetriever.MetaPoolsHolder - Storage dataRetriever.StorageService - AccntState state.AccountsAdapter - BlockChain data.ChainHandler - GenesisBlocks map[uint32]data.HeaderHandler - - InterceptorsContainer process.InterceptorsContainer - ResolversContainer dataRetriever.ResolversContainer - ResolverFinder dataRetriever.ResolversFinder - RequestHandler process.RequestHandler - - InterimProcContainer process.IntermediateProcessorContainer - TxProcessor process.TransactionProcessor - TxCoordinator process.TransactionCoordinator - ScrForwarder process.IntermediateTransactionHandler - VmProcessor vmcommon.VMExecutionHandler - VmDataGetter vmcommon.VMExecutionHandler - BlockchainHook vmcommon.BlockchainHook - ArgsParser process.ArgumentsParser - ScProcessor process.SmartContractProcessor - RewardsProcessor process.RewardTransactionProcessor - PreProcessorsContainer process.PreProcessorsContainer - - ForkDetector process.ForkDetector - BlockTracker process.BlocksTracker - BlockProcessor process.BlockProcessor - BroadcastMessenger consensus.BroadcastMessenger - - MultiSigner crypto.MultiSigner - - //Node is used to call the functionality already implemented in it - Node *node.Node - ScDataGetter external.ScDataGetter - - CounterHdrRecv int32 - CounterMbRecv int32 - CounterTxRecv int32 - CounterMetaRcv int32 + ShardCoordinator sharding.Coordinator + NodesCoordinator sharding.NodesCoordinator + SpecialAddressHandler process.SpecialAddressHandler + Messenger p2p.Messenger + + OwnAccount *TestWalletAccount + NodeKeys *TestKeyPair + + ShardDataPool dataRetriever.PoolsHolder + MetaDataPool dataRetriever.MetaPoolsHolder + 
Storage dataRetriever.StorageService + AccntState state.AccountsAdapter + BlockChain data.ChainHandler + GenesisBlocks map[uint32]data.HeaderHandler + + InterceptorsContainer process.InterceptorsContainer + ResolversContainer dataRetriever.ResolversContainer + ResolverFinder dataRetriever.ResolversFinder + RequestHandler process.RequestHandler + + InterimProcContainer process.IntermediateProcessorContainer + TxProcessor process.TransactionProcessor + TxCoordinator process.TransactionCoordinator + ScrForwarder process.IntermediateTransactionHandler + VmProcessor vmcommon.VMExecutionHandler + VmDataGetter vmcommon.VMExecutionHandler + BlockchainHook vmcommon.BlockchainHook + ArgsParser process.ArgumentsParser + ScProcessor process.SmartContractProcessor + RewardsProcessor process.RewardTransactionProcessor + PreProcessorsContainer process.PreProcessorsContainer + + ForkDetector process.ForkDetector + BlockTracker process.BlocksTracker + BlockProcessor process.BlockProcessor + BroadcastMessenger consensus.BroadcastMessenger + + MultiSigner crypto.MultiSigner + + //Node is used to call the functionality already implemented in it + Node *node.Node + ScDataGetter external.ScDataGetter + + CounterHdrRecv int32 + CounterMbRecv int32 + CounterTxRecv int32 + CounterMetaRcv int32 } // NewTestProcessorNode returns a new TestProcessorNode instance func NewTestProcessorNode( - maxShards uint32, - nodeShardId uint32, - txSignPrivKeyShardId uint32, - initialNodeAddr string, + maxShards uint32, + nodeShardId uint32, + txSignPrivKeyShardId uint32, + initialNodeAddr string, ) *TestProcessorNode { - shardCoordinator, _ := sharding.NewMultiShardCoordinator(maxShards, nodeShardId) - nodesCoordinator := &mock.NodesCoordinatorMock{} - kg := &mock.KeyGenMock{} - sk, pk := kg.GeneratePair() - - messenger := CreateMessengerWithKadDht(context.Background(), initialNodeAddr) - tpn := &TestProcessorNode{ - ShardCoordinator: shardCoordinator, - Messenger: messenger, - NodesCoordinator: nodesCoordinator, - } - - tpn.NodeKeys = &TestKeyPair{ - Sk: sk, - Pk: pk, - } - tpn.MultiSigner = TestMultiSig - tpn.OwnAccount = CreateTestWalletAccount(shardCoordinator, txSignPrivKeyShardId) - tpn.initDataPools() - tpn.initTestNode() - - return tpn + shardCoordinator, _ := sharding.NewMultiShardCoordinator(maxShards, nodeShardId) + nodesCoordinator := &mock.NodesCoordinatorMock{} + kg := &mock.KeyGenMock{} + sk, pk := kg.GeneratePair() + + messenger := CreateMessengerWithKadDht(context.Background(), initialNodeAddr) + tpn := &TestProcessorNode{ + ShardCoordinator: shardCoordinator, + Messenger: messenger, + NodesCoordinator: nodesCoordinator, + } + + tpn.NodeKeys = &TestKeyPair{ + Sk: sk, + Pk: pk, + } + tpn.MultiSigner = TestMultiSig + tpn.OwnAccount = CreateTestWalletAccount(shardCoordinator, txSignPrivKeyShardId) + tpn.initDataPools() + tpn.initTestNode() + + return tpn } // NewTestProcessorNodeWithCustomDataPool returns a new TestProcessorNode instance with the given data pool func NewTestProcessorNodeWithCustomDataPool(maxShards uint32, nodeShardId uint32, txSignPrivKeyShardId uint32, initialNodeAddr string, dPool dataRetriever.PoolsHolder) *TestProcessorNode { - shardCoordinator, _ := sharding.NewMultiShardCoordinator(maxShards, nodeShardId) - - messenger := CreateMessengerWithKadDht(context.Background(), initialNodeAddr) - nodesCoordinator := &mock.NodesCoordinatorMock{} - kg := &mock.KeyGenMock{} - sk, pk := kg.GeneratePair() - - tpn := &TestProcessorNode{ - ShardCoordinator: shardCoordinator, - Messenger: messenger, - 
NodesCoordinator: nodesCoordinator, - } - - tpn.NodeKeys = &TestKeyPair{ - Sk: sk, - Pk: pk, - } - tpn.MultiSigner = TestMultiSig - tpn.OwnAccount = CreateTestWalletAccount(shardCoordinator, txSignPrivKeyShardId) - if tpn.ShardCoordinator.SelfId() != sharding.MetachainShardId { - tpn.ShardDataPool = dPool - } else { - tpn.initDataPools() - } - tpn.initTestNode() - - return tpn + shardCoordinator, _ := sharding.NewMultiShardCoordinator(maxShards, nodeShardId) + + messenger := CreateMessengerWithKadDht(context.Background(), initialNodeAddr) + nodesCoordinator := &mock.NodesCoordinatorMock{} + kg := &mock.KeyGenMock{} + sk, pk := kg.GeneratePair() + + tpn := &TestProcessorNode{ + ShardCoordinator: shardCoordinator, + Messenger: messenger, + NodesCoordinator: nodesCoordinator, + } + + tpn.NodeKeys = &TestKeyPair{ + Sk: sk, + Pk: pk, + } + tpn.MultiSigner = TestMultiSig + tpn.OwnAccount = CreateTestWalletAccount(shardCoordinator, txSignPrivKeyShardId) + if tpn.ShardCoordinator.SelfId() != sharding.MetachainShardId { + tpn.ShardDataPool = dPool + } else { + tpn.initDataPools() + } + tpn.initTestNode() + + return tpn } func (tpn *TestProcessorNode) initTestNode() { - tpn.SpecialAddressHandler = &mock.SpecialAddressHandlerMock{ - ShardCoordinator: tpn.ShardCoordinator, - AdrConv: TestAddressConverter, - } - tpn.initStorage() - tpn.AccntState, _, _ = CreateAccountsDB(0) - tpn.initChainHandler() - tpn.GenesisBlocks = CreateGenesisBlocks(tpn.ShardCoordinator) - tpn.initInterceptors() - tpn.initResolvers() - tpn.initInnerProcessors() - tpn.initBlockProcessor() - tpn.BroadcastMessenger, _ = sposFactory.GetBroadcastMessenger( - TestMarshalizer, - tpn.Messenger, - tpn.ShardCoordinator, - tpn.OwnAccount.SkTxSign, - tpn.OwnAccount.SingleSigner, - ) - tpn.setGenesisBlock() - tpn.initNode() - tpn.ScDataGetter, _ = smartContract.NewSCDataGetter(tpn.VmDataGetter) - tpn.addHandlersForCounters() + tpn.SpecialAddressHandler = &mock.SpecialAddressHandlerMock{ + ShardCoordinator: tpn.ShardCoordinator, + AdrConv: TestAddressConverter, + } + tpn.initStorage() + tpn.AccntState, _, _ = CreateAccountsDB(0) + tpn.initChainHandler() + tpn.GenesisBlocks = CreateGenesisBlocks(tpn.ShardCoordinator) + tpn.initInterceptors() + tpn.initResolvers() + tpn.initInnerProcessors() + tpn.initBlockProcessor() + tpn.BroadcastMessenger, _ = sposFactory.GetBroadcastMessenger( + TestMarshalizer, + tpn.Messenger, + tpn.ShardCoordinator, + tpn.OwnAccount.SkTxSign, + tpn.OwnAccount.SingleSigner, + ) + tpn.setGenesisBlock() + tpn.initNode() + tpn.ScDataGetter, _ = smartContract.NewSCDataGetter(tpn.VmDataGetter) + tpn.addHandlersForCounters() } func (tpn *TestProcessorNode) initDataPools() { - if tpn.ShardCoordinator.SelfId() == sharding.MetachainShardId { - tpn.MetaDataPool = CreateTestMetaDataPool() - } else { - tpn.ShardDataPool = CreateTestShardDataPool(nil, tpn.ShardCoordinator.NumberOfShards()) - } + if tpn.ShardCoordinator.SelfId() == sharding.MetachainShardId { + tpn.MetaDataPool = CreateTestMetaDataPool() + } else { + tpn.ShardDataPool = CreateTestShardDataPool(nil, tpn.ShardCoordinator.NumberOfShards()) + } } func (tpn *TestProcessorNode) initStorage() { - if tpn.ShardCoordinator.SelfId() == sharding.MetachainShardId { - tpn.Storage = CreateMetaStore(tpn.ShardCoordinator) - } else { - tpn.Storage = CreateShardStore(tpn.ShardCoordinator.NumberOfShards()) - } + if tpn.ShardCoordinator.SelfId() == sharding.MetachainShardId { + tpn.Storage = CreateMetaStore(tpn.ShardCoordinator) + } else { + tpn.Storage = 
CreateShardStore(tpn.ShardCoordinator.NumberOfShards()) + } } func (tpn *TestProcessorNode) initChainHandler() { - if tpn.ShardCoordinator.SelfId() == sharding.MetachainShardId { - tpn.BlockChain = CreateMetaChain() - } else { - tpn.BlockChain = CreateShardChain() - } + if tpn.ShardCoordinator.SelfId() == sharding.MetachainShardId { + tpn.BlockChain = CreateMetaChain() + } else { + tpn.BlockChain = CreateShardChain() + } } func (tpn *TestProcessorNode) initInterceptors() { - var err error - if tpn.ShardCoordinator.SelfId() == sharding.MetachainShardId { - interceptorContainerFactory, _ := metaProcess.NewInterceptorsContainerFactory( - tpn.ShardCoordinator, - tpn.NodesCoordinator, - tpn.Messenger, - tpn.Storage, - TestMarshalizer, - TestHasher, - TestMultiSig, - tpn.MetaDataPool, - ) - - tpn.InterceptorsContainer, err = interceptorContainerFactory.Create() - if err != nil { - fmt.Println(err.Error()) - } - } else { - interceptorContainerFactory, _ := shard.NewInterceptorsContainerFactory( - tpn.ShardCoordinator, - tpn.NodesCoordinator, - tpn.Messenger, - tpn.Storage, - TestMarshalizer, - TestHasher, - tpn.OwnAccount.KeygenTxSign, - tpn.OwnAccount.SingleSigner, - TestMultiSig, - tpn.ShardDataPool, - TestAddressConverter, - ) - - tpn.InterceptorsContainer, err = interceptorContainerFactory.Create() - if err != nil { - fmt.Println(err.Error()) - } - } + var err error + if tpn.ShardCoordinator.SelfId() == sharding.MetachainShardId { + interceptorContainerFactory, _ := metaProcess.NewInterceptorsContainerFactory( + tpn.ShardCoordinator, + tpn.NodesCoordinator, + tpn.Messenger, + tpn.Storage, + TestMarshalizer, + TestHasher, + TestMultiSig, + tpn.MetaDataPool, + ) + + tpn.InterceptorsContainer, err = interceptorContainerFactory.Create() + if err != nil { + fmt.Println(err.Error()) + } + } else { + interceptorContainerFactory, _ := shard.NewInterceptorsContainerFactory( + tpn.ShardCoordinator, + tpn.NodesCoordinator, + tpn.Messenger, + tpn.Storage, + TestMarshalizer, + TestHasher, + tpn.OwnAccount.KeygenTxSign, + tpn.OwnAccount.SingleSigner, + TestMultiSig, + tpn.ShardDataPool, + TestAddressConverter, + ) + + tpn.InterceptorsContainer, err = interceptorContainerFactory.Create() + if err != nil { + fmt.Println(err.Error()) + } + } } func (tpn *TestProcessorNode) initResolvers() { - dataPacker, _ := partitioning.NewSizeDataPacker(TestMarshalizer) - - if tpn.ShardCoordinator.SelfId() == sharding.MetachainShardId { - resolversContainerFactory, _ := metafactoryDataRetriever.NewResolversContainerFactory( - tpn.ShardCoordinator, - tpn.Messenger, - tpn.Storage, - TestMarshalizer, - tpn.MetaDataPool, - TestUint64Converter, - ) - - tpn.ResolversContainer, _ = resolversContainerFactory.Create() - tpn.ResolverFinder, _ = containers.NewResolversFinder(tpn.ResolversContainer, tpn.ShardCoordinator) - tpn.RequestHandler, _ = requestHandlers.NewMetaResolverRequestHandler( - tpn.ResolverFinder, - factory.ShardHeadersForMetachainTopic, - ) - } else { - resolversContainerFactory, _ := factoryDataRetriever.NewResolversContainerFactory( - tpn.ShardCoordinator, - tpn.Messenger, - tpn.Storage, - TestMarshalizer, - tpn.ShardDataPool, - TestUint64Converter, - dataPacker, - ) - - tpn.ResolversContainer, _ = resolversContainerFactory.Create() - tpn.ResolverFinder, _ = containers.NewResolversFinder(tpn.ResolversContainer, tpn.ShardCoordinator) - tpn.RequestHandler, _ = requestHandlers.NewShardResolverRequestHandler( - tpn.ResolverFinder, - factory.TransactionTopic, - factory.UnsignedTransactionTopic, - 
factory.RewardsTransactionTopic, - factory.MiniBlocksTopic, - factory.MetachainBlocksTopic, - 100, - ) - } + dataPacker, _ := partitioning.NewSizeDataPacker(TestMarshalizer) + + if tpn.ShardCoordinator.SelfId() == sharding.MetachainShardId { + resolversContainerFactory, _ := metafactoryDataRetriever.NewResolversContainerFactory( + tpn.ShardCoordinator, + tpn.Messenger, + tpn.Storage, + TestMarshalizer, + tpn.MetaDataPool, + TestUint64Converter, + ) + + tpn.ResolversContainer, _ = resolversContainerFactory.Create() + tpn.ResolverFinder, _ = containers.NewResolversFinder(tpn.ResolversContainer, tpn.ShardCoordinator) + tpn.RequestHandler, _ = requestHandlers.NewMetaResolverRequestHandler( + tpn.ResolverFinder, + factory.ShardHeadersForMetachainTopic, + ) + } else { + resolversContainerFactory, _ := factoryDataRetriever.NewResolversContainerFactory( + tpn.ShardCoordinator, + tpn.Messenger, + tpn.Storage, + TestMarshalizer, + tpn.ShardDataPool, + TestUint64Converter, + dataPacker, + ) + + tpn.ResolversContainer, _ = resolversContainerFactory.Create() + tpn.ResolverFinder, _ = containers.NewResolversFinder(tpn.ResolversContainer, tpn.ShardCoordinator) + tpn.RequestHandler, _ = requestHandlers.NewShardResolverRequestHandler( + tpn.ResolverFinder, + factory.TransactionTopic, + factory.UnsignedTransactionTopic, + factory.RewardsTransactionTopic, + factory.MiniBlocksTopic, + factory.MetachainBlocksTopic, + 100, + ) + } } func (tpn *TestProcessorNode) initInnerProcessors() { - if tpn.ShardCoordinator.SelfId() == sharding.MetachainShardId { - return - } - - interimProcFactory, _ := shard.NewIntermediateProcessorsContainerFactory( - tpn.ShardCoordinator, - TestMarshalizer, - TestHasher, - TestAddressConverter, - tpn.SpecialAddressHandler, - tpn.Storage, - tpn.ShardDataPool, - ) - - tpn.InterimProcContainer, _ = interimProcFactory.Create() - tpn.ScrForwarder, _ = tpn.InterimProcContainer.Get(dataBlock.SmartContractResultBlock) - rewardsInter, _ := tpn.InterimProcContainer.Get(dataBlock.RewardsBlock) - rewardsHandler, _ := rewardsInter.(process.TransactionFeeHandler) - - tpn.RewardsProcessor, _ = rewardTransaction.NewRewardTxProcessor( - tpn.AccntState, - TestAddressConverter, - tpn.ShardCoordinator, - rewardsInter, - ) - - tpn.VmProcessor, tpn.BlockchainHook = CreateIeleVMAndBlockchainHook(tpn.AccntState) - tpn.VmDataGetter, _ = CreateIeleVMAndBlockchainHook(tpn.AccntState) - - vmContainer := &mock.VMContainerMock{ - GetCalled: func(key []byte) (handler vmcommon.VMExecutionHandler, e error) { - return tpn.VmProcessor, nil - }} - - tpn.ArgsParser, _ = smartContract.NewAtArgumentParser() - tpn.ScProcessor, _ = smartContract.NewSmartContractProcessor( - vmContainer, - tpn.ArgsParser, - TestHasher, - TestMarshalizer, - tpn.AccntState, - tpn.BlockchainHook.(*hooks.VMAccountsDB), - TestAddressConverter, - tpn.ShardCoordinator, - tpn.ScrForwarder, - rewardsHandler, - ) - - txTypeHandler, _ := coordinator.NewTxTypeHandler(TestAddressConverter, tpn.ShardCoordinator, tpn.AccntState) - - tpn.TxProcessor, _ = transaction.NewTxProcessor( - tpn.AccntState, - TestHasher, - TestAddressConverter, - TestMarshalizer, - tpn.ShardCoordinator, - tpn.ScProcessor, - rewardsHandler, - txTypeHandler, - ) - - fact, _ := shard.NewPreProcessorsContainerFactory( - tpn.ShardCoordinator, - tpn.Storage, - TestMarshalizer, - TestHasher, - tpn.ShardDataPool, - TestAddressConverter, - tpn.AccntState, - tpn.RequestHandler, - tpn.TxProcessor, - tpn.ScProcessor, - tpn.ScProcessor.(process.SmartContractResultProcessor), - 
tpn.RewardsProcessor, - ) - tpn.PreProcessorsContainer, _ = fact.Create() - - tpn.TxCoordinator, _ = coordinator.NewTransactionCoordinator( - tpn.ShardCoordinator, - tpn.AccntState, - tpn.ShardDataPool, - tpn.RequestHandler, - tpn.PreProcessorsContainer, - tpn.InterimProcContainer, - ) + if tpn.ShardCoordinator.SelfId() == sharding.MetachainShardId { + return + } + + interimProcFactory, _ := shard.NewIntermediateProcessorsContainerFactory( + tpn.ShardCoordinator, + TestMarshalizer, + TestHasher, + TestAddressConverter, + tpn.SpecialAddressHandler, + tpn.Storage, + tpn.ShardDataPool, + ) + + tpn.InterimProcContainer, _ = interimProcFactory.Create() + tpn.ScrForwarder, _ = tpn.InterimProcContainer.Get(dataBlock.SmartContractResultBlock) + rewardsInter, _ := tpn.InterimProcContainer.Get(dataBlock.RewardsBlock) + rewardsHandler, _ := rewardsInter.(process.TransactionFeeHandler) + + tpn.RewardsProcessor, _ = rewardTransaction.NewRewardTxProcessor( + tpn.AccntState, + TestAddressConverter, + tpn.ShardCoordinator, + rewardsInter, + ) + + tpn.VmProcessor, tpn.BlockchainHook = CreateIeleVMAndBlockchainHook(tpn.AccntState) + tpn.VmDataGetter, _ = CreateIeleVMAndBlockchainHook(tpn.AccntState) + + vmContainer := &mock.VMContainerMock{ + GetCalled: func(key []byte) (handler vmcommon.VMExecutionHandler, e error) { + return tpn.VmProcessor, nil + }} + + tpn.ArgsParser, _ = smartContract.NewAtArgumentParser() + tpn.ScProcessor, _ = smartContract.NewSmartContractProcessor( + vmContainer, + tpn.ArgsParser, + TestHasher, + TestMarshalizer, + tpn.AccntState, + tpn.BlockchainHook.(*hooks.VMAccountsDB), + TestAddressConverter, + tpn.ShardCoordinator, + tpn.ScrForwarder, + rewardsHandler, + ) + + txTypeHandler, _ := coordinator.NewTxTypeHandler(TestAddressConverter, tpn.ShardCoordinator, tpn.AccntState) + + tpn.TxProcessor, _ = transaction.NewTxProcessor( + tpn.AccntState, + TestHasher, + TestAddressConverter, + TestMarshalizer, + tpn.ShardCoordinator, + tpn.ScProcessor, + rewardsHandler, + txTypeHandler, + ) + + fact, _ := shard.NewPreProcessorsContainerFactory( + tpn.ShardCoordinator, + tpn.Storage, + TestMarshalizer, + TestHasher, + tpn.ShardDataPool, + TestAddressConverter, + tpn.AccntState, + tpn.RequestHandler, + tpn.TxProcessor, + tpn.ScProcessor, + tpn.ScProcessor.(process.SmartContractResultProcessor), + tpn.RewardsProcessor, + ) + tpn.PreProcessorsContainer, _ = fact.Create() + + tpn.TxCoordinator, _ = coordinator.NewTransactionCoordinator( + tpn.ShardCoordinator, + tpn.AccntState, + tpn.ShardDataPool, + tpn.RequestHandler, + tpn.PreProcessorsContainer, + tpn.InterimProcContainer, + ) } func (tpn *TestProcessorNode) initBlockProcessor() { - var err error - - tpn.ForkDetector = &mock.ForkDetectorMock{ - AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeader data.HeaderHandler, finalHeaderHash []byte) error { - return nil - }, - GetHighestFinalBlockNonceCalled: func() uint64 { - return 0 - }, - ProbableHighestNonceCalled: func() uint64 { - return 0 - }, - } - - tpn.BlockTracker = &mock.BlocksTrackerMock{ - AddBlockCalled: func(headerHandler data.HeaderHandler) { - }, - RemoveNotarisedBlocksCalled: func(headerHandler data.HeaderHandler) error { - return nil - }, - UnnotarisedBlocksCalled: func() []data.HeaderHandler { - return make([]data.HeaderHandler, 0) - }, - } - - if tpn.ShardCoordinator.SelfId() == sharding.MetachainShardId { - tpn.BlockProcessor, err = block.NewMetaProcessor( - &mock.ServiceContainerMock{}, - tpn.AccntState, - tpn.MetaDataPool, - 
tpn.ForkDetector, - tpn.ShardCoordinator, - tpn.NodesCoordinator, - tpn.SpecialAddressHandler, - TestHasher, - TestMarshalizer, - tpn.Storage, - tpn.GenesisBlocks, - tpn.RequestHandler, - TestUint64Converter, - ) - } else { - tpn.BlockProcessor, err = block.NewShardProcessor( - nil, - tpn.ShardDataPool, - tpn.Storage, - TestHasher, - TestMarshalizer, - tpn.AccntState, - tpn.ShardCoordinator, - tpn.NodesCoordinator, - tpn.SpecialAddressHandler, - tpn.ForkDetector, - tpn.BlockTracker, - tpn.GenesisBlocks, - tpn.RequestHandler, - tpn.TxCoordinator, - TestUint64Converter, - ) - } - - if err != nil { - fmt.Printf("Error creating blockprocessor: %s\n", err.Error()) - } + var err error + + tpn.ForkDetector = &mock.ForkDetectorMock{ + AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeader data.HeaderHandler, finalHeaderHash []byte) error { + return nil + }, + GetHighestFinalBlockNonceCalled: func() uint64 { + return 0 + }, + ProbableHighestNonceCalled: func() uint64 { + return 0 + }, + } + + tpn.BlockTracker = &mock.BlocksTrackerMock{ + AddBlockCalled: func(headerHandler data.HeaderHandler) { + }, + RemoveNotarisedBlocksCalled: func(headerHandler data.HeaderHandler) error { + return nil + }, + UnnotarisedBlocksCalled: func() []data.HeaderHandler { + return make([]data.HeaderHandler, 0) + }, + } + + if tpn.ShardCoordinator.SelfId() == sharding.MetachainShardId { + tpn.BlockProcessor, err = block.NewMetaProcessor( + &mock.ServiceContainerMock{}, + tpn.AccntState, + tpn.MetaDataPool, + tpn.ForkDetector, + tpn.ShardCoordinator, + tpn.NodesCoordinator, + tpn.SpecialAddressHandler, + TestHasher, + TestMarshalizer, + tpn.Storage, + tpn.GenesisBlocks, + tpn.RequestHandler, + TestUint64Converter, + ) + } else { + tpn.BlockProcessor, err = block.NewShardProcessor( + nil, + tpn.ShardDataPool, + tpn.Storage, + TestHasher, + TestMarshalizer, + tpn.AccntState, + tpn.ShardCoordinator, + tpn.NodesCoordinator, + tpn.SpecialAddressHandler, + tpn.ForkDetector, + tpn.BlockTracker, + tpn.GenesisBlocks, + tpn.RequestHandler, + tpn.TxCoordinator, + TestUint64Converter, + ) + } + + if err != nil { + fmt.Printf("Error creating blockprocessor: %s\n", err.Error()) + } } func (tpn *TestProcessorNode) setGenesisBlock() { - genesisBlock := tpn.GenesisBlocks[tpn.ShardCoordinator.SelfId()] - _ = tpn.BlockChain.SetGenesisHeader(genesisBlock) - hash, _ := core.CalculateHash(TestMarshalizer, TestHasher, genesisBlock) - tpn.BlockChain.SetGenesisHeaderHash(hash) + genesisBlock := tpn.GenesisBlocks[tpn.ShardCoordinator.SelfId()] + _ = tpn.BlockChain.SetGenesisHeader(genesisBlock) + hash, _ := core.CalculateHash(TestMarshalizer, TestHasher, genesisBlock) + tpn.BlockChain.SetGenesisHeaderHash(hash) } func (tpn *TestProcessorNode) initNode() { - var err error - - tpn.Node, err = node.NewNode( - node.WithMessenger(tpn.Messenger), - node.WithMarshalizer(TestMarshalizer), - node.WithHasher(TestHasher), - node.WithHasher(TestHasher), - node.WithAddressConverter(TestAddressConverter), - node.WithAccountsAdapter(tpn.AccntState), - node.WithKeyGen(tpn.OwnAccount.KeygenTxSign), - node.WithShardCoordinator(tpn.ShardCoordinator), - node.WithNodesCoordinator(tpn.NodesCoordinator), - node.WithBlockChain(tpn.BlockChain), - node.WithUint64ByteSliceConverter(TestUint64Converter), - node.WithMultiSigner(tpn.MultiSigner), - node.WithSingleSigner(tpn.OwnAccount.SingleSigner), - node.WithTxSignPrivKey(tpn.OwnAccount.SkTxSign), - node.WithTxSignPubKey(tpn.OwnAccount.PkTxSign), - 
node.WithPrivKey(tpn.NodeKeys.Sk), - node.WithPubKey(tpn.NodeKeys.Pk), - node.WithInterceptorsContainer(tpn.InterceptorsContainer), - node.WithResolversFinder(tpn.ResolverFinder), - node.WithBlockProcessor(tpn.BlockProcessor), - node.WithTxSingleSigner(tpn.OwnAccount.SingleSigner), - node.WithDataStore(tpn.Storage), - node.WithSyncer(&mock.SyncTimerMock{}), - ) - if err != nil { - fmt.Printf("Error creating node: %s\n", err.Error()) - } - - if tpn.ShardCoordinator.SelfId() == sharding.MetachainShardId { - err = tpn.Node.ApplyOptions( - node.WithMetaDataPool(tpn.MetaDataPool), - ) - } else { - err = tpn.Node.ApplyOptions( - node.WithDataPool(tpn.ShardDataPool), - ) - } - - if err != nil { - fmt.Printf("Error creating node: %s\n", err.Error()) - } + var err error + + tpn.Node, err = node.NewNode( + node.WithMessenger(tpn.Messenger), + node.WithMarshalizer(TestMarshalizer), + node.WithHasher(TestHasher), + node.WithHasher(TestHasher), + node.WithAddressConverter(TestAddressConverter), + node.WithAccountsAdapter(tpn.AccntState), + node.WithKeyGen(tpn.OwnAccount.KeygenTxSign), + node.WithShardCoordinator(tpn.ShardCoordinator), + node.WithNodesCoordinator(tpn.NodesCoordinator), + node.WithBlockChain(tpn.BlockChain), + node.WithUint64ByteSliceConverter(TestUint64Converter), + node.WithMultiSigner(tpn.MultiSigner), + node.WithSingleSigner(tpn.OwnAccount.SingleSigner), + node.WithTxSignPrivKey(tpn.OwnAccount.SkTxSign), + node.WithTxSignPubKey(tpn.OwnAccount.PkTxSign), + node.WithPrivKey(tpn.NodeKeys.Sk), + node.WithPubKey(tpn.NodeKeys.Pk), + node.WithInterceptorsContainer(tpn.InterceptorsContainer), + node.WithResolversFinder(tpn.ResolverFinder), + node.WithBlockProcessor(tpn.BlockProcessor), + node.WithTxSingleSigner(tpn.OwnAccount.SingleSigner), + node.WithDataStore(tpn.Storage), + node.WithSyncer(&mock.SyncTimerMock{}), + ) + if err != nil { + fmt.Printf("Error creating node: %s\n", err.Error()) + } + + if tpn.ShardCoordinator.SelfId() == sharding.MetachainShardId { + err = tpn.Node.ApplyOptions( + node.WithMetaDataPool(tpn.MetaDataPool), + ) + } else { + err = tpn.Node.ApplyOptions( + node.WithDataPool(tpn.ShardDataPool), + ) + } + + if err != nil { + fmt.Printf("Error creating node: %s\n", err.Error()) + } } // SendTransaction can send a transaction (it does the dispatching) func (tpn *TestProcessorNode) SendTransaction(tx *dataTransaction.Transaction) (string, error) { - txHash, err := tpn.Node.SendTransaction( - tx.Nonce, - hex.EncodeToString(tx.SndAddr), - hex.EncodeToString(tx.RcvAddr), - tx.Value, - tx.GasPrice, - tx.GasLimit, - tx.Data, - tx.Signature, - ) - return txHash, err + txHash, err := tpn.Node.SendTransaction( + tx.Nonce, + hex.EncodeToString(tx.SndAddr), + hex.EncodeToString(tx.RcvAddr), + tx.Value, + tx.GasPrice, + tx.GasLimit, + tx.Data, + tx.Signature, + ) + return txHash, err } func (tpn *TestProcessorNode) addHandlersForCounters() { - metaHandlers := func(key []byte) { - atomic.AddInt32(&tpn.CounterMetaRcv, 1) - } - hdrHandlers := func(key []byte) { - atomic.AddInt32(&tpn.CounterHdrRecv, 1) - } - - if tpn.ShardCoordinator.SelfId() == sharding.MetachainShardId { - tpn.MetaDataPool.ShardHeaders().RegisterHandler(hdrHandlers) - tpn.MetaDataPool.MetaChainBlocks().RegisterHandler(metaHandlers) - } else { - txHandler := func(key []byte) { - atomic.AddInt32(&tpn.CounterTxRecv, 1) - } - mbHandlers := func(key []byte) { - atomic.AddInt32(&tpn.CounterMbRecv, 1) - } - - tpn.ShardDataPool.UnsignedTransactions().RegisterHandler(txHandler) - 
tpn.ShardDataPool.Transactions().RegisterHandler(txHandler) - tpn.ShardDataPool.RewardTransactions().RegisterHandler(txHandler) - tpn.ShardDataPool.Headers().RegisterHandler(hdrHandlers) - tpn.ShardDataPool.MetaBlocks().RegisterHandler(metaHandlers) - tpn.ShardDataPool.MiniBlocks().RegisterHandler(mbHandlers) - } + metaHandlers := func(key []byte) { + atomic.AddInt32(&tpn.CounterMetaRcv, 1) + } + hdrHandlers := func(key []byte) { + atomic.AddInt32(&tpn.CounterHdrRecv, 1) + } + + if tpn.ShardCoordinator.SelfId() == sharding.MetachainShardId { + tpn.MetaDataPool.ShardHeaders().RegisterHandler(hdrHandlers) + tpn.MetaDataPool.MetaChainBlocks().RegisterHandler(metaHandlers) + } else { + txHandler := func(key []byte) { + atomic.AddInt32(&tpn.CounterTxRecv, 1) + } + mbHandlers := func(key []byte) { + atomic.AddInt32(&tpn.CounterMbRecv, 1) + } + + tpn.ShardDataPool.UnsignedTransactions().RegisterHandler(txHandler) + tpn.ShardDataPool.Transactions().RegisterHandler(txHandler) + tpn.ShardDataPool.RewardTransactions().RegisterHandler(txHandler) + tpn.ShardDataPool.Headers().RegisterHandler(hdrHandlers) + tpn.ShardDataPool.MetaBlocks().RegisterHandler(metaHandlers) + tpn.ShardDataPool.MiniBlocks().RegisterHandler(mbHandlers) + } } // LoadTxSignSkBytes alters the already generated sk/pk pair func (tpn *TestProcessorNode) LoadTxSignSkBytes(skBytes []byte) { - tpn.OwnAccount.LoadTxSignSkBytes(skBytes) + tpn.OwnAccount.LoadTxSignSkBytes(skBytes) } // ProposeBlock proposes a new block func (tpn *TestProcessorNode) ProposeBlock(round uint64, nonce uint64) (data.BodyHandler, data.HeaderHandler, [][]byte) { - startTime := time.Now() - maxTime := time.Second - - haveTime := func() bool { - elapsedTime := time.Since(startTime) - remainingTime := maxTime - elapsedTime - return remainingTime > 0 - } - - blockBody, err := tpn.BlockProcessor.CreateBlockBody(round, haveTime) - if err != nil { - fmt.Println(err.Error()) - return nil, nil, nil - } - blockHeader, err := tpn.BlockProcessor.CreateBlockHeader(blockBody, round, haveTime) - if err != nil { - fmt.Println(err.Error()) - return nil, nil, nil - } - - blockHeader.SetRound(round) - blockHeader.SetNonce(nonce) - blockHeader.SetPubKeysBitmap([]byte{1}) - sig, _ := TestMultiSig.AggregateSigs(nil) - blockHeader.SetSignature(sig) - currHdr := tpn.BlockChain.GetCurrentBlockHeader() - if currHdr == nil { - currHdr = tpn.BlockChain.GetGenesisHeader() - } - - buff, _ := TestMarshalizer.Marshal(currHdr) - blockHeader.SetPrevHash(TestHasher.Compute(string(buff))) - blockHeader.SetPrevRandSeed(currHdr.GetRandSeed()) - blockHeader.SetRandSeed(sig) - - shardBlockBody, ok := blockBody.(dataBlock.Body) - txHashes := make([][]byte, 0) - if !ok { - return blockBody, blockHeader, txHashes - } - - for _, mb := range shardBlockBody { - for _, hash := range mb.TxHashes { - copiedHash := make([]byte, len(hash)) - copy(copiedHash, hash) - txHashes = append(txHashes, copiedHash) - } - } - - return blockBody, blockHeader, txHashes + startTime := time.Now() + maxTime := time.Second + + haveTime := func() bool { + elapsedTime := time.Since(startTime) + remainingTime := maxTime - elapsedTime + return remainingTime > 0 + } + + blockBody, err := tpn.BlockProcessor.CreateBlockBody(round, haveTime) + if err != nil { + fmt.Println(err.Error()) + return nil, nil, nil + } + blockHeader, err := tpn.BlockProcessor.CreateBlockHeader(blockBody, round, haveTime) + if err != nil { + fmt.Println(err.Error()) + return nil, nil, nil + } + + blockHeader.SetRound(round) + blockHeader.SetNonce(nonce) + 
blockHeader.SetPubKeysBitmap([]byte{1}) + sig, _ := TestMultiSig.AggregateSigs(nil) + blockHeader.SetSignature(sig) + currHdr := tpn.BlockChain.GetCurrentBlockHeader() + if currHdr == nil { + currHdr = tpn.BlockChain.GetGenesisHeader() + } + + buff, _ := TestMarshalizer.Marshal(currHdr) + blockHeader.SetPrevHash(TestHasher.Compute(string(buff))) + blockHeader.SetPrevRandSeed(currHdr.GetRandSeed()) + blockHeader.SetRandSeed(sig) + + shardBlockBody, ok := blockBody.(dataBlock.Body) + txHashes := make([][]byte, 0) + if !ok { + return blockBody, blockHeader, txHashes + } + + for _, mb := range shardBlockBody { + for _, hash := range mb.TxHashes { + copiedHash := make([]byte, len(hash)) + copy(copiedHash, hash) + txHashes = append(txHashes, copiedHash) + } + } + + return blockBody, blockHeader, txHashes } // BroadcastBlock broadcasts the block and body to the connected peers func (tpn *TestProcessorNode) BroadcastBlock(body data.BodyHandler, header data.HeaderHandler) { - _ = tpn.BroadcastMessenger.BroadcastBlock(body, header) - _ = tpn.BroadcastMessenger.BroadcastHeader(header) - miniBlocks, transactions, _ := tpn.BlockProcessor.MarshalizedDataToBroadcast(header, body) - _ = tpn.BroadcastMessenger.BroadcastMiniBlocks(miniBlocks) - _ = tpn.BroadcastMessenger.BroadcastTransactions(transactions) + _ = tpn.BroadcastMessenger.BroadcastBlock(body, header) + _ = tpn.BroadcastMessenger.BroadcastHeader(header) + miniBlocks, transactions, _ := tpn.BlockProcessor.MarshalizedDataToBroadcast(header, body) + _ = tpn.BroadcastMessenger.BroadcastMiniBlocks(miniBlocks) + _ = tpn.BroadcastMessenger.BroadcastTransactions(transactions) } // CommitBlock commits the block and body func (tpn *TestProcessorNode) CommitBlock(body data.BodyHandler, header data.HeaderHandler) { - _ = tpn.BlockProcessor.CommitBlock(tpn.BlockChain, header, body) + _ = tpn.BlockProcessor.CommitBlock(tpn.BlockChain, header, body) } // GetShardHeader returns the first *dataBlock.Header stored in datapools having the nonce provided as parameter func (tpn *TestProcessorNode) GetShardHeader(nonce uint64) (*dataBlock.Header, error) { - invalidCachers := tpn.ShardDataPool == nil || tpn.ShardDataPool.Headers() == nil || tpn.ShardDataPool.HeadersNonces() == nil - if invalidCachers { - return nil, errors.New("invalid data pool") - } - - syncMapHashNonce, ok := tpn.ShardDataPool.HeadersNonces().Get(nonce) - if !ok { - return nil, errors.New(fmt.Sprintf("no hash-nonce link in HeadersNonces for nonce %d", nonce)) - } - - headerHash, ok := syncMapHashNonce.Load(tpn.ShardCoordinator.SelfId()) - if !ok { - return nil, errors.New(fmt.Sprintf("no hash-nonce hash in HeadersNonces for nonce %d", nonce)) - } - - headerObject, ok := tpn.ShardDataPool.Headers().Get(headerHash) - if !ok { - return nil, errors.New(fmt.Sprintf("no header found for hash %s", hex.EncodeToString(headerHash))) - } - - header, ok := headerObject.(*dataBlock.Header) - if !ok { - return nil, errors.New(fmt.Sprintf("not a *dataBlock.Header stored in headers found for hash %s", hex.EncodeToString(headerHash))) - } - - return header, nil + invalidCachers := tpn.ShardDataPool == nil || tpn.ShardDataPool.Headers() == nil || tpn.ShardDataPool.HeadersNonces() == nil + if invalidCachers { + return nil, errors.New("invalid data pool") + } + + syncMapHashNonce, ok := tpn.ShardDataPool.HeadersNonces().Get(nonce) + if !ok { + return nil, errors.New(fmt.Sprintf("no hash-nonce link in HeadersNonces for nonce %d", nonce)) + } + + headerHash, ok := syncMapHashNonce.Load(tpn.ShardCoordinator.SelfId()) + 
if !ok { + return nil, errors.New(fmt.Sprintf("no hash-nonce hash in HeadersNonces for nonce %d", nonce)) + } + + headerObject, ok := tpn.ShardDataPool.Headers().Get(headerHash) + if !ok { + return nil, errors.New(fmt.Sprintf("no header found for hash %s", hex.EncodeToString(headerHash))) + } + + header, ok := headerObject.(*dataBlock.Header) + if !ok { + return nil, errors.New(fmt.Sprintf("not a *dataBlock.Header stored in headers found for hash %s", hex.EncodeToString(headerHash))) + } + + return header, nil } // GetBlockBody returns the body for provided header parameter func (tpn *TestProcessorNode) GetBlockBody(header *dataBlock.Header) (dataBlock.Body, error) { - invalidCachers := tpn.ShardDataPool == nil || tpn.ShardDataPool.MiniBlocks() == nil - if invalidCachers { - return nil, errors.New("invalid data pool") - } + invalidCachers := tpn.ShardDataPool == nil || tpn.ShardDataPool.MiniBlocks() == nil + if invalidCachers { + return nil, errors.New("invalid data pool") + } - body := dataBlock.Body{} - for _, miniBlockHeader := range header.MiniBlockHeaders { - miniBlockHash := miniBlockHeader.Hash + body := dataBlock.Body{} + for _, miniBlockHeader := range header.MiniBlockHeaders { + miniBlockHash := miniBlockHeader.Hash - mbObject, ok := tpn.ShardDataPool.MiniBlocks().Get(miniBlockHash) - if !ok { - return nil, errors.New(fmt.Sprintf("no miniblock found for hash %s", hex.EncodeToString(miniBlockHash))) - } + mbObject, ok := tpn.ShardDataPool.MiniBlocks().Get(miniBlockHash) + if !ok { + return nil, errors.New(fmt.Sprintf("no miniblock found for hash %s", hex.EncodeToString(miniBlockHash))) + } - mb, ok := mbObject.(*dataBlock.MiniBlock) - if !ok { - return nil, errors.New(fmt.Sprintf("not a *dataBlock.MiniBlock stored in miniblocks found for hash %s", hex.EncodeToString(miniBlockHash))) - } + mb, ok := mbObject.(*dataBlock.MiniBlock) + if !ok { + return nil, errors.New(fmt.Sprintf("not a *dataBlock.MiniBlock stored in miniblocks found for hash %s", hex.EncodeToString(miniBlockHash))) + } - body = append(body, mb) - } + body = append(body, mb) + } - return body, nil + return body, nil } // GetMetaHeader returns the first *dataBlock.MetaBlock stored in datapools having the nonce provided as parameter func (tpn *TestProcessorNode) GetMetaHeader(nonce uint64) (*dataBlock.MetaBlock, error) { - invalidCachers := tpn.MetaDataPool == nil || tpn.MetaDataPool.MetaChainBlocks() == nil || tpn.MetaDataPool.HeadersNonces() == nil - if invalidCachers { - return nil, errors.New("invalid data pool") - } - - syncMapHashNonce, ok := tpn.MetaDataPool.HeadersNonces().Get(nonce) - if !ok { - return nil, errors.New(fmt.Sprintf("no hash-nonce link in HeadersNonces for nonce %d", nonce)) - } - - headerHash, ok := syncMapHashNonce.Load(tpn.ShardCoordinator.SelfId()) - if !ok { - return nil, errors.New(fmt.Sprintf("no hash-nonce hash in HeadersNonces for nonce %d", nonce)) - } - - headerObject, ok := tpn.MetaDataPool.MetaChainBlocks().Get(headerHash) - if !ok { - return nil, errors.New(fmt.Sprintf("no header found for hash %s", hex.EncodeToString(headerHash))) - } - - header, ok := headerObject.(*dataBlock.MetaBlock) - if !ok { - return nil, errors.New(fmt.Sprintf("not a *dataBlock.MetaBlock stored in headers found for hash %s", hex.EncodeToString(headerHash))) - } - - return header, nil + invalidCachers := tpn.MetaDataPool == nil || tpn.MetaDataPool.MetaChainBlocks() == nil || tpn.MetaDataPool.HeadersNonces() == nil + if invalidCachers { + return nil, errors.New("invalid data pool") + } + + syncMapHashNonce, 
ok := tpn.MetaDataPool.HeadersNonces().Get(nonce) + if !ok { + return nil, errors.New(fmt.Sprintf("no hash-nonce link in HeadersNonces for nonce %d", nonce)) + } + + headerHash, ok := syncMapHashNonce.Load(tpn.ShardCoordinator.SelfId()) + if !ok { + return nil, errors.New(fmt.Sprintf("no hash-nonce hash in HeadersNonces for nonce %d", nonce)) + } + + headerObject, ok := tpn.MetaDataPool.MetaChainBlocks().Get(headerHash) + if !ok { + return nil, errors.New(fmt.Sprintf("no header found for hash %s", hex.EncodeToString(headerHash))) + } + + header, ok := headerObject.(*dataBlock.MetaBlock) + if !ok { + return nil, errors.New(fmt.Sprintf("not a *dataBlock.MetaBlock stored in headers found for hash %s", hex.EncodeToString(headerHash))) + } + + return header, nil } // SyncNode tries to process and commit a block already stored in data pool with provided nonce func (tpn *TestProcessorNode) SyncNode(nonce uint64) error { - if tpn.ShardCoordinator.SelfId() == sharding.MetachainShardId { - return tpn.syncMetaNode(nonce) - } else { - return tpn.syncShardNode(nonce) - } + if tpn.ShardCoordinator.SelfId() == sharding.MetachainShardId { + return tpn.syncMetaNode(nonce) + } else { + return tpn.syncShardNode(nonce) + } } func (tpn *TestProcessorNode) syncShardNode(nonce uint64) error { - header, err := tpn.GetShardHeader(nonce) - if err != nil { - return err - } - - body, err := tpn.GetBlockBody(header) - if err != nil { - return err - } - - err = tpn.BlockProcessor.ProcessBlock( - tpn.BlockChain, - header, - body, - func() time.Duration { - return time.Second * 2 - }, - ) - if err != nil { - return err - } - - err = tpn.BlockProcessor.CommitBlock(tpn.BlockChain, header, body) - if err != nil { - return err - } - - return nil + header, err := tpn.GetShardHeader(nonce) + if err != nil { + return err + } + + body, err := tpn.GetBlockBody(header) + if err != nil { + return err + } + + err = tpn.BlockProcessor.ProcessBlock( + tpn.BlockChain, + header, + body, + func() time.Duration { + return time.Second * 2 + }, + ) + if err != nil { + return err + } + + err = tpn.BlockProcessor.CommitBlock(tpn.BlockChain, header, body) + if err != nil { + return err + } + + return nil } func (tpn *TestProcessorNode) syncMetaNode(nonce uint64) error { - header, err := tpn.GetMetaHeader(nonce) - if err != nil { - return err - } - - err = tpn.BlockProcessor.ProcessBlock( - tpn.BlockChain, - header, - &dataBlock.MetaBlockBody{}, - func() time.Duration { - return time.Second * 2 - }, - ) - if err != nil { - return err - } - - err = tpn.BlockProcessor.CommitBlock(tpn.BlockChain, header, &dataBlock.MetaBlockBody{}) - if err != nil { - return err - } - - return nil + header, err := tpn.GetMetaHeader(nonce) + if err != nil { + return err + } + + err = tpn.BlockProcessor.ProcessBlock( + tpn.BlockChain, + header, + &dataBlock.MetaBlockBody{}, + func() time.Duration { + return time.Second * 2 + }, + ) + if err != nil { + return err + } + + err = tpn.BlockProcessor.CommitBlock(tpn.BlockChain, header, &dataBlock.MetaBlockBody{}) + if err != nil { + return err + } + + return nil } // SetAccountNonce sets the account nonce with journal func (tpn *TestProcessorNode) SetAccountNonce(nonce uint64) error { - nodeAccount, _ := tpn.AccntState.GetAccountWithJournal(tpn.OwnAccount.Address) - err := nodeAccount.(*state.Account).SetNonceWithJournal(nonce) - if err != nil { - return err - } - - _, err = tpn.AccntState.Commit() - if err != nil { - return err - } - - return nil + nodeAccount, _ := 
tpn.AccntState.GetAccountWithJournal(tpn.OwnAccount.Address) + err := nodeAccount.(*state.Account).SetNonceWithJournal(nonce) + if err != nil { + return err + } + + _, err = tpn.AccntState.Commit() + if err != nil { + return err + } + + return nil } // MiniBlocksPresent checks if the all the miniblocks are present in the pool func (tpn *TestProcessorNode) MiniBlocksPresent(hashes [][]byte) bool { - mbCacher := tpn.ShardDataPool.MiniBlocks() - for i := 0; i < len(hashes); i++ { - ok := mbCacher.Has(hashes[i]) - if !ok { - return false - } - } - - return true + mbCacher := tpn.ShardDataPool.MiniBlocks() + for i := 0; i < len(hashes); i++ { + ok := mbCacher.Has(hashes[i]) + if !ok { + return false + } + } + + return true } diff --git a/integrationTests/testProcessorNodeWithMultisigner.go b/integrationTests/testProcessorNodeWithMultisigner.go index e348a75ffd7..b2853b81148 100644 --- a/integrationTests/testProcessorNodeWithMultisigner.go +++ b/integrationTests/testProcessorNodeWithMultisigner.go @@ -1,196 +1,196 @@ package integrationTests import ( - "bytes" - "context" - "fmt" - - "github.com/ElrondNetwork/elrond-go/cmd/node/factory" - "github.com/ElrondNetwork/elrond-go/crypto" - kmultisig "github.com/ElrondNetwork/elrond-go/crypto/signing/kyber/multisig" - "github.com/ElrondNetwork/elrond-go/crypto/signing/multisig" - "github.com/ElrondNetwork/elrond-go/data" - "github.com/ElrondNetwork/elrond-go/hashing/blake2b" - "github.com/ElrondNetwork/elrond-go/sharding" + "bytes" + "context" + "fmt" + + "github.com/ElrondNetwork/elrond-go/cmd/node/factory" + "github.com/ElrondNetwork/elrond-go/crypto" + kmultisig "github.com/ElrondNetwork/elrond-go/crypto/signing/kyber/multisig" + "github.com/ElrondNetwork/elrond-go/crypto/signing/multisig" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/hashing/blake2b" + "github.com/ElrondNetwork/elrond-go/sharding" ) // NewTestProcessorNodeWithCustomNodesCoordinator returns a new TestProcessorNode instance with custom NodesCoordinator func NewTestProcessorNodeWithCustomNodesCoordinator( - maxShards uint32, - nodeShardId uint32, - initialNodeAddr string, - nodesCoordinator sharding.NodesCoordinator, - cp *CryptoParams, - keyIndex int, + maxShards uint32, + nodeShardId uint32, + initialNodeAddr string, + nodesCoordinator sharding.NodesCoordinator, + cp *CryptoParams, + keyIndex int, ) *TestProcessorNode { - shardCoordinator, _ := sharding.NewMultiShardCoordinator(maxShards, nodeShardId) - - messenger := CreateMessengerWithKadDht(context.Background(), initialNodeAddr) - tpn := &TestProcessorNode{ - ShardCoordinator: shardCoordinator, - Messenger: messenger, - NodesCoordinator: nodesCoordinator, - } - tpn.NodeKeys = cp.Keys[nodeShardId][keyIndex] - - llsig := &kmultisig.KyberMultiSignerBLS{} - blsHasher := blake2b.Blake2b{HashSize: factory.BlsHashSize} - - pubKeysMap := PubKeysMapFromKeysMap(cp.Keys) - - tpn.MultiSigner, _ = multisig.NewBLSMultisig( - llsig, - blsHasher, - pubKeysMap[nodeShardId], - tpn.NodeKeys.Sk, - cp.KeyGen, - 0, - ) - if tpn.MultiSigner == nil { - fmt.Println("Error generating multisigner") - } - accountShardId := nodeShardId - if nodeShardId == sharding.MetachainShardId { - accountShardId = 0 - } - - tpn.OwnAccount = CreateTestWalletAccount(shardCoordinator, accountShardId) - tpn.initDataPools() - tpn.initTestNode() - - return tpn + shardCoordinator, _ := sharding.NewMultiShardCoordinator(maxShards, nodeShardId) + + messenger := CreateMessengerWithKadDht(context.Background(), initialNodeAddr) + tpn := 
&TestProcessorNode{ + ShardCoordinator: shardCoordinator, + Messenger: messenger, + NodesCoordinator: nodesCoordinator, + } + tpn.NodeKeys = cp.Keys[nodeShardId][keyIndex] + + llsig := &kmultisig.KyberMultiSignerBLS{} + blsHasher := blake2b.Blake2b{HashSize: factory.BlsHashSize} + + pubKeysMap := PubKeysMapFromKeysMap(cp.Keys) + + tpn.MultiSigner, _ = multisig.NewBLSMultisig( + llsig, + blsHasher, + pubKeysMap[nodeShardId], + tpn.NodeKeys.Sk, + cp.KeyGen, + 0, + ) + if tpn.MultiSigner == nil { + fmt.Println("Error generating multisigner") + } + accountShardId := nodeShardId + if nodeShardId == sharding.MetachainShardId { + accountShardId = 0 + } + + tpn.OwnAccount = CreateTestWalletAccount(shardCoordinator, accountShardId) + tpn.initDataPools() + tpn.initTestNode() + + return tpn } // CreateNodesWithNodesCoordinator returns a map with nodes per shard each using a real nodes coordinator func CreateNodesWithNodesCoordinator( - nodesPerShard int, - nbMetaNodes int, - nbShards int, - shardConsensusGroupSize int, - metaConsensusGroupSize int, - seedAddress string, + nodesPerShard int, + nbMetaNodes int, + nbShards int, + shardConsensusGroupSize int, + metaConsensusGroupSize int, + seedAddress string, ) map[uint32][]*TestProcessorNode { - cp := CreateCryptoParams(nodesPerShard, nbMetaNodes, uint32(nbShards)) - pubKeys := PubKeysMapFromKeysMap(cp.Keys) - validatorsMap := GenValidatorsFromPubKeys(pubKeys) - nodesMap := make(map[uint32][]*TestProcessorNode) - for shardId, validatorList := range validatorsMap { - nodesCoordinator, err := sharding.NewIndexHashedNodesCoordinator( - shardConsensusGroupSize, - metaConsensusGroupSize, - TestHasher, - shardId, - uint32(nbShards), - validatorsMap, - ) - - if err != nil { - fmt.Println("Error creating node coordinator") - } - - nodesList := make([]*TestProcessorNode, len(validatorList)) - for i := range validatorList { - nodesList[i] = NewTestProcessorNodeWithCustomNodesCoordinator( - uint32(nbShards), - shardId, - seedAddress, - nodesCoordinator, - cp, - i, - ) - } - nodesMap[shardId] = nodesList - } - - return nodesMap + cp := CreateCryptoParams(nodesPerShard, nbMetaNodes, uint32(nbShards)) + pubKeys := PubKeysMapFromKeysMap(cp.Keys) + validatorsMap := GenValidatorsFromPubKeys(pubKeys) + nodesMap := make(map[uint32][]*TestProcessorNode) + for shardId, validatorList := range validatorsMap { + nodesCoordinator, err := sharding.NewIndexHashedNodesCoordinator( + shardConsensusGroupSize, + metaConsensusGroupSize, + TestHasher, + shardId, + uint32(nbShards), + validatorsMap, + ) + + if err != nil { + fmt.Println("Error creating node coordinator") + } + + nodesList := make([]*TestProcessorNode, len(validatorList)) + for i := range validatorList { + nodesList[i] = NewTestProcessorNodeWithCustomNodesCoordinator( + uint32(nbShards), + shardId, + seedAddress, + nodesCoordinator, + cp, + i, + ) + } + nodesMap[shardId] = nodesList + } + + return nodesMap } // ProposeBlockWithConsensusSignature proposes func ProposeBlockWithConsensusSignature( - shardId uint32, - nodesMap map[uint32][]*TestProcessorNode, - round uint64, - nonce uint64, - randomness []byte, + shardId uint32, + nodesMap map[uint32][]*TestProcessorNode, + round uint64, + nonce uint64, + randomness []byte, ) (data.BodyHandler, data.HeaderHandler, [][]byte, []*TestProcessorNode) { - nodesCoordinator := nodesMap[shardId][0].NodesCoordinator - pubKeys, err := nodesCoordinator.GetValidatorsPublicKeys(randomness, round, shardId) - if err != nil { - fmt.Println("Error getting the validators public keys: ", err) 
- } + nodesCoordinator := nodesMap[shardId][0].NodesCoordinator + pubKeys, err := nodesCoordinator.GetValidatorsPublicKeys(randomness, round, shardId) + if err != nil { + fmt.Println("Error getting the validators public keys: ", err) + } - adddresses, err := nodesCoordinator.GetValidatorsRewardsAddresses(randomness, round, shardId) + adddresses, err := nodesCoordinator.GetValidatorsRewardsAddresses(randomness, round, shardId) - // set the consensus reward addresses - for _, node := range nodesMap[shardId] { - node.BlockProcessor.SetConsensusRewardAddresses(adddresses, round) - } + // set the consensus reward addresses + for _, node := range nodesMap[shardId] { + node.BlockProcessor.SetConsensusRewardAddresses(adddresses, round) + } - consensusNodes := selectTestNodesForPubKeys(nodesMap[shardId], pubKeys) - // first node is block proposer - body, header, txHashes := consensusNodes[0].ProposeBlock(round, nonce) - header.SetPrevRandSeed(randomness) - header = DoConsensusSigningOnBlock(header, consensusNodes, pubKeys) + consensusNodes := selectTestNodesForPubKeys(nodesMap[shardId], pubKeys) + // first node is block proposer + body, header, txHashes := consensusNodes[0].ProposeBlock(round, nonce) + header.SetPrevRandSeed(randomness) + header = DoConsensusSigningOnBlock(header, consensusNodes, pubKeys) - return body, header, txHashes, consensusNodes + return body, header, txHashes, consensusNodes } func selectTestNodesForPubKeys(nodes []*TestProcessorNode, pubKeys []string) []*TestProcessorNode { - selectedNodes := make([]*TestProcessorNode, len(pubKeys)) - cntNodes := 0 - - for i, pk := range pubKeys { - for _, node := range nodes { - pubKeyBytes, _ := node.NodeKeys.Pk.ToByteArray() - if bytes.Equal(pubKeyBytes, []byte(pk)) { - selectedNodes[i] = node - cntNodes++ - } - } - } - - if cntNodes != len(pubKeys) { - fmt.Println("Error selecting nodes from public keys") - } - - return selectedNodes + selectedNodes := make([]*TestProcessorNode, len(pubKeys)) + cntNodes := 0 + + for i, pk := range pubKeys { + for _, node := range nodes { + pubKeyBytes, _ := node.NodeKeys.Pk.ToByteArray() + if bytes.Equal(pubKeyBytes, []byte(pk)) { + selectedNodes[i] = node + cntNodes++ + } + } + } + + if cntNodes != len(pubKeys) { + fmt.Println("Error selecting nodes from public keys") + } + + return selectedNodes } // DoConsensusSigningOnBlock simulates a consensus aggregated signature on the provided block func DoConsensusSigningOnBlock( - blockHeader data.HeaderHandler, - consensusNodes []*TestProcessorNode, - pubKeys []string, + blockHeader data.HeaderHandler, + consensusNodes []*TestProcessorNode, + pubKeys []string, ) data.HeaderHandler { - // set bitmap for all consensus nodes signing - bitmap := make([]byte, len(consensusNodes)/8+1) - for i := range bitmap { - bitmap[i] = 0xFF - } - - bitmap[len(consensusNodes)/8] >>= uint8(8 - (len(consensusNodes) % 8)) - blockHeader.SetPubKeysBitmap(bitmap) - // clear signature, as we need to compute it below - blockHeader.SetSignature(nil) - blockHeader.SetPubKeysBitmap(nil) - blockHeaderBytes, _ := TestMarshalizer.Marshal(blockHeader) - blockHeaderHash := TestHasher.Compute(string(blockHeaderBytes)) - - var msig crypto.MultiSigner - msigProposer, _ := consensusNodes[0].MultiSigner.Create(pubKeys, 0) - _, _ = msigProposer.CreateSignatureShare(blockHeaderHash, bitmap) - - for i := 1; i < len(consensusNodes); i++ { - msig, _ = consensusNodes[i].MultiSigner.Create(pubKeys, uint16(i)) - sigShare, _ := msig.CreateSignatureShare(blockHeaderHash, bitmap) - _ = 
msigProposer.StoreSignatureShare(uint16(i), sigShare) - } - - sig, _ := msigProposer.AggregateSigs(bitmap) - blockHeader.SetSignature(sig) - blockHeader.SetPubKeysBitmap(bitmap) - - return blockHeader + // set bitmap for all consensus nodes signing + bitmap := make([]byte, len(consensusNodes)/8+1) + for i := range bitmap { + bitmap[i] = 0xFF + } + + bitmap[len(consensusNodes)/8] >>= uint8(8 - (len(consensusNodes) % 8)) + blockHeader.SetPubKeysBitmap(bitmap) + // clear signature, as we need to compute it below + blockHeader.SetSignature(nil) + blockHeader.SetPubKeysBitmap(nil) + blockHeaderBytes, _ := TestMarshalizer.Marshal(blockHeader) + blockHeaderHash := TestHasher.Compute(string(blockHeaderBytes)) + + var msig crypto.MultiSigner + msigProposer, _ := consensusNodes[0].MultiSigner.Create(pubKeys, 0) + _, _ = msigProposer.CreateSignatureShare(blockHeaderHash, bitmap) + + for i := 1; i < len(consensusNodes); i++ { + msig, _ = consensusNodes[i].MultiSigner.Create(pubKeys, uint16(i)) + sigShare, _ := msig.CreateSignatureShare(blockHeaderHash, bitmap) + _ = msigProposer.StoreSignatureShare(uint16(i), sigShare) + } + + sig, _ := msigProposer.AggregateSigs(bitmap) + blockHeader.SetSignature(sig) + blockHeader.SetPubKeysBitmap(bitmap) + + return blockHeader } diff --git a/node/mock/blockProcessorStub.go b/node/mock/blockProcessorStub.go index 7edf82f9c0e..64560dbc1e7 100644 --- a/node/mock/blockProcessorStub.go +++ b/node/mock/blockProcessorStub.go @@ -1,85 +1,85 @@ package mock import ( - "math/big" - "time" + "math/big" + "time" - "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data" ) // BlockProcessorStub mocks the implementation for a blockProcessor type BlockProcessorStub struct { - ProcessBlockCalled func(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler, haveTime func() time.Duration) error - CommitBlockCalled func(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler) error - RevertAccountStateCalled func() - CreateGenesisBlockCalled func(balances map[string]*big.Int) (data.HeaderHandler, error) - CreateBlockBodyCalled func(round uint64, haveTime func() bool) (data.BodyHandler, error) - RestoreBlockIntoPoolsCalled func(header data.HeaderHandler, body data.BodyHandler) error - SetOnRequestTransactionCalled func(f func(destShardID uint32, txHash []byte)) - CreateBlockHeaderCalled func(body data.BodyHandler, round uint64, haveTime func() bool) (data.HeaderHandler, error) - MarshalizedDataToBroadcastCalled func(header data.HeaderHandler, body data.BodyHandler) (map[uint32][]byte, map[string][][]byte, error) - DecodeBlockBodyCalled func(dta []byte) data.BodyHandler - DecodeBlockHeaderCalled func(dta []byte) data.HeaderHandler - AddLastNotarizedHdrCalled func(shardId uint32, processedHdr data.HeaderHandler) + ProcessBlockCalled func(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler, haveTime func() time.Duration) error + CommitBlockCalled func(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler) error + RevertAccountStateCalled func() + CreateGenesisBlockCalled func(balances map[string]*big.Int) (data.HeaderHandler, error) + CreateBlockBodyCalled func(round uint64, haveTime func() bool) (data.BodyHandler, error) + RestoreBlockIntoPoolsCalled func(header data.HeaderHandler, body data.BodyHandler) error + SetOnRequestTransactionCalled func(f func(destShardID uint32, txHash []byte)) + CreateBlockHeaderCalled func(body data.BodyHandler, round uint64, 
haveTime func() bool) (data.HeaderHandler, error) + MarshalizedDataToBroadcastCalled func(header data.HeaderHandler, body data.BodyHandler) (map[uint32][]byte, map[string][][]byte, error) + DecodeBlockBodyCalled func(dta []byte) data.BodyHandler + DecodeBlockHeaderCalled func(dta []byte) data.HeaderHandler + AddLastNotarizedHdrCalled func(shardId uint32, processedHdr data.HeaderHandler) } // ProcessBlock mocks pocessing a block func (blProcMock *BlockProcessorStub) ProcessBlock(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler, haveTime func() time.Duration) error { - return blProcMock.ProcessBlockCalled(blockChain, header, body, haveTime) + return blProcMock.ProcessBlockCalled(blockChain, header, body, haveTime) } // CommitBlock mocks the commit of a block func (blProcMock *BlockProcessorStub) CommitBlock(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler) error { - return blProcMock.CommitBlockCalled(blockChain, header, body) + return blProcMock.CommitBlockCalled(blockChain, header, body) } // RevertAccountState mocks revert of the accounts state func (blProcMock *BlockProcessorStub) RevertAccountState() { - blProcMock.RevertAccountStateCalled() + blProcMock.RevertAccountStateCalled() } // CreateGenesisBlock mocks the creation of a genesis block body func (blProcMock *BlockProcessorStub) CreateGenesisBlock(balances map[string]*big.Int) (data.HeaderHandler, error) { - return blProcMock.CreateGenesisBlockCalled(balances) + return blProcMock.CreateGenesisBlockCalled(balances) } // CreateTxBlockBody mocks the creation of a transaction block body func (blProcMock *BlockProcessorStub) CreateBlockBody(round uint64, haveTime func() bool) (data.BodyHandler, error) { - return blProcMock.CreateBlockBodyCalled(round, haveTime) + return blProcMock.CreateBlockBodyCalled(round, haveTime) } func (blProcMock *BlockProcessorStub) RestoreBlockIntoPools(header data.HeaderHandler, body data.BodyHandler) error { - return blProcMock.RestoreBlockIntoPoolsCalled(header, body) + return blProcMock.RestoreBlockIntoPoolsCalled(header, body) } func (blProcMock BlockProcessorStub) CreateBlockHeader(body data.BodyHandler, round uint64, haveTime func() bool) (data.HeaderHandler, error) { - return blProcMock.CreateBlockHeaderCalled(body, round, haveTime) + return blProcMock.CreateBlockHeaderCalled(body, round, haveTime) } func (blProcMock BlockProcessorStub) MarshalizedDataToBroadcast(header data.HeaderHandler, body data.BodyHandler) (map[uint32][]byte, map[string][][]byte, error) { - return blProcMock.MarshalizedDataToBroadcastCalled(header, body) + return blProcMock.MarshalizedDataToBroadcastCalled(header, body) } func (blProcMock BlockProcessorStub) DecodeBlockBody(dta []byte) data.BodyHandler { - return blProcMock.DecodeBlockBodyCalled(dta) + return blProcMock.DecodeBlockBodyCalled(dta) } func (blProcMock BlockProcessorStub) DecodeBlockHeader(dta []byte) data.HeaderHandler { - return blProcMock.DecodeBlockHeaderCalled(dta) + return blProcMock.DecodeBlockHeaderCalled(dta) } func (blProcMock BlockProcessorStub) AddLastNotarizedHdr(shardId uint32, processedHdr data.HeaderHandler) { - blProcMock.AddLastNotarizedHdrCalled(shardId, processedHdr) + blProcMock.AddLastNotarizedHdrCalled(shardId, processedHdr) } func (blProcMock BlockProcessorStub) SetConsensusRewardAddresses([]string, uint64) { - panic("implement me") + panic("implement me") } // IsInterfaceNil returns true if there is no value under the interface func (blProcMock *BlockProcessorStub) IsInterfaceNil() 
bool { - if blProcMock == nil { - return true - } - return false + if blProcMock == nil { + return true + } + return false } diff --git a/process/block/baseProcess.go b/process/block/baseProcess.go index 3659d098bd2..b07e72079a8 100644 --- a/process/block/baseProcess.go +++ b/process/block/baseProcess.go @@ -1,528 +1,528 @@ package block import ( - "bytes" - "fmt" - "sort" - "sync" - - "github.com/ElrondNetwork/elrond-go/core" - "github.com/ElrondNetwork/elrond-go/core/logger" - "github.com/ElrondNetwork/elrond-go/data" - "github.com/ElrondNetwork/elrond-go/data/block" - "github.com/ElrondNetwork/elrond-go/data/state" - "github.com/ElrondNetwork/elrond-go/data/typeConverters" - "github.com/ElrondNetwork/elrond-go/dataRetriever" - "github.com/ElrondNetwork/elrond-go/display" - "github.com/ElrondNetwork/elrond-go/hashing" - "github.com/ElrondNetwork/elrond-go/marshal" - "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/sharding" + "bytes" + "fmt" + "sort" + "sync" + + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/core/logger" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/data/typeConverters" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/display" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/sharding" ) var log = logger.DefaultLogger() type hashAndHdr struct { - hdr data.HeaderHandler - hash []byte + hdr data.HeaderHandler + hash []byte } type mapShardHeaders map[uint32][]data.HeaderHandler type baseProcessor struct { - shardCoordinator sharding.Coordinator - nodesCoordinator sharding.NodesCoordinator - specialAddressHandler process.SpecialAddressHandler - accounts state.AccountsAdapter - forkDetector process.ForkDetector - hasher hashing.Hasher - marshalizer marshal.Marshalizer - store dataRetriever.StorageService - uint64Converter typeConverters.Uint64ByteSliceConverter - blockSizeThrottler process.BlockSizeThrottler - - mutNotarizedHdrs sync.RWMutex - notarizedHdrs mapShardHeaders - - onRequestHeaderHandlerByNonce func(shardId uint32, nonce uint64) - onRequestHeaderHandler func(shardId uint32, hash []byte) - - appStatusHandler core.AppStatusHandler + shardCoordinator sharding.Coordinator + nodesCoordinator sharding.NodesCoordinator + specialAddressHandler process.SpecialAddressHandler + accounts state.AccountsAdapter + forkDetector process.ForkDetector + hasher hashing.Hasher + marshalizer marshal.Marshalizer + store dataRetriever.StorageService + uint64Converter typeConverters.Uint64ByteSliceConverter + blockSizeThrottler process.BlockSizeThrottler + + mutNotarizedHdrs sync.RWMutex + notarizedHdrs mapShardHeaders + + onRequestHeaderHandlerByNonce func(shardId uint32, nonce uint64) + onRequestHeaderHandler func(shardId uint32, hash []byte) + + appStatusHandler core.AppStatusHandler } func checkForNils( - chainHandler data.ChainHandler, - headerHandler data.HeaderHandler, - bodyHandler data.BodyHandler, + chainHandler data.ChainHandler, + headerHandler data.HeaderHandler, + bodyHandler data.BodyHandler, ) error { - if chainHandler == nil || chainHandler.IsInterfaceNil() { - return process.ErrNilBlockChain - } - if headerHandler == nil || headerHandler.IsInterfaceNil() { - return process.ErrNilBlockHeader - } - if 
bodyHandler == nil || bodyHandler.IsInterfaceNil() { - return process.ErrNilBlockBody - } - return nil + if chainHandler == nil || chainHandler.IsInterfaceNil() { + return process.ErrNilBlockChain + } + if headerHandler == nil || headerHandler.IsInterfaceNil() { + return process.ErrNilBlockHeader + } + if bodyHandler == nil || bodyHandler.IsInterfaceNil() { + return process.ErrNilBlockBody + } + return nil } // SetAppStatusHandler method is used to set appStatusHandler func (bp *baseProcessor) SetAppStatusHandler(ash core.AppStatusHandler) error { - if ash == nil || ash.IsInterfaceNil() { - return process.ErrNilAppStatusHandler - } + if ash == nil || ash.IsInterfaceNil() { + return process.ErrNilAppStatusHandler + } - bp.appStatusHandler = ash - return nil + bp.appStatusHandler = ash + return nil } // RevertAccountState reverts the account state for cleanup failed process func (bp *baseProcessor) RevertAccountState() { - err := bp.accounts.RevertToSnapshot(0) - if err != nil { - log.Error(err.Error()) - } + err := bp.accounts.RevertToSnapshot(0) + if err != nil { + log.Error(err.Error()) + } } // AddLastNotarizedHdr adds the last notarized header func (bp *baseProcessor) AddLastNotarizedHdr(shardId uint32, processedHdr data.HeaderHandler) { - bp.mutNotarizedHdrs.Lock() - bp.notarizedHdrs[shardId] = append(bp.notarizedHdrs[shardId], processedHdr) - bp.mutNotarizedHdrs.Unlock() + bp.mutNotarizedHdrs.Lock() + bp.notarizedHdrs[shardId] = append(bp.notarizedHdrs[shardId], processedHdr) + bp.mutNotarizedHdrs.Unlock() } // checkBlockValidity method checks if the given block is valid func (bp *baseProcessor) checkBlockValidity( - chainHandler data.ChainHandler, - headerHandler data.HeaderHandler, - bodyHandler data.BodyHandler, + chainHandler data.ChainHandler, + headerHandler data.HeaderHandler, + bodyHandler data.BodyHandler, ) error { - err := checkForNils(chainHandler, headerHandler, bodyHandler) - if err != nil { - return err - } + err := checkForNils(chainHandler, headerHandler, bodyHandler) + if err != nil { + return err + } - currentBlockHeader := chainHandler.GetCurrentBlockHeader() + currentBlockHeader := chainHandler.GetCurrentBlockHeader() - if currentBlockHeader == nil { - if headerHandler.GetNonce() == 1 { // first block after genesis - if bytes.Equal(headerHandler.GetPrevHash(), chainHandler.GetGenesisHeaderHash()) { - // TODO: add genesis block verification - return nil - } + if currentBlockHeader == nil { + if headerHandler.GetNonce() == 1 { // first block after genesis + if bytes.Equal(headerHandler.GetPrevHash(), chainHandler.GetGenesisHeaderHash()) { + // TODO: add genesis block verification + return nil + } - log.Info(fmt.Sprintf("hash not match: local block hash is %s and node received block with previous hash %s\n", - core.ToB64(chainHandler.GetGenesisHeaderHash()), - core.ToB64(headerHandler.GetPrevHash()))) + log.Info(fmt.Sprintf("hash not match: local block hash is %s and node received block with previous hash %s\n", + core.ToB64(chainHandler.GetGenesisHeaderHash()), + core.ToB64(headerHandler.GetPrevHash()))) - return process.ErrBlockHashDoesNotMatch - } + return process.ErrBlockHashDoesNotMatch + } - log.Info(fmt.Sprintf("nonce not match: local block nonce is 0 and node received block with nonce %d\n", - headerHandler.GetNonce())) + log.Info(fmt.Sprintf("nonce not match: local block nonce is 0 and node received block with nonce %d\n", + headerHandler.GetNonce())) - return process.ErrWrongNonceInBlock - } + return process.ErrWrongNonceInBlock + } - if 
headerHandler.GetRound() <= currentBlockHeader.GetRound() { - log.Info(fmt.Sprintf("round not match: local block round is %d and node received block with round %d\n", - currentBlockHeader.GetRound(), headerHandler.GetRound())) + if headerHandler.GetRound() <= currentBlockHeader.GetRound() { + log.Info(fmt.Sprintf("round not match: local block round is %d and node received block with round %d\n", + currentBlockHeader.GetRound(), headerHandler.GetRound())) - return process.ErrLowerRoundInBlock - } + return process.ErrLowerRoundInBlock + } - if headerHandler.GetNonce() != currentBlockHeader.GetNonce()+1 { - log.Info(fmt.Sprintf("nonce not match: local block nonce is %d and node received block with nonce %d\n", - currentBlockHeader.GetNonce(), headerHandler.GetNonce())) + if headerHandler.GetNonce() != currentBlockHeader.GetNonce()+1 { + log.Info(fmt.Sprintf("nonce not match: local block nonce is %d and node received block with nonce %d\n", + currentBlockHeader.GetNonce(), headerHandler.GetNonce())) - return process.ErrWrongNonceInBlock - } + return process.ErrWrongNonceInBlock + } - prevHeaderHash, err := core.CalculateHash(bp.marshalizer, bp.hasher, currentBlockHeader) - if err != nil { - return err - } + prevHeaderHash, err := core.CalculateHash(bp.marshalizer, bp.hasher, currentBlockHeader) + if err != nil { + return err + } - if !bytes.Equal(headerHandler.GetPrevRandSeed(), currentBlockHeader.GetRandSeed()) { - log.Info(fmt.Sprintf("random seed not match: local block random seed is %s and node received block with previous random seed %s\n", - core.ToB64(currentBlockHeader.GetRandSeed()), core.ToB64(headerHandler.GetPrevRandSeed()))) + if !bytes.Equal(headerHandler.GetPrevRandSeed(), currentBlockHeader.GetRandSeed()) { + log.Info(fmt.Sprintf("random seed not match: local block random seed is %s and node received block with previous random seed %s\n", + core.ToB64(currentBlockHeader.GetRandSeed()), core.ToB64(headerHandler.GetPrevRandSeed()))) - return process.ErrRandSeedMismatch - } + return process.ErrRandSeedMismatch + } - if !bytes.Equal(headerHandler.GetPrevHash(), prevHeaderHash) { - log.Info(fmt.Sprintf("hash not match: local block hash is %s and node received block with previous hash %s\n", - core.ToB64(prevHeaderHash), core.ToB64(headerHandler.GetPrevHash()))) + if !bytes.Equal(headerHandler.GetPrevHash(), prevHeaderHash) { + log.Info(fmt.Sprintf("hash not match: local block hash is %s and node received block with previous hash %s\n", + core.ToB64(prevHeaderHash), core.ToB64(headerHandler.GetPrevHash()))) - return process.ErrBlockHashDoesNotMatch - } + return process.ErrBlockHashDoesNotMatch + } - if bodyHandler != nil { - // TODO: add bodyHandler verification here - } + if bodyHandler != nil { + // TODO: add bodyHandler verification here + } - // TODO: add signature validation as well, with randomness source and all - return nil + // TODO: add signature validation as well, with randomness source and all + return nil } // verifyStateRoot verifies the state root hash given as parameter against the // Merkle trie root hash stored for accounts and returns if equal or not func (bp *baseProcessor) verifyStateRoot(rootHash []byte) bool { - trieRootHash, err := bp.accounts.RootHash() - if err != nil { - log.Debug(err.Error()) - } + trieRootHash, err := bp.accounts.RootHash() + if err != nil { + log.Debug(err.Error()) + } - return bytes.Equal(trieRootHash, rootHash) + return bytes.Equal(trieRootHash, rootHash) } // getRootHash returns the accounts merkle tree root hash func (bp 
*baseProcessor) getRootHash() []byte { - rootHash, err := bp.accounts.RootHash() - if err != nil { - log.Debug(err.Error()) - } + rootHash, err := bp.accounts.RootHash() + if err != nil { + log.Debug(err.Error()) + } - return rootHash + return rootHash } func (bp *baseProcessor) isHdrConstructionValid(currHdr, prevHdr data.HeaderHandler) error { - if prevHdr == nil || prevHdr.IsInterfaceNil() { - return process.ErrNilBlockHeader - } - if currHdr == nil || currHdr.IsInterfaceNil() { - return process.ErrNilBlockHeader - } - - // special case with genesis nonce - 0 - if currHdr.GetNonce() == 0 { - if prevHdr.GetNonce() != 0 { - return process.ErrWrongNonceInBlock - } - // block with nonce 0 was already saved - if prevHdr.GetRootHash() != nil { - return process.ErrRootStateMissmatch - } - return nil - } - - //TODO: add verification if rand seed was correctly computed add other verification - //TODO: check here if the 2 header blocks were correctly signed and the consensus group was correctly elected - if prevHdr.GetRound() >= currHdr.GetRound() { - return process.ErrLowerRoundInOtherChainBlock - } - - if currHdr.GetNonce() != prevHdr.GetNonce()+1 { - return process.ErrWrongNonceInBlock - } - - prevHeaderHash, err := core.CalculateHash(bp.marshalizer, bp.hasher, prevHdr) - if err != nil { - return err - } - - if !bytes.Equal(currHdr.GetPrevRandSeed(), prevHdr.GetRandSeed()) { - return process.ErrRandSeedMismatch - } - - if !bytes.Equal(currHdr.GetPrevHash(), prevHeaderHash) { - return process.ErrHashDoesNotMatchInOtherChainBlock - } - - return nil + if prevHdr == nil || prevHdr.IsInterfaceNil() { + return process.ErrNilBlockHeader + } + if currHdr == nil || currHdr.IsInterfaceNil() { + return process.ErrNilBlockHeader + } + + // special case with genesis nonce - 0 + if currHdr.GetNonce() == 0 { + if prevHdr.GetNonce() != 0 { + return process.ErrWrongNonceInBlock + } + // block with nonce 0 was already saved + if prevHdr.GetRootHash() != nil { + return process.ErrRootStateMissmatch + } + return nil + } + + //TODO: add verification if rand seed was correctly computed add other verification + //TODO: check here if the 2 header blocks were correctly signed and the consensus group was correctly elected + if prevHdr.GetRound() >= currHdr.GetRound() { + return process.ErrLowerRoundInOtherChainBlock + } + + if currHdr.GetNonce() != prevHdr.GetNonce()+1 { + return process.ErrWrongNonceInBlock + } + + prevHeaderHash, err := core.CalculateHash(bp.marshalizer, bp.hasher, prevHdr) + if err != nil { + return err + } + + if !bytes.Equal(currHdr.GetPrevRandSeed(), prevHdr.GetRandSeed()) { + return process.ErrRandSeedMismatch + } + + if !bytes.Equal(currHdr.GetPrevHash(), prevHeaderHash) { + return process.ErrHashDoesNotMatchInOtherChainBlock + } + + return nil } func (bp *baseProcessor) checkHeaderTypeCorrect(shardId uint32, hdr data.HeaderHandler) error { - if shardId >= bp.shardCoordinator.NumberOfShards() && shardId != sharding.MetachainShardId { - return process.ErrShardIdMissmatch - } - - if shardId < bp.shardCoordinator.NumberOfShards() { - _, ok := hdr.(*block.Header) - if !ok { - return process.ErrWrongTypeAssertion - } - } - - if shardId == sharding.MetachainShardId { - _, ok := hdr.(*block.MetaBlock) - if !ok { - return process.ErrWrongTypeAssertion - } - } - - return nil + if shardId >= bp.shardCoordinator.NumberOfShards() && shardId != sharding.MetachainShardId { + return process.ErrShardIdMissmatch + } + + if shardId < bp.shardCoordinator.NumberOfShards() { + _, ok := hdr.(*block.Header) + if !ok 
{ + return process.ErrWrongTypeAssertion + } + } + + if shardId == sharding.MetachainShardId { + _, ok := hdr.(*block.MetaBlock) + if !ok { + return process.ErrWrongTypeAssertion + } + } + + return nil } func (bp *baseProcessor) removeNotarizedHdrsBehindFinal(hdrsToAttestFinality uint32) { - bp.mutNotarizedHdrs.Lock() - for shardId := range bp.notarizedHdrs { - notarizedHdrsCount := uint32(len(bp.notarizedHdrs[shardId])) - if notarizedHdrsCount > hdrsToAttestFinality { - finalIndex := notarizedHdrsCount - 1 - hdrsToAttestFinality - bp.notarizedHdrs[shardId] = bp.notarizedHdrs[shardId][finalIndex:] - } - } - bp.mutNotarizedHdrs.Unlock() + bp.mutNotarizedHdrs.Lock() + for shardId := range bp.notarizedHdrs { + notarizedHdrsCount := uint32(len(bp.notarizedHdrs[shardId])) + if notarizedHdrsCount > hdrsToAttestFinality { + finalIndex := notarizedHdrsCount - 1 - hdrsToAttestFinality + bp.notarizedHdrs[shardId] = bp.notarizedHdrs[shardId][finalIndex:] + } + } + bp.mutNotarizedHdrs.Unlock() } func (bp *baseProcessor) removeLastNotarized() { - bp.mutNotarizedHdrs.Lock() - for shardId := range bp.notarizedHdrs { - notarizedHdrsCount := len(bp.notarizedHdrs[shardId]) - if notarizedHdrsCount > 0 { - bp.notarizedHdrs[shardId] = bp.notarizedHdrs[shardId][:notarizedHdrsCount-1] - } - } - bp.mutNotarizedHdrs.Unlock() + bp.mutNotarizedHdrs.Lock() + for shardId := range bp.notarizedHdrs { + notarizedHdrsCount := len(bp.notarizedHdrs[shardId]) + if notarizedHdrsCount > 0 { + bp.notarizedHdrs[shardId] = bp.notarizedHdrs[shardId][:notarizedHdrsCount-1] + } + } + bp.mutNotarizedHdrs.Unlock() } func (bp *baseProcessor) lastNotarizedHdrForShard(shardId uint32) data.HeaderHandler { - notarizedHdrsCount := len(bp.notarizedHdrs[shardId]) - if notarizedHdrsCount > 0 { - return bp.notarizedHdrs[shardId][notarizedHdrsCount-1] - } + notarizedHdrsCount := len(bp.notarizedHdrs[shardId]) + if notarizedHdrsCount > 0 { + return bp.notarizedHdrs[shardId][notarizedHdrsCount-1] + } - return nil + return nil } func (bp *baseProcessor) saveLastNotarizedHeader(shardId uint32, processedHdrs []data.HeaderHandler) error { - bp.mutNotarizedHdrs.Lock() - defer bp.mutNotarizedHdrs.Unlock() + bp.mutNotarizedHdrs.Lock() + defer bp.mutNotarizedHdrs.Unlock() - if bp.notarizedHdrs == nil { - return process.ErrNotarizedHdrsSliceIsNil - } + if bp.notarizedHdrs == nil { + return process.ErrNotarizedHdrsSliceIsNil + } - err := bp.checkHeaderTypeCorrect(shardId, bp.lastNotarizedHdrForShard(shardId)) - if err != nil { - return err - } + err := bp.checkHeaderTypeCorrect(shardId, bp.lastNotarizedHdrForShard(shardId)) + if err != nil { + return err + } - sort.Slice(processedHdrs, func(i, j int) bool { - return processedHdrs[i].GetNonce() < processedHdrs[j].GetNonce() - }) + sort.Slice(processedHdrs, func(i, j int) bool { + return processedHdrs[i].GetNonce() < processedHdrs[j].GetNonce() + }) - tmpLastNotarizedHdrForShard := bp.lastNotarizedHdrForShard(shardId) + tmpLastNotarizedHdrForShard := bp.lastNotarizedHdrForShard(shardId) - for i := 0; i < len(processedHdrs); i++ { - err = bp.checkHeaderTypeCorrect(shardId, processedHdrs[i]) - if err != nil { - return err - } + for i := 0; i < len(processedHdrs); i++ { + err = bp.checkHeaderTypeCorrect(shardId, processedHdrs[i]) + if err != nil { + return err + } - err = bp.isHdrConstructionValid(processedHdrs[i], tmpLastNotarizedHdrForShard) - if err != nil { - return err - } + err = bp.isHdrConstructionValid(processedHdrs[i], tmpLastNotarizedHdrForShard) + if err != nil { + return err + } - 
tmpLastNotarizedHdrForShard = processedHdrs[i] - } + tmpLastNotarizedHdrForShard = processedHdrs[i] + } - bp.notarizedHdrs[shardId] = append(bp.notarizedHdrs[shardId], tmpLastNotarizedHdrForShard) - DisplayLastNotarized(bp.marshalizer, bp.hasher, tmpLastNotarizedHdrForShard, shardId) + bp.notarizedHdrs[shardId] = append(bp.notarizedHdrs[shardId], tmpLastNotarizedHdrForShard) + DisplayLastNotarized(bp.marshalizer, bp.hasher, tmpLastNotarizedHdrForShard, shardId) - return nil + return nil } func (bp *baseProcessor) getLastNotarizedHdr(shardId uint32) (data.HeaderHandler, error) { - bp.mutNotarizedHdrs.RLock() - defer bp.mutNotarizedHdrs.RUnlock() + bp.mutNotarizedHdrs.RLock() + defer bp.mutNotarizedHdrs.RUnlock() - if bp.notarizedHdrs == nil { - return nil, process.ErrNotarizedHdrsSliceIsNil - } + if bp.notarizedHdrs == nil { + return nil, process.ErrNotarizedHdrsSliceIsNil + } - hdr := bp.lastNotarizedHdrForShard(shardId) + hdr := bp.lastNotarizedHdrForShard(shardId) - err := bp.checkHeaderTypeCorrect(shardId, hdr) - if err != nil { - return nil, err - } + err := bp.checkHeaderTypeCorrect(shardId, hdr) + if err != nil { + return nil, err + } - return hdr, nil + return hdr, nil } // SetLastNotarizedHeadersSlice sets the headers blocks in notarizedHdrs for every shard // This is done when starting a new epoch so metachain can use it when validating next shard header blocks // and shard can validate the next meta header func (bp *baseProcessor) setLastNotarizedHeadersSlice(startHeaders map[uint32]data.HeaderHandler) error { - //TODO: protect this to be called only once at genesis time - //TODO: do this on constructor as it is a must to for blockprocessor to work - bp.mutNotarizedHdrs.Lock() - defer bp.mutNotarizedHdrs.Unlock() - - if startHeaders == nil { - return process.ErrNotarizedHdrsSliceIsNil - } - - bp.notarizedHdrs = make(mapShardHeaders, bp.shardCoordinator.NumberOfShards()) - for i := uint32(0); i < bp.shardCoordinator.NumberOfShards(); i++ { - hdr, ok := startHeaders[i].(*block.Header) - if !ok { - return process.ErrWrongTypeAssertion - } - bp.notarizedHdrs[i] = append(bp.notarizedHdrs[i], hdr) - } - - hdr, ok := startHeaders[sharding.MetachainShardId].(*block.MetaBlock) - if !ok { - return process.ErrWrongTypeAssertion - } - bp.notarizedHdrs[sharding.MetachainShardId] = append(bp.notarizedHdrs[sharding.MetachainShardId], hdr) - - return nil + //TODO: protect this to be called only once at genesis time + //TODO: do this on constructor as it is a must to for blockprocessor to work + bp.mutNotarizedHdrs.Lock() + defer bp.mutNotarizedHdrs.Unlock() + + if startHeaders == nil { + return process.ErrNotarizedHdrsSliceIsNil + } + + bp.notarizedHdrs = make(mapShardHeaders, bp.shardCoordinator.NumberOfShards()) + for i := uint32(0); i < bp.shardCoordinator.NumberOfShards(); i++ { + hdr, ok := startHeaders[i].(*block.Header) + if !ok { + return process.ErrWrongTypeAssertion + } + bp.notarizedHdrs[i] = append(bp.notarizedHdrs[i], hdr) + } + + hdr, ok := startHeaders[sharding.MetachainShardId].(*block.MetaBlock) + if !ok { + return process.ErrWrongTypeAssertion + } + bp.notarizedHdrs[sharding.MetachainShardId] = append(bp.notarizedHdrs[sharding.MetachainShardId], hdr) + + return nil } func (bp *baseProcessor) requestHeadersIfMissing(sortedHdrs []data.HeaderHandler, shardId uint32, maxRound uint64) error { - prevHdr, err := bp.getLastNotarizedHdr(shardId) - if err != nil { - return err - } - - isLastNotarizedCloseToOurRound := maxRound-prevHdr.GetRound() <= process.MaxHeaderRequestsAllowed - if 
len(sortedHdrs) == 0 && isLastNotarizedCloseToOurRound { - return process.ErrNoSortedHdrsForShard - } - - missingNonces := make([]uint64, 0) - for i := 0; i < len(sortedHdrs); i++ { - currHdr := sortedHdrs[i] - if currHdr == nil { - continue - } - - if i > 0 { - prevHdr = sortedHdrs[i-1] - } - - hdrTooNew := currHdr.GetRound() > maxRound || prevHdr.GetRound() > maxRound - if hdrTooNew { - continue - } - - if currHdr.GetNonce()-prevHdr.GetNonce() > 1 { - for j := prevHdr.GetNonce() + 1; j < currHdr.GetNonce(); j++ { - missingNonces = append(missingNonces, j) - } - } - } - - // ask for headers, if there most probably should be - if len(missingNonces) == 0 && !isLastNotarizedCloseToOurRound { - startNonce := prevHdr.GetNonce() + 1 - for nonce := startNonce; nonce < startNonce+process.MaxHeaderRequestsAllowed; nonce++ { - missingNonces = append(missingNonces, nonce) - } - } - - requested := 0 - for _, nonce := range missingNonces { - // do the request here - if bp.onRequestHeaderHandlerByNonce == nil { - return process.ErrNilRequestHeaderHandlerByNonce - } - - if requested >= process.MaxHeaderRequestsAllowed { - break - } - - requested++ - go bp.onRequestHeaderHandlerByNonce(shardId, nonce) - } - - return nil + prevHdr, err := bp.getLastNotarizedHdr(shardId) + if err != nil { + return err + } + + isLastNotarizedCloseToOurRound := maxRound-prevHdr.GetRound() <= process.MaxHeaderRequestsAllowed + if len(sortedHdrs) == 0 && isLastNotarizedCloseToOurRound { + return process.ErrNoSortedHdrsForShard + } + + missingNonces := make([]uint64, 0) + for i := 0; i < len(sortedHdrs); i++ { + currHdr := sortedHdrs[i] + if currHdr == nil { + continue + } + + if i > 0 { + prevHdr = sortedHdrs[i-1] + } + + hdrTooNew := currHdr.GetRound() > maxRound || prevHdr.GetRound() > maxRound + if hdrTooNew { + continue + } + + if currHdr.GetNonce()-prevHdr.GetNonce() > 1 { + for j := prevHdr.GetNonce() + 1; j < currHdr.GetNonce(); j++ { + missingNonces = append(missingNonces, j) + } + } + } + + // ask for headers, if there most probably should be + if len(missingNonces) == 0 && !isLastNotarizedCloseToOurRound { + startNonce := prevHdr.GetNonce() + 1 + for nonce := startNonce; nonce < startNonce+process.MaxHeaderRequestsAllowed; nonce++ { + missingNonces = append(missingNonces, nonce) + } + } + + requested := 0 + for _, nonce := range missingNonces { + // do the request here + if bp.onRequestHeaderHandlerByNonce == nil { + return process.ErrNilRequestHeaderHandlerByNonce + } + + if requested >= process.MaxHeaderRequestsAllowed { + break + } + + requested++ + go bp.onRequestHeaderHandlerByNonce(shardId, nonce) + } + + return nil } func displayHeader(headerHandler data.HeaderHandler) []*display.LineData { - lines := make([]*display.LineData, 0) - - lines = append(lines, display.NewLineData(false, []string{ - "", - "Epoch", - fmt.Sprintf("%d", headerHandler.GetEpoch())})) - lines = append(lines, display.NewLineData(false, []string{ - "", - "Round", - fmt.Sprintf("%d", headerHandler.GetRound())})) - lines = append(lines, display.NewLineData(false, []string{ - "", - "TimeStamp", - fmt.Sprintf("%d", headerHandler.GetTimeStamp())})) - lines = append(lines, display.NewLineData(false, []string{ - "", - "Nonce", - fmt.Sprintf("%d", headerHandler.GetNonce())})) - lines = append(lines, display.NewLineData(false, []string{ - "", - "Prev hash", - core.ToB64(headerHandler.GetPrevHash())})) - lines = append(lines, display.NewLineData(false, []string{ - "", - "Prev rand seed", - core.ToB64(headerHandler.GetPrevRandSeed())})) - lines = 
append(lines, display.NewLineData(false, []string{ - "", - "Rand seed", - core.ToB64(headerHandler.GetRandSeed())})) - lines = append(lines, display.NewLineData(false, []string{ - "", - "Pub keys bitmap", - core.ToHex(headerHandler.GetPubKeysBitmap())})) - lines = append(lines, display.NewLineData(false, []string{ - "", - "Signature", - core.ToB64(headerHandler.GetSignature())})) - lines = append(lines, display.NewLineData(true, []string{ - "", - "Root hash", - core.ToB64(headerHandler.GetRootHash())})) - return lines + lines := make([]*display.LineData, 0) + + lines = append(lines, display.NewLineData(false, []string{ + "", + "Epoch", + fmt.Sprintf("%d", headerHandler.GetEpoch())})) + lines = append(lines, display.NewLineData(false, []string{ + "", + "Round", + fmt.Sprintf("%d", headerHandler.GetRound())})) + lines = append(lines, display.NewLineData(false, []string{ + "", + "TimeStamp", + fmt.Sprintf("%d", headerHandler.GetTimeStamp())})) + lines = append(lines, display.NewLineData(false, []string{ + "", + "Nonce", + fmt.Sprintf("%d", headerHandler.GetNonce())})) + lines = append(lines, display.NewLineData(false, []string{ + "", + "Prev hash", + core.ToB64(headerHandler.GetPrevHash())})) + lines = append(lines, display.NewLineData(false, []string{ + "", + "Prev rand seed", + core.ToB64(headerHandler.GetPrevRandSeed())})) + lines = append(lines, display.NewLineData(false, []string{ + "", + "Rand seed", + core.ToB64(headerHandler.GetRandSeed())})) + lines = append(lines, display.NewLineData(false, []string{ + "", + "Pub keys bitmap", + core.ToHex(headerHandler.GetPubKeysBitmap())})) + lines = append(lines, display.NewLineData(false, []string{ + "", + "Signature", + core.ToB64(headerHandler.GetSignature())})) + lines = append(lines, display.NewLineData(true, []string{ + "", + "Root hash", + core.ToB64(headerHandler.GetRootHash())})) + return lines } // checkProcessorNilParameters will check the imput parameters for nil values func checkProcessorNilParameters( - accounts state.AccountsAdapter, - forkDetector process.ForkDetector, - hasher hashing.Hasher, - marshalizer marshal.Marshalizer, - store dataRetriever.StorageService, - shardCoordinator sharding.Coordinator, - nodesCoordinator sharding.NodesCoordinator, - specialAddressHandler process.SpecialAddressHandler, - uint64Converter typeConverters.Uint64ByteSliceConverter, + accounts state.AccountsAdapter, + forkDetector process.ForkDetector, + hasher hashing.Hasher, + marshalizer marshal.Marshalizer, + store dataRetriever.StorageService, + shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, + specialAddressHandler process.SpecialAddressHandler, + uint64Converter typeConverters.Uint64ByteSliceConverter, ) error { - if accounts == nil || accounts.IsInterfaceNil() { - return process.ErrNilAccountsAdapter - } - if forkDetector == nil || forkDetector.IsInterfaceNil() { - return process.ErrNilForkDetector - } - if hasher == nil || hasher.IsInterfaceNil() { - return process.ErrNilHasher - } - if marshalizer == nil || marshalizer.IsInterfaceNil() { - return process.ErrNilMarshalizer - } - if store == nil || store.IsInterfaceNil() { - return process.ErrNilStorage - } - if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { - return process.ErrNilShardCoordinator - } - if nodesCoordinator == nil || nodesCoordinator.IsInterfaceNil() { - return process.ErrNilNodesCoordinator - } - if specialAddressHandler == nil || specialAddressHandler.IsInterfaceNil() { - return process.ErrNilSpecialAddressHandler - } - if 
uint64Converter == nil || uint64Converter.IsInterfaceNil() { - return process.ErrNilUint64Converter - } - - return nil + if accounts == nil || accounts.IsInterfaceNil() { + return process.ErrNilAccountsAdapter + } + if forkDetector == nil || forkDetector.IsInterfaceNil() { + return process.ErrNilForkDetector + } + if hasher == nil || hasher.IsInterfaceNil() { + return process.ErrNilHasher + } + if marshalizer == nil || marshalizer.IsInterfaceNil() { + return process.ErrNilMarshalizer + } + if store == nil || store.IsInterfaceNil() { + return process.ErrNilStorage + } + if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { + return process.ErrNilShardCoordinator + } + if nodesCoordinator == nil || nodesCoordinator.IsInterfaceNil() { + return process.ErrNilNodesCoordinator + } + if specialAddressHandler == nil || specialAddressHandler.IsInterfaceNil() { + return process.ErrNilSpecialAddressHandler + } + if uint64Converter == nil || uint64Converter.IsInterfaceNil() { + return process.ErrNilUint64Converter + } + + return nil } diff --git a/process/block/displayBlock.go b/process/block/displayBlock.go index f48a7155077..f0d3329c7d6 100644 --- a/process/block/displayBlock.go +++ b/process/block/displayBlock.go @@ -1,235 +1,235 @@ package block import ( - "fmt" - "sync" - - "github.com/ElrondNetwork/elrond-go/core" - "github.com/ElrondNetwork/elrond-go/data" - "github.com/ElrondNetwork/elrond-go/data/block" - "github.com/ElrondNetwork/elrond-go/dataRetriever" - "github.com/ElrondNetwork/elrond-go/display" - "github.com/ElrondNetwork/elrond-go/hashing" - "github.com/ElrondNetwork/elrond-go/marshal" - "github.com/ElrondNetwork/elrond-go/process" + "fmt" + "sync" + + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/display" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/process" ) type transactionCounter struct { - mutex sync.RWMutex - currentBlockTxs int - totalTxs int + mutex sync.RWMutex + currentBlockTxs int + totalTxs int } // NewTransactionCounter returns a new object that keeps track of how many transactions // were executed in total, and in the current block func NewTransactionCounter() *transactionCounter { - return &transactionCounter{ - mutex: sync.RWMutex{}, - currentBlockTxs: 0, - totalTxs: 0, - } + return &transactionCounter{ + mutex: sync.RWMutex{}, + currentBlockTxs: 0, + totalTxs: 0, + } } // getNumTxsFromPool returns the number of transactions from pool for a given shard func (txc *transactionCounter) getNumTxsFromPool(shardId uint32, dataPool dataRetriever.PoolsHolder, nrShards uint32) int { - txPool := dataPool.Transactions() - if txPool == nil { - return 0 - } - - sumTxs := 0 - - strCache := process.ShardCacherIdentifier(shardId, shardId) - txStore := txPool.ShardDataStore(strCache) - if txStore != nil { - sumTxs += txStore.Len() - } - - for i := uint32(0); i < nrShards; i++ { - if i == shardId { - continue - } - - strCache = process.ShardCacherIdentifier(i, shardId) - txStore = txPool.ShardDataStore(strCache) - if txStore != nil { - sumTxs += txStore.Len() - } - - strCache = process.ShardCacherIdentifier(shardId, i) - txStore = txPool.ShardDataStore(strCache) - if txStore != nil { - sumTxs += txStore.Len() - } - } - - return sumTxs + txPool := dataPool.Transactions() + if txPool == nil { + return 0 + } + + 
sumTxs := 0 + + strCache := process.ShardCacherIdentifier(shardId, shardId) + txStore := txPool.ShardDataStore(strCache) + if txStore != nil { + sumTxs += txStore.Len() + } + + for i := uint32(0); i < nrShards; i++ { + if i == shardId { + continue + } + + strCache = process.ShardCacherIdentifier(i, shardId) + txStore = txPool.ShardDataStore(strCache) + if txStore != nil { + sumTxs += txStore.Len() + } + + strCache = process.ShardCacherIdentifier(shardId, i) + txStore = txPool.ShardDataStore(strCache) + if txStore != nil { + sumTxs += txStore.Len() + } + } + + return sumTxs } // substractRestoredTxs updated the total processed txs in case of restore func (txc *transactionCounter) substractRestoredTxs(txsNr int) { - txc.mutex.Lock() - txc.totalTxs = txc.totalTxs - txsNr - txc.mutex.Unlock() + txc.mutex.Lock() + txc.totalTxs = txc.totalTxs - txsNr + txc.mutex.Unlock() } // displayLogInfo writes to the output information about the block and transactions func (txc *transactionCounter) displayLogInfo( - header *block.Header, - body block.Body, - headerHash []byte, - numShards uint32, - selfId uint32, - dataPool dataRetriever.PoolsHolder, + header *block.Header, + body block.Body, + headerHash []byte, + numShards uint32, + selfId uint32, + dataPool dataRetriever.PoolsHolder, ) { - dispHeader, dispLines := txc.createDisplayableShardHeaderAndBlockBody(header, body) - - tblString, err := display.CreateTableString(dispHeader, dispLines) - if err != nil { - log.Error(err.Error()) - return - } - - txc.mutex.RLock() - tblString = tblString + fmt.Sprintf("\nHeader hash: %s\n\n"+ - "Total txs processed until now: %d. Total txs processed for this block: %d. Total txs remained in pool: %d\n\n"+ - "Total shards: %d. Current shard id: %d\n", - core.ToB64(headerHash), - txc.totalTxs, - txc.currentBlockTxs, - txc.getNumTxsFromPool(selfId, dataPool, numShards), - numShards, - selfId) - txc.mutex.RUnlock() - log.Info(tblString) + dispHeader, dispLines := txc.createDisplayableShardHeaderAndBlockBody(header, body) + + tblString, err := display.CreateTableString(dispHeader, dispLines) + if err != nil { + log.Error(err.Error()) + return + } + + txc.mutex.RLock() + tblString = tblString + fmt.Sprintf("\nHeader hash: %s\n\n"+ + "Total txs processed until now: %d. Total txs processed for this block: %d. Total txs remained in pool: %d\n\n"+ + "Total shards: %d. Current shard id: %d\n", + core.ToB64(headerHash), + txc.totalTxs, + txc.currentBlockTxs, + txc.getNumTxsFromPool(selfId, dataPool, numShards), + numShards, + selfId) + txc.mutex.RUnlock() + log.Info(tblString) } func (txc *transactionCounter) createDisplayableShardHeaderAndBlockBody( - header *block.Header, - body block.Body, + header *block.Header, + body block.Body, ) ([]string, []*display.LineData) { - tableHeader := []string{"Part", "Parameter", "Value"} + tableHeader := []string{"Part", "Parameter", "Value"} - lines := displayHeader(header) + lines := displayHeader(header) - shardLines := make([]*display.LineData, 0) - shardLines = append(shardLines, display.NewLineData(false, []string{ - "Header", - "Block type", - "TxBlock"})) - shardLines = append(shardLines, display.NewLineData(false, []string{ - "", - "Shard", - fmt.Sprintf("%d", header.ShardId)})) - shardLines = append(shardLines, lines...) 
+ shardLines := make([]*display.LineData, 0) + shardLines = append(shardLines, display.NewLineData(false, []string{ + "Header", + "Block type", + "TxBlock"})) + shardLines = append(shardLines, display.NewLineData(false, []string{ + "", + "Shard", + fmt.Sprintf("%d", header.ShardId)})) + shardLines = append(shardLines, lines...) - if header.BlockBodyType == block.TxBlock { - shardLines = txc.displayMetaHashesIncluded(shardLines, header) - shardLines = txc.displayTxBlockBody(shardLines, body) + if header.BlockBodyType == block.TxBlock { + shardLines = txc.displayMetaHashesIncluded(shardLines, header) + shardLines = txc.displayTxBlockBody(shardLines, body) - return tableHeader, shardLines - } + return tableHeader, shardLines + } - // TODO: implement the other block bodies + // TODO: implement the other block bodies - shardLines = append(shardLines, display.NewLineData(false, []string{"Unknown", "", ""})) - return tableHeader, shardLines + shardLines = append(shardLines, display.NewLineData(false, []string{"Unknown", "", ""})) + return tableHeader, shardLines } func (txc *transactionCounter) displayMetaHashesIncluded( - lines []*display.LineData, - header *block.Header, + lines []*display.LineData, + header *block.Header, ) []*display.LineData { - if header.MetaBlockHashes == nil || len(header.MetaBlockHashes) == 0 { - return lines - } + if header.MetaBlockHashes == nil || len(header.MetaBlockHashes) == 0 { + return lines + } - part := fmt.Sprintf("MetaBlockHashes") - for i := 0; i < len(header.MetaBlockHashes); i++ { - if i == 0 || i >= len(header.MetaBlockHashes)-1 { - lines = append(lines, display.NewLineData(false, []string{ - part, - fmt.Sprintf("MetaBlockHash_%d", i+1), - core.ToB64(header.MetaBlockHashes[i])})) + part := fmt.Sprintf("MetaBlockHashes") + for i := 0; i < len(header.MetaBlockHashes); i++ { + if i == 0 || i >= len(header.MetaBlockHashes)-1 { + lines = append(lines, display.NewLineData(false, []string{ + part, + fmt.Sprintf("MetaBlockHash_%d", i+1), + core.ToB64(header.MetaBlockHashes[i])})) - part = "" - } else if i == 1 { - lines = append(lines, display.NewLineData(false, []string{ - part, - fmt.Sprintf("..."), - fmt.Sprintf("...")})) + part = "" + } else if i == 1 { + lines = append(lines, display.NewLineData(false, []string{ + part, + fmt.Sprintf("..."), + fmt.Sprintf("...")})) - part = "" - } - } + part = "" + } + } - lines[len(lines)-1].HorizontalRuleAfter = true + lines[len(lines)-1].HorizontalRuleAfter = true - return lines + return lines } func (txc *transactionCounter) displayTxBlockBody(lines []*display.LineData, body block.Body) []*display.LineData { - currentBlockTxs := 0 + currentBlockTxs := 0 - for i := 0; i < len(body); i++ { - miniBlock := body[i] + for i := 0; i < len(body); i++ { + miniBlock := body[i] - part := fmt.Sprintf("MiniBlock_%d_%d", miniBlock.SenderShardID, miniBlock.ReceiverShardID) + part := fmt.Sprintf("MiniBlock_%d_%d", miniBlock.SenderShardID, miniBlock.ReceiverShardID) - if miniBlock.TxHashes == nil || len(miniBlock.TxHashes) == 0 { - lines = append(lines, display.NewLineData(false, []string{ - part, "", ""})) - } + if miniBlock.TxHashes == nil || len(miniBlock.TxHashes) == 0 { + lines = append(lines, display.NewLineData(false, []string{ + part, "", ""})) + } - currentBlockTxs += len(miniBlock.TxHashes) + currentBlockTxs += len(miniBlock.TxHashes) - for j := 0; j < len(miniBlock.TxHashes); j++ { - if j == 0 || j >= len(miniBlock.TxHashes)-1 { - lines = append(lines, display.NewLineData(false, []string{ - part, - fmt.Sprintf("TxHash_%d", 
j+1), - core.ToB64(miniBlock.TxHashes[j])})) + for j := 0; j < len(miniBlock.TxHashes); j++ { + if j == 0 || j >= len(miniBlock.TxHashes)-1 { + lines = append(lines, display.NewLineData(false, []string{ + part, + fmt.Sprintf("TxHash_%d", j+1), + core.ToB64(miniBlock.TxHashes[j])})) - part = "" - } else if j == 1 { - lines = append(lines, display.NewLineData(false, []string{ - part, - fmt.Sprintf("..."), - fmt.Sprintf("...")})) + part = "" + } else if j == 1 { + lines = append(lines, display.NewLineData(false, []string{ + part, + fmt.Sprintf("..."), + fmt.Sprintf("...")})) - part = "" - } - } + part = "" + } + } - lines[len(lines)-1].HorizontalRuleAfter = true - } + lines[len(lines)-1].HorizontalRuleAfter = true + } - txc.mutex.Lock() - txc.currentBlockTxs = currentBlockTxs - txc.totalTxs += currentBlockTxs - txc.mutex.Unlock() + txc.mutex.Lock() + txc.currentBlockTxs = currentBlockTxs + txc.totalTxs += currentBlockTxs + txc.mutex.Unlock() - return lines + return lines } func DisplayLastNotarized( - marshalizer marshal.Marshalizer, - hasher hashing.Hasher, - lastNotarizedHdrForShard data.HeaderHandler, - shardId uint32) { - lastNotarizedHdrHashForShard, errNotCritical := core.CalculateHash( - marshalizer, - hasher, - lastNotarizedHdrForShard) - if errNotCritical != nil { - log.Debug(errNotCritical.Error()) - } - - log.Info(fmt.Sprintf("last notarized block from shard %d has: round = %d, nonce = %d, hash = %s\n", - shardId, - lastNotarizedHdrForShard.GetRound(), - lastNotarizedHdrForShard.GetNonce(), - core.ToB64(lastNotarizedHdrHashForShard))) + marshalizer marshal.Marshalizer, + hasher hashing.Hasher, + lastNotarizedHdrForShard data.HeaderHandler, + shardId uint32) { + lastNotarizedHdrHashForShard, errNotCritical := core.CalculateHash( + marshalizer, + hasher, + lastNotarizedHdrForShard) + if errNotCritical != nil { + log.Debug(errNotCritical.Error()) + } + + log.Info(fmt.Sprintf("last notarized block from shard %d has: round = %d, nonce = %d, hash = %s\n", + shardId, + lastNotarizedHdrForShard.GetRound(), + lastNotarizedHdrForShard.GetNonce(), + core.ToB64(lastNotarizedHdrHashForShard))) } diff --git a/process/block/metablock.go b/process/block/metablock.go index 6b1c98afdc0..b7733bec771 100644 --- a/process/block/metablock.go +++ b/process/block/metablock.go @@ -1,28 +1,28 @@ package block import ( - "encoding/base64" - "fmt" - "sort" - "sync" - "time" - - "github.com/ElrondNetwork/elrond-go/core" - "github.com/ElrondNetwork/elrond-go/core/serviceContainer" - "github.com/ElrondNetwork/elrond-go/data" - "github.com/ElrondNetwork/elrond-go/data/block" - "github.com/ElrondNetwork/elrond-go/data/state" - "github.com/ElrondNetwork/elrond-go/data/typeConverters" - "github.com/ElrondNetwork/elrond-go/dataRetriever" - "github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool" - "github.com/ElrondNetwork/elrond-go/display" - "github.com/ElrondNetwork/elrond-go/hashing" - "github.com/ElrondNetwork/elrond-go/marshal" - "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/process/throttle" - "github.com/ElrondNetwork/elrond-go/sharding" - "github.com/ElrondNetwork/elrond-go/statusHandler" - "github.com/ElrondNetwork/elrond-go/storage" + "encoding/base64" + "fmt" + "sort" + "sync" + "time" + + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/core/serviceContainer" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/state" + 
"github.com/ElrondNetwork/elrond-go/data/typeConverters" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool" + "github.com/ElrondNetwork/elrond-go/display" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/throttle" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/statusHandler" + "github.com/ElrondNetwork/elrond-go/storage" ) var shardMBHeaderCounterMutex = sync.RWMutex{} @@ -31,1398 +31,1398 @@ var shardMBHeadersTotalProcessed = 0 // metaProcessor implements metaProcessor interface and actually it tries to execute block type metaProcessor struct { - *baseProcessor - core serviceContainer.Core - dataPool dataRetriever.MetaPoolsHolder + *baseProcessor + core serviceContainer.Core + dataPool dataRetriever.MetaPoolsHolder - currHighestShardHdrsNonces map[uint32]uint64 - requestedShardHdrsHashes map[string]bool - allNeededShardHdrsFound bool - mutRequestedShardHdrsHashes sync.RWMutex + currHighestShardHdrsNonces map[uint32]uint64 + requestedShardHdrsHashes map[string]bool + allNeededShardHdrsFound bool + mutRequestedShardHdrsHashes sync.RWMutex - shardsHeadersNonce *sync.Map + shardsHeadersNonce *sync.Map - nextKValidity uint32 + nextKValidity uint32 - chRcvAllHdrs chan bool + chRcvAllHdrs chan bool } // NewMetaProcessor creates a new metaProcessor object func NewMetaProcessor( - core serviceContainer.Core, - accounts state.AccountsAdapter, - dataPool dataRetriever.MetaPoolsHolder, - forkDetector process.ForkDetector, - shardCoordinator sharding.Coordinator, - nodesCoordinator sharding.NodesCoordinator, - specialAddressHandler process.SpecialAddressHandler, - hasher hashing.Hasher, - marshalizer marshal.Marshalizer, - store dataRetriever.StorageService, - startHeaders map[uint32]data.HeaderHandler, - requestHandler process.RequestHandler, - uint64Converter typeConverters.Uint64ByteSliceConverter, + core serviceContainer.Core, + accounts state.AccountsAdapter, + dataPool dataRetriever.MetaPoolsHolder, + forkDetector process.ForkDetector, + shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, + specialAddressHandler process.SpecialAddressHandler, + hasher hashing.Hasher, + marshalizer marshal.Marshalizer, + store dataRetriever.StorageService, + startHeaders map[uint32]data.HeaderHandler, + requestHandler process.RequestHandler, + uint64Converter typeConverters.Uint64ByteSliceConverter, ) (*metaProcessor, error) { - err := checkProcessorNilParameters( - accounts, - forkDetector, - hasher, - marshalizer, - store, - shardCoordinator, - nodesCoordinator, - specialAddressHandler, - uint64Converter) - if err != nil { - return nil, err - } - - if dataPool == nil || dataPool.IsInterfaceNil() { - return nil, process.ErrNilDataPoolHolder - } - if dataPool.ShardHeaders() == nil || dataPool.ShardHeaders().IsInterfaceNil() { - return nil, process.ErrNilHeadersDataPool - } - if requestHandler == nil || requestHandler.IsInterfaceNil() { - return nil, process.ErrNilRequestHandler - } - - blockSizeThrottler, err := throttle.NewBlockSizeThrottle() - if err != nil { - return nil, err - } - - base := &baseProcessor{ - accounts: accounts, - blockSizeThrottler: blockSizeThrottler, - forkDetector: forkDetector, - hasher: hasher, - marshalizer: marshalizer, - store: store, - shardCoordinator: shardCoordinator, - nodesCoordinator: nodesCoordinator, - 
specialAddressHandler: specialAddressHandler, - uint64Converter: uint64Converter, - onRequestHeaderHandler: requestHandler.RequestHeader, - onRequestHeaderHandlerByNonce: requestHandler.RequestHeaderByNonce, - appStatusHandler: statusHandler.NewNilStatusHandler(), - } - - err = base.setLastNotarizedHeadersSlice(startHeaders) - if err != nil { - return nil, err - } - - mp := metaProcessor{ - core: core, - baseProcessor: base, - dataPool: dataPool, - } - - mp.requestedShardHdrsHashes = make(map[string]bool) - - headerPool := mp.dataPool.ShardHeaders() - headerPool.RegisterHandler(mp.receivedHeader) - - mp.chRcvAllHdrs = make(chan bool) - - mp.nextKValidity = process.ShardBlockFinality - mp.allNeededShardHdrsFound = true - - mp.shardsHeadersNonce = &sync.Map{} - - return &mp, nil + err := checkProcessorNilParameters( + accounts, + forkDetector, + hasher, + marshalizer, + store, + shardCoordinator, + nodesCoordinator, + specialAddressHandler, + uint64Converter) + if err != nil { + return nil, err + } + + if dataPool == nil || dataPool.IsInterfaceNil() { + return nil, process.ErrNilDataPoolHolder + } + if dataPool.ShardHeaders() == nil || dataPool.ShardHeaders().IsInterfaceNil() { + return nil, process.ErrNilHeadersDataPool + } + if requestHandler == nil || requestHandler.IsInterfaceNil() { + return nil, process.ErrNilRequestHandler + } + + blockSizeThrottler, err := throttle.NewBlockSizeThrottle() + if err != nil { + return nil, err + } + + base := &baseProcessor{ + accounts: accounts, + blockSizeThrottler: blockSizeThrottler, + forkDetector: forkDetector, + hasher: hasher, + marshalizer: marshalizer, + store: store, + shardCoordinator: shardCoordinator, + nodesCoordinator: nodesCoordinator, + specialAddressHandler: specialAddressHandler, + uint64Converter: uint64Converter, + onRequestHeaderHandler: requestHandler.RequestHeader, + onRequestHeaderHandlerByNonce: requestHandler.RequestHeaderByNonce, + appStatusHandler: statusHandler.NewNilStatusHandler(), + } + + err = base.setLastNotarizedHeadersSlice(startHeaders) + if err != nil { + return nil, err + } + + mp := metaProcessor{ + core: core, + baseProcessor: base, + dataPool: dataPool, + } + + mp.requestedShardHdrsHashes = make(map[string]bool) + + headerPool := mp.dataPool.ShardHeaders() + headerPool.RegisterHandler(mp.receivedHeader) + + mp.chRcvAllHdrs = make(chan bool) + + mp.nextKValidity = process.ShardBlockFinality + mp.allNeededShardHdrsFound = true + + mp.shardsHeadersNonce = &sync.Map{} + + return &mp, nil } // ProcessBlock processes a block. 
It returns nil if all ok or the specific error func (mp *metaProcessor) ProcessBlock( - chainHandler data.ChainHandler, - headerHandler data.HeaderHandler, - bodyHandler data.BodyHandler, - haveTime func() time.Duration, + chainHandler data.ChainHandler, + headerHandler data.HeaderHandler, + bodyHandler data.BodyHandler, + haveTime func() time.Duration, ) error { - if haveTime == nil { - return process.ErrNilHaveTimeHandler - } - - err := mp.checkBlockValidity(chainHandler, headerHandler, bodyHandler) - if err != nil { - return err - } - - log.Debug(fmt.Sprintf("started processing block with round %d and nonce %d\n", - headerHandler.GetRound(), - headerHandler.GetNonce())) - - header, ok := headerHandler.(*block.MetaBlock) - if !ok { - return process.ErrWrongTypeAssertion - } - - requestedShardHdrs, requestedFinalShardHdrs := mp.requestShardHeaders(header) - - if haveTime() < 0 { - return process.ErrTimeIsOut - } - - if requestedShardHdrs > 0 || requestedFinalShardHdrs > 0 { - log.Info(fmt.Sprintf("requested %d missing shard headers and %d final shard headers\n", requestedShardHdrs, requestedFinalShardHdrs)) - err = mp.waitForBlockHeaders(haveTime()) - mp.mutRequestedShardHdrsHashes.Lock() - mp.allNeededShardHdrsFound = true - unreceivedShardHdrs := len(mp.requestedShardHdrsHashes) - mp.mutRequestedShardHdrsHashes.Unlock() - log.Info(fmt.Sprintf("received %d missing shard headers\n", int(requestedShardHdrs)-unreceivedShardHdrs)) - if err != nil { - return err - } - } - - if mp.accounts.JournalLen() != 0 { - return process.ErrAccountStateDirty - } - - defer func() { - go mp.checkAndRequestIfShardHeadersMissing(header.Round) - }() - - highestNonceHdrs, err := mp.checkShardHeadersValidity(header) - if err != nil { - return err - } - - err = mp.checkShardHeadersFinality(header, highestNonceHdrs) - if err != nil { - return err - } - - defer func() { - if err != nil { - mp.RevertAccountState() - } - }() - - err = mp.processBlockHeaders(header, header.Round, haveTime) - if err != nil { - return err - } - - if !mp.verifyStateRoot(header.GetRootHash()) { - err = process.ErrRootStateMissmatch - return err - } - - return nil + if haveTime == nil { + return process.ErrNilHaveTimeHandler + } + + err := mp.checkBlockValidity(chainHandler, headerHandler, bodyHandler) + if err != nil { + return err + } + + log.Debug(fmt.Sprintf("started processing block with round %d and nonce %d\n", + headerHandler.GetRound(), + headerHandler.GetNonce())) + + header, ok := headerHandler.(*block.MetaBlock) + if !ok { + return process.ErrWrongTypeAssertion + } + + requestedShardHdrs, requestedFinalShardHdrs := mp.requestShardHeaders(header) + + if haveTime() < 0 { + return process.ErrTimeIsOut + } + + if requestedShardHdrs > 0 || requestedFinalShardHdrs > 0 { + log.Info(fmt.Sprintf("requested %d missing shard headers and %d final shard headers\n", requestedShardHdrs, requestedFinalShardHdrs)) + err = mp.waitForBlockHeaders(haveTime()) + mp.mutRequestedShardHdrsHashes.Lock() + mp.allNeededShardHdrsFound = true + unreceivedShardHdrs := len(mp.requestedShardHdrsHashes) + mp.mutRequestedShardHdrsHashes.Unlock() + log.Info(fmt.Sprintf("received %d missing shard headers\n", int(requestedShardHdrs)-unreceivedShardHdrs)) + if err != nil { + return err + } + } + + if mp.accounts.JournalLen() != 0 { + return process.ErrAccountStateDirty + } + + defer func() { + go mp.checkAndRequestIfShardHeadersMissing(header.Round) + }() + + highestNonceHdrs, err := mp.checkShardHeadersValidity(header) + if err != nil { + return err + } + + err = 
mp.checkShardHeadersFinality(header, highestNonceHdrs) + if err != nil { + return err + } + + defer func() { + if err != nil { + mp.RevertAccountState() + } + }() + + err = mp.processBlockHeaders(header, header.Round, haveTime) + if err != nil { + return err + } + + if !mp.verifyStateRoot(header.GetRootHash()) { + err = process.ErrRootStateMissmatch + return err + } + + return nil } // SetConsensusRewardAddresses - sets the reward addresses for the current consensus group func (mp *metaProcessor) SetConsensusRewardAddresses(consensusRewardAddresses []string, round uint64) { - // TODO set the reward addresses for metachain consensus nodes + // TODO set the reward addresses for metachain consensus nodes } func (mp *metaProcessor) checkAndRequestIfShardHeadersMissing(round uint64) { - _, _, sortedHdrPerShard, err := mp.getOrderedHdrs(round) - if err != nil { - log.Debug(err.Error()) - return - } - - for i := uint32(0); i < mp.shardCoordinator.NumberOfShards(); i++ { - // map from *block.Header to dataHandler - sortedHdrs := make([]data.HeaderHandler, 0) - for j := 0; j < len(sortedHdrPerShard[i]); j++ { - sortedHdrs = append(sortedHdrs, sortedHdrPerShard[i][j]) - } - - err := mp.requestHeadersIfMissing(sortedHdrs, i, round) - if err != nil { - log.Debug(err.Error()) - continue - } - } - - return + _, _, sortedHdrPerShard, err := mp.getOrderedHdrs(round) + if err != nil { + log.Debug(err.Error()) + return + } + + for i := uint32(0); i < mp.shardCoordinator.NumberOfShards(); i++ { + // map from *block.Header to dataHandler + sortedHdrs := make([]data.HeaderHandler, 0) + for j := 0; j < len(sortedHdrPerShard[i]); j++ { + sortedHdrs = append(sortedHdrs, sortedHdrPerShard[i][j]) + } + + err := mp.requestHeadersIfMissing(sortedHdrs, i, round) + if err != nil { + log.Debug(err.Error()) + continue + } + } + + return } func (mp *metaProcessor) indexBlock(metaBlock *block.MetaBlock, headerPool map[string]*block.Header) { - if mp.core == nil || mp.core.Indexer() == nil { - return - } + if mp.core == nil || mp.core.Indexer() == nil { + return + } - // Update tps benchmarks in the DB - tpsBenchmark := mp.core.TPSBenchmark() - if tpsBenchmark != nil { - go mp.core.Indexer().UpdateTPS(tpsBenchmark) - } + // Update tps benchmarks in the DB + tpsBenchmark := mp.core.TPSBenchmark() + if tpsBenchmark != nil { + go mp.core.Indexer().UpdateTPS(tpsBenchmark) + } - //TODO: maybe index metablocks also? + //TODO: maybe index metablocks also? 
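+	// One possible shape for that TODO, sketched here only as an illustration and not
+	// wired in by this change: hand the metablock and its notarized shard headers to the
+	// indexer asynchronously, in the same fire-and-forget style as UpdateTPS above, e.g.
+	//   go mp.core.Indexer().SaveMetaBlock(metaBlock, headerPool)
+	// SaveMetaBlock is a hypothetical method name used for the sketch; only UpdateTPS
+	// is actually called on the indexer here.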
} // removeBlockInfoFromPool removes the block info from associated pools func (mp *metaProcessor) removeBlockInfoFromPool(header *block.MetaBlock) error { - if header == nil || header.IsInterfaceNil() { - return process.ErrNilMetaBlockHeader - } - - headerPool := mp.dataPool.ShardHeaders() - if headerPool == nil || headerPool.IsInterfaceNil() { - return process.ErrNilHeadersDataPool - } - - headerNoncesPool := mp.dataPool.HeadersNonces() - if headerNoncesPool == nil || headerNoncesPool.IsInterfaceNil() { - return process.ErrNilHeadersNoncesDataPool - } - - for i := 0; i < len(header.ShardInfo); i++ { - shardData := header.ShardInfo[i] - - obj, ok := headerPool.Peek(shardData.HeaderHash) - if !ok { - continue - } - - hdr, ok := obj.(*block.Header) - if !ok { - return process.ErrWrongTypeAssertion - } - - headerPool.Remove(shardData.HeaderHash) - headerNoncesPool.Remove(hdr.Nonce, hdr.ShardId) - } - - return nil + if header == nil || header.IsInterfaceNil() { + return process.ErrNilMetaBlockHeader + } + + headerPool := mp.dataPool.ShardHeaders() + if headerPool == nil || headerPool.IsInterfaceNil() { + return process.ErrNilHeadersDataPool + } + + headerNoncesPool := mp.dataPool.HeadersNonces() + if headerNoncesPool == nil || headerNoncesPool.IsInterfaceNil() { + return process.ErrNilHeadersNoncesDataPool + } + + for i := 0; i < len(header.ShardInfo); i++ { + shardData := header.ShardInfo[i] + + obj, ok := headerPool.Peek(shardData.HeaderHash) + if !ok { + continue + } + + hdr, ok := obj.(*block.Header) + if !ok { + return process.ErrWrongTypeAssertion + } + + headerPool.Remove(shardData.HeaderHash) + headerNoncesPool.Remove(hdr.Nonce, hdr.ShardId) + } + + return nil } // RestoreBlockIntoPools restores the block into associated pools func (mp *metaProcessor) RestoreBlockIntoPools(headerHandler data.HeaderHandler, bodyHandler data.BodyHandler) error { - if headerHandler == nil || headerHandler.IsInterfaceNil() { - return process.ErrNilMetaBlockHeader - } - - header, ok := headerHandler.(*block.MetaBlock) - if !ok { - return process.ErrWrongTypeAssertion - } - - headerPool := mp.dataPool.ShardHeaders() - if headerPool == nil || headerPool.IsInterfaceNil() { - return process.ErrNilHeadersDataPool - } - - headerNoncesPool := mp.dataPool.HeadersNonces() - if headerNoncesPool == nil || headerNoncesPool.IsInterfaceNil() { - return process.ErrNilHeadersNoncesDataPool - } - - hdrHashes := make([][]byte, 0) - for i := 0; i < len(header.ShardInfo); i++ { - shardData := header.ShardInfo[i] - hdrHashes = append(hdrHashes, shardData.HeaderHash) - } - - for _, hdrHash := range hdrHashes { - buff, err := mp.store.Get(dataRetriever.BlockHeaderUnit, hdrHash) - if err != nil { - log.Error(err.Error()) - continue - } - - hdr := block.Header{} - err = mp.marshalizer.Unmarshal(&hdr, buff) - if err != nil { - log.Error(err.Error()) - continue - } - - headerPool.Put(hdrHash, &hdr) - syncMap := &dataPool.ShardIdHashSyncMap{} - syncMap.Store(hdr.ShardId, hdrHash) - headerNoncesPool.Merge(hdr.Nonce, syncMap) - - err = mp.store.GetStorer(dataRetriever.BlockHeaderUnit).Remove(hdrHash) - if err != nil { - log.Error(err.Error()) - } - - nonceToByteSlice := mp.uint64Converter.ToByteSlice(hdr.Nonce) - err = mp.store.GetStorer(dataRetriever.ShardHdrNonceHashDataUnit).Remove(nonceToByteSlice) - if err != nil { - log.Error(err.Error()) - } - - shardMBHeaderCounterMutex.Lock() - shardMBHeadersTotalProcessed -= len(hdr.MiniBlockHeaders) - shardMBHeaderCounterMutex.Unlock() - } - - mp.removeLastNotarized() - - return nil + if 
headerHandler == nil || headerHandler.IsInterfaceNil() { + return process.ErrNilMetaBlockHeader + } + + header, ok := headerHandler.(*block.MetaBlock) + if !ok { + return process.ErrWrongTypeAssertion + } + + headerPool := mp.dataPool.ShardHeaders() + if headerPool == nil || headerPool.IsInterfaceNil() { + return process.ErrNilHeadersDataPool + } + + headerNoncesPool := mp.dataPool.HeadersNonces() + if headerNoncesPool == nil || headerNoncesPool.IsInterfaceNil() { + return process.ErrNilHeadersNoncesDataPool + } + + hdrHashes := make([][]byte, 0) + for i := 0; i < len(header.ShardInfo); i++ { + shardData := header.ShardInfo[i] + hdrHashes = append(hdrHashes, shardData.HeaderHash) + } + + for _, hdrHash := range hdrHashes { + buff, err := mp.store.Get(dataRetriever.BlockHeaderUnit, hdrHash) + if err != nil { + log.Error(err.Error()) + continue + } + + hdr := block.Header{} + err = mp.marshalizer.Unmarshal(&hdr, buff) + if err != nil { + log.Error(err.Error()) + continue + } + + headerPool.Put(hdrHash, &hdr) + syncMap := &dataPool.ShardIdHashSyncMap{} + syncMap.Store(hdr.ShardId, hdrHash) + headerNoncesPool.Merge(hdr.Nonce, syncMap) + + err = mp.store.GetStorer(dataRetriever.BlockHeaderUnit).Remove(hdrHash) + if err != nil { + log.Error(err.Error()) + } + + nonceToByteSlice := mp.uint64Converter.ToByteSlice(hdr.Nonce) + err = mp.store.GetStorer(dataRetriever.ShardHdrNonceHashDataUnit).Remove(nonceToByteSlice) + if err != nil { + log.Error(err.Error()) + } + + shardMBHeaderCounterMutex.Lock() + shardMBHeadersTotalProcessed -= len(hdr.MiniBlockHeaders) + shardMBHeaderCounterMutex.Unlock() + } + + mp.removeLastNotarized() + + return nil } // CreateBlockBody creates block body of metachain func (mp *metaProcessor) CreateBlockBody(round uint64, haveTime func() bool) (data.BodyHandler, error) { - log.Debug(fmt.Sprintf("started creating block body in round %d\n", round)) - mp.blockSizeThrottler.ComputeMaxItems() - return &block.MetaBlockBody{}, nil + log.Debug(fmt.Sprintf("started creating block body in round %d\n", round)) + mp.blockSizeThrottler.ComputeMaxItems() + return &block.MetaBlockBody{}, nil } func (mp *metaProcessor) processBlockHeaders(header *block.MetaBlock, round uint64, haveTime func() time.Duration) error { - hdrPool := mp.dataPool.ShardHeaders() - - msg := "" - - for i := 0; i < len(header.ShardInfo); i++ { - shardData := header.ShardInfo[i] - for j := 0; j < len(shardData.ShardMiniBlockHeaders); j++ { - if haveTime() < 0 { - return process.ErrTimeIsOut - } - - headerHash := shardData.HeaderHash - shardMiniBlockHeader := &shardData.ShardMiniBlockHeaders[j] - err := mp.checkAndProcessShardMiniBlockHeader( - headerHash, - shardMiniBlockHeader, - hdrPool, - round, - shardData.ShardId, - ) - if err != nil { - return err - } - - msg = fmt.Sprintf("%s\n%s", msg, core.ToB64(shardMiniBlockHeader.Hash)) - } - } - - if len(msg) > 0 { - log.Debug(fmt.Sprintf("the following miniblocks hashes were successfully processed:%s\n", msg)) - } - - return nil + hdrPool := mp.dataPool.ShardHeaders() + + msg := "" + + for i := 0; i < len(header.ShardInfo); i++ { + shardData := header.ShardInfo[i] + for j := 0; j < len(shardData.ShardMiniBlockHeaders); j++ { + if haveTime() < 0 { + return process.ErrTimeIsOut + } + + headerHash := shardData.HeaderHash + shardMiniBlockHeader := &shardData.ShardMiniBlockHeaders[j] + err := mp.checkAndProcessShardMiniBlockHeader( + headerHash, + shardMiniBlockHeader, + hdrPool, + round, + shardData.ShardId, + ) + if err != nil { + return err + } + + msg = 
fmt.Sprintf("%s\n%s", msg, core.ToB64(shardMiniBlockHeader.Hash)) + } + } + + if len(msg) > 0 { + log.Debug(fmt.Sprintf("the following miniblocks hashes were successfully processed:%s\n", msg)) + } + + return nil } // CommitBlock commits the block in the blockchain if everything was checked successfully func (mp *metaProcessor) CommitBlock( - chainHandler data.ChainHandler, - headerHandler data.HeaderHandler, - bodyHandler data.BodyHandler, + chainHandler data.ChainHandler, + headerHandler data.HeaderHandler, + bodyHandler data.BodyHandler, ) error { - var err error - defer func() { - if err != nil { - mp.RevertAccountState() - } - }() - - tempHeaderPool := make(map[string]*block.Header) - - err = checkForNils(chainHandler, headerHandler, bodyHandler) - if err != nil { - return err - } - - log.Debug(fmt.Sprintf("started committing block with round %d and nonce %d\n", - headerHandler.GetRound(), - headerHandler.GetNonce())) - - err = mp.checkBlockValidity(chainHandler, headerHandler, bodyHandler) - if err != nil { - return err - } - - header, ok := headerHandler.(*block.MetaBlock) - if !ok { - err = process.ErrWrongTypeAssertion - return err - } - - buff, err := mp.marshalizer.Marshal(header) - if err != nil { - return err - } - - headerHash := mp.hasher.Compute(string(buff)) - nonceToByteSlice := mp.uint64Converter.ToByteSlice(header.Nonce) - errNotCritical := mp.store.Put(dataRetriever.MetaHdrNonceHashDataUnit, nonceToByteSlice, headerHash) - log.LogIfError(errNotCritical) - - errNotCritical = mp.store.Put(dataRetriever.MetaBlockUnit, headerHash, buff) - log.LogIfError(errNotCritical) - - headerNoncePool := mp.dataPool.HeadersNonces() - if headerNoncePool == nil { - err = process.ErrNilDataPoolHolder - return err - } - - //TODO: Should be analyzed if put in pool is really necessary or not (right now there is no action of removing them) - syncMap := &dataPool.ShardIdHashSyncMap{} - syncMap.Store(headerHandler.GetShardID(), headerHash) - headerNoncePool.Merge(headerHandler.GetNonce(), syncMap) - - body, ok := bodyHandler.(*block.MetaBlockBody) - if !ok { - err = process.ErrWrongTypeAssertion - return err - } - - for i := 0; i < len(header.ShardInfo); i++ { - shardData := header.ShardInfo[i] - header, err := process.GetShardHeaderFromPool(shardData.HeaderHash, mp.dataPool.ShardHeaders()) - if header == nil { - return err - } - - mp.updateShardHeadersNonce(shardData.ShardId, header.Nonce) - - tempHeaderPool[string(shardData.HeaderHash)] = header - - buff, err = mp.marshalizer.Marshal(header) - if err != nil { - return err - } - - nonceToByteSlice := mp.uint64Converter.ToByteSlice(header.Nonce) - hdrNonceHashDataUnit := dataRetriever.ShardHdrNonceHashDataUnit + dataRetriever.UnitType(header.ShardId) - errNotCritical = mp.store.Put(hdrNonceHashDataUnit, nonceToByteSlice, shardData.HeaderHash) - log.LogIfError(errNotCritical) - - errNotCritical = mp.store.Put(dataRetriever.BlockHeaderUnit, shardData.HeaderHash, buff) - log.LogIfError(errNotCritical) - } - - mp.saveMetricCrossCheckBlockHeight() - - err = mp.saveLastNotarizedHeader(header) - if err != nil { - return err - } - - _, err = mp.accounts.Commit() - if err != nil { - return err - } - - log.Info(fmt.Sprintf("metaBlock with nonce %d and hash %s has been committed successfully\n", - header.Nonce, - core.ToB64(headerHash))) - - errNotCritical = mp.removeBlockInfoFromPool(header) - if errNotCritical != nil { - log.Info(errNotCritical.Error()) - } - - errNotCritical = mp.forkDetector.AddHeader(header, headerHash, process.BHProcessed, nil, nil) 
- if errNotCritical != nil { - log.Info(errNotCritical.Error()) - } - - hdrsToAttestFinality := mp.nextKValidity - mp.removeNotarizedHdrsBehindFinal(hdrsToAttestFinality) - - err = chainHandler.SetCurrentBlockBody(body) - if err != nil { - return err - } + var err error + defer func() { + if err != nil { + mp.RevertAccountState() + } + }() + + tempHeaderPool := make(map[string]*block.Header) + + err = checkForNils(chainHandler, headerHandler, bodyHandler) + if err != nil { + return err + } + + log.Debug(fmt.Sprintf("started committing block with round %d and nonce %d\n", + headerHandler.GetRound(), + headerHandler.GetNonce())) + + err = mp.checkBlockValidity(chainHandler, headerHandler, bodyHandler) + if err != nil { + return err + } + + header, ok := headerHandler.(*block.MetaBlock) + if !ok { + err = process.ErrWrongTypeAssertion + return err + } + + buff, err := mp.marshalizer.Marshal(header) + if err != nil { + return err + } + + headerHash := mp.hasher.Compute(string(buff)) + nonceToByteSlice := mp.uint64Converter.ToByteSlice(header.Nonce) + errNotCritical := mp.store.Put(dataRetriever.MetaHdrNonceHashDataUnit, nonceToByteSlice, headerHash) + log.LogIfError(errNotCritical) + + errNotCritical = mp.store.Put(dataRetriever.MetaBlockUnit, headerHash, buff) + log.LogIfError(errNotCritical) + + headerNoncePool := mp.dataPool.HeadersNonces() + if headerNoncePool == nil { + err = process.ErrNilDataPoolHolder + return err + } + + //TODO: Should be analyzed if put in pool is really necessary or not (right now there is no action of removing them) + syncMap := &dataPool.ShardIdHashSyncMap{} + syncMap.Store(headerHandler.GetShardID(), headerHash) + headerNoncePool.Merge(headerHandler.GetNonce(), syncMap) + + body, ok := bodyHandler.(*block.MetaBlockBody) + if !ok { + err = process.ErrWrongTypeAssertion + return err + } + + for i := 0; i < len(header.ShardInfo); i++ { + shardData := header.ShardInfo[i] + header, err := process.GetShardHeaderFromPool(shardData.HeaderHash, mp.dataPool.ShardHeaders()) + if header == nil { + return err + } + + mp.updateShardHeadersNonce(shardData.ShardId, header.Nonce) + + tempHeaderPool[string(shardData.HeaderHash)] = header + + buff, err = mp.marshalizer.Marshal(header) + if err != nil { + return err + } + + nonceToByteSlice := mp.uint64Converter.ToByteSlice(header.Nonce) + hdrNonceHashDataUnit := dataRetriever.ShardHdrNonceHashDataUnit + dataRetriever.UnitType(header.ShardId) + errNotCritical = mp.store.Put(hdrNonceHashDataUnit, nonceToByteSlice, shardData.HeaderHash) + log.LogIfError(errNotCritical) + + errNotCritical = mp.store.Put(dataRetriever.BlockHeaderUnit, shardData.HeaderHash, buff) + log.LogIfError(errNotCritical) + } + + mp.saveMetricCrossCheckBlockHeight() + + err = mp.saveLastNotarizedHeader(header) + if err != nil { + return err + } + + _, err = mp.accounts.Commit() + if err != nil { + return err + } + + log.Info(fmt.Sprintf("metaBlock with nonce %d and hash %s has been committed successfully\n", + header.Nonce, + core.ToB64(headerHash))) + + errNotCritical = mp.removeBlockInfoFromPool(header) + if errNotCritical != nil { + log.Info(errNotCritical.Error()) + } + + errNotCritical = mp.forkDetector.AddHeader(header, headerHash, process.BHProcessed, nil, nil) + if errNotCritical != nil { + log.Info(errNotCritical.Error()) + } + + hdrsToAttestFinality := mp.nextKValidity + mp.removeNotarizedHdrsBehindFinal(hdrsToAttestFinality) + + err = chainHandler.SetCurrentBlockBody(body) + if err != nil { + return err + } - err = 
chainHandler.SetCurrentBlockHeader(header) - if err != nil { - return err - } + err = chainHandler.SetCurrentBlockHeader(header) + if err != nil { + return err + } - chainHandler.SetCurrentBlockHeaderHash(headerHash) - - if mp.core != nil && mp.core.TPSBenchmark() != nil { - mp.core.TPSBenchmark().Update(header) - } + chainHandler.SetCurrentBlockHeaderHash(headerHash) + + if mp.core != nil && mp.core.TPSBenchmark() != nil { + mp.core.TPSBenchmark().Update(header) + } - mp.indexBlock(header, tempHeaderPool) + mp.indexBlock(header, tempHeaderPool) - go mp.displayMetaBlock(header) + go mp.displayMetaBlock(header) - mp.blockSizeThrottler.Succeed(header.Round) + mp.blockSizeThrottler.Succeed(header.Round) - return nil + return nil } func (mp *metaProcessor) updateShardHeadersNonce(key uint32, value uint64) { - valueStoredI, ok := mp.shardsHeadersNonce.Load(key) - if !ok { - mp.shardsHeadersNonce.Store(key, value) - return - } - - valueStored, ok := valueStoredI.(uint64) - if !ok { - mp.shardsHeadersNonce.Store(key, value) - return - } - - if valueStored < value { - mp.shardsHeadersNonce.Store(key, value) - } + valueStoredI, ok := mp.shardsHeadersNonce.Load(key) + if !ok { + mp.shardsHeadersNonce.Store(key, value) + return + } + + valueStored, ok := valueStoredI.(uint64) + if !ok { + mp.shardsHeadersNonce.Store(key, value) + return + } + + if valueStored < value { + mp.shardsHeadersNonce.Store(key, value) + } } func (mp *metaProcessor) saveMetricCrossCheckBlockHeight() { - crossCheckBlockHeight := "" - for i := uint32(0); i < mp.shardCoordinator.NumberOfShards(); i++ { - valueStoredI, ok := mp.shardsHeadersNonce.Load(i) - if !ok { - continue - } - - valueStored, ok := valueStoredI.(uint64) - if !ok { - continue - } - - if i > 0 { - crossCheckBlockHeight += ", " - } - - crossCheckBlockHeight += fmt.Sprintf("%d: %d", i, valueStored) - } - - mp.appStatusHandler.SetStringValue(core.MetricCrossCheckBlockHeight, crossCheckBlockHeight) + crossCheckBlockHeight := "" + for i := uint32(0); i < mp.shardCoordinator.NumberOfShards(); i++ { + valueStoredI, ok := mp.shardsHeadersNonce.Load(i) + if !ok { + continue + } + + valueStored, ok := valueStoredI.(uint64) + if !ok { + continue + } + + if i > 0 { + crossCheckBlockHeight += ", " + } + + crossCheckBlockHeight += fmt.Sprintf("%d: %d", i, valueStored) + } + + mp.appStatusHandler.SetStringValue(core.MetricCrossCheckBlockHeight, crossCheckBlockHeight) } func (mp *metaProcessor) saveLastNotarizedHeader(header *block.MetaBlock) error { - mp.mutNotarizedHdrs.Lock() - defer mp.mutNotarizedHdrs.Unlock() - - if mp.notarizedHdrs == nil { - return process.ErrNotarizedHdrsSliceIsNil - } - - tmpLastNotarizedHdrForShard := make(map[uint32]data.HeaderHandler, mp.shardCoordinator.NumberOfShards()) - for i := uint32(0); i < mp.shardCoordinator.NumberOfShards(); i++ { - tmpLastNotarizedHdrForShard[i] = mp.lastNotarizedHdrForShard(i) - } - - for i := 0; i < len(header.ShardInfo); i++ { - shardData := header.ShardInfo[i] - header, err := process.GetShardHeaderFromPool(shardData.HeaderHash, mp.dataPool.ShardHeaders()) - if err != nil { - return err - } - - if tmpLastNotarizedHdrForShard[header.ShardId].GetNonce() < header.Nonce { - tmpLastNotarizedHdrForShard[header.ShardId] = header - } - } - - for i := uint32(0); i < mp.shardCoordinator.NumberOfShards(); i++ { - mp.notarizedHdrs[i] = append(mp.notarizedHdrs[i], tmpLastNotarizedHdrForShard[i]) - DisplayLastNotarized(mp.marshalizer, mp.hasher, tmpLastNotarizedHdrForShard[i], i) - } - - return nil + mp.mutNotarizedHdrs.Lock() + 
defer mp.mutNotarizedHdrs.Unlock() + + if mp.notarizedHdrs == nil { + return process.ErrNotarizedHdrsSliceIsNil + } + + tmpLastNotarizedHdrForShard := make(map[uint32]data.HeaderHandler, mp.shardCoordinator.NumberOfShards()) + for i := uint32(0); i < mp.shardCoordinator.NumberOfShards(); i++ { + tmpLastNotarizedHdrForShard[i] = mp.lastNotarizedHdrForShard(i) + } + + for i := 0; i < len(header.ShardInfo); i++ { + shardData := header.ShardInfo[i] + header, err := process.GetShardHeaderFromPool(shardData.HeaderHash, mp.dataPool.ShardHeaders()) + if err != nil { + return err + } + + if tmpLastNotarizedHdrForShard[header.ShardId].GetNonce() < header.Nonce { + tmpLastNotarizedHdrForShard[header.ShardId] = header + } + } + + for i := uint32(0); i < mp.shardCoordinator.NumberOfShards(); i++ { + mp.notarizedHdrs[i] = append(mp.notarizedHdrs[i], tmpLastNotarizedHdrForShard[i]) + DisplayLastNotarized(mp.marshalizer, mp.hasher, tmpLastNotarizedHdrForShard[i], i) + } + + return nil } // gets all the headers from the metablock in sorted order per shard func (mp *metaProcessor) getSortedShardHdrsFromMetablock(metaBlock *block.MetaBlock) (map[uint32][]*block.Header, error) { - sortedShardHdrs := make(map[uint32][]*block.Header, mp.shardCoordinator.NumberOfShards()) - - requestedHeaders := 0 - for i := 0; i < len(metaBlock.ShardInfo); i++ { - shardData := metaBlock.ShardInfo[i] - header, err := process.GetShardHeaderFromPool(shardData.HeaderHash, mp.dataPool.ShardHeaders()) - if err != nil { - log.Debug(err.Error()) - requestedHeaders++ - go mp.onRequestHeaderHandler(shardData.ShardId, shardData.HeaderHash) - continue - } - - sortedShardHdrs[shardData.ShardId] = append(sortedShardHdrs[shardData.ShardId], header) - } - - if requestedHeaders > 0 { - return nil, process.ErrMissingHeader - } - - for shId := uint32(0); shId < mp.shardCoordinator.NumberOfShards(); shId++ { - hdrsForShard := sortedShardHdrs[shId] - if len(hdrsForShard) <= 1 { - continue - } - - sort.Slice(hdrsForShard, func(i, j int) bool { - return hdrsForShard[i].GetNonce() < hdrsForShard[j].GetNonce() - }) - } - - return sortedShardHdrs, nil + sortedShardHdrs := make(map[uint32][]*block.Header, mp.shardCoordinator.NumberOfShards()) + + requestedHeaders := 0 + for i := 0; i < len(metaBlock.ShardInfo); i++ { + shardData := metaBlock.ShardInfo[i] + header, err := process.GetShardHeaderFromPool(shardData.HeaderHash, mp.dataPool.ShardHeaders()) + if err != nil { + log.Debug(err.Error()) + requestedHeaders++ + go mp.onRequestHeaderHandler(shardData.ShardId, shardData.HeaderHash) + continue + } + + sortedShardHdrs[shardData.ShardId] = append(sortedShardHdrs[shardData.ShardId], header) + } + + if requestedHeaders > 0 { + return nil, process.ErrMissingHeader + } + + for shId := uint32(0); shId < mp.shardCoordinator.NumberOfShards(); shId++ { + hdrsForShard := sortedShardHdrs[shId] + if len(hdrsForShard) <= 1 { + continue + } + + sort.Slice(hdrsForShard, func(i, j int) bool { + return hdrsForShard[i].GetNonce() < hdrsForShard[j].GetNonce() + }) + } + + return sortedShardHdrs, nil } // check if shard headers were signed and constructed correctly and returns headers which has to be // checked for finality func (mp *metaProcessor) checkShardHeadersValidity(header *block.MetaBlock) (map[uint32]data.HeaderHandler, error) { - mp.mutNotarizedHdrs.RLock() - if mp.notarizedHdrs == nil { - mp.mutNotarizedHdrs.RUnlock() - return nil, process.ErrNotarizedHdrsSliceIsNil - } - - tmpLastNotarized := make(map[uint32]data.HeaderHandler, 
mp.shardCoordinator.NumberOfShards()) - for i := uint32(0); i < mp.shardCoordinator.NumberOfShards(); i++ { - tmpLastNotarized[i] = mp.lastNotarizedHdrForShard(i) - } - mp.mutNotarizedHdrs.RUnlock() - - sortedShardHdrs, err := mp.getSortedShardHdrsFromMetablock(header) - if err != nil { - return nil, err - } - - highestNonceHdrs := make(map[uint32]data.HeaderHandler, mp.shardCoordinator.NumberOfShards()) - for shId := uint32(0); shId < mp.shardCoordinator.NumberOfShards(); shId++ { - hdrsForShard := sortedShardHdrs[shId] - if len(hdrsForShard) == 0 { - continue - } - - for i := 0; i < len(hdrsForShard); i++ { - err := mp.isHdrConstructionValid(hdrsForShard[i], tmpLastNotarized[shId]) - if err != nil { - return nil, err - } - tmpLastNotarized[shId] = hdrsForShard[i] - highestNonceHdrs[shId] = hdrsForShard[i] - } - } - - return highestNonceHdrs, nil + mp.mutNotarizedHdrs.RLock() + if mp.notarizedHdrs == nil { + mp.mutNotarizedHdrs.RUnlock() + return nil, process.ErrNotarizedHdrsSliceIsNil + } + + tmpLastNotarized := make(map[uint32]data.HeaderHandler, mp.shardCoordinator.NumberOfShards()) + for i := uint32(0); i < mp.shardCoordinator.NumberOfShards(); i++ { + tmpLastNotarized[i] = mp.lastNotarizedHdrForShard(i) + } + mp.mutNotarizedHdrs.RUnlock() + + sortedShardHdrs, err := mp.getSortedShardHdrsFromMetablock(header) + if err != nil { + return nil, err + } + + highestNonceHdrs := make(map[uint32]data.HeaderHandler, mp.shardCoordinator.NumberOfShards()) + for shId := uint32(0); shId < mp.shardCoordinator.NumberOfShards(); shId++ { + hdrsForShard := sortedShardHdrs[shId] + if len(hdrsForShard) == 0 { + continue + } + + for i := 0; i < len(hdrsForShard); i++ { + err := mp.isHdrConstructionValid(hdrsForShard[i], tmpLastNotarized[shId]) + if err != nil { + return nil, err + } + tmpLastNotarized[shId] = hdrsForShard[i] + highestNonceHdrs[shId] = hdrsForShard[i] + } + } + + return highestNonceHdrs, nil } // check if shard headers are final by checking if newer headers were constructed upon them func (mp *metaProcessor) checkShardHeadersFinality(header *block.MetaBlock, highestNonceHdrs map[uint32]data.HeaderHandler) error { - if header == nil { - return process.ErrNilBlockHeader - } - - //TODO: change this to look at the pool where values are saved by prevHash. can be done after resolver is done - _, _, sortedHdrPerShard, err := mp.getOrderedHdrs(header.GetRound()) - if err != nil { - return err - } - - for index, lastVerifiedHdr := range highestNonceHdrs { - if index != lastVerifiedHdr.GetShardID() { - return process.ErrShardIdMissmatch - } - - // verify if there are "K" block after current to make this one final - nextBlocksVerified := uint32(0) - shId := lastVerifiedHdr.GetShardID() - for i := 0; i < len(sortedHdrPerShard[shId]); i++ { - if nextBlocksVerified >= mp.nextKValidity { - break - } - - // found a header with the next nonce - tmpHdr := sortedHdrPerShard[shId][i] - if tmpHdr.GetNonce() == lastVerifiedHdr.GetNonce()+1 { - err := mp.isHdrConstructionValid(tmpHdr, lastVerifiedHdr) - if err != nil { - continue - } - - lastVerifiedHdr = tmpHdr - nextBlocksVerified += 1 - } - } - - if nextBlocksVerified < mp.nextKValidity { - go mp.onRequestHeaderHandlerByNonce(lastVerifiedHdr.GetShardID(), lastVerifiedHdr.GetNonce()+1) - return process.ErrHeaderNotFinal - } - } - - return nil + if header == nil { + return process.ErrNilBlockHeader + } + + //TODO: change this to look at the pool where values are saved by prevHash. 
can be done after resolver is done + _, _, sortedHdrPerShard, err := mp.getOrderedHdrs(header.GetRound()) + if err != nil { + return err + } + + for index, lastVerifiedHdr := range highestNonceHdrs { + if index != lastVerifiedHdr.GetShardID() { + return process.ErrShardIdMissmatch + } + + // verify if there are "K" block after current to make this one final + nextBlocksVerified := uint32(0) + shId := lastVerifiedHdr.GetShardID() + for i := 0; i < len(sortedHdrPerShard[shId]); i++ { + if nextBlocksVerified >= mp.nextKValidity { + break + } + + // found a header with the next nonce + tmpHdr := sortedHdrPerShard[shId][i] + if tmpHdr.GetNonce() == lastVerifiedHdr.GetNonce()+1 { + err := mp.isHdrConstructionValid(tmpHdr, lastVerifiedHdr) + if err != nil { + continue + } + + lastVerifiedHdr = tmpHdr + nextBlocksVerified += 1 + } + } + + if nextBlocksVerified < mp.nextKValidity { + go mp.onRequestHeaderHandlerByNonce(lastVerifiedHdr.GetShardID(), lastVerifiedHdr.GetNonce()+1) + return process.ErrHeaderNotFinal + } + } + + return nil } func (mp *metaProcessor) isShardHeaderValidFinal(currHdr *block.Header, lastHdr *block.Header, sortedShardHdrs []*block.Header) (bool, []uint32) { - if currHdr == nil { - return false, nil - } - if sortedShardHdrs == nil { - return false, nil - } - if lastHdr == nil { - return false, nil - } - - err := mp.isHdrConstructionValid(currHdr, lastHdr) - if err != nil { - return false, nil - } - - // verify if there are "K" block after current to make this one final - lastVerifiedHdr := currHdr - nextBlocksVerified := uint32(0) - hdrIds := make([]uint32, 0) - for i := 0; i < len(sortedShardHdrs); i++ { - if nextBlocksVerified >= mp.nextKValidity { - return true, hdrIds - } - - // found a header with the next nonce - tmpHdr := sortedShardHdrs[i] - if tmpHdr.GetNonce() == lastVerifiedHdr.GetNonce()+1 { - err := mp.isHdrConstructionValid(tmpHdr, lastVerifiedHdr) - if err != nil { - continue - } - - lastVerifiedHdr = tmpHdr - nextBlocksVerified += 1 - hdrIds = append(hdrIds, uint32(i)) - } - } - - if nextBlocksVerified >= mp.nextKValidity { - return true, hdrIds - } - - return false, nil + if currHdr == nil { + return false, nil + } + if sortedShardHdrs == nil { + return false, nil + } + if lastHdr == nil { + return false, nil + } + + err := mp.isHdrConstructionValid(currHdr, lastHdr) + if err != nil { + return false, nil + } + + // verify if there are "K" block after current to make this one final + lastVerifiedHdr := currHdr + nextBlocksVerified := uint32(0) + hdrIds := make([]uint32, 0) + for i := 0; i < len(sortedShardHdrs); i++ { + if nextBlocksVerified >= mp.nextKValidity { + return true, hdrIds + } + + // found a header with the next nonce + tmpHdr := sortedShardHdrs[i] + if tmpHdr.GetNonce() == lastVerifiedHdr.GetNonce()+1 { + err := mp.isHdrConstructionValid(tmpHdr, lastVerifiedHdr) + if err != nil { + continue + } + + lastVerifiedHdr = tmpHdr + nextBlocksVerified += 1 + hdrIds = append(hdrIds, uint32(i)) + } + } + + if nextBlocksVerified >= mp.nextKValidity { + return true, hdrIds + } + + return false, nil } // receivedHeader is a call back function which is called when a new header // is added in the headers pool func (mp *metaProcessor) receivedHeader(headerHash []byte) { - shardHdrsCache := mp.dataPool.ShardHeaders() - if shardHdrsCache == nil { - return - } - - shardHdrsNoncesCache := mp.dataPool.HeadersNonces() - if shardHdrsNoncesCache == nil && mp.nextKValidity > 0 { - return - } - - obj, ok := shardHdrsCache.Peek(headerHash) - if !ok { - return - } - - 
header, ok := obj.(data.HeaderHandler) - if !ok { - return - } - - log.Debug(fmt.Sprintf("received header with hash %s and nonce %d from network\n", - core.ToB64(headerHash), - header.GetNonce())) - - mp.mutRequestedShardHdrsHashes.Lock() - - if !mp.allNeededShardHdrsFound { - if mp.requestedShardHdrsHashes[string(headerHash)] { - delete(mp.requestedShardHdrsHashes, string(headerHash)) - - if header.GetNonce() > mp.currHighestShardHdrsNonces[header.GetShardID()] { - mp.currHighestShardHdrsNonces[header.GetShardID()] = header.GetNonce() - } - } - - lenReqShardHdrsHashes := len(mp.requestedShardHdrsHashes) - areFinalAttestingHdrsInCache := false - if lenReqShardHdrsHashes == 0 { - requestedBlockHeaders := mp.requestFinalMissingHeaders() - if requestedBlockHeaders == 0 { - areFinalAttestingHdrsInCache = true - } else { - log.Info(fmt.Sprintf("requested %d missing final shard headers\n", requestedBlockHeaders)) - } - } - - mp.allNeededShardHdrsFound = lenReqShardHdrsHashes == 0 && areFinalAttestingHdrsInCache - - mp.mutRequestedShardHdrsHashes.Unlock() - - if lenReqShardHdrsHashes == 0 && areFinalAttestingHdrsInCache { - mp.chRcvAllHdrs <- true - } - } else { - mp.mutRequestedShardHdrsHashes.Unlock() - } + shardHdrsCache := mp.dataPool.ShardHeaders() + if shardHdrsCache == nil { + return + } + + shardHdrsNoncesCache := mp.dataPool.HeadersNonces() + if shardHdrsNoncesCache == nil && mp.nextKValidity > 0 { + return + } + + obj, ok := shardHdrsCache.Peek(headerHash) + if !ok { + return + } + + header, ok := obj.(data.HeaderHandler) + if !ok { + return + } + + log.Debug(fmt.Sprintf("received header with hash %s and nonce %d from network\n", + core.ToB64(headerHash), + header.GetNonce())) + + mp.mutRequestedShardHdrsHashes.Lock() + + if !mp.allNeededShardHdrsFound { + if mp.requestedShardHdrsHashes[string(headerHash)] { + delete(mp.requestedShardHdrsHashes, string(headerHash)) + + if header.GetNonce() > mp.currHighestShardHdrsNonces[header.GetShardID()] { + mp.currHighestShardHdrsNonces[header.GetShardID()] = header.GetNonce() + } + } + + lenReqShardHdrsHashes := len(mp.requestedShardHdrsHashes) + areFinalAttestingHdrsInCache := false + if lenReqShardHdrsHashes == 0 { + requestedBlockHeaders := mp.requestFinalMissingHeaders() + if requestedBlockHeaders == 0 { + areFinalAttestingHdrsInCache = true + } else { + log.Info(fmt.Sprintf("requested %d missing final shard headers\n", requestedBlockHeaders)) + } + } + + mp.allNeededShardHdrsFound = lenReqShardHdrsHashes == 0 && areFinalAttestingHdrsInCache + + mp.mutRequestedShardHdrsHashes.Unlock() + + if lenReqShardHdrsHashes == 0 && areFinalAttestingHdrsInCache { + mp.chRcvAllHdrs <- true + } + } else { + mp.mutRequestedShardHdrsHashes.Unlock() + } } // requestFinalMissingHeaders requests the headers needed to accept the current selected headers for processing the // current block. 
It requests the nextKValidity headers greater than the highest shard header, for each shard, related // to the block which should be processed func (mp *metaProcessor) requestFinalMissingHeaders() uint32 { - requestedBlockHeaders := uint32(0) - for shardId := uint32(0); shardId < mp.shardCoordinator.NumberOfShards(); shardId++ { - for i := mp.currHighestShardHdrsNonces[shardId] + 1; i <= mp.currHighestShardHdrsNonces[shardId]+uint64(mp.nextKValidity); i++ { - if mp.currHighestShardHdrsNonces[shardId] == uint64(0) { - continue - } - - _, _, err := process.GetShardHeaderFromPoolWithNonce( - i, - shardId, - mp.dataPool.ShardHeaders(), - mp.dataPool.HeadersNonces()) - if err != nil { - requestedBlockHeaders++ - go mp.onRequestHeaderHandlerByNonce(shardId, i) - } - } - } - - return requestedBlockHeaders + requestedBlockHeaders := uint32(0) + for shardId := uint32(0); shardId < mp.shardCoordinator.NumberOfShards(); shardId++ { + for i := mp.currHighestShardHdrsNonces[shardId] + 1; i <= mp.currHighestShardHdrsNonces[shardId]+uint64(mp.nextKValidity); i++ { + if mp.currHighestShardHdrsNonces[shardId] == uint64(0) { + continue + } + + _, _, err := process.GetShardHeaderFromPoolWithNonce( + i, + shardId, + mp.dataPool.ShardHeaders(), + mp.dataPool.HeadersNonces()) + if err != nil { + requestedBlockHeaders++ + go mp.onRequestHeaderHandlerByNonce(shardId, i) + } + } + } + + return requestedBlockHeaders } func (mp *metaProcessor) requestShardHeaders(metaBlock *block.MetaBlock) (uint32, uint32) { - mp.mutRequestedShardHdrsHashes.Lock() - - mp.allNeededShardHdrsFound = true - - if len(metaBlock.ShardInfo) == 0 { - mp.mutRequestedShardHdrsHashes.Unlock() - return 0, 0 - } - - missingHeaderHashes := mp.computeMissingHeaders(metaBlock) - - requestedBlockHeaders := uint32(0) - mp.requestedShardHdrsHashes = make(map[string]bool) - for shardId, headerHashes := range missingHeaderHashes { - for _, headerHash := range headerHashes { - requestedBlockHeaders++ - mp.requestedShardHdrsHashes[string(headerHash)] = true - go mp.onRequestHeaderHandler(shardId, headerHash) - } - } - - requestedFinalBlockHeaders := uint32(0) - if requestedBlockHeaders > 0 { - mp.allNeededShardHdrsFound = false - } else { - requestedFinalBlockHeaders = mp.requestFinalMissingHeaders() - if requestedFinalBlockHeaders > 0 { - mp.allNeededShardHdrsFound = false - } - } - - if !mp.allNeededShardHdrsFound { - process.EmptyChannel(mp.chRcvAllHdrs) - } - - mp.mutRequestedShardHdrsHashes.Unlock() - - return requestedBlockHeaders, requestedFinalBlockHeaders + mp.mutRequestedShardHdrsHashes.Lock() + + mp.allNeededShardHdrsFound = true + + if len(metaBlock.ShardInfo) == 0 { + mp.mutRequestedShardHdrsHashes.Unlock() + return 0, 0 + } + + missingHeaderHashes := mp.computeMissingHeaders(metaBlock) + + requestedBlockHeaders := uint32(0) + mp.requestedShardHdrsHashes = make(map[string]bool) + for shardId, headerHashes := range missingHeaderHashes { + for _, headerHash := range headerHashes { + requestedBlockHeaders++ + mp.requestedShardHdrsHashes[string(headerHash)] = true + go mp.onRequestHeaderHandler(shardId, headerHash) + } + } + + requestedFinalBlockHeaders := uint32(0) + if requestedBlockHeaders > 0 { + mp.allNeededShardHdrsFound = false + } else { + requestedFinalBlockHeaders = mp.requestFinalMissingHeaders() + if requestedFinalBlockHeaders > 0 { + mp.allNeededShardHdrsFound = false + } + } + + if !mp.allNeededShardHdrsFound { + process.EmptyChannel(mp.chRcvAllHdrs) + } + + mp.mutRequestedShardHdrsHashes.Unlock() + + return requestedBlockHeaders, 
requestedFinalBlockHeaders } func (mp *metaProcessor) computeMissingHeaders(metaBlock *block.MetaBlock) map[uint32][][]byte { - missingHeaders := make(map[uint32][][]byte) - mp.currHighestShardHdrsNonces = make(map[uint32]uint64, mp.shardCoordinator.NumberOfShards()) - for i := uint32(0); i < mp.shardCoordinator.NumberOfShards(); i++ { - mp.currHighestShardHdrsNonces[i] = uint64(0) - } - - for i := 0; i < len(metaBlock.ShardInfo); i++ { - shardData := metaBlock.ShardInfo[i] - hdr, err := process.GetShardHeaderFromPool( - shardData.HeaderHash, - mp.dataPool.ShardHeaders()) - if err != nil { - missingHeaders[shardData.ShardId] = append(missingHeaders[shardData.ShardId], shardData.HeaderHash) - continue - } - - if hdr.Nonce > mp.currHighestShardHdrsNonces[shardData.ShardId] { - mp.currHighestShardHdrsNonces[shardData.ShardId] = hdr.Nonce - } - } - - return missingHeaders + missingHeaders := make(map[uint32][][]byte) + mp.currHighestShardHdrsNonces = make(map[uint32]uint64, mp.shardCoordinator.NumberOfShards()) + for i := uint32(0); i < mp.shardCoordinator.NumberOfShards(); i++ { + mp.currHighestShardHdrsNonces[i] = uint64(0) + } + + for i := 0; i < len(metaBlock.ShardInfo); i++ { + shardData := metaBlock.ShardInfo[i] + hdr, err := process.GetShardHeaderFromPool( + shardData.HeaderHash, + mp.dataPool.ShardHeaders()) + if err != nil { + missingHeaders[shardData.ShardId] = append(missingHeaders[shardData.ShardId], shardData.HeaderHash) + continue + } + + if hdr.Nonce > mp.currHighestShardHdrsNonces[shardData.ShardId] { + mp.currHighestShardHdrsNonces[shardData.ShardId] = hdr.Nonce + } + } + + return missingHeaders } func (mp *metaProcessor) checkAndProcessShardMiniBlockHeader( - headerHash []byte, - shardMiniBlockHeader *block.ShardMiniBlockHeader, - hdrPool storage.Cacher, - round uint64, - shardId uint32, + headerHash []byte, + shardMiniBlockHeader *block.ShardMiniBlockHeader, + hdrPool storage.Cacher, + round uint64, + shardId uint32, ) error { - if hdrPool == nil || hdrPool.IsInterfaceNil() { - return process.ErrNilHeadersDataPool - } - // TODO: real processing has to be done here, using metachain state - return nil + if hdrPool == nil || hdrPool.IsInterfaceNil() { + return process.ErrNilHeadersDataPool + } + // TODO: real processing has to be done here, using metachain state + return nil } func (mp *metaProcessor) createShardInfo( - maxItemsInBlock uint32, - round uint64, - haveTime func() bool, + maxItemsInBlock uint32, + round uint64, + haveTime func() bool, ) ([]block.ShardData, error) { - shardInfo := make([]block.ShardData, 0) - lastPushedHdr := make(map[uint32]data.HeaderHandler, mp.shardCoordinator.NumberOfShards()) - - if mp.accounts.JournalLen() != 0 { - return nil, process.ErrAccountStateDirty - } - - if !haveTime() { - log.Info(fmt.Sprintf("time is up after entered in createShardInfo method\n")) - return shardInfo, nil - } - - hdrPool := mp.dataPool.ShardHeaders() - if hdrPool == nil { - return nil, process.ErrNilHeadersDataPool - } - - mbHdrs := uint32(0) - - timeBefore := time.Now() - orderedHdrs, orderedHdrHashes, sortedHdrPerShard, err := mp.getOrderedHdrs(round) - timeAfter := time.Now() - - if !haveTime() { - log.Info(fmt.Sprintf("time is up after ordered %d hdrs in %v sec\n", len(orderedHdrs), timeAfter.Sub(timeBefore).Seconds())) - return shardInfo, nil - } - - log.Debug(fmt.Sprintf("time elapsed to ordered %d hdrs: %v sec\n", len(orderedHdrs), timeAfter.Sub(timeBefore).Seconds())) - - if err != nil { - return nil, err - } - - log.Info(fmt.Sprintf("creating shard info has 
been started: have %d hdrs in pool\n", len(orderedHdrs))) - - // save last committed hdr for verification - mp.mutNotarizedHdrs.RLock() - if mp.notarizedHdrs == nil { - mp.mutNotarizedHdrs.RUnlock() - return nil, process.ErrNotarizedHdrsSliceIsNil - } - for shardId := uint32(0); shardId < mp.shardCoordinator.NumberOfShards(); shardId++ { - lastPushedHdr[shardId] = mp.lastNotarizedHdrForShard(shardId) - } - mp.mutNotarizedHdrs.RUnlock() - - for index := range orderedHdrs { - shId := orderedHdrs[index].ShardId - - lastHdr, ok := lastPushedHdr[shId].(*block.Header) - if !ok { - continue - } - - isFinal, _ := mp.isShardHeaderValidFinal(orderedHdrs[index], lastHdr, sortedHdrPerShard[shId]) - if !isFinal { - continue - } - - lastPushedHdr[shId] = orderedHdrs[index] - - shardData := block.ShardData{} - shardData.ShardMiniBlockHeaders = make([]block.ShardMiniBlockHeader, 0) - shardData.TxCount = orderedHdrs[index].TxCount - shardData.ShardId = orderedHdrs[index].ShardId - shardData.HeaderHash = orderedHdrHashes[index] - - snapshot := mp.accounts.JournalLen() - - for i := 0; i < len(orderedHdrs[index].MiniBlockHeaders); i++ { - if !haveTime() { - break - } - - shardMiniBlockHeader := block.ShardMiniBlockHeader{} - shardMiniBlockHeader.SenderShardId = orderedHdrs[index].MiniBlockHeaders[i].SenderShardID - shardMiniBlockHeader.ReceiverShardId = orderedHdrs[index].MiniBlockHeaders[i].ReceiverShardID - shardMiniBlockHeader.Hash = orderedHdrs[index].MiniBlockHeaders[i].Hash - shardMiniBlockHeader.TxCount = orderedHdrs[index].MiniBlockHeaders[i].TxCount - - // execute shard miniblock to change the trie root hash - err := mp.checkAndProcessShardMiniBlockHeader( - orderedHdrHashes[index], - &shardMiniBlockHeader, - hdrPool, - round, - shardData.ShardId, - ) - - if err != nil { - log.Error(err.Error()) - err = mp.accounts.RevertToSnapshot(snapshot) - if err != nil { - log.Error(err.Error()) - } - break - } - - shardData.ShardMiniBlockHeaders = append(shardData.ShardMiniBlockHeaders, shardMiniBlockHeader) - mbHdrs++ - - recordsAddedInHeader := mbHdrs + uint32(len(shardInfo)) - spaceRemained := int32(maxItemsInBlock) - int32(recordsAddedInHeader) - 1 - - if spaceRemained <= 0 { - log.Info(fmt.Sprintf("max hdrs accepted in one block is reached: added %d hdrs from %d hdrs\n", mbHdrs, len(orderedHdrs))) - - if len(shardData.ShardMiniBlockHeaders) == len(orderedHdrs[index].MiniBlockHeaders) { - shardInfo = append(shardInfo, shardData) - } - - log.Info(fmt.Sprintf("creating shard info has been finished: created %d shard data\n", len(shardInfo))) - return shardInfo, nil - } - } - - if !haveTime() { - log.Info(fmt.Sprintf("time is up: added %d hdrs from %d hdrs\n", mbHdrs, len(orderedHdrs))) - - if len(shardData.ShardMiniBlockHeaders) == len(orderedHdrs[index].MiniBlockHeaders) { - shardInfo = append(shardInfo, shardData) - } - - log.Info(fmt.Sprintf("creating shard info has been finished: created %d shard data\n", len(shardInfo))) - return shardInfo, nil - } - - if len(shardData.ShardMiniBlockHeaders) == len(orderedHdrs[index].MiniBlockHeaders) { - shardInfo = append(shardInfo, shardData) - } - } - - log.Info(fmt.Sprintf("creating shard info has been finished: created %d shard data\n", len(shardInfo))) - return shardInfo, nil + shardInfo := make([]block.ShardData, 0) + lastPushedHdr := make(map[uint32]data.HeaderHandler, mp.shardCoordinator.NumberOfShards()) + + if mp.accounts.JournalLen() != 0 { + return nil, process.ErrAccountStateDirty + } + + if !haveTime() { + log.Info(fmt.Sprintf("time is up after entered in 
createShardInfo method\n")) + return shardInfo, nil + } + + hdrPool := mp.dataPool.ShardHeaders() + if hdrPool == nil { + return nil, process.ErrNilHeadersDataPool + } + + mbHdrs := uint32(0) + + timeBefore := time.Now() + orderedHdrs, orderedHdrHashes, sortedHdrPerShard, err := mp.getOrderedHdrs(round) + timeAfter := time.Now() + + if !haveTime() { + log.Info(fmt.Sprintf("time is up after ordered %d hdrs in %v sec\n", len(orderedHdrs), timeAfter.Sub(timeBefore).Seconds())) + return shardInfo, nil + } + + log.Debug(fmt.Sprintf("time elapsed to ordered %d hdrs: %v sec\n", len(orderedHdrs), timeAfter.Sub(timeBefore).Seconds())) + + if err != nil { + return nil, err + } + + log.Info(fmt.Sprintf("creating shard info has been started: have %d hdrs in pool\n", len(orderedHdrs))) + + // save last committed hdr for verification + mp.mutNotarizedHdrs.RLock() + if mp.notarizedHdrs == nil { + mp.mutNotarizedHdrs.RUnlock() + return nil, process.ErrNotarizedHdrsSliceIsNil + } + for shardId := uint32(0); shardId < mp.shardCoordinator.NumberOfShards(); shardId++ { + lastPushedHdr[shardId] = mp.lastNotarizedHdrForShard(shardId) + } + mp.mutNotarizedHdrs.RUnlock() + + for index := range orderedHdrs { + shId := orderedHdrs[index].ShardId + + lastHdr, ok := lastPushedHdr[shId].(*block.Header) + if !ok { + continue + } + + isFinal, _ := mp.isShardHeaderValidFinal(orderedHdrs[index], lastHdr, sortedHdrPerShard[shId]) + if !isFinal { + continue + } + + lastPushedHdr[shId] = orderedHdrs[index] + + shardData := block.ShardData{} + shardData.ShardMiniBlockHeaders = make([]block.ShardMiniBlockHeader, 0) + shardData.TxCount = orderedHdrs[index].TxCount + shardData.ShardId = orderedHdrs[index].ShardId + shardData.HeaderHash = orderedHdrHashes[index] + + snapshot := mp.accounts.JournalLen() + + for i := 0; i < len(orderedHdrs[index].MiniBlockHeaders); i++ { + if !haveTime() { + break + } + + shardMiniBlockHeader := block.ShardMiniBlockHeader{} + shardMiniBlockHeader.SenderShardId = orderedHdrs[index].MiniBlockHeaders[i].SenderShardID + shardMiniBlockHeader.ReceiverShardId = orderedHdrs[index].MiniBlockHeaders[i].ReceiverShardID + shardMiniBlockHeader.Hash = orderedHdrs[index].MiniBlockHeaders[i].Hash + shardMiniBlockHeader.TxCount = orderedHdrs[index].MiniBlockHeaders[i].TxCount + + // execute shard miniblock to change the trie root hash + err := mp.checkAndProcessShardMiniBlockHeader( + orderedHdrHashes[index], + &shardMiniBlockHeader, + hdrPool, + round, + shardData.ShardId, + ) + + if err != nil { + log.Error(err.Error()) + err = mp.accounts.RevertToSnapshot(snapshot) + if err != nil { + log.Error(err.Error()) + } + break + } + + shardData.ShardMiniBlockHeaders = append(shardData.ShardMiniBlockHeaders, shardMiniBlockHeader) + mbHdrs++ + + recordsAddedInHeader := mbHdrs + uint32(len(shardInfo)) + spaceRemained := int32(maxItemsInBlock) - int32(recordsAddedInHeader) - 1 + + if spaceRemained <= 0 { + log.Info(fmt.Sprintf("max hdrs accepted in one block is reached: added %d hdrs from %d hdrs\n", mbHdrs, len(orderedHdrs))) + + if len(shardData.ShardMiniBlockHeaders) == len(orderedHdrs[index].MiniBlockHeaders) { + shardInfo = append(shardInfo, shardData) + } + + log.Info(fmt.Sprintf("creating shard info has been finished: created %d shard data\n", len(shardInfo))) + return shardInfo, nil + } + } + + if !haveTime() { + log.Info(fmt.Sprintf("time is up: added %d hdrs from %d hdrs\n", mbHdrs, len(orderedHdrs))) + + if len(shardData.ShardMiniBlockHeaders) == len(orderedHdrs[index].MiniBlockHeaders) { + shardInfo = 
append(shardInfo, shardData) + } + + log.Info(fmt.Sprintf("creating shard info has been finished: created %d shard data\n", len(shardInfo))) + return shardInfo, nil + } + + if len(shardData.ShardMiniBlockHeaders) == len(orderedHdrs[index].MiniBlockHeaders) { + shardInfo = append(shardInfo, shardData) + } + } + + log.Info(fmt.Sprintf("creating shard info has been finished: created %d shard data\n", len(shardInfo))) + return shardInfo, nil } func (mp *metaProcessor) createPeerInfo() ([]block.PeerData, error) { - // TODO: to be implemented - peerInfo := make([]block.PeerData, 0) - return peerInfo, nil + // TODO: to be implemented + peerInfo := make([]block.PeerData, 0) + return peerInfo, nil } // CreateBlockHeader creates a miniblock header list given a block body func (mp *metaProcessor) CreateBlockHeader(bodyHandler data.BodyHandler, round uint64, haveTime func() bool) (data.HeaderHandler, error) { - log.Debug(fmt.Sprintf("started creating block header in round %d\n", round)) - // TODO: add PrevRandSeed and RandSeed when BLS signing is completed - header := &block.MetaBlock{ - ShardInfo: make([]block.ShardData, 0), - PeerInfo: make([]block.PeerData, 0), - PrevRandSeed: make([]byte, 0), - RandSeed: make([]byte, 0), - } - - defer func() { - go mp.checkAndRequestIfShardHeadersMissing(round) - }() - - shardInfo, err := mp.createShardInfo(mp.blockSizeThrottler.MaxItemsToAdd(), round, haveTime) - if err != nil { - return nil, err - } - - peerInfo, err := mp.createPeerInfo() - if err != nil { - return nil, err - } - - header.ShardInfo = shardInfo - header.PeerInfo = peerInfo - header.RootHash = mp.getRootHash() - header.TxCount = getTxCount(shardInfo) - - mp.blockSizeThrottler.Add( - round, - core.Max(header.ItemsInBody(), header.ItemsInHeader())) - - return header, nil + log.Debug(fmt.Sprintf("started creating block header in round %d\n", round)) + // TODO: add PrevRandSeed and RandSeed when BLS signing is completed + header := &block.MetaBlock{ + ShardInfo: make([]block.ShardData, 0), + PeerInfo: make([]block.PeerData, 0), + PrevRandSeed: make([]byte, 0), + RandSeed: make([]byte, 0), + } + + defer func() { + go mp.checkAndRequestIfShardHeadersMissing(round) + }() + + shardInfo, err := mp.createShardInfo(mp.blockSizeThrottler.MaxItemsToAdd(), round, haveTime) + if err != nil { + return nil, err + } + + peerInfo, err := mp.createPeerInfo() + if err != nil { + return nil, err + } + + header.ShardInfo = shardInfo + header.PeerInfo = peerInfo + header.RootHash = mp.getRootHash() + header.TxCount = getTxCount(shardInfo) + + mp.blockSizeThrottler.Add( + round, + core.Max(header.ItemsInBody(), header.ItemsInHeader())) + + return header, nil } func (mp *metaProcessor) waitForBlockHeaders(waitTime time.Duration) error { - select { - case <-mp.chRcvAllHdrs: - return nil - case <-time.After(waitTime): - return process.ErrTimeIsOut - } + select { + case <-mp.chRcvAllHdrs: + return nil + case <-time.After(waitTime): + return process.ErrTimeIsOut + } } func (mp *metaProcessor) displayMetaBlock(header *block.MetaBlock) { - if header == nil { - return - } + if header == nil { + return + } - headerHash, err := core.CalculateHash(mp.marshalizer, mp.hasher, header) - if err != nil { - log.Error(err.Error()) - return - } + headerHash, err := core.CalculateHash(mp.marshalizer, mp.hasher, header) + if err != nil { + log.Error(err.Error()) + return + } - mp.displayLogInfo(header, headerHash) + mp.displayLogInfo(header, headerHash) } func (mp *metaProcessor) displayLogInfo( - header *block.MetaBlock, - headerHash 
[]byte, + header *block.MetaBlock, + headerHash []byte, ) { - dispHeader, dispLines := createDisplayableMetaHeader(header) - - tblString, err := display.CreateTableString(dispHeader, dispLines) - if err != nil { - log.Error(err.Error()) - return - } - - shardMBHeaderCounterMutex.RLock() - tblString = tblString + fmt.Sprintf("\nHeader hash: %s\n\nTotal shard MB headers "+ - "processed until now: %d. Total shard MB headers processed for this block: %d. Total shard headers remained in pool: %d\n", - core.ToB64(headerHash), - shardMBHeadersTotalProcessed, - shardMBHeadersCurrentBlockProcessed, - mp.getHeadersCountInPool()) - shardMBHeaderCounterMutex.RUnlock() - - log.Info(tblString) + dispHeader, dispLines := createDisplayableMetaHeader(header) + + tblString, err := display.CreateTableString(dispHeader, dispLines) + if err != nil { + log.Error(err.Error()) + return + } + + shardMBHeaderCounterMutex.RLock() + tblString = tblString + fmt.Sprintf("\nHeader hash: %s\n\nTotal shard MB headers "+ + "processed until now: %d. Total shard MB headers processed for this block: %d. Total shard headers remained in pool: %d\n", + core.ToB64(headerHash), + shardMBHeadersTotalProcessed, + shardMBHeadersCurrentBlockProcessed, + mp.getHeadersCountInPool()) + shardMBHeaderCounterMutex.RUnlock() + + log.Info(tblString) } func createDisplayableMetaHeader( - header *block.MetaBlock, + header *block.MetaBlock, ) ([]string, []*display.LineData) { - tableHeader := []string{"Part", "Parameter", "Value"} + tableHeader := []string{"Part", "Parameter", "Value"} - lines := displayHeader(header) + lines := displayHeader(header) - metaLines := make([]*display.LineData, 0) - metaLines = append(metaLines, display.NewLineData(false, []string{ - "Header", - "Block type", - "MetaBlock"})) - metaLines = append(metaLines, lines...) + metaLines := make([]*display.LineData, 0) + metaLines = append(metaLines, display.NewLineData(false, []string{ + "Header", + "Block type", + "MetaBlock"})) + metaLines = append(metaLines, lines...) 
- metaLines = displayShardInfo(metaLines, header) - return tableHeader, metaLines + metaLines = displayShardInfo(metaLines, header) + return tableHeader, metaLines } func displayShardInfo(lines []*display.LineData, header *block.MetaBlock) []*display.LineData { - shardMBHeaderCounterMutex.Lock() - shardMBHeadersCurrentBlockProcessed = 0 - shardMBHeaderCounterMutex.Unlock() - - for i := 0; i < len(header.ShardInfo); i++ { - shardData := header.ShardInfo[i] - - lines = append(lines, display.NewLineData(false, []string{ - fmt.Sprintf("ShardData_%d", shardData.ShardId), - "Header hash", - base64.StdEncoding.EncodeToString(shardData.HeaderHash)})) - - if shardData.ShardMiniBlockHeaders == nil || len(shardData.ShardMiniBlockHeaders) == 0 { - lines = append(lines, display.NewLineData(false, []string{ - "", "ShardMiniBlockHeaders", ""})) - } - - shardMBHeaderCounterMutex.Lock() - shardMBHeadersCurrentBlockProcessed += len(shardData.ShardMiniBlockHeaders) - shardMBHeadersTotalProcessed += len(shardData.ShardMiniBlockHeaders) - shardMBHeaderCounterMutex.Unlock() - - for j := 0; j < len(shardData.ShardMiniBlockHeaders); j++ { - if j == 0 || j >= len(shardData.ShardMiniBlockHeaders)-1 { - senderShard := shardData.ShardMiniBlockHeaders[j].SenderShardId - receiverShard := shardData.ShardMiniBlockHeaders[j].ReceiverShardId - lines = append(lines, display.NewLineData(false, []string{ - "", - fmt.Sprintf("%d ShardMiniBlockHeaderHash_%d_%d", j+1, senderShard, receiverShard), - core.ToB64(shardData.ShardMiniBlockHeaders[j].Hash)})) - } else if j == 1 { - lines = append(lines, display.NewLineData(false, []string{ - "", - fmt.Sprintf("..."), - fmt.Sprintf("...")})) - } - } - - lines[len(lines)-1].HorizontalRuleAfter = true - } - - return lines + shardMBHeaderCounterMutex.Lock() + shardMBHeadersCurrentBlockProcessed = 0 + shardMBHeaderCounterMutex.Unlock() + + for i := 0; i < len(header.ShardInfo); i++ { + shardData := header.ShardInfo[i] + + lines = append(lines, display.NewLineData(false, []string{ + fmt.Sprintf("ShardData_%d", shardData.ShardId), + "Header hash", + base64.StdEncoding.EncodeToString(shardData.HeaderHash)})) + + if shardData.ShardMiniBlockHeaders == nil || len(shardData.ShardMiniBlockHeaders) == 0 { + lines = append(lines, display.NewLineData(false, []string{ + "", "ShardMiniBlockHeaders", ""})) + } + + shardMBHeaderCounterMutex.Lock() + shardMBHeadersCurrentBlockProcessed += len(shardData.ShardMiniBlockHeaders) + shardMBHeadersTotalProcessed += len(shardData.ShardMiniBlockHeaders) + shardMBHeaderCounterMutex.Unlock() + + for j := 0; j < len(shardData.ShardMiniBlockHeaders); j++ { + if j == 0 || j >= len(shardData.ShardMiniBlockHeaders)-1 { + senderShard := shardData.ShardMiniBlockHeaders[j].SenderShardId + receiverShard := shardData.ShardMiniBlockHeaders[j].ReceiverShardId + lines = append(lines, display.NewLineData(false, []string{ + "", + fmt.Sprintf("%d ShardMiniBlockHeaderHash_%d_%d", j+1, senderShard, receiverShard), + core.ToB64(shardData.ShardMiniBlockHeaders[j].Hash)})) + } else if j == 1 { + lines = append(lines, display.NewLineData(false, []string{ + "", + fmt.Sprintf("..."), + fmt.Sprintf("...")})) + } + } + + lines[len(lines)-1].HorizontalRuleAfter = true + } + + return lines } // MarshalizedDataToBroadcast prepares underlying data into a marshalized object according to destination func (mp *metaProcessor) MarshalizedDataToBroadcast( - header data.HeaderHandler, - bodyHandler data.BodyHandler, + header data.HeaderHandler, + bodyHandler data.BodyHandler, ) (map[uint32][]byte, 
map[string][][]byte, error) { - mrsData := make(map[uint32][]byte) - mrsTxs := make(map[string][][]byte) + mrsData := make(map[uint32][]byte) + mrsTxs := make(map[string][][]byte) - // send headers which can validate the current header + // send headers which can validate the current header - return mrsData, mrsTxs, nil + return mrsData, mrsTxs, nil } func (mp *metaProcessor) getOrderedHdrs(round uint64) ([]*block.Header, [][]byte, map[uint32][]*block.Header, error) { - hdrStore := mp.dataPool.ShardHeaders() - if hdrStore == nil { - return nil, nil, nil, process.ErrNilCacher - } - - hashAndBlockMap := make(map[uint32][]*hashAndHdr, mp.shardCoordinator.NumberOfShards()) - headersMap := make(map[uint32][]*block.Header) - headers := make([]*block.Header, 0) - hdrHashes := make([][]byte, 0) - - mp.mutNotarizedHdrs.RLock() - if mp.notarizedHdrs == nil { - mp.mutNotarizedHdrs.RUnlock() - return nil, nil, nil, process.ErrNotarizedHdrsSliceIsNil - } - - // get keys and arrange them into shards - for _, key := range hdrStore.Keys() { - val, _ := hdrStore.Peek(key) - if val == nil { - continue - } - - hdr, ok := val.(*block.Header) - if !ok { - continue - } - - if hdr.GetRound() > round { - continue - } - - currShardId := hdr.ShardId - if mp.lastNotarizedHdrForShard(currShardId) == nil { - continue - } - - if hdr.GetRound() <= mp.lastNotarizedHdrForShard(currShardId).GetRound() { - continue - } - - if hdr.GetNonce() <= mp.lastNotarizedHdrForShard(currShardId).GetNonce() { - continue - } - - hashAndBlockMap[currShardId] = append(hashAndBlockMap[currShardId], - &hashAndHdr{hdr: hdr, hash: key}) - } - mp.mutNotarizedHdrs.RUnlock() - - // sort headers for each shard - maxHdrLen := 0 - for shardId := uint32(0); shardId < mp.shardCoordinator.NumberOfShards(); shardId++ { - hdrsForShard := hashAndBlockMap[shardId] - if len(hdrsForShard) == 0 { - continue - } - - sort.Slice(hdrsForShard, func(i, j int) bool { - return hdrsForShard[i].hdr.GetNonce() < hdrsForShard[j].hdr.GetNonce() - }) - - tmpHdrLen := len(hdrsForShard) - if maxHdrLen < tmpHdrLen { - maxHdrLen = tmpHdrLen - } - } - - // copy from map to lists - equality between number of headers per shard - for i := 0; i < maxHdrLen; i++ { - for shardId := uint32(0); shardId < mp.shardCoordinator.NumberOfShards(); shardId++ { - hdrsForShard := hashAndBlockMap[shardId] - if i >= len(hdrsForShard) { - continue - } - - hdr, ok := hdrsForShard[i].hdr.(*block.Header) - if !ok { - continue - } - - headers = append(headers, hdr) - hdrHashes = append(hdrHashes, hdrsForShard[i].hash) - headersMap[shardId] = append(headersMap[shardId], hdr) - } - } - - return headers, hdrHashes, headersMap, nil + hdrStore := mp.dataPool.ShardHeaders() + if hdrStore == nil { + return nil, nil, nil, process.ErrNilCacher + } + + hashAndBlockMap := make(map[uint32][]*hashAndHdr, mp.shardCoordinator.NumberOfShards()) + headersMap := make(map[uint32][]*block.Header) + headers := make([]*block.Header, 0) + hdrHashes := make([][]byte, 0) + + mp.mutNotarizedHdrs.RLock() + if mp.notarizedHdrs == nil { + mp.mutNotarizedHdrs.RUnlock() + return nil, nil, nil, process.ErrNotarizedHdrsSliceIsNil + } + + // get keys and arrange them into shards + for _, key := range hdrStore.Keys() { + val, _ := hdrStore.Peek(key) + if val == nil { + continue + } + + hdr, ok := val.(*block.Header) + if !ok { + continue + } + + if hdr.GetRound() > round { + continue + } + + currShardId := hdr.ShardId + if mp.lastNotarizedHdrForShard(currShardId) == nil { + continue + } + + if hdr.GetRound() <= 
mp.lastNotarizedHdrForShard(currShardId).GetRound() { + continue + } + + if hdr.GetNonce() <= mp.lastNotarizedHdrForShard(currShardId).GetNonce() { + continue + } + + hashAndBlockMap[currShardId] = append(hashAndBlockMap[currShardId], + &hashAndHdr{hdr: hdr, hash: key}) + } + mp.mutNotarizedHdrs.RUnlock() + + // sort headers for each shard + maxHdrLen := 0 + for shardId := uint32(0); shardId < mp.shardCoordinator.NumberOfShards(); shardId++ { + hdrsForShard := hashAndBlockMap[shardId] + if len(hdrsForShard) == 0 { + continue + } + + sort.Slice(hdrsForShard, func(i, j int) bool { + return hdrsForShard[i].hdr.GetNonce() < hdrsForShard[j].hdr.GetNonce() + }) + + tmpHdrLen := len(hdrsForShard) + if maxHdrLen < tmpHdrLen { + maxHdrLen = tmpHdrLen + } + } + + // copy from map to lists - equality between number of headers per shard + for i := 0; i < maxHdrLen; i++ { + for shardId := uint32(0); shardId < mp.shardCoordinator.NumberOfShards(); shardId++ { + hdrsForShard := hashAndBlockMap[shardId] + if i >= len(hdrsForShard) { + continue + } + + hdr, ok := hdrsForShard[i].hdr.(*block.Header) + if !ok { + continue + } + + headers = append(headers, hdr) + hdrHashes = append(hdrHashes, hdrsForShard[i].hash) + headersMap[shardId] = append(headersMap[shardId], hdr) + } + } + + return headers, hdrHashes, headersMap, nil } func getTxCount(shardInfo []block.ShardData) uint32 { - txs := uint32(0) - for i := 0; i < len(shardInfo); i++ { - for j := 0; j < len(shardInfo[i].ShardMiniBlockHeaders); j++ { - txs += shardInfo[i].ShardMiniBlockHeaders[j].TxCount - } - } - - return txs + txs := uint32(0) + for i := 0; i < len(shardInfo); i++ { + for j := 0; j < len(shardInfo[i].ShardMiniBlockHeaders); j++ { + txs += shardInfo[i].ShardMiniBlockHeaders[j].TxCount + } + } + + return txs } func (mp *metaProcessor) getHeadersCountInPool() int { - headerPool := mp.dataPool.ShardHeaders() - if headerPool == nil { - log.Error(process.ErrNilHeadersDataPool.Error()) - return -1 - } + headerPool := mp.dataPool.ShardHeaders() + if headerPool == nil { + log.Error(process.ErrNilHeadersDataPool.Error()) + return -1 + } - return headerPool.Len() + return headerPool.Len() } // DecodeBlockBody method decodes block body from a given byte array func (mp *metaProcessor) DecodeBlockBody(dta []byte) data.BodyHandler { - if dta == nil { - return nil - } + if dta == nil { + return nil + } - var body block.MetaBlockBody + var body block.MetaBlockBody - err := mp.marshalizer.Unmarshal(&body, dta) - if err != nil { - log.Error(err.Error()) - return nil - } + err := mp.marshalizer.Unmarshal(&body, dta) + if err != nil { + log.Error(err.Error()) + return nil + } - return &body + return &body } // DecodeBlockHeader method decodes block header from a given byte array func (mp *metaProcessor) DecodeBlockHeader(dta []byte) data.HeaderHandler { - if dta == nil { - return nil - } + if dta == nil { + return nil + } - var header block.MetaBlock + var header block.MetaBlock - err := mp.marshalizer.Unmarshal(&header, dta) - if err != nil { - log.Error(err.Error()) - return nil - } + err := mp.marshalizer.Unmarshal(&header, dta) + if err != nil { + log.Error(err.Error()) + return nil + } - return &header + return &header } // IsInterfaceNil returns true if there is no value under the interface func (mp *metaProcessor) IsInterfaceNil() bool { - if mp == nil { - return true - } - return false + if mp == nil { + return true + } + return false } diff --git a/process/block/preprocess/export_test.go b/process/block/preprocess/export_test.go index 
a51829514ce..0c628d58857 100644 --- a/process/block/preprocess/export_test.go +++ b/process/block/preprocess/export_test.go @@ -1,53 +1,53 @@ package preprocess func (txs *transactions) ReceivedTransaction(txHash []byte) { - txs.receivedTransaction(txHash) + txs.receivedTransaction(txHash) } func (txs *transactions) AddTxHashToRequestedList(txHash []byte) { - txs.txsForCurrBlock.mutTxsForBlock.Lock() - defer txs.txsForCurrBlock.mutTxsForBlock.Unlock() + txs.txsForCurrBlock.mutTxsForBlock.Lock() + defer txs.txsForCurrBlock.mutTxsForBlock.Unlock() - if txs.txsForCurrBlock.txHashAndInfo == nil { - txs.txsForCurrBlock.txHashAndInfo = make(map[string]*txInfo) - } - txs.txsForCurrBlock.txHashAndInfo[string(txHash)] = &txInfo{txShardInfo: &txShardInfo{}} + if txs.txsForCurrBlock.txHashAndInfo == nil { + txs.txsForCurrBlock.txHashAndInfo = make(map[string]*txInfo) + } + txs.txsForCurrBlock.txHashAndInfo[string(txHash)] = &txInfo{txShardInfo: &txShardInfo{}} } func (txs *transactions) IsTxHashRequested(txHash []byte) bool { - txs.txsForCurrBlock.mutTxsForBlock.Lock() - defer txs.txsForCurrBlock.mutTxsForBlock.Unlock() + txs.txsForCurrBlock.mutTxsForBlock.Lock() + defer txs.txsForCurrBlock.mutTxsForBlock.Unlock() - return txs.txsForCurrBlock.txHashAndInfo[string(txHash)].tx == nil || - txs.txsForCurrBlock.txHashAndInfo[string(txHash)].tx.IsInterfaceNil() + return txs.txsForCurrBlock.txHashAndInfo[string(txHash)].tx == nil || + txs.txsForCurrBlock.txHashAndInfo[string(txHash)].tx.IsInterfaceNil() } func (txs *transactions) SetMissingTxs(missingTxs int) { - txs.txsForCurrBlock.mutTxsForBlock.Lock() - txs.txsForCurrBlock.missingTxs = missingTxs - txs.txsForCurrBlock.mutTxsForBlock.Unlock() + txs.txsForCurrBlock.mutTxsForBlock.Lock() + txs.txsForCurrBlock.missingTxs = missingTxs + txs.txsForCurrBlock.mutTxsForBlock.Unlock() } func (scr *smartContractResults) AddScrHashToRequestedList(txHash []byte) { - scr.scrForBlock.mutTxsForBlock.Lock() - defer scr.scrForBlock.mutTxsForBlock.Unlock() + scr.scrForBlock.mutTxsForBlock.Lock() + defer scr.scrForBlock.mutTxsForBlock.Unlock() - if scr.scrForBlock.txHashAndInfo == nil { - scr.scrForBlock.txHashAndInfo = make(map[string]*txInfo) - } - scr.scrForBlock.txHashAndInfo[string(txHash)] = &txInfo{txShardInfo: &txShardInfo{}} + if scr.scrForBlock.txHashAndInfo == nil { + scr.scrForBlock.txHashAndInfo = make(map[string]*txInfo) + } + scr.scrForBlock.txHashAndInfo[string(txHash)] = &txInfo{txShardInfo: &txShardInfo{}} } func (scr *smartContractResults) IsScrHashRequested(txHash []byte) bool { - scr.scrForBlock.mutTxsForBlock.Lock() - defer scr.scrForBlock.mutTxsForBlock.Unlock() + scr.scrForBlock.mutTxsForBlock.Lock() + defer scr.scrForBlock.mutTxsForBlock.Unlock() - return scr.scrForBlock.txHashAndInfo[string(txHash)].tx == nil || - scr.scrForBlock.txHashAndInfo[string(txHash)].tx.IsInterfaceNil() + return scr.scrForBlock.txHashAndInfo[string(txHash)].tx == nil || + scr.scrForBlock.txHashAndInfo[string(txHash)].tx.IsInterfaceNil() } func (scr *smartContractResults) SetMissingScr(missingTxs int) { - scr.scrForBlock.mutTxsForBlock.Lock() - scr.scrForBlock.missingTxs = missingTxs - scr.scrForBlock.mutTxsForBlock.Unlock() + scr.scrForBlock.mutTxsForBlock.Lock() + scr.scrForBlock.missingTxs = missingTxs + scr.scrForBlock.mutTxsForBlock.Unlock() } diff --git a/process/block/preprocess/rewardTxPreProcessor.go b/process/block/preprocess/rewardTxPreProcessor.go index 37700adbdcf..d164ba36fd3 100644 --- a/process/block/preprocess/rewardTxPreProcessor.go +++ 
b/process/block/preprocess/rewardTxPreProcessor.go @@ -1,481 +1,481 @@ package preprocess import ( - "fmt" - "time" - - "github.com/ElrondNetwork/elrond-go/data" - "github.com/ElrondNetwork/elrond-go/data/block" - "github.com/ElrondNetwork/elrond-go/data/rewardTx" - "github.com/ElrondNetwork/elrond-go/data/state" - "github.com/ElrondNetwork/elrond-go/dataRetriever" - "github.com/ElrondNetwork/elrond-go/hashing" - "github.com/ElrondNetwork/elrond-go/marshal" - "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/sharding" - "github.com/ElrondNetwork/elrond-go/storage" + "fmt" + "time" + + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/storage" ) type rewardTxPreprocessor struct { - *basePreProcess - chReceivedAllRewardTxs chan bool - onRequestRewardTx func(shardID uint32, txHashes [][]byte) - rewardTxsForBlock txsForBlock - rewardTxPool dataRetriever.ShardedDataCacherNotifier - storage dataRetriever.StorageService - rewardsProcessor process.RewardTransactionProcessor - accounts state.AccountsAdapter + *basePreProcess + chReceivedAllRewardTxs chan bool + onRequestRewardTx func(shardID uint32, txHashes [][]byte) + rewardTxsForBlock txsForBlock + rewardTxPool dataRetriever.ShardedDataCacherNotifier + storage dataRetriever.StorageService + rewardsProcessor process.RewardTransactionProcessor + accounts state.AccountsAdapter } // NewRewardTxPreprocessor creates a new reward transaction preprocessor object func NewRewardTxPreprocessor( - rewardTxDataPool dataRetriever.ShardedDataCacherNotifier, - store dataRetriever.StorageService, - hasher hashing.Hasher, - marshalizer marshal.Marshalizer, - rewardProcessor process.RewardTransactionProcessor, - shardCoordinator sharding.Coordinator, - accounts state.AccountsAdapter, - onRequestRewardTransaction func(shardID uint32, txHashes [][]byte), + rewardTxDataPool dataRetriever.ShardedDataCacherNotifier, + store dataRetriever.StorageService, + hasher hashing.Hasher, + marshalizer marshal.Marshalizer, + rewardProcessor process.RewardTransactionProcessor, + shardCoordinator sharding.Coordinator, + accounts state.AccountsAdapter, + onRequestRewardTransaction func(shardID uint32, txHashes [][]byte), ) (*rewardTxPreprocessor, error) { - if hasher == nil || hasher.IsInterfaceNil() { - return nil, process.ErrNilHasher - } - if marshalizer == nil || marshalizer.IsInterfaceNil() { - return nil, process.ErrNilMarshalizer - } - if rewardTxDataPool == nil || rewardTxDataPool.IsInterfaceNil() { - return nil, process.ErrNilRewardTxDataPool - } - if store == nil || store.IsInterfaceNil() { - return nil, process.ErrNilStorage - } - if rewardProcessor == nil || rewardProcessor.IsInterfaceNil() { - return nil, process.ErrNilTxProcessor - } - if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { - return nil, process.ErrNilShardCoordinator - } - if accounts == nil || accounts.IsInterfaceNil() { - return nil, process.ErrNilAccountsAdapter - } - if onRequestRewardTransaction == nil { - return nil, process.ErrNilRequestHandler - } - - bpp := &basePreProcess{ - hasher: hasher, - marshalizer: marshalizer, - 
shardCoordinator: shardCoordinator, - } - - rtp := &rewardTxPreprocessor{ - basePreProcess: bpp, - storage: store, - rewardTxPool: rewardTxDataPool, - onRequestRewardTx: onRequestRewardTransaction, - rewardsProcessor: rewardProcessor, - accounts: accounts, - } - - rtp.chReceivedAllRewardTxs = make(chan bool) - rtp.rewardTxPool.RegisterHandler(rtp.receivedRewardTransaction) - rtp.rewardTxsForBlock.txHashAndInfo = make(map[string]*txInfo) - - return rtp, nil + if hasher == nil || hasher.IsInterfaceNil() { + return nil, process.ErrNilHasher + } + if marshalizer == nil || marshalizer.IsInterfaceNil() { + return nil, process.ErrNilMarshalizer + } + if rewardTxDataPool == nil || rewardTxDataPool.IsInterfaceNil() { + return nil, process.ErrNilRewardTxDataPool + } + if store == nil || store.IsInterfaceNil() { + return nil, process.ErrNilStorage + } + if rewardProcessor == nil || rewardProcessor.IsInterfaceNil() { + return nil, process.ErrNilTxProcessor + } + if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { + return nil, process.ErrNilShardCoordinator + } + if accounts == nil || accounts.IsInterfaceNil() { + return nil, process.ErrNilAccountsAdapter + } + if onRequestRewardTransaction == nil { + return nil, process.ErrNilRequestHandler + } + + bpp := &basePreProcess{ + hasher: hasher, + marshalizer: marshalizer, + shardCoordinator: shardCoordinator, + } + + rtp := &rewardTxPreprocessor{ + basePreProcess: bpp, + storage: store, + rewardTxPool: rewardTxDataPool, + onRequestRewardTx: onRequestRewardTransaction, + rewardsProcessor: rewardProcessor, + accounts: accounts, + } + + rtp.chReceivedAllRewardTxs = make(chan bool) + rtp.rewardTxPool.RegisterHandler(rtp.receivedRewardTransaction) + rtp.rewardTxsForBlock.txHashAndInfo = make(map[string]*txInfo) + + return rtp, nil } // waitForRewardTxHashes waits for a call whether all the requested smartContractResults appeared func (rtp *rewardTxPreprocessor) waitForRewardTxHashes(waitTime time.Duration) error { - select { - case <-rtp.chReceivedAllRewardTxs: - return nil - case <-time.After(waitTime): - return process.ErrTimeIsOut - } + select { + case <-rtp.chReceivedAllRewardTxs: + return nil + case <-time.After(waitTime): + return process.ErrTimeIsOut + } } // IsDataPrepared returns non error if all the requested reward transactions arrived and were saved into the pool func (rtp *rewardTxPreprocessor) IsDataPrepared(requestedRewardTxs int, haveTime func() time.Duration) error { - if requestedRewardTxs > 0 { - log.Info(fmt.Sprintf("requested %d missing reward Txs\n", requestedRewardTxs)) - err := rtp.waitForRewardTxHashes(haveTime()) - rtp.rewardTxsForBlock.mutTxsForBlock.RLock() - missingRewardTxs := rtp.rewardTxsForBlock.missingTxs - rtp.rewardTxsForBlock.mutTxsForBlock.RUnlock() - log.Info(fmt.Sprintf("received %d missing reward Txs\n", requestedRewardTxs-missingRewardTxs)) - if err != nil { - return err - } - } - return nil + if requestedRewardTxs > 0 { + log.Info(fmt.Sprintf("requested %d missing reward Txs\n", requestedRewardTxs)) + err := rtp.waitForRewardTxHashes(haveTime()) + rtp.rewardTxsForBlock.mutTxsForBlock.RLock() + missingRewardTxs := rtp.rewardTxsForBlock.missingTxs + rtp.rewardTxsForBlock.mutTxsForBlock.RUnlock() + log.Info(fmt.Sprintf("received %d missing reward Txs\n", requestedRewardTxs-missingRewardTxs)) + if err != nil { + return err + } + } + return nil } // RemoveTxBlockFromPools removes reward transactions and miniblocks from associated pools func (rtp *rewardTxPreprocessor) RemoveTxBlockFromPools(body block.Body, 
miniBlockPool storage.Cacher) error { - if body == nil { - return process.ErrNilTxBlockBody - } + if body == nil { + return process.ErrNilTxBlockBody + } - return rtp.removeDataFromPools(body, miniBlockPool, rtp.rewardTxPool, block.RewardsBlock) + return rtp.removeDataFromPools(body, miniBlockPool, rtp.rewardTxPool, block.RewardsBlock) } // RestoreTxBlockIntoPools restores the reward transactions and miniblocks to associated pools func (rtp *rewardTxPreprocessor) RestoreTxBlockIntoPools( - body block.Body, - miniBlockPool storage.Cacher, + body block.Body, + miniBlockPool storage.Cacher, ) (int, map[int][]byte, error) { - if miniBlockPool == nil { - return 0, nil, process.ErrNilMiniBlockPool - } - - miniBlockHashes := make(map[int][]byte) - - rewardTxsRestored := 0 - for i := 0; i < len(body); i++ { - miniBlock := body[i] - if miniBlock.Type != block.RewardsBlock { - continue - } - - strCache := process.ShardCacherIdentifier(miniBlock.SenderShardID, miniBlock.ReceiverShardID) - rewardTxBuff, err := rtp.storage.GetAll(dataRetriever.RewardTransactionUnit, miniBlock.TxHashes) - if err != nil { - return rewardTxsRestored, miniBlockHashes, err - } - - for txHash, txBuff := range rewardTxBuff { - tx := rewardTx.RewardTx{} - err = rtp.marshalizer.Unmarshal(&tx, txBuff) - if err != nil { - return rewardTxsRestored, miniBlockHashes, err - } - - rtp.rewardTxPool.AddData([]byte(txHash), &tx, strCache) - } - - restoredHash, err := rtp.restoreMiniBlock(miniBlock, miniBlockPool) - if err != nil { - return rewardTxsRestored, miniBlockHashes, err - } - - miniBlockHashes[i] = restoredHash - rewardTxsRestored += len(miniBlock.TxHashes) - } - - return rewardTxsRestored, miniBlockHashes, nil + if miniBlockPool == nil { + return 0, nil, process.ErrNilMiniBlockPool + } + + miniBlockHashes := make(map[int][]byte) + + rewardTxsRestored := 0 + for i := 0; i < len(body); i++ { + miniBlock := body[i] + if miniBlock.Type != block.RewardsBlock { + continue + } + + strCache := process.ShardCacherIdentifier(miniBlock.SenderShardID, miniBlock.ReceiverShardID) + rewardTxBuff, err := rtp.storage.GetAll(dataRetriever.RewardTransactionUnit, miniBlock.TxHashes) + if err != nil { + return rewardTxsRestored, miniBlockHashes, err + } + + for txHash, txBuff := range rewardTxBuff { + tx := rewardTx.RewardTx{} + err = rtp.marshalizer.Unmarshal(&tx, txBuff) + if err != nil { + return rewardTxsRestored, miniBlockHashes, err + } + + rtp.rewardTxPool.AddData([]byte(txHash), &tx, strCache) + } + + restoredHash, err := rtp.restoreMiniBlock(miniBlock, miniBlockPool) + if err != nil { + return rewardTxsRestored, miniBlockHashes, err + } + + miniBlockHashes[i] = restoredHash + rewardTxsRestored += len(miniBlock.TxHashes) + } + + return rewardTxsRestored, miniBlockHashes, nil } // ProcessBlockTransactions processes all the reward transactions from the block.Body, updates the state func (rtp *rewardTxPreprocessor) ProcessBlockTransactions(body block.Body, round uint64, haveTime func() time.Duration) error { - // basic validation already done in interceptors - for i := 0; i < len(body); i++ { - miniBlock := body[i] - if miniBlock.Type != block.RewardsBlock { - continue - } - - for j := 0; j < len(miniBlock.TxHashes); j++ { - if haveTime() < 0 { - return process.ErrTimeIsOut - } - - txHash := miniBlock.TxHashes[j] - rtp.rewardTxsForBlock.mutTxsForBlock.RLock() - txInfo := rtp.rewardTxsForBlock.txHashAndInfo[string(txHash)] - rtp.rewardTxsForBlock.mutTxsForBlock.RUnlock() - if txInfo == nil || txInfo.tx == nil { - return 
process.ErrMissingTransaction - } - - rTx, ok := txInfo.tx.(*rewardTx.RewardTx) - if !ok { - return process.ErrWrongTypeAssertion - } - - err := rtp.processRewardTransaction( - txHash, - rTx, - round, - miniBlock.SenderShardID, - miniBlock.ReceiverShardID, - ) - if err != nil { - return err - } - } - } - return nil + // basic validation already done in interceptors + for i := 0; i < len(body); i++ { + miniBlock := body[i] + if miniBlock.Type != block.RewardsBlock { + continue + } + + for j := 0; j < len(miniBlock.TxHashes); j++ { + if haveTime() < 0 { + return process.ErrTimeIsOut + } + + txHash := miniBlock.TxHashes[j] + rtp.rewardTxsForBlock.mutTxsForBlock.RLock() + txInfo := rtp.rewardTxsForBlock.txHashAndInfo[string(txHash)] + rtp.rewardTxsForBlock.mutTxsForBlock.RUnlock() + if txInfo == nil || txInfo.tx == nil { + return process.ErrMissingTransaction + } + + rTx, ok := txInfo.tx.(*rewardTx.RewardTx) + if !ok { + return process.ErrWrongTypeAssertion + } + + err := rtp.processRewardTransaction( + txHash, + rTx, + round, + miniBlock.SenderShardID, + miniBlock.ReceiverShardID, + ) + if err != nil { + return err + } + } + } + return nil } func (rtp *rewardTxPreprocessor) AddComputedRewardMiniBlocks(computedRewardMiniblocks block.MiniBlockSlice) { - for _, rewardMb := range computedRewardMiniblocks { - txShardInfo := &txShardInfo{senderShardID: rewardMb.SenderShardID, receiverShardID: rewardMb.ReceiverShardID} - for _, txHash := range rewardMb.TxHashes { - tx, ok := rtp.rewardTxPool.SearchFirstData(txHash) - if !ok { - log.Error("reward transaction should be in pool but not found") - continue - } - - rTx, ok := tx.(*rewardTx.RewardTx) - if !ok { - log.Error("wrong type in reward transactions pool") - } - - rtp.rewardTxsForBlock.txHashAndInfo[string(txHash)] = &txInfo{ - tx: rTx, - txShardInfo: txShardInfo, - } - } - } + for _, rewardMb := range computedRewardMiniblocks { + txShardInfo := &txShardInfo{senderShardID: rewardMb.SenderShardID, receiverShardID: rewardMb.ReceiverShardID} + for _, txHash := range rewardMb.TxHashes { + tx, ok := rtp.rewardTxPool.SearchFirstData(txHash) + if !ok { + log.Error("reward transaction should be in pool but not found") + continue + } + + rTx, ok := tx.(*rewardTx.RewardTx) + if !ok { + log.Error("wrong type in reward transactions pool") + } + + rtp.rewardTxsForBlock.txHashAndInfo[string(txHash)] = &txInfo{ + tx: rTx, + txShardInfo: txShardInfo, + } + } + } } // SaveTxBlockToStorage saves the reward transactions from body into storage func (rtp *rewardTxPreprocessor) SaveTxBlockToStorage(body block.Body) error { - for i := 0; i < len(body); i++ { - miniBlock := (body)[i] - if miniBlock.Type != block.RewardsBlock { - continue - } - - err := rtp.saveTxsToStorage( - miniBlock.TxHashes, - &rtp.rewardTxsForBlock, - rtp.storage, - dataRetriever.RewardTransactionUnit, - ) - if err != nil { - return err - } - } - - return nil + for i := 0; i < len(body); i++ { + miniBlock := (body)[i] + if miniBlock.Type != block.RewardsBlock { + continue + } + + err := rtp.saveTxsToStorage( + miniBlock.TxHashes, + &rtp.rewardTxsForBlock, + rtp.storage, + dataRetriever.RewardTransactionUnit, + ) + if err != nil { + return err + } + } + + return nil } // receivedRewardTransaction is a callback function called when a new reward transaction // is added in the reward transactions pool func (rtp *rewardTxPreprocessor) receivedRewardTransaction(txHash []byte) { - receivedAllMissing := rtp.baseReceivedTransaction(txHash, &rtp.rewardTxsForBlock, rtp.rewardTxPool) + receivedAllMissing := 
rtp.baseReceivedTransaction(txHash, &rtp.rewardTxsForBlock, rtp.rewardTxPool) - if receivedAllMissing { - rtp.chReceivedAllRewardTxs <- true - } + if receivedAllMissing { + rtp.chReceivedAllRewardTxs <- true + } } // CreateBlockStarted cleans the local cache map for processed/created reward transactions at this round func (rtp *rewardTxPreprocessor) CreateBlockStarted() { - rtp.rewardTxsForBlock.mutTxsForBlock.Lock() - rtp.rewardTxsForBlock.txHashAndInfo = make(map[string]*txInfo) - rtp.rewardTxsForBlock.mutTxsForBlock.Unlock() + rtp.rewardTxsForBlock.mutTxsForBlock.Lock() + rtp.rewardTxsForBlock.txHashAndInfo = make(map[string]*txInfo) + rtp.rewardTxsForBlock.mutTxsForBlock.Unlock() } // RequestBlockTransactions request for reward transactions if missing from a block.Body func (rtp *rewardTxPreprocessor) RequestBlockTransactions(body block.Body) int { - requestedRewardTxs := 0 - missingRewardTxsForShards := rtp.computeMissingAndExistingRewardTxsForShards(body) - - rtp.rewardTxsForBlock.mutTxsForBlock.Lock() - for senderShardID, rewardTxHashesInfo := range missingRewardTxsForShards { - txShardInfo := &txShardInfo{senderShardID: senderShardID, receiverShardID: rewardTxHashesInfo.receiverShardID} - for _, txHash := range rewardTxHashesInfo.txHashes { - rtp.rewardTxsForBlock.txHashAndInfo[string(txHash)] = &txInfo{tx: nil, txShardInfo: txShardInfo} - } - } - rtp.rewardTxsForBlock.mutTxsForBlock.Unlock() - - for senderShardID, scrHashesInfo := range missingRewardTxsForShards { - requestedRewardTxs += len(scrHashesInfo.txHashes) - rtp.onRequestRewardTx(senderShardID, scrHashesInfo.txHashes) - } - - return requestedRewardTxs + requestedRewardTxs := 0 + missingRewardTxsForShards := rtp.computeMissingAndExistingRewardTxsForShards(body) + + rtp.rewardTxsForBlock.mutTxsForBlock.Lock() + for senderShardID, rewardTxHashesInfo := range missingRewardTxsForShards { + txShardInfo := &txShardInfo{senderShardID: senderShardID, receiverShardID: rewardTxHashesInfo.receiverShardID} + for _, txHash := range rewardTxHashesInfo.txHashes { + rtp.rewardTxsForBlock.txHashAndInfo[string(txHash)] = &txInfo{tx: nil, txShardInfo: txShardInfo} + } + } + rtp.rewardTxsForBlock.mutTxsForBlock.Unlock() + + for senderShardID, scrHashesInfo := range missingRewardTxsForShards { + requestedRewardTxs += len(scrHashesInfo.txHashes) + rtp.onRequestRewardTx(senderShardID, scrHashesInfo.txHashes) + } + + return requestedRewardTxs } // computeMissingAndExistingRewardTxsForShards calculates what reward transactions are available and what are missing // from block.Body func (rtp *rewardTxPreprocessor) computeMissingAndExistingRewardTxsForShards(body block.Body) map[uint32]*txsHashesInfo { - rewardTxs := block.Body{} - for _, mb := range body { - if mb.Type != block.RewardsBlock { - continue - } - if mb.SenderShardID == rtp.shardCoordinator.SelfId() { - continue - } - - rewardTxs = append(rewardTxs, mb) - } - - missingTxsForShard := rtp.computeExistingAndMissing( - rewardTxs, - &rtp.rewardTxsForBlock, - rtp.chReceivedAllRewardTxs, - block.RewardsBlock, - rtp.rewardTxPool, - ) - - return missingTxsForShard + rewardTxs := block.Body{} + for _, mb := range body { + if mb.Type != block.RewardsBlock { + continue + } + if mb.SenderShardID == rtp.shardCoordinator.SelfId() { + continue + } + + rewardTxs = append(rewardTxs, mb) + } + + missingTxsForShard := rtp.computeExistingAndMissing( + rewardTxs, + &rtp.rewardTxsForBlock, + rtp.chReceivedAllRewardTxs, + block.RewardsBlock, + rtp.rewardTxPool, + ) + + return missingTxsForShard } // 
processRewardTransaction processes a reward transaction, if the transactions has an error it removes it from pool func (rtp *rewardTxPreprocessor) processRewardTransaction( - rewardTxHash []byte, - rewardTx *rewardTx.RewardTx, - round uint64, - sndShardId uint32, - dstShardId uint32, + rewardTxHash []byte, + rewardTx *rewardTx.RewardTx, + round uint64, + sndShardId uint32, + dstShardId uint32, ) error { - err := rtp.rewardsProcessor.ProcessRewardTransaction(rewardTx) - if err != nil { - return err - } + err := rtp.rewardsProcessor.ProcessRewardTransaction(rewardTx) + if err != nil { + return err + } - txShardInfo := &txShardInfo{senderShardID: sndShardId, receiverShardID: dstShardId} - rtp.rewardTxsForBlock.mutTxsForBlock.Lock() - rtp.rewardTxsForBlock.txHashAndInfo[string(rewardTxHash)] = &txInfo{tx: rewardTx, txShardInfo: txShardInfo} - rtp.rewardTxsForBlock.mutTxsForBlock.Unlock() + txShardInfo := &txShardInfo{senderShardID: sndShardId, receiverShardID: dstShardId} + rtp.rewardTxsForBlock.mutTxsForBlock.Lock() + rtp.rewardTxsForBlock.txHashAndInfo[string(rewardTxHash)] = &txInfo{tx: rewardTx, txShardInfo: txShardInfo} + rtp.rewardTxsForBlock.mutTxsForBlock.Unlock() - return nil + return nil } // RequestTransactionsForMiniBlock requests missing reward transactions for a certain miniblock func (rtp *rewardTxPreprocessor) RequestTransactionsForMiniBlock(mb block.MiniBlock) int { - missingRewardTxsForMiniBlock := rtp.computeMissingRewardTxsForMiniBlock(mb) - rtp.onRequestRewardTx(mb.SenderShardID, missingRewardTxsForMiniBlock) + missingRewardTxsForMiniBlock := rtp.computeMissingRewardTxsForMiniBlock(mb) + rtp.onRequestRewardTx(mb.SenderShardID, missingRewardTxsForMiniBlock) - return len(missingRewardTxsForMiniBlock) + return len(missingRewardTxsForMiniBlock) } // computeMissingRewardTxsForMiniBlock computes missing reward transactions for a certain miniblock func (rtp *rewardTxPreprocessor) computeMissingRewardTxsForMiniBlock(mb block.MiniBlock) [][]byte { - missingRewardTxs := make([][]byte, 0) - if mb.Type != block.RewardsBlock { - return missingRewardTxs - } - - for _, txHash := range mb.TxHashes { - tx, _ := process.GetTransactionHandlerFromPool( - mb.SenderShardID, - mb.ReceiverShardID, - txHash, - rtp.rewardTxPool, - ) - - if tx == nil { - missingRewardTxs = append(missingRewardTxs, txHash) - } - } - - return missingRewardTxs + missingRewardTxs := make([][]byte, 0) + if mb.Type != block.RewardsBlock { + return missingRewardTxs + } + + for _, txHash := range mb.TxHashes { + tx, _ := process.GetTransactionHandlerFromPool( + mb.SenderShardID, + mb.ReceiverShardID, + txHash, + rtp.rewardTxPool, + ) + + if tx == nil { + missingRewardTxs = append(missingRewardTxs, txHash) + } + } + + return missingRewardTxs } // getAllRewardTxsFromMiniBlock gets all the reward transactions from a miniblock into a new structure func (rtp *rewardTxPreprocessor) getAllRewardTxsFromMiniBlock( - mb *block.MiniBlock, - haveTime func() bool, + mb *block.MiniBlock, + haveTime func() bool, ) ([]*rewardTx.RewardTx, [][]byte, error) { - strCache := process.ShardCacherIdentifier(mb.SenderShardID, mb.ReceiverShardID) - txCache := rtp.rewardTxPool.ShardDataStore(strCache) - if txCache == nil { - return nil, nil, process.ErrNilRewardTxDataPool - } - - // verify if all reward transactions exists - rewardTxs := make([]*rewardTx.RewardTx, 0) - txHashes := make([][]byte, 0) - for _, txHash := range mb.TxHashes { - if !haveTime() { - return nil, nil, process.ErrTimeIsOut - } - - tmp, ok := txCache.Peek(txHash) - if !ok { - 
return nil, nil, process.ErrNilRewardTransaction - } - - tx, ok := tmp.(*rewardTx.RewardTx) - if !ok { - return nil, nil, process.ErrWrongTypeAssertion - } - - txHashes = append(txHashes, txHash) - rewardTxs = append(rewardTxs, tx) - } - - return rewardTxs, txHashes, nil + strCache := process.ShardCacherIdentifier(mb.SenderShardID, mb.ReceiverShardID) + txCache := rtp.rewardTxPool.ShardDataStore(strCache) + if txCache == nil { + return nil, nil, process.ErrNilRewardTxDataPool + } + + // verify if all reward transactions exists + rewardTxs := make([]*rewardTx.RewardTx, 0) + txHashes := make([][]byte, 0) + for _, txHash := range mb.TxHashes { + if !haveTime() { + return nil, nil, process.ErrTimeIsOut + } + + tmp, ok := txCache.Peek(txHash) + if !ok { + return nil, nil, process.ErrNilRewardTransaction + } + + tx, ok := tmp.(*rewardTx.RewardTx) + if !ok { + return nil, nil, process.ErrWrongTypeAssertion + } + + txHashes = append(txHashes, txHash) + rewardTxs = append(rewardTxs, tx) + } + + return rewardTxs, txHashes, nil } // CreateAndProcessMiniBlock creates the miniblock from storage and processes the reward transactions added into the miniblock func (rtp *rewardTxPreprocessor) CreateAndProcessMiniBlock(sndShardId, dstShardId uint32, spaceRemained int, haveTime func() bool, round uint64) (*block.MiniBlock, error) { - return nil, nil + return nil, nil } // ProcessMiniBlock processes all the reward transactions from a miniblock and saves the processed reward transactions // in local cache func (rtp *rewardTxPreprocessor) ProcessMiniBlock(miniBlock *block.MiniBlock, haveTime func() bool, round uint64) error { - if miniBlock.Type != block.RewardsBlock { - return process.ErrWrongTypeInMiniBlock - } - - miniBlockRewardTxs, miniBlockTxHashes, err := rtp.getAllRewardTxsFromMiniBlock(miniBlock, haveTime) - if err != nil { - return err - } - - for index := range miniBlockRewardTxs { - if !haveTime() { - return process.ErrTimeIsOut - } - - err = rtp.rewardsProcessor.ProcessRewardTransaction(miniBlockRewardTxs[index]) - if err != nil { - return err - } - } - - txShardInfo := &txShardInfo{senderShardID: miniBlock.SenderShardID, receiverShardID: miniBlock.ReceiverShardID} - - rtp.rewardTxsForBlock.mutTxsForBlock.Lock() - for index, txHash := range miniBlockTxHashes { - rtp.rewardTxsForBlock.txHashAndInfo[string(txHash)] = &txInfo{tx: miniBlockRewardTxs[index], txShardInfo: txShardInfo} - } - rtp.rewardTxsForBlock.mutTxsForBlock.Unlock() - - return nil + if miniBlock.Type != block.RewardsBlock { + return process.ErrWrongTypeInMiniBlock + } + + miniBlockRewardTxs, miniBlockTxHashes, err := rtp.getAllRewardTxsFromMiniBlock(miniBlock, haveTime) + if err != nil { + return err + } + + for index := range miniBlockRewardTxs { + if !haveTime() { + return process.ErrTimeIsOut + } + + err = rtp.rewardsProcessor.ProcessRewardTransaction(miniBlockRewardTxs[index]) + if err != nil { + return err + } + } + + txShardInfo := &txShardInfo{senderShardID: miniBlock.SenderShardID, receiverShardID: miniBlock.ReceiverShardID} + + rtp.rewardTxsForBlock.mutTxsForBlock.Lock() + for index, txHash := range miniBlockTxHashes { + rtp.rewardTxsForBlock.txHashAndInfo[string(txHash)] = &txInfo{tx: miniBlockRewardTxs[index], txShardInfo: txShardInfo} + } + rtp.rewardTxsForBlock.mutTxsForBlock.Unlock() + + return nil } // CreateMarshalizedData marshalizes reward transaction hashes and and saves them into a new structure func (rtp *rewardTxPreprocessor) CreateMarshalizedData(txHashes [][]byte) ([][]byte, error) { - marshaledRewardTxs, err 
:= rtp.createMarshalizedData(txHashes, &rtp.rewardTxsForBlock) - if err != nil { - return nil, err - } + marshaledRewardTxs, err := rtp.createMarshalizedData(txHashes, &rtp.rewardTxsForBlock) + if err != nil { + return nil, err + } - return marshaledRewardTxs, nil + return marshaledRewardTxs, nil } // GetAllCurrentUsedTxs returns all the reward transactions used at current creation / processing func (rtp *rewardTxPreprocessor) GetAllCurrentUsedTxs() map[string]data.TransactionHandler { - rewardTxPool := make(map[string]data.TransactionHandler) + rewardTxPool := make(map[string]data.TransactionHandler) - rtp.rewardTxsForBlock.mutTxsForBlock.RLock() - for txHash, txInfo := range rtp.rewardTxsForBlock.txHashAndInfo { - rewardTxPool[txHash] = txInfo.tx - } - rtp.rewardTxsForBlock.mutTxsForBlock.RUnlock() + rtp.rewardTxsForBlock.mutTxsForBlock.RLock() + for txHash, txInfo := range rtp.rewardTxsForBlock.txHashAndInfo { + rewardTxPool[txHash] = txInfo.tx + } + rtp.rewardTxsForBlock.mutTxsForBlock.RUnlock() - return rewardTxPool + return rewardTxPool } // IsInterfaceNil returns true if there is no value under the interface func (rtp *rewardTxPreprocessor) IsInterfaceNil() bool { - if rtp == nil { - return true - } - return false + if rtp == nil { + return true + } + return false } diff --git a/process/block/preprocess/rewardsHandler.go b/process/block/preprocess/rewardsHandler.go index d1ea0b5b8d7..68f90970db9 100644 --- a/process/block/preprocess/rewardsHandler.go +++ b/process/block/preprocess/rewardsHandler.go @@ -1,19 +1,19 @@ package preprocess import ( - "math/big" - "sync" - - "github.com/ElrondNetwork/elrond-go/core" - "github.com/ElrondNetwork/elrond-go/data" - "github.com/ElrondNetwork/elrond-go/data/block" - "github.com/ElrondNetwork/elrond-go/data/rewardTx" - "github.com/ElrondNetwork/elrond-go/data/state" - "github.com/ElrondNetwork/elrond-go/dataRetriever" - "github.com/ElrondNetwork/elrond-go/hashing" - "github.com/ElrondNetwork/elrond-go/marshal" - "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/sharding" + "math/big" + "sync" + + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/sharding" ) const communityPercentage = 0.1 // 1 = 100%, 0 = 0% @@ -24,418 +24,418 @@ const burnPercentage = 0.4 // 1 = 100%, 0 = 0% var rewardValue = big.NewInt(1000) type rewardsHandler struct { - address process.SpecialAddressHandler - hasher hashing.Hasher - marshalizer marshal.Marshalizer - shardCoordinator sharding.Coordinator - adrConv state.AddressConverter - store dataRetriever.StorageService - rewardTxPool dataRetriever.ShardedDataCacherNotifier - - mutGenRewardTxs sync.RWMutex - protocolRewards []data.TransactionHandler - feeRewards []data.TransactionHandler - - mut sync.Mutex - accumulatedFees *big.Int - rewardTxsForBlock map[string]*rewardTx.RewardTx + address process.SpecialAddressHandler + hasher hashing.Hasher + marshalizer marshal.Marshalizer + shardCoordinator sharding.Coordinator + adrConv state.AddressConverter + store dataRetriever.StorageService + rewardTxPool dataRetriever.ShardedDataCacherNotifier + + mutGenRewardTxs sync.RWMutex + 
protocolRewards []data.TransactionHandler + feeRewards []data.TransactionHandler + + mut sync.Mutex + accumulatedFees *big.Int + rewardTxsForBlock map[string]*rewardTx.RewardTx } // NewRewardTxHandler constructor for the reward transaction handler func NewRewardTxHandler( - address process.SpecialAddressHandler, - hasher hashing.Hasher, - marshalizer marshal.Marshalizer, - shardCoordinator sharding.Coordinator, - adrConv state.AddressConverter, - store dataRetriever.StorageService, - rewardTxPool dataRetriever.ShardedDataCacherNotifier, + address process.SpecialAddressHandler, + hasher hashing.Hasher, + marshalizer marshal.Marshalizer, + shardCoordinator sharding.Coordinator, + adrConv state.AddressConverter, + store dataRetriever.StorageService, + rewardTxPool dataRetriever.ShardedDataCacherNotifier, ) (*rewardsHandler, error) { - if address == nil || address.IsInterfaceNil() { - return nil, process.ErrNilSpecialAddressHandler - } - if hasher == nil || hasher.IsInterfaceNil() { - return nil, process.ErrNilHasher - } - if marshalizer == nil || marshalizer.IsInterfaceNil() { - return nil, process.ErrNilMarshalizer - } - if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { - return nil, process.ErrNilShardCoordinator - } - if adrConv == nil || adrConv.IsInterfaceNil() { - return nil, process.ErrNilAddressConverter - } - if store == nil || store.IsInterfaceNil() { - return nil, process.ErrNilStorage - } - if rewardTxPool == nil || rewardTxPool.IsInterfaceNil() { - return nil, process.ErrNilRewardTxDataPool - } - - rtxh := &rewardsHandler{ - address: address, - shardCoordinator: shardCoordinator, - adrConv: adrConv, - hasher: hasher, - marshalizer: marshalizer, - store: store, - rewardTxPool: rewardTxPool, - } - - rtxh.accumulatedFees = big.NewInt(0) - rtxh.rewardTxsForBlock = make(map[string]*rewardTx.RewardTx) - - return rtxh, nil + if address == nil || address.IsInterfaceNil() { + return nil, process.ErrNilSpecialAddressHandler + } + if hasher == nil || hasher.IsInterfaceNil() { + return nil, process.ErrNilHasher + } + if marshalizer == nil || marshalizer.IsInterfaceNil() { + return nil, process.ErrNilMarshalizer + } + if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { + return nil, process.ErrNilShardCoordinator + } + if adrConv == nil || adrConv.IsInterfaceNil() { + return nil, process.ErrNilAddressConverter + } + if store == nil || store.IsInterfaceNil() { + return nil, process.ErrNilStorage + } + if rewardTxPool == nil || rewardTxPool.IsInterfaceNil() { + return nil, process.ErrNilRewardTxDataPool + } + + rtxh := &rewardsHandler{ + address: address, + shardCoordinator: shardCoordinator, + adrConv: adrConv, + hasher: hasher, + marshalizer: marshalizer, + store: store, + rewardTxPool: rewardTxPool, + } + + rtxh.accumulatedFees = big.NewInt(0) + rtxh.rewardTxsForBlock = make(map[string]*rewardTx.RewardTx) + + return rtxh, nil } // SaveCurrentIntermediateTxToStorage saves current cached data into storage - already saved for txs func (rtxh *rewardsHandler) SaveCurrentIntermediateTxToStorage() error { - rtxh.mut.Lock() - defer rtxh.mut.Unlock() - - for _, rTx := range rtxh.rewardTxsForBlock { - buff, err := rtxh.marshalizer.Marshal(rTx) - if err != nil { - return err - } - - errNotCritical := rtxh.store.Put(dataRetriever.RewardTransactionUnit, rtxh.hasher.Compute(string(buff)), buff) - if errNotCritical != nil { - log.Error(errNotCritical.Error()) - } - } - - return nil + rtxh.mut.Lock() + defer rtxh.mut.Unlock() + + for _, rTx := range rtxh.rewardTxsForBlock { + 
buff, err := rtxh.marshalizer.Marshal(rTx) + if err != nil { + return err + } + + errNotCritical := rtxh.store.Put(dataRetriever.RewardTransactionUnit, rtxh.hasher.Compute(string(buff)), buff) + if errNotCritical != nil { + log.Error(errNotCritical.Error()) + } + } + + return nil } // AddIntermediateTransactions adds intermediate transactions to local cache func (rtxh *rewardsHandler) AddIntermediateTransactions(txs []data.TransactionHandler) error { - rtxh.mut.Lock() - defer rtxh.mut.Unlock() + rtxh.mut.Lock() + defer rtxh.mut.Unlock() - for i := 0; i < len(txs); i++ { - addedRewardTx, ok := txs[i].(*rewardTx.RewardTx) - if !ok { - return process.ErrWrongTypeAssertion - } + for i := 0; i < len(txs); i++ { + addedRewardTx, ok := txs[i].(*rewardTx.RewardTx) + if !ok { + return process.ErrWrongTypeAssertion + } - if addedRewardTx.ShardId != rtxh.shardCoordinator.SelfId() { - continue - } + if addedRewardTx.ShardId != rtxh.shardCoordinator.SelfId() { + continue + } - rewardTxHash, err := core.CalculateHash(rtxh.marshalizer, rtxh.hasher, txs[i]) - if err != nil { - return err - } + rewardTxHash, err := core.CalculateHash(rtxh.marshalizer, rtxh.hasher, txs[i]) + if err != nil { + return err + } - rtxh.rewardTxsForBlock[string(rewardTxHash)] = addedRewardTx - } + rtxh.rewardTxsForBlock[string(rewardTxHash)] = addedRewardTx + } - return nil + return nil } func (rtxh *rewardsHandler) getShardIdsFromAddress(addr []byte) (uint32, error) { - address, err := rtxh.adrConv.CreateAddressFromPublicKeyBytes(addr) - if err != nil { - return rtxh.shardCoordinator.NumberOfShards(), err - } - shardId := rtxh.shardCoordinator.ComputeId(address) + address, err := rtxh.adrConv.CreateAddressFromPublicKeyBytes(addr) + if err != nil { + return rtxh.shardCoordinator.NumberOfShards(), err + } + shardId := rtxh.shardCoordinator.ComputeId(address) - return shardId, nil + return shardId, nil } // CreateAllInterMiniBlocks creates miniblocks from process transactions func (rtxh *rewardsHandler) CreateAllInterMiniBlocks() map[uint32]*block.MiniBlock { - rtxh.mutGenRewardTxs.Lock() - calculatedRewardTxs := make([]data.TransactionHandler, 0) - rtxh.feeRewards = rtxh.createRewardFromFees() - rtxh.addTransactionsToPool(rtxh.feeRewards) - calculatedRewardTxs = append(calculatedRewardTxs, rtxh.protocolRewards...) - calculatedRewardTxs = append(calculatedRewardTxs, rtxh.feeRewards...) - rtxh.mutGenRewardTxs.Unlock() + rtxh.mutGenRewardTxs.Lock() + calculatedRewardTxs := make([]data.TransactionHandler, 0) + rtxh.feeRewards = rtxh.createRewardFromFees() + rtxh.addTransactionsToPool(rtxh.feeRewards) + calculatedRewardTxs = append(calculatedRewardTxs, rtxh.protocolRewards...) + calculatedRewardTxs = append(calculatedRewardTxs, rtxh.feeRewards...) 
+ rtxh.mutGenRewardTxs.Unlock() - miniBlocks := rtxh.miniblocksFromRewardTxs(calculatedRewardTxs) + miniBlocks := rtxh.miniblocksFromRewardTxs(calculatedRewardTxs) - return miniBlocks + return miniBlocks } func (rtxh *rewardsHandler) addTransactionsToPool(rewardTxs []data.TransactionHandler) { - for _, rTx := range rewardTxs { - dstShId, err := rtxh.address.ShardIdForAddress(rTx.GetRecvAddress()) - if err != nil { - log.Debug(err.Error()) - } - - txHash, err := core.CalculateHash(rtxh.marshalizer, rtxh.hasher, rTx) - if err != nil { - log.Debug(err.Error()) - } - - // add the reward transaction to the the pool so that the processor can find it - cacheId := process.ShardCacherIdentifier(rtxh.shardCoordinator.SelfId(), dstShId) - rtxh.rewardTxPool.AddData(txHash, rTx, cacheId) - } + for _, rTx := range rewardTxs { + dstShId, err := rtxh.address.ShardIdForAddress(rTx.GetRecvAddress()) + if err != nil { + log.Debug(err.Error()) + } + + txHash, err := core.CalculateHash(rtxh.marshalizer, rtxh.hasher, rTx) + if err != nil { + log.Debug(err.Error()) + } + + // add the reward transaction to the the pool so that the processor can find it + cacheId := process.ShardCacherIdentifier(rtxh.shardCoordinator.SelfId(), dstShId) + rtxh.rewardTxPool.AddData(txHash, rTx, cacheId) + } } func (rtxh *rewardsHandler) miniblocksFromRewardTxs( - rewardTxs []data.TransactionHandler, + rewardTxs []data.TransactionHandler, ) map[uint32]*block.MiniBlock { - miniBlocks := make(map[uint32]*block.MiniBlock, 0) - - for _, rTx := range rewardTxs { - dstShId, err := rtxh.address.ShardIdForAddress(rTx.GetRecvAddress()) - if err != nil { - log.Debug(err.Error()) - continue - } - - txHash, err := core.CalculateHash(rtxh.marshalizer, rtxh.hasher, rTx) - if err != nil { - log.Debug(err.Error()) - continue - } - - var ok bool - var mb *block.MiniBlock - if mb, ok = miniBlocks[dstShId]; !ok { - mb = &block.MiniBlock{ - ReceiverShardID: dstShId, - SenderShardID: rtxh.shardCoordinator.SelfId(), - Type: block.RewardsBlock, - } - } - - mb.TxHashes = append(mb.TxHashes, txHash) - miniBlocks[dstShId] = mb - } - - return miniBlocks + miniBlocks := make(map[uint32]*block.MiniBlock, 0) + + for _, rTx := range rewardTxs { + dstShId, err := rtxh.address.ShardIdForAddress(rTx.GetRecvAddress()) + if err != nil { + log.Debug(err.Error()) + continue + } + + txHash, err := core.CalculateHash(rtxh.marshalizer, rtxh.hasher, rTx) + if err != nil { + log.Debug(err.Error()) + continue + } + + var ok bool + var mb *block.MiniBlock + if mb, ok = miniBlocks[dstShId]; !ok { + mb = &block.MiniBlock{ + ReceiverShardID: dstShId, + SenderShardID: rtxh.shardCoordinator.SelfId(), + Type: block.RewardsBlock, + } + } + + mb.TxHashes = append(mb.TxHashes, txHash) + miniBlocks[dstShId] = mb + } + + return miniBlocks } // VerifyInterMiniBlocks verifies if transaction fees were correctly handled for the block func (rtxh *rewardsHandler) VerifyInterMiniBlocks(body block.Body) error { - err := rtxh.verifyCreatedRewardsTxs() - return err + err := rtxh.verifyCreatedRewardsTxs() + return err } // CreateBlockStarted does the cleanup before creating a new block func (rtxh *rewardsHandler) CreateBlockStarted() { - rtxh.cleanCachedData() - rewardTxs := rtxh.createProtocolRewards() - rtxh.addTransactionsToPool(rewardTxs) + rtxh.cleanCachedData() + rewardTxs := rtxh.createProtocolRewards() + rtxh.addTransactionsToPool(rewardTxs) } // CreateMarshalizedData creates the marshalized data for broadcasting purposes func (rtxh *rewardsHandler) CreateMarshalizedData(txHashes [][]byte) 
([][]byte, error) { - rtxh.mut.Lock() - defer rtxh.mut.Unlock() - - marshaledTxs := make([][]byte, 0) - for _, txHash := range txHashes { - rTx, ok := rtxh.rewardTxsForBlock[string(txHash)] - if !ok { - return nil, process.ErrRewardTxNotFound - } - - marshaledTx, err := rtxh.marshalizer.Marshal(rTx) - if err != nil { - return nil, process.ErrMarshalWithoutSuccess - } - marshaledTxs = append(marshaledTxs, marshaledTx) - } - - return marshaledTxs, nil + rtxh.mut.Lock() + defer rtxh.mut.Unlock() + + marshaledTxs := make([][]byte, 0) + for _, txHash := range txHashes { + rTx, ok := rtxh.rewardTxsForBlock[string(txHash)] + if !ok { + return nil, process.ErrRewardTxNotFound + } + + marshaledTx, err := rtxh.marshalizer.Marshal(rTx) + if err != nil { + return nil, process.ErrMarshalWithoutSuccess + } + marshaledTxs = append(marshaledTxs, marshaledTx) + } + + return marshaledTxs, nil } // ProcessTransactionFee adds the tx cost to the accumulated amount func (rtxh *rewardsHandler) ProcessTransactionFee(cost *big.Int) { - if cost == nil { - log.Debug(process.ErrNilValue.Error()) - return - } - - rtxh.mut.Lock() - rtxh.accumulatedFees = rtxh.accumulatedFees.Add(rtxh.accumulatedFees, cost) - rtxh.mut.Unlock() + if cost == nil { + log.Debug(process.ErrNilValue.Error()) + return + } + + rtxh.mut.Lock() + rtxh.accumulatedFees = rtxh.accumulatedFees.Add(rtxh.accumulatedFees, cost) + rtxh.mut.Unlock() } // cleanCachedData deletes the cached data func (rtxh *rewardsHandler) cleanCachedData() { - rtxh.mut.Lock() - rtxh.accumulatedFees = big.NewInt(0) - rtxh.rewardTxsForBlock = make(map[string]*rewardTx.RewardTx) - rtxh.mut.Unlock() - - rtxh.mutGenRewardTxs.Lock() - rtxh.feeRewards = make([]data.TransactionHandler, 0) - rtxh.protocolRewards = make([]data.TransactionHandler, 0) - rtxh.mutGenRewardTxs.Unlock() + rtxh.mut.Lock() + rtxh.accumulatedFees = big.NewInt(0) + rtxh.rewardTxsForBlock = make(map[string]*rewardTx.RewardTx) + rtxh.mut.Unlock() + + rtxh.mutGenRewardTxs.Lock() + rtxh.feeRewards = make([]data.TransactionHandler, 0) + rtxh.protocolRewards = make([]data.TransactionHandler, 0) + rtxh.mutGenRewardTxs.Unlock() } func getPercentageOfValue(value *big.Int, percentage float64) *big.Int { - x := new(big.Float).SetInt(value) - y := big.NewFloat(percentage) + x := new(big.Float).SetInt(value) + y := big.NewFloat(percentage) - z := new(big.Float).Mul(x, y) + z := new(big.Float).Mul(x, y) - op := big.NewInt(0) - result, _ := z.Int(op) + op := big.NewInt(0) + result, _ := z.Int(op) - return result + return result } func (rtxh *rewardsHandler) createLeaderTx() *rewardTx.RewardTx { - currTx := &rewardTx.RewardTx{} + currTx := &rewardTx.RewardTx{} - currTx.Value = getPercentageOfValue(rtxh.accumulatedFees, leaderPercentage) - currTx.RcvAddr = rtxh.address.LeaderAddress() - currTx.ShardId = rtxh.shardCoordinator.SelfId() - currTx.Epoch = rtxh.address.Epoch() - currTx.Round = rtxh.address.Round() + currTx.Value = getPercentageOfValue(rtxh.accumulatedFees, leaderPercentage) + currTx.RcvAddr = rtxh.address.LeaderAddress() + currTx.ShardId = rtxh.shardCoordinator.SelfId() + currTx.Epoch = rtxh.address.Epoch() + currTx.Round = rtxh.address.Round() - return currTx + return currTx } func (rtxh *rewardsHandler) createBurnTx() *rewardTx.RewardTx { - currTx := &rewardTx.RewardTx{} + currTx := &rewardTx.RewardTx{} - currTx.Value = getPercentageOfValue(rtxh.accumulatedFees, burnPercentage) - currTx.RcvAddr = rtxh.address.BurnAddress() - currTx.ShardId = rtxh.shardCoordinator.SelfId() - currTx.Epoch = rtxh.address.Epoch() - 
currTx.Round = rtxh.address.Round() + currTx.Value = getPercentageOfValue(rtxh.accumulatedFees, burnPercentage) + currTx.RcvAddr = rtxh.address.BurnAddress() + currTx.ShardId = rtxh.shardCoordinator.SelfId() + currTx.Epoch = rtxh.address.Epoch() + currTx.Round = rtxh.address.Round() - return currTx + return currTx } func (rtxh *rewardsHandler) createCommunityTx() *rewardTx.RewardTx { - currTx := &rewardTx.RewardTx{} + currTx := &rewardTx.RewardTx{} - currTx.Value = getPercentageOfValue(rtxh.accumulatedFees, communityPercentage) - currTx.RcvAddr = rtxh.address.ElrondCommunityAddress() - currTx.ShardId = rtxh.shardCoordinator.SelfId() - currTx.Epoch = rtxh.address.Epoch() - currTx.Round = rtxh.address.Round() + currTx.Value = getPercentageOfValue(rtxh.accumulatedFees, communityPercentage) + currTx.RcvAddr = rtxh.address.ElrondCommunityAddress() + currTx.ShardId = rtxh.shardCoordinator.SelfId() + currTx.Epoch = rtxh.address.Epoch() + currTx.Round = rtxh.address.Round() - return currTx + return currTx } // createRewardFromFees creates the reward transactions from accumulated fees // According to economic paper, out of the block fees 50% are burned, 40% go to the // leader and 10% go to Elrond community fund. func (rtxh *rewardsHandler) createRewardFromFees() []data.TransactionHandler { - rtxh.mut.Lock() - defer rtxh.mut.Unlock() + rtxh.mut.Lock() + defer rtxh.mut.Unlock() - if rtxh.accumulatedFees.Cmp(big.NewInt(1)) < 0 { - rtxh.accumulatedFees = big.NewInt(0) - return nil - } + if rtxh.accumulatedFees.Cmp(big.NewInt(1)) < 0 { + rtxh.accumulatedFees = big.NewInt(0) + return nil + } - leaderTx := rtxh.createLeaderTx() - communityTx := rtxh.createCommunityTx() - burnTx := rtxh.createBurnTx() + leaderTx := rtxh.createLeaderTx() + communityTx := rtxh.createCommunityTx() + burnTx := rtxh.createBurnTx() - currFeeTxs := make([]data.TransactionHandler, 0) - currFeeTxs = append(currFeeTxs, leaderTx, communityTx, burnTx) + currFeeTxs := make([]data.TransactionHandler, 0) + currFeeTxs = append(currFeeTxs, leaderTx, communityTx, burnTx) - return currFeeTxs + return currFeeTxs } // createProtocolRewards creates the protocol reward transactions func (rtxh *rewardsHandler) createProtocolRewards() []data.TransactionHandler { - consensusRewardAddresses := rtxh.address.ConsensusRewardAddresses() + consensusRewardAddresses := rtxh.address.ConsensusRewardAddresses() - consensusRewardTxs := make([]data.TransactionHandler, 0) - for _, address := range consensusRewardAddresses { - rTx := &rewardTx.RewardTx{} - rTx.Value = rewardValue - rTx.RcvAddr = []byte(address) - rTx.ShardId = rtxh.shardCoordinator.SelfId() - rTx.Epoch = rtxh.address.Epoch() - rTx.Round = rtxh.address.Round() + consensusRewardTxs := make([]data.TransactionHandler, 0) + for _, address := range consensusRewardAddresses { + rTx := &rewardTx.RewardTx{} + rTx.Value = rewardValue + rTx.RcvAddr = []byte(address) + rTx.ShardId = rtxh.shardCoordinator.SelfId() + rTx.Epoch = rtxh.address.Epoch() + rTx.Round = rtxh.address.Round() - consensusRewardTxs = append(consensusRewardTxs, rTx) - } + consensusRewardTxs = append(consensusRewardTxs, rTx) + } - rtxh.mutGenRewardTxs.Lock() - rtxh.protocolRewards = consensusRewardTxs - rtxh.mutGenRewardTxs.Unlock() + rtxh.mutGenRewardTxs.Lock() + rtxh.protocolRewards = consensusRewardTxs + rtxh.mutGenRewardTxs.Unlock() - return consensusRewardTxs + return consensusRewardTxs } // VerifyCreatedRewardsTxs verifies if the calculated rewards transactions and the block reward transactions are the same func (rtxh 
*rewardsHandler) verifyCreatedRewardsTxs() error { - calculatedRewardTxs := make([]data.TransactionHandler, 0) - rtxh.mutGenRewardTxs.RLock() - calculatedRewardTxs = append(calculatedRewardTxs, rtxh.protocolRewards...) - calculatedRewardTxs = append(calculatedRewardTxs, rtxh.feeRewards...) - rtxh.mutGenRewardTxs.RUnlock() - - rtxh.mut.Lock() - defer rtxh.mut.Unlock() - - totalFeesFromBlock := big.NewInt(0) - for _, rTx := range rtxh.rewardTxsForBlock { - totalFeesFromBlock = totalFeesFromBlock.Add(totalFeesFromBlock, rTx.GetValue()) - } - - totalCalculatedFees := big.NewInt(0) - for _, value := range calculatedRewardTxs { - totalCalculatedFees = totalCalculatedFees.Add(totalCalculatedFees, value.GetValue()) - - rewardTxHash, err := core.CalculateHash(rtxh.marshalizer, rtxh.hasher, value) - if err != nil { - return err - } - - txFromBlock, ok := rtxh.rewardTxsForBlock[string(rewardTxHash)] - if !ok { - return process.ErrRewardTxNotFound - } - if txFromBlock.GetValue().Cmp(value.GetValue()) != 0 { - return process.ErrRewardTxsDoNotMatch - } - } - - if totalCalculatedFees.Cmp(totalFeesFromBlock) != 0 { - return process.ErrTotalTxsFeesDoNotMatch - } - - return nil + calculatedRewardTxs := make([]data.TransactionHandler, 0) + rtxh.mutGenRewardTxs.RLock() + calculatedRewardTxs = append(calculatedRewardTxs, rtxh.protocolRewards...) + calculatedRewardTxs = append(calculatedRewardTxs, rtxh.feeRewards...) + rtxh.mutGenRewardTxs.RUnlock() + + rtxh.mut.Lock() + defer rtxh.mut.Unlock() + + totalFeesFromBlock := big.NewInt(0) + for _, rTx := range rtxh.rewardTxsForBlock { + totalFeesFromBlock = totalFeesFromBlock.Add(totalFeesFromBlock, rTx.GetValue()) + } + + totalCalculatedFees := big.NewInt(0) + for _, value := range calculatedRewardTxs { + totalCalculatedFees = totalCalculatedFees.Add(totalCalculatedFees, value.GetValue()) + + rewardTxHash, err := core.CalculateHash(rtxh.marshalizer, rtxh.hasher, value) + if err != nil { + return err + } + + txFromBlock, ok := rtxh.rewardTxsForBlock[string(rewardTxHash)] + if !ok { + return process.ErrRewardTxNotFound + } + if txFromBlock.GetValue().Cmp(value.GetValue()) != 0 { + return process.ErrRewardTxsDoNotMatch + } + } + + if totalCalculatedFees.Cmp(totalFeesFromBlock) != 0 { + return process.ErrTotalTxsFeesDoNotMatch + } + + return nil } // IsInterfaceNil returns true if there is no value under the interface func (rtxh *rewardsHandler) IsInterfaceNil() bool { - if rtxh == nil { - return true - } - return false + if rtxh == nil { + return true + } + return false } // GetAllCurrentFinishedTxs returns the cached finalized transactions for current round func (rtxh *rewardsHandler) GetAllCurrentFinishedTxs() map[string]data.TransactionHandler { - rtxh.mut.Lock() - - rewardTxPool := make(map[string]data.TransactionHandler) - for txHash, txInfo := range rtxh.rewardTxsForBlock { - - senderShard := txInfo.ShardId - receiverShard, err := rtxh.address.ShardIdForAddress(txInfo.RcvAddr) - if err != nil { - continue - } - if receiverShard != rtxh.shardCoordinator.SelfId() { - continue - } - if senderShard != rtxh.shardCoordinator.SelfId() { - continue - } - rewardTxPool[txHash] = txInfo - } - rtxh.mut.Unlock() - - return rewardTxPool + rtxh.mut.Lock() + + rewardTxPool := make(map[string]data.TransactionHandler) + for txHash, txInfo := range rtxh.rewardTxsForBlock { + + senderShard := txInfo.ShardId + receiverShard, err := rtxh.address.ShardIdForAddress(txInfo.RcvAddr) + if err != nil { + continue + } + if receiverShard != rtxh.shardCoordinator.SelfId() { + continue + } + 
if senderShard != rtxh.shardCoordinator.SelfId() { + continue + } + rewardTxPool[txHash] = txInfo + } + rtxh.mut.Unlock() + + return rewardTxPool } diff --git a/process/block/preprocess/rewardsHandler_test.go b/process/block/preprocess/rewardsHandler_test.go index 37a81979044..fbad34382fb 100644 --- a/process/block/preprocess/rewardsHandler_test.go +++ b/process/block/preprocess/rewardsHandler_test.go @@ -1,388 +1,388 @@ package preprocess import ( - "math/big" - "testing" - - "github.com/ElrondNetwork/elrond-go/data" - "github.com/ElrondNetwork/elrond-go/data/rewardTx" - "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/process/mock" - "github.com/stretchr/testify/assert" + "math/big" + "testing" + + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/mock" + "github.com/stretchr/testify/assert" ) func TestNewRewardTxHandler_NilSpecialAddressShouldErr(t *testing.T) { - t.Parallel() - - tdp := initDataPool() - th, err := NewRewardTxHandler( - nil, - &mock.HasherMock{}, - &mock.MarshalizerMock{}, - mock.NewMultiShardsCoordinatorMock(3), - &mock.AddressConverterMock{}, - &mock.ChainStorerMock{}, - tdp.RewardTransactions(), - ) - - assert.Nil(t, th) - assert.Equal(t, process.ErrNilSpecialAddressHandler, err) + t.Parallel() + + tdp := initDataPool() + th, err := NewRewardTxHandler( + nil, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AddressConverterMock{}, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), + ) + + assert.Nil(t, th) + assert.Equal(t, process.ErrNilSpecialAddressHandler, err) } func TestNewRewardTxHandler_NilHasher(t *testing.T) { - t.Parallel() - - tdp := initDataPool() - th, err := NewRewardTxHandler( - &mock.SpecialAddressHandlerMock{}, - nil, - &mock.MarshalizerMock{}, - mock.NewMultiShardsCoordinatorMock(3), - &mock.AddressConverterMock{}, - &mock.ChainStorerMock{}, - tdp.RewardTransactions(), - ) - - assert.Nil(t, th) - assert.Equal(t, process.ErrNilHasher, err) + t.Parallel() + + tdp := initDataPool() + th, err := NewRewardTxHandler( + &mock.SpecialAddressHandlerMock{}, + nil, + &mock.MarshalizerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AddressConverterMock{}, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), + ) + + assert.Nil(t, th) + assert.Equal(t, process.ErrNilHasher, err) } func TestNewRewardTxHandler_NilMarshalizer(t *testing.T) { - t.Parallel() - - tdp := initDataPool() - th, err := NewRewardTxHandler( - &mock.SpecialAddressHandlerMock{}, - &mock.HasherMock{}, - nil, - mock.NewMultiShardsCoordinatorMock(3), - &mock.AddressConverterMock{}, - &mock.ChainStorerMock{}, - tdp.RewardTransactions(), - ) - - assert.Nil(t, th) - assert.Equal(t, process.ErrNilMarshalizer, err) + t.Parallel() + + tdp := initDataPool() + th, err := NewRewardTxHandler( + &mock.SpecialAddressHandlerMock{}, + &mock.HasherMock{}, + nil, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AddressConverterMock{}, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), + ) + + assert.Nil(t, th) + assert.Equal(t, process.ErrNilMarshalizer, err) } func TestNewRewardTxHandler_NilShardCoordinator(t *testing.T) { - t.Parallel() - - tdp := initDataPool() - th, err := NewRewardTxHandler( - &mock.SpecialAddressHandlerMock{}, - &mock.HasherMock{}, - &mock.MarshalizerMock{}, - nil, - &mock.AddressConverterMock{}, - &mock.ChainStorerMock{}, - tdp.RewardTransactions(), - ) 
- - assert.Nil(t, th) - assert.Equal(t, process.ErrNilShardCoordinator, err) + t.Parallel() + + tdp := initDataPool() + th, err := NewRewardTxHandler( + &mock.SpecialAddressHandlerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + nil, + &mock.AddressConverterMock{}, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), + ) + + assert.Nil(t, th) + assert.Equal(t, process.ErrNilShardCoordinator, err) } func TestNewRewardTxHandler_NilAddressConverter(t *testing.T) { - t.Parallel() - - tdp := initDataPool() - th, err := NewRewardTxHandler( - &mock.SpecialAddressHandlerMock{}, - &mock.HasherMock{}, - &mock.MarshalizerMock{}, - mock.NewMultiShardsCoordinatorMock(3), - nil, - &mock.ChainStorerMock{}, - tdp.RewardTransactions(), - ) - - assert.Nil(t, th) - assert.Equal(t, process.ErrNilAddressConverter, err) + t.Parallel() + + tdp := initDataPool() + th, err := NewRewardTxHandler( + &mock.SpecialAddressHandlerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + nil, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), + ) + + assert.Nil(t, th) + assert.Equal(t, process.ErrNilAddressConverter, err) } func TestNewRewardTxHandler_NilChainStorer(t *testing.T) { - t.Parallel() - - tdp := initDataPool() - th, err := NewRewardTxHandler( - &mock.SpecialAddressHandlerMock{}, - &mock.HasherMock{}, - &mock.MarshalizerMock{}, - mock.NewMultiShardsCoordinatorMock(3), - &mock.AddressConverterMock{}, - nil, - tdp.RewardTransactions(), - ) - - assert.Nil(t, th) - assert.Equal(t, process.ErrNilStorage, err) + t.Parallel() + + tdp := initDataPool() + th, err := NewRewardTxHandler( + &mock.SpecialAddressHandlerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AddressConverterMock{}, + nil, + tdp.RewardTransactions(), + ) + + assert.Nil(t, th) + assert.Equal(t, process.ErrNilStorage, err) } func TestNewRewardTxHandler_NilRewardsPool(t *testing.T) { - t.Parallel() - - th, err := NewRewardTxHandler( - &mock.SpecialAddressHandlerMock{}, - &mock.HasherMock{}, - &mock.MarshalizerMock{}, - mock.NewMultiShardsCoordinatorMock(3), - &mock.AddressConverterMock{}, - &mock.ChainStorerMock{}, - nil, - ) - - assert.Nil(t, th) - assert.NotNil(t, process.ErrNilRewardTxDataPool, err) + t.Parallel() + + th, err := NewRewardTxHandler( + &mock.SpecialAddressHandlerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AddressConverterMock{}, + &mock.ChainStorerMock{}, + nil, + ) + + assert.Nil(t, th) + assert.NotNil(t, process.ErrNilRewardTxDataPool, err) } func TestNewRewardTxHandler_ValsOk(t *testing.T) { - t.Parallel() - - tdp := initDataPool() - th, err := NewRewardTxHandler( - &mock.SpecialAddressHandlerMock{}, - &mock.HasherMock{}, - &mock.MarshalizerMock{}, - mock.NewMultiShardsCoordinatorMock(3), - &mock.AddressConverterMock{}, - &mock.ChainStorerMock{}, - tdp.RewardTransactions(), - ) - - assert.Nil(t, err) - assert.NotNil(t, th) + t.Parallel() + + tdp := initDataPool() + th, err := NewRewardTxHandler( + &mock.SpecialAddressHandlerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AddressConverterMock{}, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), + ) + + assert.Nil(t, err) + assert.NotNil(t, th) } func TestRewardTxHandlerAddIntermediateTransactions(t *testing.T) { - t.Parallel() - - tdp := initDataPool() - th, err := NewRewardTxHandler( - &mock.SpecialAddressHandlerMock{}, - &mock.HasherMock{}, - 
&mock.MarshalizerMock{}, - mock.NewMultiShardsCoordinatorMock(3), - &mock.AddressConverterMock{}, - &mock.ChainStorerMock{}, - tdp.RewardTransactions(), - ) - - assert.Nil(t, err) - assert.NotNil(t, th) - - err = th.AddIntermediateTransactions(nil) - assert.Nil(t, err) + t.Parallel() + + tdp := initDataPool() + th, err := NewRewardTxHandler( + &mock.SpecialAddressHandlerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AddressConverterMock{}, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), + ) + + assert.Nil(t, err) + assert.NotNil(t, th) + + err = th.AddIntermediateTransactions(nil) + assert.Nil(t, err) } func TestRewardTxHandlerProcessTransactionFee(t *testing.T) { - t.Parallel() - - tdp := initDataPool() - th, err := NewRewardTxHandler( - &mock.SpecialAddressHandlerMock{}, - &mock.HasherMock{}, - &mock.MarshalizerMock{}, - mock.NewMultiShardsCoordinatorMock(3), - &mock.AddressConverterMock{}, - &mock.ChainStorerMock{}, - tdp.RewardTransactions(), - ) - - assert.Nil(t, err) - assert.NotNil(t, th) - - th.ProcessTransactionFee(nil) - assert.Equal(t, big.NewInt(0), th.accumulatedFees) - - th.ProcessTransactionFee(big.NewInt(10)) - assert.Equal(t, big.NewInt(10), th.accumulatedFees) - - th.ProcessTransactionFee(big.NewInt(100)) - assert.Equal(t, big.NewInt(110), th.accumulatedFees) + t.Parallel() + + tdp := initDataPool() + th, err := NewRewardTxHandler( + &mock.SpecialAddressHandlerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AddressConverterMock{}, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), + ) + + assert.Nil(t, err) + assert.NotNil(t, th) + + th.ProcessTransactionFee(nil) + assert.Equal(t, big.NewInt(0), th.accumulatedFees) + + th.ProcessTransactionFee(big.NewInt(10)) + assert.Equal(t, big.NewInt(10), th.accumulatedFees) + + th.ProcessTransactionFee(big.NewInt(100)) + assert.Equal(t, big.NewInt(110), th.accumulatedFees) } func TestRewardTxHandlerCleanProcessedUTxs(t *testing.T) { - t.Parallel() - - tdp := initDataPool() - th, err := NewRewardTxHandler( - &mock.SpecialAddressHandlerMock{}, - &mock.HasherMock{}, - &mock.MarshalizerMock{}, - mock.NewMultiShardsCoordinatorMock(3), - &mock.AddressConverterMock{}, - &mock.ChainStorerMock{}, - tdp.RewardTransactions(), - ) - - assert.Nil(t, err) - assert.NotNil(t, th) - - th.ProcessTransactionFee(big.NewInt(10)) - _ = th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{}}) - assert.Equal(t, big.NewInt(10), th.accumulatedFees) - assert.Equal(t, 1, len(th.rewardTxsForBlock)) - - th.cleanCachedData() - assert.Equal(t, big.NewInt(0), th.accumulatedFees) - assert.Equal(t, 0, len(th.rewardTxsForBlock)) + t.Parallel() + + tdp := initDataPool() + th, err := NewRewardTxHandler( + &mock.SpecialAddressHandlerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AddressConverterMock{}, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), + ) + + assert.Nil(t, err) + assert.NotNil(t, th) + + th.ProcessTransactionFee(big.NewInt(10)) + _ = th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{}}) + assert.Equal(t, big.NewInt(10), th.accumulatedFees) + assert.Equal(t, 1, len(th.rewardTxsForBlock)) + + th.cleanCachedData() + assert.Equal(t, big.NewInt(0), th.accumulatedFees) + assert.Equal(t, 0, len(th.rewardTxsForBlock)) } func TestRewardTxHandlerCreateAllUTxs(t *testing.T) { - t.Parallel() + t.Parallel() - tdp := initDataPool() 
- th, err := NewRewardTxHandler( - &mock.SpecialAddressHandlerMock{}, - &mock.HasherMock{}, - &mock.MarshalizerMock{}, - mock.NewMultiShardsCoordinatorMock(3), - &mock.AddressConverterMock{}, - &mock.ChainStorerMock{}, - tdp.RewardTransactions(), - ) + tdp := initDataPool() + th, err := NewRewardTxHandler( + &mock.SpecialAddressHandlerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AddressConverterMock{}, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), + ) - assert.Nil(t, err) - assert.NotNil(t, th) + assert.Nil(t, err) + assert.NotNil(t, th) - txs := th.createRewardFromFees() - assert.Equal(t, 0, len(txs)) + txs := th.createRewardFromFees() + assert.Equal(t, 0, len(txs)) - currTxFee := big.NewInt(50) - th.ProcessTransactionFee(currTxFee) + currTxFee := big.NewInt(50) + th.ProcessTransactionFee(currTxFee) - txs = th.createRewardFromFees() - assert.Equal(t, 3, len(txs)) + txs = th.createRewardFromFees() + assert.Equal(t, 3, len(txs)) - totalSum := txs[0].GetValue().Uint64() - totalSum += txs[1].GetValue().Uint64() - totalSum += txs[2].GetValue().Uint64() + totalSum := txs[0].GetValue().Uint64() + totalSum += txs[1].GetValue().Uint64() + totalSum += txs[2].GetValue().Uint64() - assert.Equal(t, currTxFee.Uint64(), totalSum) + assert.Equal(t, currTxFee.Uint64(), totalSum) } func TestRewardTxHandler_VerifyCreatedRewardsTxsRewardTxNotFound(t *testing.T) { - t.Parallel() - - tdp := initDataPool() - adrConv := &mock.AddressConverterMock{} - shardCoordinator := mock.NewMultiShardsCoordinatorMock(3) - addr := mock.NewSpecialAddressHandlerMock(adrConv, shardCoordinator) - th, err := NewRewardTxHandler( - addr, - &mock.HasherMock{}, - &mock.MarshalizerMock{}, - shardCoordinator, - adrConv, - &mock.ChainStorerMock{}, - tdp.RewardTransactions(), - ) - - assert.Nil(t, err) - assert.NotNil(t, th) - - err = th.verifyCreatedRewardsTxs() - assert.Nil(t, err) - - currTxFee := big.NewInt(50) - th.ProcessTransactionFee(currTxFee) - _ = th.CreateAllInterMiniBlocks() - err = th.verifyCreatedRewardsTxs() - assert.Equal(t, process.ErrRewardTxNotFound, err) + t.Parallel() + + tdp := initDataPool() + adrConv := &mock.AddressConverterMock{} + shardCoordinator := mock.NewMultiShardsCoordinatorMock(3) + addr := mock.NewSpecialAddressHandlerMock(adrConv, shardCoordinator) + th, err := NewRewardTxHandler( + addr, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + shardCoordinator, + adrConv, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), + ) + + assert.Nil(t, err) + assert.NotNil(t, th) + + err = th.verifyCreatedRewardsTxs() + assert.Nil(t, err) + + currTxFee := big.NewInt(50) + th.ProcessTransactionFee(currTxFee) + _ = th.CreateAllInterMiniBlocks() + err = th.verifyCreatedRewardsTxs() + assert.Equal(t, process.ErrRewardTxNotFound, err) } func TestRewardTxHandler_VerifyCreatedRewardsTxsTotalTxsFeesDoNotMatch(t *testing.T) { - t.Parallel() - - tdp := initDataPool() - adrConv := &mock.AddressConverterMock{} - shardCoordinator := mock.NewMultiShardsCoordinatorMock(3) - addr := mock.NewSpecialAddressHandlerMock(adrConv, shardCoordinator) - th, err := NewRewardTxHandler( - addr, - &mock.HasherMock{}, - &mock.MarshalizerMock{}, - shardCoordinator, - adrConv, - &mock.ChainStorerMock{}, - tdp.RewardTransactions(), - ) - - assert.Nil(t, err) - assert.NotNil(t, th) - - err = th.verifyCreatedRewardsTxs() - assert.Nil(t, err) - - currTxFee := big.NewInt(50) - th.ProcessTransactionFee(currTxFee) - extraVal := big.NewInt(100) - _ = 
th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{Value: big.NewInt(5), RcvAddr: addr.ElrondCommunityAddress()}}) - _ = th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{Value: big.NewInt(25), RcvAddr: addr.LeaderAddress()}}) - _ = th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{Value: big.NewInt(20), RcvAddr: addr.BurnAddress()}}) - _ = th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{Value: extraVal, RcvAddr: addr.BurnAddress()}}) - _ = th.CreateAllInterMiniBlocks() - err = th.verifyCreatedRewardsTxs() - assert.Equal(t, process.ErrTotalTxsFeesDoNotMatch, err) + t.Parallel() + + tdp := initDataPool() + adrConv := &mock.AddressConverterMock{} + shardCoordinator := mock.NewMultiShardsCoordinatorMock(3) + addr := mock.NewSpecialAddressHandlerMock(adrConv, shardCoordinator) + th, err := NewRewardTxHandler( + addr, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + shardCoordinator, + adrConv, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), + ) + + assert.Nil(t, err) + assert.NotNil(t, th) + + err = th.verifyCreatedRewardsTxs() + assert.Nil(t, err) + + currTxFee := big.NewInt(50) + th.ProcessTransactionFee(currTxFee) + extraVal := big.NewInt(100) + _ = th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{Value: big.NewInt(5), RcvAddr: addr.ElrondCommunityAddress()}}) + _ = th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{Value: big.NewInt(25), RcvAddr: addr.LeaderAddress()}}) + _ = th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{Value: big.NewInt(20), RcvAddr: addr.BurnAddress()}}) + _ = th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{Value: extraVal, RcvAddr: addr.BurnAddress()}}) + _ = th.CreateAllInterMiniBlocks() + err = th.verifyCreatedRewardsTxs() + assert.Equal(t, process.ErrTotalTxsFeesDoNotMatch, err) } func TestRewardTxHandlerVerifyCreatedRewardsTxsOK(t *testing.T) { - t.Parallel() - - tdp := initDataPool() - adrConv := &mock.AddressConverterMock{} - shardCoordinator := mock.NewMultiShardsCoordinatorMock(3) - addr := mock.NewSpecialAddressHandlerMock(adrConv, shardCoordinator) - th, err := NewRewardTxHandler( - addr, - &mock.HasherMock{}, - &mock.MarshalizerMock{}, - shardCoordinator, - adrConv, - &mock.ChainStorerMock{}, - tdp.RewardTransactions(), - ) - - assert.Nil(t, err) - assert.NotNil(t, th) - - currTxFee := big.NewInt(50) - th.ProcessTransactionFee(currTxFee) - _ = th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{Value: big.NewInt(5), RcvAddr: addr.ElrondCommunityAddress()}}) - _ = th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{Value: big.NewInt(25), RcvAddr: addr.LeaderAddress()}}) - _ = th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{Value: big.NewInt(20), RcvAddr: addr.BurnAddress()}}) - _ = th.CreateAllInterMiniBlocks() - err = th.verifyCreatedRewardsTxs() - assert.Nil(t, err) + t.Parallel() + + tdp := initDataPool() + adrConv := &mock.AddressConverterMock{} + shardCoordinator := mock.NewMultiShardsCoordinatorMock(3) + addr := mock.NewSpecialAddressHandlerMock(adrConv, shardCoordinator) + th, err := NewRewardTxHandler( + addr, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + shardCoordinator, + adrConv, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), + ) + + assert.Nil(t, err) + assert.NotNil(t, th) + + currTxFee := big.NewInt(50) + 
th.ProcessTransactionFee(currTxFee) + _ = th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{Value: big.NewInt(5), RcvAddr: addr.ElrondCommunityAddress()}}) + _ = th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{Value: big.NewInt(25), RcvAddr: addr.LeaderAddress()}}) + _ = th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{Value: big.NewInt(20), RcvAddr: addr.BurnAddress()}}) + _ = th.CreateAllInterMiniBlocks() + err = th.verifyCreatedRewardsTxs() + assert.Nil(t, err) } func TestRewardTxHandlerCreateAllInterMiniBlocksOK(t *testing.T) { - t.Parallel() - - shardCoordinator := mock.NewMultiShardsCoordinatorMock(3) - tdp := initDataPool() - th, err := NewRewardTxHandler( - &mock.SpecialAddressHandlerMock{ - AdrConv: &mock.AddressConverterMock{}, - ShardCoordinator: shardCoordinator}, - &mock.HasherMock{}, - &mock.MarshalizerMock{}, - shardCoordinator, - &mock.AddressConverterMock{}, - &mock.ChainStorerMock{}, - tdp.RewardTransactions(), - ) - - assert.Nil(t, err) - assert.NotNil(t, th) - - mbs := th.CreateAllInterMiniBlocks() - assert.Equal(t, 0, len(mbs)) - - currTxFee := big.NewInt(50) - th.ProcessTransactionFee(currTxFee) - - mbs = th.CreateAllInterMiniBlocks() - assert.Equal(t, 1, len(mbs)) + t.Parallel() + + shardCoordinator := mock.NewMultiShardsCoordinatorMock(3) + tdp := initDataPool() + th, err := NewRewardTxHandler( + &mock.SpecialAddressHandlerMock{ + AdrConv: &mock.AddressConverterMock{}, + ShardCoordinator: shardCoordinator}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + shardCoordinator, + &mock.AddressConverterMock{}, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), + ) + + assert.Nil(t, err) + assert.NotNil(t, th) + + mbs := th.CreateAllInterMiniBlocks() + assert.Equal(t, 0, len(mbs)) + + currTxFee := big.NewInt(50) + th.ProcessTransactionFee(currTxFee) + + mbs = th.CreateAllInterMiniBlocks() + assert.Equal(t, 1, len(mbs)) } diff --git a/process/block/preprocess/transactions.go b/process/block/preprocess/transactions.go index 8fd8d9066fd..059874a3b7b 100644 --- a/process/block/preprocess/transactions.go +++ b/process/block/preprocess/transactions.go @@ -392,7 +392,7 @@ func isSmartContractAddress(rcvAddress []byte) bool { return true } - isSCAddress := bytes.Equal(rcvAddress[:(numZerosForSCAddress-1)], make([]byte, numZerosForSCAddress-1)) + isSCAddress := bytes.Equal(rcvAddress[:(numZerosForSCAddress - 1)], make([]byte, numZerosForSCAddress-1)) if isSCAddress { return true } diff --git a/process/block/preprocess/transactions_test.go b/process/block/preprocess/transactions_test.go index a7018fb2133..df5b56769de 100644 --- a/process/block/preprocess/transactions_test.go +++ b/process/block/preprocess/transactions_test.go @@ -1,711 +1,711 @@ package preprocess import ( - "bytes" - "encoding/hex" - "fmt" - "github.com/ElrondNetwork/elrond-go/data/rewardTx" - "math/big" - "math/rand" - "reflect" - "sync" - "testing" - "time" - - "github.com/ElrondNetwork/elrond-go/core" - "github.com/ElrondNetwork/elrond-go/data/block" - "github.com/ElrondNetwork/elrond-go/data/smartContractResult" - "github.com/ElrondNetwork/elrond-go/data/transaction" - "github.com/ElrondNetwork/elrond-go/dataRetriever" - "github.com/ElrondNetwork/elrond-go/dataRetriever/shardedData" - "github.com/ElrondNetwork/elrond-go/hashing" - "github.com/ElrondNetwork/elrond-go/marshal" - "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/process/mock" - "github.com/ElrondNetwork/elrond-go/storage" - 
"github.com/ElrondNetwork/elrond-go/storage/storageUnit" - "github.com/stretchr/testify/assert" + "bytes" + "encoding/hex" + "fmt" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" + "math/big" + "math/rand" + "reflect" + "sync" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/smartContractResult" + "github.com/ElrondNetwork/elrond-go/data/transaction" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/dataRetriever/shardedData" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/mock" + "github.com/ElrondNetwork/elrond-go/storage" + "github.com/ElrondNetwork/elrond-go/storage/storageUnit" + "github.com/stretchr/testify/assert" ) func initDataPool() *mock.PoolsHolderStub { - sdp := &mock.PoolsHolderStub{ - TransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{ - RegisterHandlerCalled: func(i func(key []byte)) {}, - ShardDataStoreCalled: func(id string) (c storage.Cacher) { - return &mock.CacherStub{ - PeekCalled: func(key []byte) (value interface{}, ok bool) { - if reflect.DeepEqual(key, []byte("tx1_hash")) { - return &transaction.Transaction{Nonce: 10}, true - } - return nil, false - }, - KeysCalled: func() [][]byte { - return [][]byte{[]byte("key1"), []byte("key2")} - }, - LenCalled: func() int { - return 0 - }, - } - }, - AddDataCalled: func(key []byte, data interface{}, cacheId string) {}, - RemoveSetOfDataFromPoolCalled: func(keys [][]byte, id string) {}, - SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { - if reflect.DeepEqual(key, []byte("tx1_hash")) { - return &transaction.Transaction{Nonce: 10}, true - } - return nil, false - }, - } - }, - UnsignedTransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{ - RegisterHandlerCalled: func(i func(key []byte)) {}, - ShardDataStoreCalled: func(id string) (c storage.Cacher) { - return &mock.CacherStub{ - PeekCalled: func(key []byte) (value interface{}, ok bool) { - if reflect.DeepEqual(key, []byte("tx1_hash")) { - return &smartContractResult.SmartContractResult{Nonce: 10}, true - } - return nil, false - }, - KeysCalled: func() [][]byte { - return [][]byte{[]byte("key1"), []byte("key2")} - }, - LenCalled: func() int { - return 0 - }, - } - }, - AddDataCalled: func(key []byte, data interface{}, cacheId string) {}, - RemoveSetOfDataFromPoolCalled: func(keys [][]byte, id string) {}, - SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { - if reflect.DeepEqual(key, []byte("tx1_hash")) { - return &smartContractResult.SmartContractResult{Nonce: 10}, true - } - return nil, false - }, - } - }, - RewardTransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{ - RegisterHandlerCalled: func(i func(key []byte)) {}, - ShardDataStoreCalled: func(id string) (c storage.Cacher) { - return &mock.CacherStub{ - PeekCalled: func(key []byte) (value interface{}, ok bool) { - if reflect.DeepEqual(key, []byte("tx1_hash")) { - return &rewardTx.RewardTx{Value: big.NewInt(100)}, true - } - return nil, false - }, - KeysCalled: func() [][]byte { - return [][]byte{[]byte("key1"), []byte("key2")} - }, - LenCalled: func() int { - return 0 - }, - } - }, - AddDataCalled: func(key []byte, data interface{}, cacheId string) 
{}, - RemoveSetOfDataFromPoolCalled: func(keys [][]byte, id string) {}, - SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { - if reflect.DeepEqual(key, []byte("tx1_hash")) { - return &rewardTx.RewardTx{Value: big.NewInt(100)}, true - } - return nil, false - }, - } - }, - HeadersNoncesCalled: func() dataRetriever.Uint64SyncMapCacher { - return &mock.Uint64SyncMapCacherStub{} - }, - MetaBlocksCalled: func() storage.Cacher { - return &mock.CacherStub{ - GetCalled: func(key []byte) (value interface{}, ok bool) { - if reflect.DeepEqual(key, []byte("tx1_hash")) { - return &transaction.Transaction{Nonce: 10}, true - } - return nil, false - }, - KeysCalled: func() [][]byte { - return nil - }, - LenCalled: func() int { - return 0 - }, - PeekCalled: func(key []byte) (value interface{}, ok bool) { - if reflect.DeepEqual(key, []byte("tx1_hash")) { - return &transaction.Transaction{Nonce: 10}, true - } - return nil, false - }, - RegisterHandlerCalled: func(i func(key []byte)) {}, - } - }, - MiniBlocksCalled: func() storage.Cacher { - cs := &mock.CacherStub{} - cs.RegisterHandlerCalled = func(i func(key []byte)) { - } - cs.GetCalled = func(key []byte) (value interface{}, ok bool) { - if bytes.Equal([]byte("bbb"), key) { - return make(block.MiniBlockSlice, 0), true - } - - return nil, false - } - cs.PeekCalled = func(key []byte) (value interface{}, ok bool) { - if bytes.Equal([]byte("bbb"), key) { - return make(block.MiniBlockSlice, 0), true - } - - return nil, false - } - cs.RegisterHandlerCalled = func(i func(key []byte)) {} - cs.RemoveCalled = func(key []byte) {} - return cs - }, - HeadersCalled: func() storage.Cacher { - cs := &mock.CacherStub{} - cs.RegisterHandlerCalled = func(i func(key []byte)) { - } - return cs - }, - } - return sdp + sdp := &mock.PoolsHolderStub{ + TransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{ + RegisterHandlerCalled: func(i func(key []byte)) {}, + ShardDataStoreCalled: func(id string) (c storage.Cacher) { + return &mock.CacherStub{ + PeekCalled: func(key []byte) (value interface{}, ok bool) { + if reflect.DeepEqual(key, []byte("tx1_hash")) { + return &transaction.Transaction{Nonce: 10}, true + } + return nil, false + }, + KeysCalled: func() [][]byte { + return [][]byte{[]byte("key1"), []byte("key2")} + }, + LenCalled: func() int { + return 0 + }, + } + }, + AddDataCalled: func(key []byte, data interface{}, cacheId string) {}, + RemoveSetOfDataFromPoolCalled: func(keys [][]byte, id string) {}, + SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { + if reflect.DeepEqual(key, []byte("tx1_hash")) { + return &transaction.Transaction{Nonce: 10}, true + } + return nil, false + }, + } + }, + UnsignedTransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{ + RegisterHandlerCalled: func(i func(key []byte)) {}, + ShardDataStoreCalled: func(id string) (c storage.Cacher) { + return &mock.CacherStub{ + PeekCalled: func(key []byte) (value interface{}, ok bool) { + if reflect.DeepEqual(key, []byte("tx1_hash")) { + return &smartContractResult.SmartContractResult{Nonce: 10}, true + } + return nil, false + }, + KeysCalled: func() [][]byte { + return [][]byte{[]byte("key1"), []byte("key2")} + }, + LenCalled: func() int { + return 0 + }, + } + }, + AddDataCalled: func(key []byte, data interface{}, cacheId string) {}, + RemoveSetOfDataFromPoolCalled: func(keys [][]byte, id string) {}, + SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { + if 
reflect.DeepEqual(key, []byte("tx1_hash")) { + return &smartContractResult.SmartContractResult{Nonce: 10}, true + } + return nil, false + }, + } + }, + RewardTransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{ + RegisterHandlerCalled: func(i func(key []byte)) {}, + ShardDataStoreCalled: func(id string) (c storage.Cacher) { + return &mock.CacherStub{ + PeekCalled: func(key []byte) (value interface{}, ok bool) { + if reflect.DeepEqual(key, []byte("tx1_hash")) { + return &rewardTx.RewardTx{Value: big.NewInt(100)}, true + } + return nil, false + }, + KeysCalled: func() [][]byte { + return [][]byte{[]byte("key1"), []byte("key2")} + }, + LenCalled: func() int { + return 0 + }, + } + }, + AddDataCalled: func(key []byte, data interface{}, cacheId string) {}, + RemoveSetOfDataFromPoolCalled: func(keys [][]byte, id string) {}, + SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { + if reflect.DeepEqual(key, []byte("tx1_hash")) { + return &rewardTx.RewardTx{Value: big.NewInt(100)}, true + } + return nil, false + }, + } + }, + HeadersNoncesCalled: func() dataRetriever.Uint64SyncMapCacher { + return &mock.Uint64SyncMapCacherStub{} + }, + MetaBlocksCalled: func() storage.Cacher { + return &mock.CacherStub{ + GetCalled: func(key []byte) (value interface{}, ok bool) { + if reflect.DeepEqual(key, []byte("tx1_hash")) { + return &transaction.Transaction{Nonce: 10}, true + } + return nil, false + }, + KeysCalled: func() [][]byte { + return nil + }, + LenCalled: func() int { + return 0 + }, + PeekCalled: func(key []byte) (value interface{}, ok bool) { + if reflect.DeepEqual(key, []byte("tx1_hash")) { + return &transaction.Transaction{Nonce: 10}, true + } + return nil, false + }, + RegisterHandlerCalled: func(i func(key []byte)) {}, + } + }, + MiniBlocksCalled: func() storage.Cacher { + cs := &mock.CacherStub{} + cs.RegisterHandlerCalled = func(i func(key []byte)) { + } + cs.GetCalled = func(key []byte) (value interface{}, ok bool) { + if bytes.Equal([]byte("bbb"), key) { + return make(block.MiniBlockSlice, 0), true + } + + return nil, false + } + cs.PeekCalled = func(key []byte) (value interface{}, ok bool) { + if bytes.Equal([]byte("bbb"), key) { + return make(block.MiniBlockSlice, 0), true + } + + return nil, false + } + cs.RegisterHandlerCalled = func(i func(key []byte)) {} + cs.RemoveCalled = func(key []byte) {} + return cs + }, + HeadersCalled: func() storage.Cacher { + cs := &mock.CacherStub{} + cs.RegisterHandlerCalled = func(i func(key []byte)) { + } + return cs + }, + } + return sdp } func TestTxsPreprocessor_NewTransactionPreprocessorNilPool(t *testing.T) { - t.Parallel() + t.Parallel() - requestTransaction := func(shardID uint32, txHashes [][]byte) {} - txs, err := NewTransactionPreprocessor( - nil, - &mock.ChainStorerMock{}, - &mock.HasherMock{}, - &mock.MarshalizerMock{}, - &mock.TxProcessorMock{}, - mock.NewMultiShardsCoordinatorMock(3), - &mock.AccountsStub{}, - requestTransaction, - ) + requestTransaction := func(shardID uint32, txHashes [][]byte) {} + txs, err := NewTransactionPreprocessor( + nil, + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.TxProcessorMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + requestTransaction, + ) - assert.Nil(t, txs) - assert.Equal(t, process.ErrNilTransactionPool, err) + assert.Nil(t, txs) + assert.Equal(t, process.ErrNilTransactionPool, err) } func TestTxsPreprocessor_NewTransactionPreprocessorNilStore(t *testing.T) { - t.Parallel() 
+ t.Parallel() - tdp := initDataPool() - requestTransaction := func(shardID uint32, txHashes [][]byte) {} - txs, err := NewTransactionPreprocessor( - tdp.Transactions(), - nil, - &mock.HasherMock{}, - &mock.MarshalizerMock{}, - &mock.TxProcessorMock{}, - mock.NewMultiShardsCoordinatorMock(3), - &mock.AccountsStub{}, - requestTransaction, - ) + tdp := initDataPool() + requestTransaction := func(shardID uint32, txHashes [][]byte) {} + txs, err := NewTransactionPreprocessor( + tdp.Transactions(), + nil, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.TxProcessorMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + requestTransaction, + ) - assert.Nil(t, txs) - assert.Equal(t, process.ErrNilTxStorage, err) + assert.Nil(t, txs) + assert.Equal(t, process.ErrNilTxStorage, err) } func TestTxsPreprocessor_NewTransactionPreprocessorNilHasher(t *testing.T) { - t.Parallel() + t.Parallel() - tdp := initDataPool() - requestTransaction := func(shardID uint32, txHashes [][]byte) {} - txs, err := NewTransactionPreprocessor( - tdp.Transactions(), - &mock.ChainStorerMock{}, - nil, - &mock.MarshalizerMock{}, - &mock.TxProcessorMock{}, - mock.NewMultiShardsCoordinatorMock(3), - &mock.AccountsStub{}, - requestTransaction, - ) + tdp := initDataPool() + requestTransaction := func(shardID uint32, txHashes [][]byte) {} + txs, err := NewTransactionPreprocessor( + tdp.Transactions(), + &mock.ChainStorerMock{}, + nil, + &mock.MarshalizerMock{}, + &mock.TxProcessorMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + requestTransaction, + ) - assert.Nil(t, txs) - assert.Equal(t, process.ErrNilHasher, err) + assert.Nil(t, txs) + assert.Equal(t, process.ErrNilHasher, err) } func TestTxsPreprocessor_NewTransactionPreprocessorNilMarsalizer(t *testing.T) { - t.Parallel() + t.Parallel() - tdp := initDataPool() - requestTransaction := func(shardID uint32, txHashes [][]byte) {} - txs, err := NewTransactionPreprocessor( - tdp.Transactions(), - &mock.ChainStorerMock{}, - &mock.HasherMock{}, - nil, - &mock.TxProcessorMock{}, - mock.NewMultiShardsCoordinatorMock(3), - &mock.AccountsStub{}, - requestTransaction, - ) + tdp := initDataPool() + requestTransaction := func(shardID uint32, txHashes [][]byte) {} + txs, err := NewTransactionPreprocessor( + tdp.Transactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + nil, + &mock.TxProcessorMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + requestTransaction, + ) - assert.Nil(t, txs) - assert.Equal(t, process.ErrNilMarshalizer, err) + assert.Nil(t, txs) + assert.Equal(t, process.ErrNilMarshalizer, err) } func TestTxsPreprocessor_NewTransactionPreprocessorNilTxProce(t *testing.T) { - t.Parallel() + t.Parallel() - tdp := initDataPool() - requestTransaction := func(shardID uint32, txHashes [][]byte) {} - txs, err := NewTransactionPreprocessor( - tdp.Transactions(), - &mock.ChainStorerMock{}, - &mock.HasherMock{}, - &mock.MarshalizerMock{}, - nil, - mock.NewMultiShardsCoordinatorMock(3), - &mock.AccountsStub{}, - requestTransaction, - ) + tdp := initDataPool() + requestTransaction := func(shardID uint32, txHashes [][]byte) {} + txs, err := NewTransactionPreprocessor( + tdp.Transactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + nil, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + requestTransaction, + ) - assert.Nil(t, txs) - assert.Equal(t, process.ErrNilTxProcessor, err) + assert.Nil(t, txs) + assert.Equal(t, process.ErrNilTxProcessor, err) } func 
TestTxsPreprocessor_NewTransactionPreprocessorNilShardCoord(t *testing.T) { - t.Parallel() + t.Parallel() - tdp := initDataPool() - requestTransaction := func(shardID uint32, txHashes [][]byte) {} - txs, err := NewTransactionPreprocessor( - tdp.Transactions(), - &mock.ChainStorerMock{}, - &mock.HasherMock{}, - &mock.MarshalizerMock{}, - &mock.TxProcessorMock{}, - nil, - &mock.AccountsStub{}, - requestTransaction, - ) + tdp := initDataPool() + requestTransaction := func(shardID uint32, txHashes [][]byte) {} + txs, err := NewTransactionPreprocessor( + tdp.Transactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.TxProcessorMock{}, + nil, + &mock.AccountsStub{}, + requestTransaction, + ) - assert.Nil(t, txs) - assert.Equal(t, process.ErrNilShardCoordinator, err) + assert.Nil(t, txs) + assert.Equal(t, process.ErrNilShardCoordinator, err) } func TestTxsPreprocessor_NewTransactionPreprocessorNilAccounts(t *testing.T) { - t.Parallel() + t.Parallel() - tdp := initDataPool() - requestTransaction := func(shardID uint32, txHashes [][]byte) {} - txs, err := NewTransactionPreprocessor( - tdp.Transactions(), - &mock.ChainStorerMock{}, - &mock.HasherMock{}, - &mock.MarshalizerMock{}, - &mock.TxProcessorMock{}, - mock.NewMultiShardsCoordinatorMock(3), - nil, - requestTransaction, - ) + tdp := initDataPool() + requestTransaction := func(shardID uint32, txHashes [][]byte) {} + txs, err := NewTransactionPreprocessor( + tdp.Transactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.TxProcessorMock{}, + mock.NewMultiShardsCoordinatorMock(3), + nil, + requestTransaction, + ) - assert.Nil(t, txs) - assert.Equal(t, process.ErrNilAccountsAdapter, err) + assert.Nil(t, txs) + assert.Equal(t, process.ErrNilAccountsAdapter, err) } func TestTxsPreprocessor_NewTransactionPreprocessorNilRequestFunc(t *testing.T) { - t.Parallel() + t.Parallel() - tdp := initDataPool() - txs, err := NewTransactionPreprocessor( - tdp.Transactions(), - &mock.ChainStorerMock{}, - &mock.HasherMock{}, - &mock.MarshalizerMock{}, - &mock.TxProcessorMock{}, - mock.NewMultiShardsCoordinatorMock(3), - &mock.AccountsStub{}, - nil, - ) + tdp := initDataPool() + txs, err := NewTransactionPreprocessor( + tdp.Transactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.TxProcessorMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + nil, + ) - assert.Nil(t, txs) - assert.Equal(t, process.ErrNilRequestHandler, err) + assert.Nil(t, txs) + assert.Equal(t, process.ErrNilRequestHandler, err) } func TestTxsPreProcessor_GetTransactionFromPool(t *testing.T) { - t.Parallel() - tdp := initDataPool() - requestTransaction := func(shardID uint32, txHashes [][]byte) {} - txs, _ := NewTransactionPreprocessor( - tdp.Transactions(), - &mock.ChainStorerMock{}, - &mock.HasherMock{}, - &mock.MarshalizerMock{}, - &mock.TxProcessorMock{}, - mock.NewMultiShardsCoordinatorMock(3), - &mock.AccountsStub{}, - requestTransaction, - ) - txHash := []byte("tx1_hash") - tx, _ := process.GetTransactionHandlerFromPool(1, 1, txHash, tdp.Transactions()) - assert.NotNil(t, txs) - assert.NotNil(t, tx) - assert.Equal(t, uint64(10), tx.(*transaction.Transaction).Nonce) + t.Parallel() + tdp := initDataPool() + requestTransaction := func(shardID uint32, txHashes [][]byte) {} + txs, _ := NewTransactionPreprocessor( + tdp.Transactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.TxProcessorMock{}, + 
mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + requestTransaction, + ) + txHash := []byte("tx1_hash") + tx, _ := process.GetTransactionHandlerFromPool(1, 1, txHash, tdp.Transactions()) + assert.NotNil(t, txs) + assert.NotNil(t, tx) + assert.Equal(t, uint64(10), tx.(*transaction.Transaction).Nonce) } func TestTransactionPreprocessor_RequestTransactionFromNetwork(t *testing.T) { - t.Parallel() - tdp := initDataPool() - requestTransaction := func(shardID uint32, txHashes [][]byte) {} - txs, _ := NewTransactionPreprocessor( - tdp.Transactions(), - &mock.ChainStorerMock{}, - &mock.HasherMock{}, - &mock.MarshalizerMock{}, - &mock.TxProcessorMock{}, - mock.NewMultiShardsCoordinatorMock(3), - &mock.AccountsStub{}, - requestTransaction, - ) - shardId := uint32(1) - txHash1 := []byte("tx_hash1") - txHash2 := []byte("tx_hash2") - body := make(block.Body, 0) - txHashes := make([][]byte, 0) - txHashes = append(txHashes, txHash1) - txHashes = append(txHashes, txHash2) - mBlk := block.MiniBlock{ReceiverShardID: shardId, TxHashes: txHashes} - body = append(body, &mBlk) - txsRequested := txs.RequestBlockTransactions(body) - assert.Equal(t, 2, txsRequested) + t.Parallel() + tdp := initDataPool() + requestTransaction := func(shardID uint32, txHashes [][]byte) {} + txs, _ := NewTransactionPreprocessor( + tdp.Transactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.TxProcessorMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + requestTransaction, + ) + shardId := uint32(1) + txHash1 := []byte("tx_hash1") + txHash2 := []byte("tx_hash2") + body := make(block.Body, 0) + txHashes := make([][]byte, 0) + txHashes = append(txHashes, txHash1) + txHashes = append(txHashes, txHash2) + mBlk := block.MiniBlock{ReceiverShardID: shardId, TxHashes: txHashes} + body = append(body, &mBlk) + txsRequested := txs.RequestBlockTransactions(body) + assert.Equal(t, 2, txsRequested) } func TestTransactionPreprocessor_RequestBlockTransactionFromMiniBlockFromNetwork(t *testing.T) { - t.Parallel() - tdp := initDataPool() - requestTransaction := func(shardID uint32, txHashes [][]byte) {} - txs, _ := NewTransactionPreprocessor( - tdp.Transactions(), - &mock.ChainStorerMock{}, - &mock.HasherMock{}, - &mock.MarshalizerMock{}, - &mock.TxProcessorMock{}, - mock.NewMultiShardsCoordinatorMock(3), - &mock.AccountsStub{}, - requestTransaction, - ) - - shardId := uint32(1) - txHash1 := []byte("tx_hash1") - txHash2 := []byte("tx_hash2") - txHashes := make([][]byte, 0) - txHashes = append(txHashes, txHash1) - txHashes = append(txHashes, txHash2) - mb := block.MiniBlock{ReceiverShardID: shardId, TxHashes: txHashes} - txsRequested := txs.RequestTransactionsForMiniBlock(mb) - assert.Equal(t, 2, txsRequested) + t.Parallel() + tdp := initDataPool() + requestTransaction := func(shardID uint32, txHashes [][]byte) {} + txs, _ := NewTransactionPreprocessor( + tdp.Transactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.TxProcessorMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + requestTransaction, + ) + + shardId := uint32(1) + txHash1 := []byte("tx_hash1") + txHash2 := []byte("tx_hash2") + txHashes := make([][]byte, 0) + txHashes = append(txHashes, txHash1) + txHashes = append(txHashes, txHash2) + mb := block.MiniBlock{ReceiverShardID: shardId, TxHashes: txHashes} + txsRequested := txs.RequestTransactionsForMiniBlock(mb) + assert.Equal(t, 2, txsRequested) } func 
TestTransactionPreprocessor_ReceivedTransactionShouldEraseRequested(t *testing.T) { - t.Parallel() - - dataPool := mock.NewPoolsHolderMock() - - shardedDataStub := &mock.ShardedDataStub{ - ShardDataStoreCalled: func(cacheId string) (c storage.Cacher) { - return &mock.CacherStub{ - PeekCalled: func(key []byte) (value interface{}, ok bool) { - return &transaction.Transaction{}, true - }, - } - }, - RegisterHandlerCalled: func(i func(key []byte)) { - }, - } - - dataPool.SetTransactions(shardedDataStub) - - requestTransaction := func(shardID uint32, txHashes [][]byte) {} - txs, _ := NewTransactionPreprocessor( - dataPool.Transactions(), - &mock.ChainStorerMock{}, - &mock.HasherMock{}, - &mock.MarshalizerMock{}, - &mock.TxProcessorMock{}, - mock.NewMultiShardsCoordinatorMock(3), - &mock.AccountsStub{}, - requestTransaction, - ) - - //add 3 tx hashes on requested list - txHash1 := []byte("tx hash 1") - txHash2 := []byte("tx hash 2") - txHash3 := []byte("tx hash 3") - - txs.AddTxHashToRequestedList(txHash1) - txs.AddTxHashToRequestedList(txHash2) - txs.AddTxHashToRequestedList(txHash3) - - txs.SetMissingTxs(3) - - //received txHash2 - txs.ReceivedTransaction(txHash2) - - assert.True(t, txs.IsTxHashRequested(txHash1)) - assert.False(t, txs.IsTxHashRequested(txHash2)) - assert.True(t, txs.IsTxHashRequested(txHash3)) + t.Parallel() + + dataPool := mock.NewPoolsHolderMock() + + shardedDataStub := &mock.ShardedDataStub{ + ShardDataStoreCalled: func(cacheId string) (c storage.Cacher) { + return &mock.CacherStub{ + PeekCalled: func(key []byte) (value interface{}, ok bool) { + return &transaction.Transaction{}, true + }, + } + }, + RegisterHandlerCalled: func(i func(key []byte)) { + }, + } + + dataPool.SetTransactions(shardedDataStub) + + requestTransaction := func(shardID uint32, txHashes [][]byte) {} + txs, _ := NewTransactionPreprocessor( + dataPool.Transactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.TxProcessorMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + requestTransaction, + ) + + //add 3 tx hashes on requested list + txHash1 := []byte("tx hash 1") + txHash2 := []byte("tx hash 2") + txHash3 := []byte("tx hash 3") + + txs.AddTxHashToRequestedList(txHash1) + txs.AddTxHashToRequestedList(txHash2) + txs.AddTxHashToRequestedList(txHash3) + + txs.SetMissingTxs(3) + + //received txHash2 + txs.ReceivedTransaction(txHash2) + + assert.True(t, txs.IsTxHashRequested(txHash1)) + assert.False(t, txs.IsTxHashRequested(txHash2)) + assert.True(t, txs.IsTxHashRequested(txHash3)) } //------- GetAllTxsFromMiniBlock func computeHash(data interface{}, marshalizer marshal.Marshalizer, hasher hashing.Hasher) []byte { - buff, _ := marshalizer.Marshal(data) - return hasher.Compute(string(buff)) + buff, _ := marshalizer.Marshal(data) + return hasher.Compute(string(buff)) } func TestTransactionPreprocessor_GetAllTxsFromMiniBlockShouldWork(t *testing.T) { - t.Parallel() - - hasher := mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} - dataPool := mock.NewPoolsHolderMock() - senderShardId := uint32(0) - destinationShardId := uint32(1) - - transactions := []*transaction.Transaction{ - {Nonce: 1}, - {Nonce: 2}, - {Nonce: 3}, - } - transactionsHashes := make([][]byte, len(transactions)) - - //add defined transactions to sender-destination cacher - for idx, tx := range transactions { - transactionsHashes[idx] = computeHash(tx, marshalizer, hasher) - - dataPool.Transactions().AddData( - transactionsHashes[idx], - tx, - 
process.ShardCacherIdentifier(senderShardId, destinationShardId), - ) - } - - //add some random data - txRandom := &transaction.Transaction{Nonce: 4} - dataPool.Transactions().AddData( - computeHash(txRandom, marshalizer, hasher), - txRandom, - process.ShardCacherIdentifier(3, 4), - ) - - requestTransaction := func(shardID uint32, txHashes [][]byte) {} - txs, _ := NewTransactionPreprocessor( - dataPool.Transactions(), - &mock.ChainStorerMock{}, - &mock.HasherMock{}, - &mock.MarshalizerMock{}, - &mock.TxProcessorMock{}, - mock.NewMultiShardsCoordinatorMock(3), - &mock.AccountsStub{}, - requestTransaction, - ) - - mb := &block.MiniBlock{ - SenderShardID: senderShardId, - ReceiverShardID: destinationShardId, - TxHashes: transactionsHashes, - } - - txsRetrieved, txHashesRetrieved, err := txs.getAllTxsFromMiniBlock(mb, func() bool { return true }) - - assert.Nil(t, err) - assert.Equal(t, len(transactions), len(txsRetrieved)) - assert.Equal(t, len(transactions), len(txHashesRetrieved)) - for idx, tx := range transactions { - //txReceived should be all txs in the same order - assert.Equal(t, txsRetrieved[idx], tx) - //verify corresponding transaction hashes - assert.Equal(t, txHashesRetrieved[idx], computeHash(tx, marshalizer, hasher)) - } + t.Parallel() + + hasher := mock.HasherMock{} + marshalizer := &mock.MarshalizerMock{} + dataPool := mock.NewPoolsHolderMock() + senderShardId := uint32(0) + destinationShardId := uint32(1) + + transactions := []*transaction.Transaction{ + {Nonce: 1}, + {Nonce: 2}, + {Nonce: 3}, + } + transactionsHashes := make([][]byte, len(transactions)) + + //add defined transactions to sender-destination cacher + for idx, tx := range transactions { + transactionsHashes[idx] = computeHash(tx, marshalizer, hasher) + + dataPool.Transactions().AddData( + transactionsHashes[idx], + tx, + process.ShardCacherIdentifier(senderShardId, destinationShardId), + ) + } + + //add some random data + txRandom := &transaction.Transaction{Nonce: 4} + dataPool.Transactions().AddData( + computeHash(txRandom, marshalizer, hasher), + txRandom, + process.ShardCacherIdentifier(3, 4), + ) + + requestTransaction := func(shardID uint32, txHashes [][]byte) {} + txs, _ := NewTransactionPreprocessor( + dataPool.Transactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.TxProcessorMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + requestTransaction, + ) + + mb := &block.MiniBlock{ + SenderShardID: senderShardId, + ReceiverShardID: destinationShardId, + TxHashes: transactionsHashes, + } + + txsRetrieved, txHashesRetrieved, err := txs.getAllTxsFromMiniBlock(mb, func() bool { return true }) + + assert.Nil(t, err) + assert.Equal(t, len(transactions), len(txsRetrieved)) + assert.Equal(t, len(transactions), len(txHashesRetrieved)) + for idx, tx := range transactions { + //txReceived should be all txs in the same order + assert.Equal(t, txsRetrieved[idx], tx) + //verify corresponding transaction hashes + assert.Equal(t, txHashesRetrieved[idx], computeHash(tx, marshalizer, hasher)) + } } func TestTransactionPreprocessor_RemoveBlockTxsFromPoolNilBlockShouldErr(t *testing.T) { - t.Parallel() - tdp := initDataPool() - requestTransaction := func(shardID uint32, txHashes [][]byte) {} - txs, _ := NewTransactionPreprocessor( - tdp.Transactions(), - &mock.ChainStorerMock{}, - &mock.HasherMock{}, - &mock.MarshalizerMock{}, - &mock.TxProcessorMock{}, - mock.NewMultiShardsCoordinatorMock(3), - &mock.AccountsStub{}, - requestTransaction, - ) - err := 
txs.RemoveTxBlockFromPools(nil, tdp.MiniBlocks()) - assert.NotNil(t, err) - assert.Equal(t, err, process.ErrNilTxBlockBody) + t.Parallel() + tdp := initDataPool() + requestTransaction := func(shardID uint32, txHashes [][]byte) {} + txs, _ := NewTransactionPreprocessor( + tdp.Transactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.TxProcessorMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + requestTransaction, + ) + err := txs.RemoveTxBlockFromPools(nil, tdp.MiniBlocks()) + assert.NotNil(t, err) + assert.Equal(t, err, process.ErrNilTxBlockBody) } func TestTransactionPreprocessor_RemoveBlockTxsFromPoolOK(t *testing.T) { - t.Parallel() - tdp := initDataPool() - requestTransaction := func(shardID uint32, txHashes [][]byte) {} - txs, _ := NewTransactionPreprocessor( - tdp.Transactions(), - &mock.ChainStorerMock{}, - &mock.HasherMock{}, - &mock.MarshalizerMock{}, - &mock.TxProcessorMock{}, - mock.NewMultiShardsCoordinatorMock(3), - &mock.AccountsStub{}, - requestTransaction, - ) - body := make(block.Body, 0) - txHash := []byte("txHash") - txHashes := make([][]byte, 0) - txHashes = append(txHashes, txHash) - miniblock := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 0, - TxHashes: txHashes, - } - body = append(body, &miniblock) - err := txs.RemoveTxBlockFromPools(body, tdp.MiniBlocks()) - assert.Nil(t, err) + t.Parallel() + tdp := initDataPool() + requestTransaction := func(shardID uint32, txHashes [][]byte) {} + txs, _ := NewTransactionPreprocessor( + tdp.Transactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.TxProcessorMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + requestTransaction, + ) + body := make(block.Body, 0) + txHash := []byte("txHash") + txHashes := make([][]byte, 0) + txHashes = append(txHashes, txHash) + miniblock := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 0, + TxHashes: txHashes, + } + body = append(body, &miniblock) + err := txs.RemoveTxBlockFromPools(body, tdp.MiniBlocks()) + assert.Nil(t, err) } func TestTransactions_CreateAndProcessMiniBlockCrossShardGasLimitAddAll(t *testing.T) { - t.Parallel() + t.Parallel() - txPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache}) - requestTransaction := func(shardID uint32, txHashes [][]byte) {} - hasher := &mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} + txPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache}) + requestTransaction := func(shardID uint32, txHashes [][]byte) {} + hasher := &mock.HasherMock{} + marshalizer := &mock.MarshalizerMock{} - txs, _ := NewTransactionPreprocessor( - txPool, - &mock.ChainStorerMock{}, - hasher, - marshalizer, - &mock.TxProcessorMock{ProcessTransactionCalled: func(transaction *transaction.Transaction, round uint64) error { - return nil - }}, - mock.NewMultiShardsCoordinatorMock(3), - &mock.AccountsStub{}, - requestTransaction, - ) - assert.NotNil(t, txs) + txs, _ := NewTransactionPreprocessor( + txPool, + &mock.ChainStorerMock{}, + hasher, + marshalizer, + &mock.TxProcessorMock{ProcessTransactionCalled: func(transaction *transaction.Transaction, round uint64) error { + return nil + }}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + requestTransaction, + ) + assert.NotNil(t, txs) - sndShardId := uint32(0) - dstShardId := uint32(1) - strCache := process.ShardCacherIdentifier(sndShardId, dstShardId) + 
sndShardId := uint32(0) + dstShardId := uint32(1) + strCache := process.ShardCacherIdentifier(sndShardId, dstShardId) - addedTxs := make([]*transaction.Transaction, 0) - for i := 0; i < 10; i++ { - newTx := &transaction.Transaction{GasLimit: uint64(i)} + addedTxs := make([]*transaction.Transaction, 0) + for i := 0; i < 10; i++ { + newTx := &transaction.Transaction{GasLimit: uint64(i)} - txHash, _ := core.CalculateHash(marshalizer, hasher, newTx) - txPool.AddData(txHash, newTx, strCache) + txHash, _ := core.CalculateHash(marshalizer, hasher, newTx) + txPool.AddData(txHash, newTx, strCache) - addedTxs = append(addedTxs, newTx) - } + addedTxs = append(addedTxs, newTx) + } - mb, err := txs.CreateAndProcessMiniBlock(sndShardId, dstShardId, process.MaxItemsInBlock, haveTimeTrue, 10) - assert.Nil(t, err) + mb, err := txs.CreateAndProcessMiniBlock(sndShardId, dstShardId, process.MaxItemsInBlock, haveTimeTrue, 10) + assert.Nil(t, err) - assert.Equal(t, len(addedTxs), len(mb.TxHashes)) + assert.Equal(t, len(addedTxs), len(mb.TxHashes)) } func TestTransactions_CreateAndProcessMiniBlockCrossShardGasLimitAddAllAsNoSCCalls(t *testing.T) { - t.Parallel() + t.Parallel() - txPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache}) - requestTransaction := func(shardID uint32, txHashes [][]byte) {} - hasher := &mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} + txPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache}) + requestTransaction := func(shardID uint32, txHashes [][]byte) {} + hasher := &mock.HasherMock{} + marshalizer := &mock.MarshalizerMock{} - txs, _ := NewTransactionPreprocessor( - txPool, - &mock.ChainStorerMock{}, - hasher, - marshalizer, - &mock.TxProcessorMock{ProcessTransactionCalled: func(transaction *transaction.Transaction, round uint64) error { - return nil - }}, - mock.NewMultiShardsCoordinatorMock(3), - &mock.AccountsStub{}, - requestTransaction, - ) - assert.NotNil(t, txs) + txs, _ := NewTransactionPreprocessor( + txPool, + &mock.ChainStorerMock{}, + hasher, + marshalizer, + &mock.TxProcessorMock{ProcessTransactionCalled: func(transaction *transaction.Transaction, round uint64) error { + return nil + }}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + requestTransaction, + ) + assert.NotNil(t, txs) - sndShardId := uint32(0) - dstShardId := uint32(1) - strCache := process.ShardCacherIdentifier(sndShardId, dstShardId) + sndShardId := uint32(0) + dstShardId := uint32(1) + strCache := process.ShardCacherIdentifier(sndShardId, dstShardId) - gasLimit := process.MaxGasLimitPerMiniBlock / uint64(5) + gasLimit := process.MaxGasLimitPerMiniBlock / uint64(5) - addedTxs := make([]*transaction.Transaction, 0) - for i := 0; i < 10; i++ { - newTx := &transaction.Transaction{GasLimit: gasLimit, GasPrice: uint64(i), RcvAddr: []byte("012345678910")} + addedTxs := make([]*transaction.Transaction, 0) + for i := 0; i < 10; i++ { + newTx := &transaction.Transaction{GasLimit: gasLimit, GasPrice: uint64(i), RcvAddr: []byte("012345678910")} - txHash, _ := core.CalculateHash(marshalizer, hasher, newTx) - txPool.AddData(txHash, newTx, strCache) + txHash, _ := core.CalculateHash(marshalizer, hasher, newTx) + txPool.AddData(txHash, newTx, strCache) - addedTxs = append(addedTxs, newTx) - } + addedTxs = append(addedTxs, newTx) + } - mb, err := txs.CreateAndProcessMiniBlock(sndShardId, dstShardId, process.MaxItemsInBlock, haveTimeTrue, 10) - assert.Nil(t, err) + mb, err := 
txs.CreateAndProcessMiniBlock(sndShardId, dstShardId, process.MaxItemsInBlock, haveTimeTrue, 10) + assert.Nil(t, err) - assert.Equal(t, len(addedTxs), len(mb.TxHashes)) + assert.Equal(t, len(addedTxs), len(mb.TxHashes)) } func TestTransactions_CreateAndProcessMiniBlockCrossShardGasLimitAddOnly5asSCCall(t *testing.T) { - t.Parallel() + t.Parallel() - txPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache}) - requestTransaction := func(shardID uint32, txHashes [][]byte) {} - hasher := &mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} + txPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache}) + requestTransaction := func(shardID uint32, txHashes [][]byte) {} + hasher := &mock.HasherMock{} + marshalizer := &mock.MarshalizerMock{} - txs, _ := NewTransactionPreprocessor( - txPool, - &mock.ChainStorerMock{}, - hasher, - marshalizer, - &mock.TxProcessorMock{ProcessTransactionCalled: func(transaction *transaction.Transaction, round uint64) error { - return nil - }}, - mock.NewMultiShardsCoordinatorMock(3), - &mock.AccountsStub{}, - requestTransaction, - ) - assert.NotNil(t, txs) + txs, _ := NewTransactionPreprocessor( + txPool, + &mock.ChainStorerMock{}, + hasher, + marshalizer, + &mock.TxProcessorMock{ProcessTransactionCalled: func(transaction *transaction.Transaction, round uint64) error { + return nil + }}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + requestTransaction, + ) + assert.NotNil(t, txs) - sndShardId := uint32(0) - dstShardId := uint32(1) - strCache := process.ShardCacherIdentifier(sndShardId, dstShardId) + sndShardId := uint32(0) + dstShardId := uint32(1) + strCache := process.ShardCacherIdentifier(sndShardId, dstShardId) - numTxsToAdd := 5 - gasLimit := process.MaxGasLimitPerMiniBlock / uint64(numTxsToAdd) + numTxsToAdd := 5 + gasLimit := process.MaxGasLimitPerMiniBlock / uint64(numTxsToAdd) - scAddress, _ := hex.DecodeString("000000000000000000005fed9c659422cd8429ce92f8973bba2a9fb51e0eb3a1") - addedTxs := make([]*transaction.Transaction, 0) - for i := 0; i < 10; i++ { - newTx := &transaction.Transaction{GasLimit: gasLimit, GasPrice: uint64(i), RcvAddr: scAddress} + scAddress, _ := hex.DecodeString("000000000000000000005fed9c659422cd8429ce92f8973bba2a9fb51e0eb3a1") + addedTxs := make([]*transaction.Transaction, 0) + for i := 0; i < 10; i++ { + newTx := &transaction.Transaction{GasLimit: gasLimit, GasPrice: uint64(i), RcvAddr: scAddress} - txHash, _ := core.CalculateHash(marshalizer, hasher, newTx) - txPool.AddData(txHash, newTx, strCache) + txHash, _ := core.CalculateHash(marshalizer, hasher, newTx) + txPool.AddData(txHash, newTx, strCache) - addedTxs = append(addedTxs, newTx) - } + addedTxs = append(addedTxs, newTx) + } - mb, err := txs.CreateAndProcessMiniBlock(sndShardId, dstShardId, process.MaxItemsInBlock, haveTimeTrue, 10) - assert.Nil(t, err) + mb, err := txs.CreateAndProcessMiniBlock(sndShardId, dstShardId, process.MaxItemsInBlock, haveTimeTrue, 10) + assert.Nil(t, err) - assert.Equal(t, numTxsToAdd, len(mb.TxHashes)) + assert.Equal(t, numTxsToAdd, len(mb.TxHashes)) } //------- SortTxByNonce @@ -714,167 +714,167 @@ var r *rand.Rand var mutex sync.Mutex func init() { - r = rand.New(rand.NewSource(time.Now().UnixNano())) + r = rand.New(rand.NewSource(time.Now().UnixNano())) } func TestSortTxByNonce_NilCacherShouldErr(t *testing.T) { - t.Parallel() - transactions, txHashes, err := SortTxByNonce(nil) - assert.Nil(t, transactions) - assert.Nil(t, 
txHashes) - assert.Equal(t, process.ErrNilCacher, err) + t.Parallel() + transactions, txHashes, err := SortTxByNonce(nil) + assert.Nil(t, transactions) + assert.Nil(t, txHashes) + assert.Equal(t, process.ErrNilCacher, err) } func TestSortTxByNonce_EmptyCacherShouldReturnEmpty(t *testing.T) { - t.Parallel() - cacher, _ := storageUnit.NewCache(storageUnit.LRUCache, 100, 1) - transactions, txHashes, err := SortTxByNonce(cacher) - assert.Equal(t, 0, len(transactions)) - assert.Equal(t, 0, len(txHashes)) - assert.Nil(t, err) + t.Parallel() + cacher, _ := storageUnit.NewCache(storageUnit.LRUCache, 100, 1) + transactions, txHashes, err := SortTxByNonce(cacher) + assert.Equal(t, 0, len(transactions)) + assert.Equal(t, 0, len(txHashes)) + assert.Nil(t, err) } func TestSortTxByNonce_OneTxShouldWork(t *testing.T) { - t.Parallel() - cacher, _ := storageUnit.NewCache(storageUnit.LRUCache, 100, 1) - hash, tx := createRandTx(r) - cacher.HasOrAdd(hash, tx) - transactions, txHashes, err := SortTxByNonce(cacher) - assert.Equal(t, 1, len(transactions)) - assert.Equal(t, 1, len(txHashes)) - assert.Nil(t, err) - assert.True(t, hashInSlice(hash, txHashes)) - assert.True(t, txInSlice(tx, transactions)) + t.Parallel() + cacher, _ := storageUnit.NewCache(storageUnit.LRUCache, 100, 1) + hash, tx := createRandTx(r) + cacher.HasOrAdd(hash, tx) + transactions, txHashes, err := SortTxByNonce(cacher) + assert.Equal(t, 1, len(transactions)) + assert.Equal(t, 1, len(txHashes)) + assert.Nil(t, err) + assert.True(t, hashInSlice(hash, txHashes)) + assert.True(t, txInSlice(tx, transactions)) } func createRandTx(rand *rand.Rand) ([]byte, *transaction.Transaction) { - mutex.Lock() - nonce := rand.Uint64() - mutex.Unlock() - tx := &transaction.Transaction{ - Nonce: nonce, - } - marshalizer := &mock.MarshalizerMock{} - buffTx, _ := marshalizer.Marshal(tx) - hash := mock.HasherMock{}.Compute(string(buffTx)) - return hash, tx + mutex.Lock() + nonce := rand.Uint64() + mutex.Unlock() + tx := &transaction.Transaction{ + Nonce: nonce, + } + marshalizer := &mock.MarshalizerMock{} + buffTx, _ := marshalizer.Marshal(tx) + hash := mock.HasherMock{}.Compute(string(buffTx)) + return hash, tx } func hashInSlice(hash []byte, hashes [][]byte) bool { - for _, h := range hashes { - if bytes.Equal(h, hash) { - return true - } - } - return false + for _, h := range hashes { + if bytes.Equal(h, hash) { + return true + } + } + return false } func txInSlice(tx *transaction.Transaction, transactions []*transaction.Transaction) bool { - for _, t := range transactions { - if reflect.DeepEqual(tx, t) { - return true - } - } - return false + for _, t := range transactions { + if reflect.DeepEqual(tx, t) { + return true + } + } + return false } func TestSortTxByNonce_MoreTransactionsShouldNotErr(t *testing.T) { - t.Parallel() - cache, _, _ := genCacherTransactionsHashes(100) - _, _, err := SortTxByNonce(cache) - assert.Nil(t, err) + t.Parallel() + cache, _, _ := genCacherTransactionsHashes(100) + _, _, err := SortTxByNonce(cache) + assert.Nil(t, err) } func TestSortTxByNonce_MoreTransactionsShouldRetSameSize(t *testing.T) { - t.Parallel() - cache, genTransactions, _ := genCacherTransactionsHashes(100) - transactions, txHashes, _ := SortTxByNonce(cache) - assert.Equal(t, len(genTransactions), len(transactions)) - assert.Equal(t, len(genTransactions), len(txHashes)) + t.Parallel() + cache, genTransactions, _ := genCacherTransactionsHashes(100) + transactions, txHashes, _ := SortTxByNonce(cache) + assert.Equal(t, len(genTransactions), len(transactions)) + 
assert.Equal(t, len(genTransactions), len(txHashes)) } func TestSortTxByNonce_MoreTransactionsShouldContainSameElements(t *testing.T) { - t.Parallel() - cache, genTransactions, genHashes := genCacherTransactionsHashes(100) - transactions, txHashes, _ := SortTxByNonce(cache) - for i := 0; i < len(genTransactions); i++ { - assert.True(t, hashInSlice(genHashes[i], txHashes)) - assert.True(t, txInSlice(genTransactions[i], transactions)) - } + t.Parallel() + cache, genTransactions, genHashes := genCacherTransactionsHashes(100) + transactions, txHashes, _ := SortTxByNonce(cache) + for i := 0; i < len(genTransactions); i++ { + assert.True(t, hashInSlice(genHashes[i], txHashes)) + assert.True(t, txInSlice(genTransactions[i], transactions)) + } } func TestSortTxByNonce_MoreTransactionsShouldContainSortedElements(t *testing.T) { - t.Parallel() - cache, _, _ := genCacherTransactionsHashes(100) - transactions, _, _ := SortTxByNonce(cache) - lastNonce := uint64(0) - for i := 0; i < len(transactions); i++ { - tx := transactions[i] - assert.True(t, lastNonce <= tx.Nonce) - fmt.Println(tx.Nonce) - lastNonce = tx.Nonce - } + t.Parallel() + cache, _, _ := genCacherTransactionsHashes(100) + transactions, _, _ := SortTxByNonce(cache) + lastNonce := uint64(0) + for i := 0; i < len(transactions); i++ { + tx := transactions[i] + assert.True(t, lastNonce <= tx.Nonce) + fmt.Println(tx.Nonce) + lastNonce = tx.Nonce + } } func TestSortTxByNonce_TransactionsWithSameNonceShouldGetSorted(t *testing.T) { - t.Parallel() - transactions := []*transaction.Transaction{ - {Nonce: 1, Signature: []byte("sig1")}, - {Nonce: 2, Signature: []byte("sig2")}, - {Nonce: 1, Signature: []byte("sig3")}, - {Nonce: 2, Signature: []byte("sig4")}, - {Nonce: 3, Signature: []byte("sig5")}, - } - cache, _ := storageUnit.NewCache(storageUnit.LRUCache, uint32(len(transactions)), 1) - for _, tx := range transactions { - marshalizer := &mock.MarshalizerMock{} - buffTx, _ := marshalizer.Marshal(tx) - hash := mock.HasherMock{}.Compute(string(buffTx)) - - cache.Put(hash, tx) - } - sortedTxs, _, _ := SortTxByNonce(cache) - lastNonce := uint64(0) - for i := 0; i < len(sortedTxs); i++ { - tx := sortedTxs[i] - assert.True(t, lastNonce <= tx.Nonce) - fmt.Printf("tx.Nonce: %d, tx.Sig: %s\n", tx.Nonce, tx.Signature) - lastNonce = tx.Nonce - } - assert.Equal(t, len(sortedTxs), len(transactions)) - //test if one transaction from transactions might not be in sortedTx - for _, tx := range transactions { - found := false - for _, stx := range sortedTxs { - if reflect.DeepEqual(tx, stx) { - found = true - break - } - } - if !found { - assert.Fail(t, "Not found tx in sorted slice for sig: "+string(tx.Signature)) - } - } + t.Parallel() + transactions := []*transaction.Transaction{ + {Nonce: 1, Signature: []byte("sig1")}, + {Nonce: 2, Signature: []byte("sig2")}, + {Nonce: 1, Signature: []byte("sig3")}, + {Nonce: 2, Signature: []byte("sig4")}, + {Nonce: 3, Signature: []byte("sig5")}, + } + cache, _ := storageUnit.NewCache(storageUnit.LRUCache, uint32(len(transactions)), 1) + for _, tx := range transactions { + marshalizer := &mock.MarshalizerMock{} + buffTx, _ := marshalizer.Marshal(tx) + hash := mock.HasherMock{}.Compute(string(buffTx)) + + cache.Put(hash, tx) + } + sortedTxs, _, _ := SortTxByNonce(cache) + lastNonce := uint64(0) + for i := 0; i < len(sortedTxs); i++ { + tx := sortedTxs[i] + assert.True(t, lastNonce <= tx.Nonce) + fmt.Printf("tx.Nonce: %d, tx.Sig: %s\n", tx.Nonce, tx.Signature) + lastNonce = tx.Nonce + } + assert.Equal(t, len(sortedTxs), 
len(transactions)) + //test if one transaction from transactions might not be in sortedTx + for _, tx := range transactions { + found := false + for _, stx := range sortedTxs { + if reflect.DeepEqual(tx, stx) { + found = true + break + } + } + if !found { + assert.Fail(t, "Not found tx in sorted slice for sig: "+string(tx.Signature)) + } + } } func genCacherTransactionsHashes(noOfTx int) (storage.Cacher, []*transaction.Transaction, [][]byte) { - cacher, _ := storageUnit.NewCache(storageUnit.LRUCache, uint32(noOfTx), 1) - genHashes := make([][]byte, 0) - genTransactions := make([]*transaction.Transaction, 0) - for i := 0; i < noOfTx; i++ { - hash, tx := createRandTx(r) - cacher.HasOrAdd(hash, tx) + cacher, _ := storageUnit.NewCache(storageUnit.LRUCache, uint32(noOfTx), 1) + genHashes := make([][]byte, 0) + genTransactions := make([]*transaction.Transaction, 0) + for i := 0; i < noOfTx; i++ { + hash, tx := createRandTx(r) + cacher.HasOrAdd(hash, tx) - genHashes = append(genHashes, hash) - genTransactions = append(genTransactions, tx) - } - return cacher, genTransactions, genHashes + genHashes = append(genHashes, hash) + genTransactions = append(genTransactions, tx) + } + return cacher, genTransactions, genHashes } func BenchmarkSortTxByNonce1(b *testing.B) { - cache, _, _ := genCacherTransactionsHashes(10000) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, _, _ = SortTxByNonce(cache) - } + cache, _, _ := genCacherTransactionsHashes(10000) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _, _ = SortTxByNonce(cache) + } } diff --git a/process/block/shardblock.go b/process/block/shardblock.go index 55ea7c27d19..69232c5b1a6 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -1,1572 +1,1572 @@ package block import ( - "fmt" - "sort" - "sync" - "time" - - "github.com/ElrondNetwork/elrond-go/core" - "github.com/ElrondNetwork/elrond-go/core/serviceContainer" - "github.com/ElrondNetwork/elrond-go/data" - "github.com/ElrondNetwork/elrond-go/data/block" - "github.com/ElrondNetwork/elrond-go/data/state" - "github.com/ElrondNetwork/elrond-go/data/typeConverters" - "github.com/ElrondNetwork/elrond-go/dataRetriever" - "github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool" - "github.com/ElrondNetwork/elrond-go/hashing" - "github.com/ElrondNetwork/elrond-go/marshal" - "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/process/throttle" - "github.com/ElrondNetwork/elrond-go/sharding" - "github.com/ElrondNetwork/elrond-go/statusHandler" + "fmt" + "sort" + "sync" + "time" + + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/core/serviceContainer" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/data/typeConverters" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/throttle" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/statusHandler" ) // shardProcessor implements shardProcessor interface and actually it tries to execute block type shardProcessor struct { - *baseProcessor - dataPool dataRetriever.PoolsHolder - blocksTracker process.BlocksTracker - metaBlockFinality int - - chRcvAllMetaHdrs chan 
bool - mutUsedMetaHdrsHashes sync.Mutex - usedMetaHdrsHashes map[uint64][][]byte - - mutRequestedMetaHdrsHashes sync.RWMutex - requestedMetaHdrsHashes map[string]bool - currHighestMetaHdrNonce uint64 - allNeededMetaHdrsFound bool - - core serviceContainer.Core - txCoordinator process.TransactionCoordinator - txCounter *transactionCounter + *baseProcessor + dataPool dataRetriever.PoolsHolder + blocksTracker process.BlocksTracker + metaBlockFinality int + + chRcvAllMetaHdrs chan bool + mutUsedMetaHdrsHashes sync.Mutex + usedMetaHdrsHashes map[uint64][][]byte + + mutRequestedMetaHdrsHashes sync.RWMutex + requestedMetaHdrsHashes map[string]bool + currHighestMetaHdrNonce uint64 + allNeededMetaHdrsFound bool + + core serviceContainer.Core + txCoordinator process.TransactionCoordinator + txCounter *transactionCounter } // NewShardProcessor creates a new shardProcessor object func NewShardProcessor( - core serviceContainer.Core, - dataPool dataRetriever.PoolsHolder, - store dataRetriever.StorageService, - hasher hashing.Hasher, - marshalizer marshal.Marshalizer, - accounts state.AccountsAdapter, - shardCoordinator sharding.Coordinator, - nodesCoordinator sharding.NodesCoordinator, - specialAddressHandler process.SpecialAddressHandler, - forkDetector process.ForkDetector, - blocksTracker process.BlocksTracker, - startHeaders map[uint32]data.HeaderHandler, - requestHandler process.RequestHandler, - txCoordinator process.TransactionCoordinator, - uint64Converter typeConverters.Uint64ByteSliceConverter, + core serviceContainer.Core, + dataPool dataRetriever.PoolsHolder, + store dataRetriever.StorageService, + hasher hashing.Hasher, + marshalizer marshal.Marshalizer, + accounts state.AccountsAdapter, + shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, + specialAddressHandler process.SpecialAddressHandler, + forkDetector process.ForkDetector, + blocksTracker process.BlocksTracker, + startHeaders map[uint32]data.HeaderHandler, + requestHandler process.RequestHandler, + txCoordinator process.TransactionCoordinator, + uint64Converter typeConverters.Uint64ByteSliceConverter, ) (*shardProcessor, error) { - err := checkProcessorNilParameters( - accounts, - forkDetector, - hasher, - marshalizer, - store, - shardCoordinator, - nodesCoordinator, - specialAddressHandler, - uint64Converter) - if err != nil { - return nil, err - } - - if dataPool == nil || dataPool.IsInterfaceNil() { - return nil, process.ErrNilDataPoolHolder - } - if blocksTracker == nil || blocksTracker.IsInterfaceNil() { - return nil, process.ErrNilBlocksTracker - } - if requestHandler == nil || requestHandler.IsInterfaceNil() { - return nil, process.ErrNilRequestHandler - } - if txCoordinator == nil || txCoordinator.IsInterfaceNil() { - return nil, process.ErrNilTransactionCoordinator - } - - blockSizeThrottler, err := throttle.NewBlockSizeThrottle() - if err != nil { - return nil, err - } - - base := &baseProcessor{ - accounts: accounts, - blockSizeThrottler: blockSizeThrottler, - forkDetector: forkDetector, - hasher: hasher, - marshalizer: marshalizer, - store: store, - shardCoordinator: shardCoordinator, - nodesCoordinator: nodesCoordinator, - specialAddressHandler: specialAddressHandler, - uint64Converter: uint64Converter, - onRequestHeaderHandlerByNonce: requestHandler.RequestHeaderByNonce, - appStatusHandler: statusHandler.NewNilStatusHandler(), - } - err = base.setLastNotarizedHeadersSlice(startHeaders) - if err != nil { - return nil, err - } - - sp := shardProcessor{ - core: core, - baseProcessor: 
base, - dataPool: dataPool, - blocksTracker: blocksTracker, - txCoordinator: txCoordinator, - txCounter: NewTransactionCounter(), - } - - sp.chRcvAllMetaHdrs = make(chan bool) - - transactionPool := sp.dataPool.Transactions() - if transactionPool == nil { - return nil, process.ErrNilTransactionPool - } - - sp.requestedMetaHdrsHashes = make(map[string]bool) - sp.usedMetaHdrsHashes = make(map[uint64][][]byte) - - metaBlockPool := sp.dataPool.MetaBlocks() - if metaBlockPool == nil { - return nil, process.ErrNilMetaBlockPool - } - metaBlockPool.RegisterHandler(sp.receivedMetaBlock) - sp.onRequestHeaderHandler = requestHandler.RequestHeader - - sp.metaBlockFinality = process.MetaBlockFinality - sp.allNeededMetaHdrsFound = true - - return &sp, nil + err := checkProcessorNilParameters( + accounts, + forkDetector, + hasher, + marshalizer, + store, + shardCoordinator, + nodesCoordinator, + specialAddressHandler, + uint64Converter) + if err != nil { + return nil, err + } + + if dataPool == nil || dataPool.IsInterfaceNil() { + return nil, process.ErrNilDataPoolHolder + } + if blocksTracker == nil || blocksTracker.IsInterfaceNil() { + return nil, process.ErrNilBlocksTracker + } + if requestHandler == nil || requestHandler.IsInterfaceNil() { + return nil, process.ErrNilRequestHandler + } + if txCoordinator == nil || txCoordinator.IsInterfaceNil() { + return nil, process.ErrNilTransactionCoordinator + } + + blockSizeThrottler, err := throttle.NewBlockSizeThrottle() + if err != nil { + return nil, err + } + + base := &baseProcessor{ + accounts: accounts, + blockSizeThrottler: blockSizeThrottler, + forkDetector: forkDetector, + hasher: hasher, + marshalizer: marshalizer, + store: store, + shardCoordinator: shardCoordinator, + nodesCoordinator: nodesCoordinator, + specialAddressHandler: specialAddressHandler, + uint64Converter: uint64Converter, + onRequestHeaderHandlerByNonce: requestHandler.RequestHeaderByNonce, + appStatusHandler: statusHandler.NewNilStatusHandler(), + } + err = base.setLastNotarizedHeadersSlice(startHeaders) + if err != nil { + return nil, err + } + + sp := shardProcessor{ + core: core, + baseProcessor: base, + dataPool: dataPool, + blocksTracker: blocksTracker, + txCoordinator: txCoordinator, + txCounter: NewTransactionCounter(), + } + + sp.chRcvAllMetaHdrs = make(chan bool) + + transactionPool := sp.dataPool.Transactions() + if transactionPool == nil { + return nil, process.ErrNilTransactionPool + } + + sp.requestedMetaHdrsHashes = make(map[string]bool) + sp.usedMetaHdrsHashes = make(map[uint64][][]byte) + + metaBlockPool := sp.dataPool.MetaBlocks() + if metaBlockPool == nil { + return nil, process.ErrNilMetaBlockPool + } + metaBlockPool.RegisterHandler(sp.receivedMetaBlock) + sp.onRequestHeaderHandler = requestHandler.RequestHeader + + sp.metaBlockFinality = process.MetaBlockFinality + sp.allNeededMetaHdrsFound = true + + return &sp, nil } // ProcessBlock processes a block. 
It returns nil if all ok or the specific error func (sp *shardProcessor) ProcessBlock( - chainHandler data.ChainHandler, - headerHandler data.HeaderHandler, - bodyHandler data.BodyHandler, - haveTime func() time.Duration, + chainHandler data.ChainHandler, + headerHandler data.HeaderHandler, + bodyHandler data.BodyHandler, + haveTime func() time.Duration, ) error { - if haveTime == nil { - return process.ErrNilHaveTimeHandler - } - - err := sp.checkBlockValidity(chainHandler, headerHandler, bodyHandler) - if err != nil { - return err - } - - log.Debug(fmt.Sprintf("started processing block with round %d and nonce %d\n", - headerHandler.GetRound(), - headerHandler.GetNonce())) - - header, ok := headerHandler.(*block.Header) - if !ok { - return process.ErrWrongTypeAssertion - } - - body, ok := bodyHandler.(block.Body) - if !ok { - return process.ErrWrongTypeAssertion - } - - mbLen := len(body) - totalTxCount := 0 - for i := 0; i < mbLen; i++ { - totalTxCount += len(body[i].TxHashes) - } - sp.appStatusHandler.SetUInt64Value(core.MetricNumTxInBlock, uint64(totalTxCount)) - sp.appStatusHandler.SetUInt64Value(core.MetricNumMiniBlocks, uint64(mbLen)) - - err = sp.checkHeaderBodyCorrelation(header, body) - if err != nil { - return err - } - - numTxWithDst := sp.txCounter.getNumTxsFromPool(header.ShardId, sp.dataPool, sp.shardCoordinator.NumberOfShards()) - - sp.appStatusHandler.SetUInt64Value(core.MetricTxPoolLoad, uint64(numTxWithDst)) - - log.Info(fmt.Sprintf("Total txs in pool: %d\n", numTxWithDst)) - - // give transaction coordinator the consensus group validators addresses where to send the rewards. - consensusAddresses, err := sp.nodesCoordinator.GetValidatorsRewardsAddresses( - headerHandler.GetPrevRandSeed(), - headerHandler.GetRound(), - sp.shardCoordinator.SelfId(), - ) - if err != nil { - return err - } - - sp.SetConsensusRewardAddresses(consensusAddresses, headerHandler.GetRound()) - sp.txCoordinator.CreateBlockStarted() - sp.txCoordinator.RequestBlockTransactions(body) - requestedMetaHdrs, requestedFinalMetaHdrs := sp.requestMetaHeaders(header) - - if haveTime() < 0 { - return process.ErrTimeIsOut - } - - err = sp.txCoordinator.IsDataPreparedForProcessing(haveTime) - if err != nil { - return err - } - - if requestedMetaHdrs > 0 || requestedFinalMetaHdrs > 0 { - log.Info(fmt.Sprintf("requested %d missing meta headers and %d final meta headers\n", requestedMetaHdrs, requestedFinalMetaHdrs)) - err = sp.waitForMetaHdrHashes(haveTime()) - sp.mutRequestedMetaHdrsHashes.Lock() - sp.allNeededMetaHdrsFound = true - unreceivedMetaHdrs := len(sp.requestedMetaHdrsHashes) - sp.mutRequestedMetaHdrsHashes.Unlock() - log.Info(fmt.Sprintf("received %d missing meta headers\n", int(requestedMetaHdrs)-unreceivedMetaHdrs)) - if err != nil { - return err - } - } - - if sp.accounts.JournalLen() != 0 { - return process.ErrAccountStateDirty - } - - defer func() { - go sp.checkAndRequestIfMetaHeadersMissing(header.Round) - }() - - err = sp.checkMetaHeadersValidityAndFinality(header) - if err != nil { - return err - } - - err = sp.verifyCrossShardMiniBlockDstMe(header) - if err != nil { - return err - } - - defer func() { - if err != nil { - sp.RevertAccountState() - } - }() - - err = sp.txCoordinator.ProcessBlockTransaction(body, header.Round, haveTime) - if err != nil { - return err - } - - if !sp.verifyStateRoot(header.GetRootHash()) { - err = process.ErrRootStateMissmatch - return err - } - - err = sp.txCoordinator.VerifyCreatedBlockTransactions(body) - if err != nil { - return err - } - - return nil + if 
haveTime == nil { + return process.ErrNilHaveTimeHandler + } + + err := sp.checkBlockValidity(chainHandler, headerHandler, bodyHandler) + if err != nil { + return err + } + + log.Debug(fmt.Sprintf("started processing block with round %d and nonce %d\n", + headerHandler.GetRound(), + headerHandler.GetNonce())) + + header, ok := headerHandler.(*block.Header) + if !ok { + return process.ErrWrongTypeAssertion + } + + body, ok := bodyHandler.(block.Body) + if !ok { + return process.ErrWrongTypeAssertion + } + + mbLen := len(body) + totalTxCount := 0 + for i := 0; i < mbLen; i++ { + totalTxCount += len(body[i].TxHashes) + } + sp.appStatusHandler.SetUInt64Value(core.MetricNumTxInBlock, uint64(totalTxCount)) + sp.appStatusHandler.SetUInt64Value(core.MetricNumMiniBlocks, uint64(mbLen)) + + err = sp.checkHeaderBodyCorrelation(header, body) + if err != nil { + return err + } + + numTxWithDst := sp.txCounter.getNumTxsFromPool(header.ShardId, sp.dataPool, sp.shardCoordinator.NumberOfShards()) + + sp.appStatusHandler.SetUInt64Value(core.MetricTxPoolLoad, uint64(numTxWithDst)) + + log.Info(fmt.Sprintf("Total txs in pool: %d\n", numTxWithDst)) + + // give transaction coordinator the consensus group validators addresses where to send the rewards. + consensusAddresses, err := sp.nodesCoordinator.GetValidatorsRewardsAddresses( + headerHandler.GetPrevRandSeed(), + headerHandler.GetRound(), + sp.shardCoordinator.SelfId(), + ) + if err != nil { + return err + } + + sp.SetConsensusRewardAddresses(consensusAddresses, headerHandler.GetRound()) + sp.txCoordinator.CreateBlockStarted() + sp.txCoordinator.RequestBlockTransactions(body) + requestedMetaHdrs, requestedFinalMetaHdrs := sp.requestMetaHeaders(header) + + if haveTime() < 0 { + return process.ErrTimeIsOut + } + + err = sp.txCoordinator.IsDataPreparedForProcessing(haveTime) + if err != nil { + return err + } + + if requestedMetaHdrs > 0 || requestedFinalMetaHdrs > 0 { + log.Info(fmt.Sprintf("requested %d missing meta headers and %d final meta headers\n", requestedMetaHdrs, requestedFinalMetaHdrs)) + err = sp.waitForMetaHdrHashes(haveTime()) + sp.mutRequestedMetaHdrsHashes.Lock() + sp.allNeededMetaHdrsFound = true + unreceivedMetaHdrs := len(sp.requestedMetaHdrsHashes) + sp.mutRequestedMetaHdrsHashes.Unlock() + log.Info(fmt.Sprintf("received %d missing meta headers\n", int(requestedMetaHdrs)-unreceivedMetaHdrs)) + if err != nil { + return err + } + } + + if sp.accounts.JournalLen() != 0 { + return process.ErrAccountStateDirty + } + + defer func() { + go sp.checkAndRequestIfMetaHeadersMissing(header.Round) + }() + + err = sp.checkMetaHeadersValidityAndFinality(header) + if err != nil { + return err + } + + err = sp.verifyCrossShardMiniBlockDstMe(header) + if err != nil { + return err + } + + defer func() { + if err != nil { + sp.RevertAccountState() + } + }() + + err = sp.txCoordinator.ProcessBlockTransaction(body, header.Round, haveTime) + if err != nil { + return err + } + + if !sp.verifyStateRoot(header.GetRootHash()) { + err = process.ErrRootStateMissmatch + return err + } + + err = sp.txCoordinator.VerifyCreatedBlockTransactions(body) + if err != nil { + return err + } + + return nil } // SetConsensusRewardAddresses - sets the reward addresses for the current consensus group func (sp *shardProcessor) SetConsensusRewardAddresses(consensusRewardAddresses []string, round uint64) { - sp.specialAddressHandler.SetConsensusData(consensusRewardAddresses, round, 0) + sp.specialAddressHandler.SetConsensusData(consensusRewardAddresses, round, 0) } // 
checkMetaHeadersValidity - checks if listed metaheaders are valid as construction func (sp *shardProcessor) checkMetaHeadersValidityAndFinality(header *block.Header) error { - metablockCache := sp.dataPool.MetaBlocks() - if metablockCache == nil { - return process.ErrNilMetaBlockPool - } - - tmpNotedHdr, err := sp.getLastNotarizedHdr(sharding.MetachainShardId) - if err != nil { - return err - } - - currAddedMetaHdrs := make([]*block.MetaBlock, 0) - for _, metaHash := range header.MetaBlockHashes { - value, ok := metablockCache.Peek(metaHash) - if !ok { - return process.ErrNilMetaBlockHeader - } - - metaHdr, ok := value.(*block.MetaBlock) - if !ok { - return process.ErrWrongTypeAssertion - } - - currAddedMetaHdrs = append(currAddedMetaHdrs, metaHdr) - } - - if len(currAddedMetaHdrs) == 0 { - return nil - } - - sort.Slice(currAddedMetaHdrs, func(i, j int) bool { - return currAddedMetaHdrs[i].Nonce < currAddedMetaHdrs[j].Nonce - }) - - for _, metaHdr := range currAddedMetaHdrs { - err = sp.isHdrConstructionValid(metaHdr, tmpNotedHdr) - if err != nil { - return err - } - tmpNotedHdr = metaHdr - } - - err = sp.checkMetaHdrFinality(tmpNotedHdr, header.Round) - if err != nil { - return err - } - - return nil + metablockCache := sp.dataPool.MetaBlocks() + if metablockCache == nil { + return process.ErrNilMetaBlockPool + } + + tmpNotedHdr, err := sp.getLastNotarizedHdr(sharding.MetachainShardId) + if err != nil { + return err + } + + currAddedMetaHdrs := make([]*block.MetaBlock, 0) + for _, metaHash := range header.MetaBlockHashes { + value, ok := metablockCache.Peek(metaHash) + if !ok { + return process.ErrNilMetaBlockHeader + } + + metaHdr, ok := value.(*block.MetaBlock) + if !ok { + return process.ErrWrongTypeAssertion + } + + currAddedMetaHdrs = append(currAddedMetaHdrs, metaHdr) + } + + if len(currAddedMetaHdrs) == 0 { + return nil + } + + sort.Slice(currAddedMetaHdrs, func(i, j int) bool { + return currAddedMetaHdrs[i].Nonce < currAddedMetaHdrs[j].Nonce + }) + + for _, metaHdr := range currAddedMetaHdrs { + err = sp.isHdrConstructionValid(metaHdr, tmpNotedHdr) + if err != nil { + return err + } + tmpNotedHdr = metaHdr + } + + err = sp.checkMetaHdrFinality(tmpNotedHdr, header.Round) + if err != nil { + return err + } + + return nil } // check if shard headers are final by checking if newer headers were constructed upon them func (sp *shardProcessor) checkMetaHdrFinality(header data.HeaderHandler, round uint64) error { - if header == nil || header.IsInterfaceNil() { - return process.ErrNilBlockHeader - } - - sortedMetaHdrs, err := sp.getOrderedMetaBlocks(round) - if err != nil { - return err - } - - lastVerifiedHdr := header - // verify if there are "K" block after current to make this one final - nextBlocksVerified := 0 - for _, tmpHdr := range sortedMetaHdrs { - if nextBlocksVerified >= sp.metaBlockFinality { - break - } - - // found a header with the next nonce - if tmpHdr.hdr.GetNonce() == lastVerifiedHdr.GetNonce()+1 { - err = sp.isHdrConstructionValid(tmpHdr.hdr, lastVerifiedHdr) - if err != nil { - log.Debug(err.Error()) - continue - } - - lastVerifiedHdr = tmpHdr.hdr - nextBlocksVerified += 1 - } - } - - if nextBlocksVerified < sp.metaBlockFinality { - go sp.onRequestHeaderHandlerByNonce(lastVerifiedHdr.GetShardID(), lastVerifiedHdr.GetNonce()+1) - return process.ErrHeaderNotFinal - } - - return nil + if header == nil || header.IsInterfaceNil() { + return process.ErrNilBlockHeader + } + + sortedMetaHdrs, err := sp.getOrderedMetaBlocks(round) + if err != nil { + return err + } + + 
lastVerifiedHdr := header + // verify if there are "K" block after current to make this one final + nextBlocksVerified := 0 + for _, tmpHdr := range sortedMetaHdrs { + if nextBlocksVerified >= sp.metaBlockFinality { + break + } + + // found a header with the next nonce + if tmpHdr.hdr.GetNonce() == lastVerifiedHdr.GetNonce()+1 { + err = sp.isHdrConstructionValid(tmpHdr.hdr, lastVerifiedHdr) + if err != nil { + log.Debug(err.Error()) + continue + } + + lastVerifiedHdr = tmpHdr.hdr + nextBlocksVerified += 1 + } + } + + if nextBlocksVerified < sp.metaBlockFinality { + go sp.onRequestHeaderHandlerByNonce(lastVerifiedHdr.GetShardID(), lastVerifiedHdr.GetNonce()+1) + return process.ErrHeaderNotFinal + } + + return nil } // check if header has the same miniblocks as presented in body func (sp *shardProcessor) checkHeaderBodyCorrelation(hdr *block.Header, body block.Body) error { - mbHashesFromHdr := make(map[string]*block.MiniBlockHeader) - for i := 0; i < len(hdr.MiniBlockHeaders); i++ { - mbHashesFromHdr[string(hdr.MiniBlockHeaders[i].Hash)] = &hdr.MiniBlockHeaders[i] - } - - if len(hdr.MiniBlockHeaders) != len(body) { - return process.ErrHeaderBodyMismatch - } - - for i := 0; i < len(body); i++ { - miniBlock := body[i] - - mbBytes, err := sp.marshalizer.Marshal(miniBlock) - if err != nil { - return err - } - mbHash := sp.hasher.Compute(string(mbBytes)) - - mbHdr, ok := mbHashesFromHdr[string(mbHash)] - if !ok { - return process.ErrHeaderBodyMismatch - } - - if mbHdr.TxCount != uint32(len(miniBlock.TxHashes)) { - return process.ErrHeaderBodyMismatch - } - - if mbHdr.ReceiverShardID != miniBlock.ReceiverShardID { - return process.ErrHeaderBodyMismatch - } - - if mbHdr.SenderShardID != miniBlock.SenderShardID { - return process.ErrHeaderBodyMismatch - } - } - - return nil + mbHashesFromHdr := make(map[string]*block.MiniBlockHeader) + for i := 0; i < len(hdr.MiniBlockHeaders); i++ { + mbHashesFromHdr[string(hdr.MiniBlockHeaders[i].Hash)] = &hdr.MiniBlockHeaders[i] + } + + if len(hdr.MiniBlockHeaders) != len(body) { + return process.ErrHeaderBodyMismatch + } + + for i := 0; i < len(body); i++ { + miniBlock := body[i] + + mbBytes, err := sp.marshalizer.Marshal(miniBlock) + if err != nil { + return err + } + mbHash := sp.hasher.Compute(string(mbBytes)) + + mbHdr, ok := mbHashesFromHdr[string(mbHash)] + if !ok { + return process.ErrHeaderBodyMismatch + } + + if mbHdr.TxCount != uint32(len(miniBlock.TxHashes)) { + return process.ErrHeaderBodyMismatch + } + + if mbHdr.ReceiverShardID != miniBlock.ReceiverShardID { + return process.ErrHeaderBodyMismatch + } + + if mbHdr.SenderShardID != miniBlock.SenderShardID { + return process.ErrHeaderBodyMismatch + } + } + + return nil } func (sp *shardProcessor) checkAndRequestIfMetaHeadersMissing(round uint64) { - orderedMetaBlocks, err := sp.getOrderedMetaBlocks(round) - if err != nil { - log.Debug(err.Error()) - return - } - - sortedHdrs := make([]data.HeaderHandler, 0) - for i := 0; i < len(orderedMetaBlocks); i++ { - hdr, ok := orderedMetaBlocks[i].hdr.(*block.MetaBlock) - if !ok { - continue - } - sortedHdrs = append(sortedHdrs, hdr) - } - - err = sp.requestHeadersIfMissing(sortedHdrs, sharding.MetachainShardId, round) - if err != nil { - log.Info(err.Error()) - } - - return + orderedMetaBlocks, err := sp.getOrderedMetaBlocks(round) + if err != nil { + log.Debug(err.Error()) + return + } + + sortedHdrs := make([]data.HeaderHandler, 0) + for i := 0; i < len(orderedMetaBlocks); i++ { + hdr, ok := orderedMetaBlocks[i].hdr.(*block.MetaBlock) + if !ok { + continue 
+ } + sortedHdrs = append(sortedHdrs, hdr) + } + + err = sp.requestHeadersIfMissing(sortedHdrs, sharding.MetachainShardId, round) + if err != nil { + log.Info(err.Error()) + } + + return } func (sp *shardProcessor) indexBlockIfNeeded( - body data.BodyHandler, - header data.HeaderHandler) { - if sp.core == nil || sp.core.Indexer() == nil { - return - } - - txPool := sp.txCoordinator.GetAllCurrentUsedTxs(block.TxBlock) - scPool := sp.txCoordinator.GetAllCurrentUsedTxs(block.SmartContractResultBlock) - rewardPool := sp.txCoordinator.GetAllCurrentUsedTxs(block.RewardsBlock) - - for hash, tx := range scPool { - txPool[hash] = tx - } - for hash, tx := range rewardPool { - txPool[hash] = tx - } - - go sp.core.Indexer().SaveBlock(body, header, txPool) + body data.BodyHandler, + header data.HeaderHandler) { + if sp.core == nil || sp.core.Indexer() == nil { + return + } + + txPool := sp.txCoordinator.GetAllCurrentUsedTxs(block.TxBlock) + scPool := sp.txCoordinator.GetAllCurrentUsedTxs(block.SmartContractResultBlock) + rewardPool := sp.txCoordinator.GetAllCurrentUsedTxs(block.RewardsBlock) + + for hash, tx := range scPool { + txPool[hash] = tx + } + for hash, tx := range rewardPool { + txPool[hash] = tx + } + + go sp.core.Indexer().SaveBlock(body, header, txPool) } // RestoreBlockIntoPools restores the TxBlock and MetaBlock into associated pools func (sp *shardProcessor) RestoreBlockIntoPools(headerHandler data.HeaderHandler, bodyHandler data.BodyHandler) error { - if headerHandler == nil || headerHandler.IsInterfaceNil() { - return process.ErrNilBlockHeader - } - if bodyHandler == nil || bodyHandler.IsInterfaceNil() { - return process.ErrNilTxBlockBody - } - - body, ok := bodyHandler.(block.Body) - if !ok { - return process.ErrWrongTypeAssertion - } - - header, ok := headerHandler.(*block.Header) - if !ok { - return process.ErrWrongTypeAssertion - } - - restoredTxNr, _, err := sp.txCoordinator.RestoreBlockDataFromStorage(body) - go sp.txCounter.substractRestoredTxs(restoredTxNr) - if err != nil { - return err - } - - miniBlockHashes := header.MapMiniBlockHashesToShards() - err = sp.restoreMetaBlockIntoPool(miniBlockHashes, header.MetaBlockHashes) - if err != nil { - return err - } - - sp.removeLastNotarized() - - return nil + if headerHandler == nil || headerHandler.IsInterfaceNil() { + return process.ErrNilBlockHeader + } + if bodyHandler == nil || bodyHandler.IsInterfaceNil() { + return process.ErrNilTxBlockBody + } + + body, ok := bodyHandler.(block.Body) + if !ok { + return process.ErrWrongTypeAssertion + } + + header, ok := headerHandler.(*block.Header) + if !ok { + return process.ErrWrongTypeAssertion + } + + restoredTxNr, _, err := sp.txCoordinator.RestoreBlockDataFromStorage(body) + go sp.txCounter.substractRestoredTxs(restoredTxNr) + if err != nil { + return err + } + + miniBlockHashes := header.MapMiniBlockHashesToShards() + err = sp.restoreMetaBlockIntoPool(miniBlockHashes, header.MetaBlockHashes) + if err != nil { + return err + } + + sp.removeLastNotarized() + + return nil } func (sp *shardProcessor) restoreMetaBlockIntoPool(miniBlockHashes map[string]uint32, metaBlockHashes [][]byte) error { - metaBlockPool := sp.dataPool.MetaBlocks() - if metaBlockPool == nil { - return process.ErrNilMetaBlockPool - } - - metaHeaderNoncesPool := sp.dataPool.HeadersNonces() - if metaHeaderNoncesPool == nil { - return process.ErrNilMetaHeadersNoncesDataPool - } - - for _, metaBlockHash := range metaBlockHashes { - buff, err := sp.store.Get(dataRetriever.MetaBlockUnit, metaBlockHash) - if err != nil { - 
continue - } - - metaBlock := block.MetaBlock{} - err = sp.marshalizer.Unmarshal(&metaBlock, buff) - if err != nil { - log.Error(err.Error()) - continue - } - - metaBlockPool.Put(metaBlockHash, &metaBlock) - syncMap := &dataPool.ShardIdHashSyncMap{} - syncMap.Store(metaBlock.GetShardID(), metaBlockHash) - metaHeaderNoncesPool.Merge(metaBlock.Nonce, syncMap) - - err = sp.store.GetStorer(dataRetriever.MetaBlockUnit).Remove(metaBlockHash) - if err != nil { - log.Error(err.Error()) - } - - nonceToByteSlice := sp.uint64Converter.ToByteSlice(metaBlock.Nonce) - err = sp.store.GetStorer(dataRetriever.MetaHdrNonceHashDataUnit).Remove(nonceToByteSlice) - if err != nil { - log.Error(err.Error()) - } - } - - for _, metaBlockKey := range metaBlockPool.Keys() { - if len(miniBlockHashes) == 0 { - break - } - metaBlock, ok := metaBlockPool.Peek(metaBlockKey) - if !ok { - log.Error(process.ErrNilMetaBlockHeader.Error()) - continue - } - - hdr, ok := metaBlock.(data.HeaderHandler) - if !ok { - metaBlockPool.Remove(metaBlockKey) - log.Error(process.ErrWrongTypeAssertion.Error()) - continue - } - - crossMiniBlockHashes := hdr.GetMiniBlockHeadersWithDst(sp.shardCoordinator.SelfId()) - for key := range miniBlockHashes { - _, ok := crossMiniBlockHashes[key] - if !ok { - continue - } - - hdr.SetMiniBlockProcessed([]byte(key), false) - } - } - - return nil + metaBlockPool := sp.dataPool.MetaBlocks() + if metaBlockPool == nil { + return process.ErrNilMetaBlockPool + } + + metaHeaderNoncesPool := sp.dataPool.HeadersNonces() + if metaHeaderNoncesPool == nil { + return process.ErrNilMetaHeadersNoncesDataPool + } + + for _, metaBlockHash := range metaBlockHashes { + buff, err := sp.store.Get(dataRetriever.MetaBlockUnit, metaBlockHash) + if err != nil { + continue + } + + metaBlock := block.MetaBlock{} + err = sp.marshalizer.Unmarshal(&metaBlock, buff) + if err != nil { + log.Error(err.Error()) + continue + } + + metaBlockPool.Put(metaBlockHash, &metaBlock) + syncMap := &dataPool.ShardIdHashSyncMap{} + syncMap.Store(metaBlock.GetShardID(), metaBlockHash) + metaHeaderNoncesPool.Merge(metaBlock.Nonce, syncMap) + + err = sp.store.GetStorer(dataRetriever.MetaBlockUnit).Remove(metaBlockHash) + if err != nil { + log.Error(err.Error()) + } + + nonceToByteSlice := sp.uint64Converter.ToByteSlice(metaBlock.Nonce) + err = sp.store.GetStorer(dataRetriever.MetaHdrNonceHashDataUnit).Remove(nonceToByteSlice) + if err != nil { + log.Error(err.Error()) + } + } + + for _, metaBlockKey := range metaBlockPool.Keys() { + if len(miniBlockHashes) == 0 { + break + } + metaBlock, ok := metaBlockPool.Peek(metaBlockKey) + if !ok { + log.Error(process.ErrNilMetaBlockHeader.Error()) + continue + } + + hdr, ok := metaBlock.(data.HeaderHandler) + if !ok { + metaBlockPool.Remove(metaBlockKey) + log.Error(process.ErrWrongTypeAssertion.Error()) + continue + } + + crossMiniBlockHashes := hdr.GetMiniBlockHeadersWithDst(sp.shardCoordinator.SelfId()) + for key := range miniBlockHashes { + _, ok := crossMiniBlockHashes[key] + if !ok { + continue + } + + hdr.SetMiniBlockProcessed([]byte(key), false) + } + } + + return nil } // CreateBlockBody creates a a list of miniblocks by filling them with transactions out of the transactions pools // as long as the transactions limit for the block has not been reached and there is still time to add transactions func (sp *shardProcessor) CreateBlockBody(round uint64, haveTime func() bool) (data.BodyHandler, error) { - log.Debug(fmt.Sprintf("started creating block body in round %d\n", round)) + 
log.Debug(fmt.Sprintf("started creating block body in round %d\n", round)) - sp.txCoordinator.CreateBlockStarted() - sp.blockSizeThrottler.ComputeMaxItems() + sp.txCoordinator.CreateBlockStarted() + sp.blockSizeThrottler.ComputeMaxItems() - miniBlocks, err := sp.createMiniBlocks(sp.shardCoordinator.NumberOfShards(), sp.blockSizeThrottler.MaxItemsToAdd(), round, haveTime) - if err != nil { - return nil, err - } + miniBlocks, err := sp.createMiniBlocks(sp.shardCoordinator.NumberOfShards(), sp.blockSizeThrottler.MaxItemsToAdd(), round, haveTime) + if err != nil { + return nil, err + } - return miniBlocks, nil + return miniBlocks, nil } // CommitBlock commits the block in the blockchain if everything was checked successfully func (sp *shardProcessor) CommitBlock( - chainHandler data.ChainHandler, - headerHandler data.HeaderHandler, - bodyHandler data.BodyHandler, + chainHandler data.ChainHandler, + headerHandler data.HeaderHandler, + bodyHandler data.BodyHandler, ) error { - var err error - defer func() { - if err != nil { - sp.RevertAccountState() - } - }() - - err = checkForNils(chainHandler, headerHandler, bodyHandler) - if err != nil { - return err - } - - log.Debug(fmt.Sprintf("started committing block with round %d and nonce %d\n", - headerHandler.GetRound(), - headerHandler.GetNonce())) - - err = sp.checkBlockValidity(chainHandler, headerHandler, bodyHandler) - if err != nil { - return err - } - - header, ok := headerHandler.(*block.Header) - if !ok { - err = process.ErrWrongTypeAssertion - return err - } - - buff, err := sp.marshalizer.Marshal(header) - if err != nil { - return err - } - - headerHash := sp.hasher.Compute(string(buff)) - nonceToByteSlice := sp.uint64Converter.ToByteSlice(header.Nonce) - hdrNonceHashDataUnit := dataRetriever.ShardHdrNonceHashDataUnit + dataRetriever.UnitType(header.ShardId) - - errNotCritical := sp.store.Put(hdrNonceHashDataUnit, nonceToByteSlice, headerHash) - log.LogIfError(errNotCritical) - - errNotCritical = sp.store.Put(dataRetriever.BlockHeaderUnit, headerHash, buff) - log.LogIfError(errNotCritical) - - headerNoncePool := sp.dataPool.HeadersNonces() - if headerNoncePool == nil { - err = process.ErrNilDataPoolHolder - return err - } - - //TODO: Should be analyzed if put in pool is really necessary or not (right now there is no action of removing them) - syncMap := &dataPool.ShardIdHashSyncMap{} - syncMap.Store(headerHandler.GetShardID(), headerHash) - headerNoncePool.Merge(headerHandler.GetNonce(), syncMap) - - body, ok := bodyHandler.(block.Body) - if !ok { - err = process.ErrWrongTypeAssertion - return err - } - - err = sp.txCoordinator.SaveBlockDataToStorage(body) - if err != nil { - return err - } - - for i := 0; i < len(body); i++ { - buff, err = sp.marshalizer.Marshal(body[i]) - if err != nil { - return err - } - - miniBlockHash := sp.hasher.Compute(string(buff)) - errNotCritical = sp.store.Put(dataRetriever.MiniBlockUnit, miniBlockHash, buff) - log.LogIfError(errNotCritical) - } - - processedMetaHdrs, errNotCritical := sp.getProcessedMetaBlocksFromPool(body, header) - if errNotCritical != nil { - log.Debug(errNotCritical.Error()) - } - - err = sp.saveLastNotarizedHeader(sharding.MetachainShardId, processedMetaHdrs) - if err != nil { - return err - } - - headerMeta, err := sp.getLastNotarizedHdr(sharding.MetachainShardId) - if err != nil { - return err - } - - sp.appStatusHandler.SetStringValue(core.MetricCrossCheckBlockHeight, fmt.Sprintf("meta %d", headerMeta.GetNonce())) - - _, err = sp.accounts.Commit() - if err != nil { - return err - } - 
- log.Info(fmt.Sprintf("shardBlock with nonce %d and hash %s has been committed successfully\n", - header.Nonce, - core.ToB64(headerHash))) - - sp.blocksTracker.AddBlock(header) - - errNotCritical = sp.txCoordinator.RemoveBlockDataFromPool(body) - if errNotCritical != nil { - log.Debug(errNotCritical.Error()) - } - - errNotCritical = sp.removeProcessedMetablocksFromPool(processedMetaHdrs) - if errNotCritical != nil { - log.Debug(errNotCritical.Error()) - } - - finalHeader, finalHeaderHash, errNotCritical := sp.getHighestHdrForOwnShardFromMetachain(header.Round) - if errNotCritical != nil { - log.Debug(errNotCritical.Error()) - } - - errNotCritical = sp.forkDetector.AddHeader(header, headerHash, process.BHProcessed, finalHeader, finalHeaderHash) - if errNotCritical != nil { - log.Debug(errNotCritical.Error()) - } - - hdrsToAttestFinality := uint32(header.Nonce - finalHeader.Nonce) - sp.removeNotarizedHdrsBehindFinal(hdrsToAttestFinality) - - err = chainHandler.SetCurrentBlockBody(body) - if err != nil { - return err - } - - err = chainHandler.SetCurrentBlockHeader(header) - if err != nil { - return err - } - - chainHandler.SetCurrentBlockHeaderHash(headerHash) - - sp.indexBlockIfNeeded(bodyHandler, headerHandler) - - // write data to log - go sp.txCounter.displayLogInfo( - header, - body, - headerHash, - sp.shardCoordinator.NumberOfShards(), - sp.shardCoordinator.SelfId(), - sp.dataPool, - ) - - sp.blockSizeThrottler.Succeed(header.Round) - - return nil + var err error + defer func() { + if err != nil { + sp.RevertAccountState() + } + }() + + err = checkForNils(chainHandler, headerHandler, bodyHandler) + if err != nil { + return err + } + + log.Debug(fmt.Sprintf("started committing block with round %d and nonce %d\n", + headerHandler.GetRound(), + headerHandler.GetNonce())) + + err = sp.checkBlockValidity(chainHandler, headerHandler, bodyHandler) + if err != nil { + return err + } + + header, ok := headerHandler.(*block.Header) + if !ok { + err = process.ErrWrongTypeAssertion + return err + } + + buff, err := sp.marshalizer.Marshal(header) + if err != nil { + return err + } + + headerHash := sp.hasher.Compute(string(buff)) + nonceToByteSlice := sp.uint64Converter.ToByteSlice(header.Nonce) + hdrNonceHashDataUnit := dataRetriever.ShardHdrNonceHashDataUnit + dataRetriever.UnitType(header.ShardId) + + errNotCritical := sp.store.Put(hdrNonceHashDataUnit, nonceToByteSlice, headerHash) + log.LogIfError(errNotCritical) + + errNotCritical = sp.store.Put(dataRetriever.BlockHeaderUnit, headerHash, buff) + log.LogIfError(errNotCritical) + + headerNoncePool := sp.dataPool.HeadersNonces() + if headerNoncePool == nil { + err = process.ErrNilDataPoolHolder + return err + } + + //TODO: Should be analyzed if put in pool is really necessary or not (right now there is no action of removing them) + syncMap := &dataPool.ShardIdHashSyncMap{} + syncMap.Store(headerHandler.GetShardID(), headerHash) + headerNoncePool.Merge(headerHandler.GetNonce(), syncMap) + + body, ok := bodyHandler.(block.Body) + if !ok { + err = process.ErrWrongTypeAssertion + return err + } + + err = sp.txCoordinator.SaveBlockDataToStorage(body) + if err != nil { + return err + } + + for i := 0; i < len(body); i++ { + buff, err = sp.marshalizer.Marshal(body[i]) + if err != nil { + return err + } + + miniBlockHash := sp.hasher.Compute(string(buff)) + errNotCritical = sp.store.Put(dataRetriever.MiniBlockUnit, miniBlockHash, buff) + log.LogIfError(errNotCritical) + } + + processedMetaHdrs, errNotCritical := sp.getProcessedMetaBlocksFromPool(body, 
header) + if errNotCritical != nil { + log.Debug(errNotCritical.Error()) + } + + err = sp.saveLastNotarizedHeader(sharding.MetachainShardId, processedMetaHdrs) + if err != nil { + return err + } + + headerMeta, err := sp.getLastNotarizedHdr(sharding.MetachainShardId) + if err != nil { + return err + } + + sp.appStatusHandler.SetStringValue(core.MetricCrossCheckBlockHeight, fmt.Sprintf("meta %d", headerMeta.GetNonce())) + + _, err = sp.accounts.Commit() + if err != nil { + return err + } + + log.Info(fmt.Sprintf("shardBlock with nonce %d and hash %s has been committed successfully\n", + header.Nonce, + core.ToB64(headerHash))) + + sp.blocksTracker.AddBlock(header) + + errNotCritical = sp.txCoordinator.RemoveBlockDataFromPool(body) + if errNotCritical != nil { + log.Debug(errNotCritical.Error()) + } + + errNotCritical = sp.removeProcessedMetablocksFromPool(processedMetaHdrs) + if errNotCritical != nil { + log.Debug(errNotCritical.Error()) + } + + finalHeader, finalHeaderHash, errNotCritical := sp.getHighestHdrForOwnShardFromMetachain(header.Round) + if errNotCritical != nil { + log.Debug(errNotCritical.Error()) + } + + errNotCritical = sp.forkDetector.AddHeader(header, headerHash, process.BHProcessed, finalHeader, finalHeaderHash) + if errNotCritical != nil { + log.Debug(errNotCritical.Error()) + } + + hdrsToAttestFinality := uint32(header.Nonce - finalHeader.Nonce) + sp.removeNotarizedHdrsBehindFinal(hdrsToAttestFinality) + + err = chainHandler.SetCurrentBlockBody(body) + if err != nil { + return err + } + + err = chainHandler.SetCurrentBlockHeader(header) + if err != nil { + return err + } + + chainHandler.SetCurrentBlockHeaderHash(headerHash) + + sp.indexBlockIfNeeded(bodyHandler, headerHandler) + + // write data to log + go sp.txCounter.displayLogInfo( + header, + body, + headerHash, + sp.shardCoordinator.NumberOfShards(), + sp.shardCoordinator.SelfId(), + sp.dataPool, + ) + + sp.blockSizeThrottler.Succeed(header.Round) + + return nil } // getHighestHdrForOwnShardFromMetachain calculates the highest shard header notarized by metachain func (sp *shardProcessor) getHighestHdrForOwnShardFromMetachain(round uint64) (*block.Header, []byte, error) { - highestNonceOwnShIdHdr := &block.Header{} - highestNonceOwnShIdHdrHash, _ := core.CalculateHash(sp.marshalizer, sp.hasher, highestNonceOwnShIdHdr) + highestNonceOwnShIdHdr := &block.Header{} + highestNonceOwnShIdHdrHash, _ := core.CalculateHash(sp.marshalizer, sp.hasher, highestNonceOwnShIdHdr) - orderedMetaBlocks, err := sp.getOrderedMetaBlocks(round) - if err != nil { - return highestNonceOwnShIdHdr, highestNonceOwnShIdHdrHash, err - } + orderedMetaBlocks, err := sp.getOrderedMetaBlocks(round) + if err != nil { + return highestNonceOwnShIdHdr, highestNonceOwnShIdHdrHash, err + } - lastNotarizedMetaHdr, err := sp.getLastNotarizedHdr(sharding.MetachainShardId) - if err != nil { - return highestNonceOwnShIdHdr, highestNonceOwnShIdHdrHash, err - } + lastNotarizedMetaHdr, err := sp.getLastNotarizedHdr(sharding.MetachainShardId) + if err != nil { + return highestNonceOwnShIdHdr, highestNonceOwnShIdHdrHash, err + } - metaHdr, ok := lastNotarizedMetaHdr.(*block.MetaBlock) - if !ok { - return highestNonceOwnShIdHdr, highestNonceOwnShIdHdrHash, process.ErrWrongTypeAssertion - } + metaHdr, ok := lastNotarizedMetaHdr.(*block.MetaBlock) + if !ok { + return highestNonceOwnShIdHdr, highestNonceOwnShIdHdrHash, process.ErrWrongTypeAssertion + } - highestNonceOwnShIdHdr = sp.getHighestHdrForShardFromMetachain(sp.shardCoordinator.SelfId(), metaHdr) + 
highestNonceOwnShIdHdr = sp.getHighestHdrForShardFromMetachain(sp.shardCoordinator.SelfId(), metaHdr) - for i := 0; i < len(orderedMetaBlocks); i++ { - hdr, ok := orderedMetaBlocks[i].hdr.(*block.MetaBlock) - if !ok { - continue - } + for i := 0; i < len(orderedMetaBlocks); i++ { + hdr, ok := orderedMetaBlocks[i].hdr.(*block.MetaBlock) + if !ok { + continue + } - err = sp.isHdrConstructionValid(hdr, lastNotarizedMetaHdr) - if err != nil { - continue - } + err = sp.isHdrConstructionValid(hdr, lastNotarizedMetaHdr) + if err != nil { + continue + } - isFinal := sp.isMetaHeaderFinal(hdr, orderedMetaBlocks, i+1) - if !isFinal { - continue - } + isFinal := sp.isMetaHeaderFinal(hdr, orderedMetaBlocks, i+1) + if !isFinal { + continue + } - lastNotarizedMetaHdr = hdr + lastNotarizedMetaHdr = hdr - highestHdr := sp.getHighestHdrForShardFromMetachain(sp.shardCoordinator.SelfId(), hdr) - if highestHdr.Nonce > highestNonceOwnShIdHdr.Nonce { - highestNonceOwnShIdHdr = highestHdr - } - } + highestHdr := sp.getHighestHdrForShardFromMetachain(sp.shardCoordinator.SelfId(), hdr) + if highestHdr.Nonce > highestNonceOwnShIdHdr.Nonce { + highestNonceOwnShIdHdr = highestHdr + } + } - highestNonceOwnShIdHdrHash, _ = core.CalculateHash(sp.marshalizer, sp.hasher, highestNonceOwnShIdHdr) + highestNonceOwnShIdHdrHash, _ = core.CalculateHash(sp.marshalizer, sp.hasher, highestNonceOwnShIdHdr) - return highestNonceOwnShIdHdr, highestNonceOwnShIdHdrHash, nil + return highestNonceOwnShIdHdr, highestNonceOwnShIdHdrHash, nil } func (sp *shardProcessor) getHighestHdrForShardFromMetachain(shardId uint32, hdr *block.MetaBlock) *block.Header { - highestNonceOwnShIdHdr := &block.Header{} - // search for own shard id in shardInfo from metaHeaders - for _, shardInfo := range hdr.ShardInfo { - if shardInfo.ShardId != shardId { - continue - } - - ownHdr, err := process.GetShardHeader(shardInfo.HeaderHash, sp.dataPool.Headers(), sp.marshalizer, sp.store) - if err != nil { - continue - } - - // save the highest nonce - if ownHdr.GetNonce() > highestNonceOwnShIdHdr.GetNonce() { - highestNonceOwnShIdHdr = ownHdr - } - } - - return highestNonceOwnShIdHdr + highestNonceOwnShIdHdr := &block.Header{} + // search for own shard id in shardInfo from metaHeaders + for _, shardInfo := range hdr.ShardInfo { + if shardInfo.ShardId != shardId { + continue + } + + ownHdr, err := process.GetShardHeader(shardInfo.HeaderHash, sp.dataPool.Headers(), sp.marshalizer, sp.store) + if err != nil { + continue + } + + // save the highest nonce + if ownHdr.GetNonce() > highestNonceOwnShIdHdr.GetNonce() { + highestNonceOwnShIdHdr = ownHdr + } + } + + return highestNonceOwnShIdHdr } // getProcessedMetaBlocksFromPool returns all the meta blocks fully processed func (sp *shardProcessor) getProcessedMetaBlocksFromPool(body block.Body, header *block.Header) ([]data.HeaderHandler, error) { - if body == nil { - return nil, process.ErrNilTxBlockBody - } - if header == nil { - return nil, process.ErrNilBlockHeader - } - - miniBlockHashes := make(map[int][]byte, 0) - for i := 0; i < len(body); i++ { - miniBlock := body[i] - if miniBlock.SenderShardID == sp.shardCoordinator.SelfId() { - continue - } - - mbHash, err := core.CalculateHash(sp.marshalizer, sp.hasher, miniBlock) - if err != nil { - log.Debug(err.Error()) - continue - } - - miniBlockHashes[i] = mbHash - } - - log.Debug(fmt.Sprintf("cross mini blocks in body: %d\n", len(miniBlockHashes))) - - processedMetaHdrs := make([]data.HeaderHandler, 0) - for _, metaBlockKey := range header.MetaBlockHashes { - metaBlock, _ 
:= sp.dataPool.MetaBlocks().Peek(metaBlockKey) - if metaBlock == nil { - log.Debug(process.ErrNilMetaBlockHeader.Error()) - continue - } - - hdr, ok := metaBlock.(*block.MetaBlock) - if !ok { - log.Debug(process.ErrWrongTypeAssertion.Error()) - continue - } - - log.Debug(fmt.Sprintf("meta header nonce: %d\n", hdr.Nonce)) - - crossMiniBlockHashes := hdr.GetMiniBlockHeadersWithDst(sp.shardCoordinator.SelfId()) - for key := range miniBlockHashes { - _, ok = crossMiniBlockHashes[string(miniBlockHashes[key])] - if !ok { - continue - } - - hdr.SetMiniBlockProcessed(miniBlockHashes[key], true) - delete(miniBlockHashes, key) - } - - log.Debug(fmt.Sprintf("cross mini blocks in meta header: %d\n", len(crossMiniBlockHashes))) - - processedAll := true - for key := range crossMiniBlockHashes { - if !hdr.GetMiniBlockProcessed([]byte(key)) { - processedAll = false - break - } - } - - if processedAll { - processedMetaHdrs = append(processedMetaHdrs, hdr) - } - } - - return processedMetaHdrs, nil + if body == nil { + return nil, process.ErrNilTxBlockBody + } + if header == nil { + return nil, process.ErrNilBlockHeader + } + + miniBlockHashes := make(map[int][]byte, 0) + for i := 0; i < len(body); i++ { + miniBlock := body[i] + if miniBlock.SenderShardID == sp.shardCoordinator.SelfId() { + continue + } + + mbHash, err := core.CalculateHash(sp.marshalizer, sp.hasher, miniBlock) + if err != nil { + log.Debug(err.Error()) + continue + } + + miniBlockHashes[i] = mbHash + } + + log.Debug(fmt.Sprintf("cross mini blocks in body: %d\n", len(miniBlockHashes))) + + processedMetaHdrs := make([]data.HeaderHandler, 0) + for _, metaBlockKey := range header.MetaBlockHashes { + metaBlock, _ := sp.dataPool.MetaBlocks().Peek(metaBlockKey) + if metaBlock == nil { + log.Debug(process.ErrNilMetaBlockHeader.Error()) + continue + } + + hdr, ok := metaBlock.(*block.MetaBlock) + if !ok { + log.Debug(process.ErrWrongTypeAssertion.Error()) + continue + } + + log.Debug(fmt.Sprintf("meta header nonce: %d\n", hdr.Nonce)) + + crossMiniBlockHashes := hdr.GetMiniBlockHeadersWithDst(sp.shardCoordinator.SelfId()) + for key := range miniBlockHashes { + _, ok = crossMiniBlockHashes[string(miniBlockHashes[key])] + if !ok { + continue + } + + hdr.SetMiniBlockProcessed(miniBlockHashes[key], true) + delete(miniBlockHashes, key) + } + + log.Debug(fmt.Sprintf("cross mini blocks in meta header: %d\n", len(crossMiniBlockHashes))) + + processedAll := true + for key := range crossMiniBlockHashes { + if !hdr.GetMiniBlockProcessed([]byte(key)) { + processedAll = false + break + } + } + + if processedAll { + processedMetaHdrs = append(processedMetaHdrs, hdr) + } + } + + return processedMetaHdrs, nil } func (sp *shardProcessor) removeProcessedMetablocksFromPool(processedMetaHdrs []data.HeaderHandler) error { - lastNotarizedMetaHdr, err := sp.getLastNotarizedHdr(sharding.MetachainShardId) - if err != nil { - return err - } - - processed := 0 - unnotarized := len(sp.blocksTracker.UnnotarisedBlocks()) - // processedMetaHdrs is also sorted - for i := 0; i < len(processedMetaHdrs); i++ { - hdr := processedMetaHdrs[i] - - // remove process finished - if hdr.GetNonce() > lastNotarizedMetaHdr.GetNonce() { - continue - } - - errNotCritical := sp.blocksTracker.RemoveNotarisedBlocks(hdr) - log.LogIfError(errNotCritical) - - // metablock was processed and finalized - buff, err := sp.marshalizer.Marshal(hdr) - if err != nil { - log.Error(err.Error()) - continue - } - - headerHash := sp.hasher.Compute(string(buff)) - nonceToByteSlice := 
sp.uint64Converter.ToByteSlice(hdr.GetNonce()) - err = sp.store.Put(dataRetriever.MetaHdrNonceHashDataUnit, nonceToByteSlice, headerHash) - if err != nil { - log.Error(err.Error()) - continue - } - - err = sp.store.Put(dataRetriever.MetaBlockUnit, headerHash, buff) - if err != nil { - log.Error(err.Error()) - continue - } - - sp.dataPool.MetaBlocks().Remove(headerHash) - sp.dataPool.HeadersNonces().Remove(hdr.GetNonce(), sharding.MetachainShardId) - - log.Debug(fmt.Sprintf("metaBlock with round %d nonce %d and hash %s has been processed completely and removed from pool\n", - hdr.GetRound(), - hdr.GetNonce(), - core.ToB64(headerHash))) - - processed++ - } - - if processed > 0 { - log.Debug(fmt.Sprintf("%d meta blocks have been processed completely and removed from pool\n", processed)) - } - - notarized := unnotarized - len(sp.blocksTracker.UnnotarisedBlocks()) - if notarized > 0 { - log.Debug(fmt.Sprintf("%d shard blocks have been notarised by metachain\n", notarized)) - } - - return nil + lastNotarizedMetaHdr, err := sp.getLastNotarizedHdr(sharding.MetachainShardId) + if err != nil { + return err + } + + processed := 0 + unnotarized := len(sp.blocksTracker.UnnotarisedBlocks()) + // processedMetaHdrs is also sorted + for i := 0; i < len(processedMetaHdrs); i++ { + hdr := processedMetaHdrs[i] + + // remove process finished + if hdr.GetNonce() > lastNotarizedMetaHdr.GetNonce() { + continue + } + + errNotCritical := sp.blocksTracker.RemoveNotarisedBlocks(hdr) + log.LogIfError(errNotCritical) + + // metablock was processed and finalized + buff, err := sp.marshalizer.Marshal(hdr) + if err != nil { + log.Error(err.Error()) + continue + } + + headerHash := sp.hasher.Compute(string(buff)) + nonceToByteSlice := sp.uint64Converter.ToByteSlice(hdr.GetNonce()) + err = sp.store.Put(dataRetriever.MetaHdrNonceHashDataUnit, nonceToByteSlice, headerHash) + if err != nil { + log.Error(err.Error()) + continue + } + + err = sp.store.Put(dataRetriever.MetaBlockUnit, headerHash, buff) + if err != nil { + log.Error(err.Error()) + continue + } + + sp.dataPool.MetaBlocks().Remove(headerHash) + sp.dataPool.HeadersNonces().Remove(hdr.GetNonce(), sharding.MetachainShardId) + + log.Debug(fmt.Sprintf("metaBlock with round %d nonce %d and hash %s has been processed completely and removed from pool\n", + hdr.GetRound(), + hdr.GetNonce(), + core.ToB64(headerHash))) + + processed++ + } + + if processed > 0 { + log.Debug(fmt.Sprintf("%d meta blocks have been processed completely and removed from pool\n", processed)) + } + + notarized := unnotarized - len(sp.blocksTracker.UnnotarisedBlocks()) + if notarized > 0 { + log.Debug(fmt.Sprintf("%d shard blocks have been notarised by metachain\n", notarized)) + } + + return nil } // receivedMetaBlock is a callback function when a new metablock was received // upon receiving, it parses the new metablock and requests miniblocks and transactions // which destination is the current shard func (sp *shardProcessor) receivedMetaBlock(metaBlockHash []byte) { - metaBlksCache := sp.dataPool.MetaBlocks() - if metaBlksCache == nil { - return - } - - metaHdrsNoncesCache := sp.dataPool.HeadersNonces() - if metaHdrsNoncesCache == nil && sp.metaBlockFinality > 0 { - return - } - - miniBlksCache := sp.dataPool.MiniBlocks() - if miniBlksCache == nil || miniBlksCache.IsInterfaceNil() { - return - } - - obj, ok := metaBlksCache.Peek(metaBlockHash) - if !ok { - return - } - - metaBlock, ok := obj.(data.HeaderHandler) - if !ok { - return - } - - log.Debug(fmt.Sprintf("received metablock with hash %s and 
nonce %d from network\n", - core.ToB64(metaBlockHash), - metaBlock.GetNonce())) - - sp.mutRequestedMetaHdrsHashes.Lock() - - if !sp.allNeededMetaHdrsFound { - if sp.requestedMetaHdrsHashes[string(metaBlockHash)] { - delete(sp.requestedMetaHdrsHashes, string(metaBlockHash)) - - if metaBlock.GetNonce() > sp.currHighestMetaHdrNonce { - sp.currHighestMetaHdrNonce = metaBlock.GetNonce() - } - } - - lenReqMetaHdrsHashes := len(sp.requestedMetaHdrsHashes) - areFinalAttestingHdrsInCache := false - if lenReqMetaHdrsHashes == 0 { - requestedBlockHeaders := sp.requestFinalMissingHeaders() - if requestedBlockHeaders == 0 { - areFinalAttestingHdrsInCache = true - } else { - log.Info(fmt.Sprintf("requested %d missing final meta headers\n", requestedBlockHeaders)) - } - } - - sp.allNeededMetaHdrsFound = lenReqMetaHdrsHashes == 0 && areFinalAttestingHdrsInCache - - sp.mutRequestedMetaHdrsHashes.Unlock() - - if lenReqMetaHdrsHashes == 0 && areFinalAttestingHdrsInCache { - sp.chRcvAllMetaHdrs <- true - } - } else { - sp.mutRequestedMetaHdrsHashes.Unlock() - } - - lastNotarizedHdr, err := sp.getLastNotarizedHdr(sharding.MetachainShardId) - if err != nil { - return - } - if metaBlock.GetNonce() <= lastNotarizedHdr.GetNonce() { - return - } - if metaBlock.GetRound() <= lastNotarizedHdr.GetRound() { - return - } - - sp.txCoordinator.RequestMiniBlocks(metaBlock) + metaBlksCache := sp.dataPool.MetaBlocks() + if metaBlksCache == nil { + return + } + + metaHdrsNoncesCache := sp.dataPool.HeadersNonces() + if metaHdrsNoncesCache == nil && sp.metaBlockFinality > 0 { + return + } + + miniBlksCache := sp.dataPool.MiniBlocks() + if miniBlksCache == nil || miniBlksCache.IsInterfaceNil() { + return + } + + obj, ok := metaBlksCache.Peek(metaBlockHash) + if !ok { + return + } + + metaBlock, ok := obj.(data.HeaderHandler) + if !ok { + return + } + + log.Debug(fmt.Sprintf("received metablock with hash %s and nonce %d from network\n", + core.ToB64(metaBlockHash), + metaBlock.GetNonce())) + + sp.mutRequestedMetaHdrsHashes.Lock() + + if !sp.allNeededMetaHdrsFound { + if sp.requestedMetaHdrsHashes[string(metaBlockHash)] { + delete(sp.requestedMetaHdrsHashes, string(metaBlockHash)) + + if metaBlock.GetNonce() > sp.currHighestMetaHdrNonce { + sp.currHighestMetaHdrNonce = metaBlock.GetNonce() + } + } + + lenReqMetaHdrsHashes := len(sp.requestedMetaHdrsHashes) + areFinalAttestingHdrsInCache := false + if lenReqMetaHdrsHashes == 0 { + requestedBlockHeaders := sp.requestFinalMissingHeaders() + if requestedBlockHeaders == 0 { + areFinalAttestingHdrsInCache = true + } else { + log.Info(fmt.Sprintf("requested %d missing final meta headers\n", requestedBlockHeaders)) + } + } + + sp.allNeededMetaHdrsFound = lenReqMetaHdrsHashes == 0 && areFinalAttestingHdrsInCache + + sp.mutRequestedMetaHdrsHashes.Unlock() + + if lenReqMetaHdrsHashes == 0 && areFinalAttestingHdrsInCache { + sp.chRcvAllMetaHdrs <- true + } + } else { + sp.mutRequestedMetaHdrsHashes.Unlock() + } + + lastNotarizedHdr, err := sp.getLastNotarizedHdr(sharding.MetachainShardId) + if err != nil { + return + } + if metaBlock.GetNonce() <= lastNotarizedHdr.GetNonce() { + return + } + if metaBlock.GetRound() <= lastNotarizedHdr.GetRound() { + return + } + + sp.txCoordinator.RequestMiniBlocks(metaBlock) } // requestFinalMissingHeaders requests the headers needed to accept the current selected headers for processing the // current block. 
It requests the metaBlockFinality headers greater than the highest meta header related to the block // which should be processed func (sp *shardProcessor) requestFinalMissingHeaders() uint32 { - requestedBlockHeaders := uint32(0) - for i := sp.currHighestMetaHdrNonce + 1; i <= sp.currHighestMetaHdrNonce+uint64(sp.metaBlockFinality); i++ { - if sp.currHighestMetaHdrNonce == uint64(0) { - continue - } - - _, _, err := process.GetMetaHeaderFromPoolWithNonce( - i, - sp.dataPool.MetaBlocks(), - sp.dataPool.HeadersNonces()) - if err != nil { - requestedBlockHeaders++ - go sp.onRequestHeaderHandlerByNonce(sharding.MetachainShardId, i) - } - } - - return requestedBlockHeaders + requestedBlockHeaders := uint32(0) + for i := sp.currHighestMetaHdrNonce + 1; i <= sp.currHighestMetaHdrNonce+uint64(sp.metaBlockFinality); i++ { + if sp.currHighestMetaHdrNonce == uint64(0) { + continue + } + + _, _, err := process.GetMetaHeaderFromPoolWithNonce( + i, + sp.dataPool.MetaBlocks(), + sp.dataPool.HeadersNonces()) + if err != nil { + requestedBlockHeaders++ + go sp.onRequestHeaderHandlerByNonce(sharding.MetachainShardId, i) + } + } + + return requestedBlockHeaders } func (sp *shardProcessor) requestMetaHeaders(header *block.Header) (uint32, uint32) { - sp.mutRequestedMetaHdrsHashes.Lock() - - sp.allNeededMetaHdrsFound = true - - if len(header.MetaBlockHashes) == 0 { - sp.mutRequestedMetaHdrsHashes.Unlock() - return 0, 0 - } - - missingHeaderHashes := sp.computeMissingHeaders(header) - - requestedBlockHeaders := uint32(0) - sp.requestedMetaHdrsHashes = make(map[string]bool) - for _, hash := range missingHeaderHashes { - requestedBlockHeaders++ - sp.requestedMetaHdrsHashes[string(hash)] = true - go sp.onRequestHeaderHandler(sharding.MetachainShardId, hash) - } - - requestedFinalBlockHeaders := uint32(0) - if requestedBlockHeaders > 0 { - sp.allNeededMetaHdrsFound = false - } else { - requestedFinalBlockHeaders = sp.requestFinalMissingHeaders() - if requestedFinalBlockHeaders > 0 { - sp.allNeededMetaHdrsFound = false - } - } - - if !sp.allNeededMetaHdrsFound { - process.EmptyChannel(sp.chRcvAllMetaHdrs) - } - - sp.mutRequestedMetaHdrsHashes.Unlock() - - return requestedBlockHeaders, requestedFinalBlockHeaders + sp.mutRequestedMetaHdrsHashes.Lock() + + sp.allNeededMetaHdrsFound = true + + if len(header.MetaBlockHashes) == 0 { + sp.mutRequestedMetaHdrsHashes.Unlock() + return 0, 0 + } + + missingHeaderHashes := sp.computeMissingHeaders(header) + + requestedBlockHeaders := uint32(0) + sp.requestedMetaHdrsHashes = make(map[string]bool) + for _, hash := range missingHeaderHashes { + requestedBlockHeaders++ + sp.requestedMetaHdrsHashes[string(hash)] = true + go sp.onRequestHeaderHandler(sharding.MetachainShardId, hash) + } + + requestedFinalBlockHeaders := uint32(0) + if requestedBlockHeaders > 0 { + sp.allNeededMetaHdrsFound = false + } else { + requestedFinalBlockHeaders = sp.requestFinalMissingHeaders() + if requestedFinalBlockHeaders > 0 { + sp.allNeededMetaHdrsFound = false + } + } + + if !sp.allNeededMetaHdrsFound { + process.EmptyChannel(sp.chRcvAllMetaHdrs) + } + + sp.mutRequestedMetaHdrsHashes.Unlock() + + return requestedBlockHeaders, requestedFinalBlockHeaders } func (sp *shardProcessor) computeMissingHeaders(header *block.Header) [][]byte { - missingHeaders := make([][]byte, 0) - sp.currHighestMetaHdrNonce = uint64(0) - - for i := 0; i < len(header.MetaBlockHashes); i++ { - hdr, err := process.GetMetaHeaderFromPool( - header.MetaBlockHashes[i], - sp.dataPool.MetaBlocks()) - if err != nil { - missingHeaders = 
append(missingHeaders, header.MetaBlockHashes[i]) - continue - } - - if hdr.Nonce > sp.currHighestMetaHdrNonce { - sp.currHighestMetaHdrNonce = hdr.Nonce - } - } - - return missingHeaders + missingHeaders := make([][]byte, 0) + sp.currHighestMetaHdrNonce = uint64(0) + + for i := 0; i < len(header.MetaBlockHashes); i++ { + hdr, err := process.GetMetaHeaderFromPool( + header.MetaBlockHashes[i], + sp.dataPool.MetaBlocks()) + if err != nil { + missingHeaders = append(missingHeaders, header.MetaBlockHashes[i]) + continue + } + + if hdr.Nonce > sp.currHighestMetaHdrNonce { + sp.currHighestMetaHdrNonce = hdr.Nonce + } + } + + return missingHeaders } func (sp *shardProcessor) verifyCrossShardMiniBlockDstMe(hdr *block.Header) error { - mMiniBlockMeta, err := sp.getAllMiniBlockDstMeFromMeta(hdr.Round, hdr.MetaBlockHashes) - if err != nil { - return err - } - - miniBlockDstMe := hdr.GetMiniBlockHeadersWithDst(sp.shardCoordinator.SelfId()) - for mbHash := range miniBlockDstMe { - if _, ok := mMiniBlockMeta[mbHash]; !ok { - return process.ErrCrossShardMBWithoutConfirmationFromMeta - } - } - - return nil + mMiniBlockMeta, err := sp.getAllMiniBlockDstMeFromMeta(hdr.Round, hdr.MetaBlockHashes) + if err != nil { + return err + } + + miniBlockDstMe := hdr.GetMiniBlockHeadersWithDst(sp.shardCoordinator.SelfId()) + for mbHash := range miniBlockDstMe { + if _, ok := mMiniBlockMeta[mbHash]; !ok { + return process.ErrCrossShardMBWithoutConfirmationFromMeta + } + } + + return nil } func (sp *shardProcessor) getAllMiniBlockDstMeFromMeta(round uint64, metaHashes [][]byte) (map[string][]byte, error) { - metaBlockCache := sp.dataPool.MetaBlocks() - if metaBlockCache == nil { - return nil, process.ErrNilMetaBlockPool - } - - lastHdr, err := sp.getLastNotarizedHdr(sharding.MetachainShardId) - if err != nil { - return nil, err - } - - mMiniBlockMeta := make(map[string][]byte) - for _, metaHash := range metaHashes { - val, _ := metaBlockCache.Peek(metaHash) - if val == nil { - continue - } - - hdr, ok := val.(*block.MetaBlock) - if !ok { - continue - } - - if hdr.GetRound() > round { - continue - } - if hdr.GetRound() <= lastHdr.GetRound() { - continue - } - if hdr.GetNonce() <= lastHdr.GetNonce() { - continue - } - - miniBlockDstMe := hdr.GetMiniBlockHeadersWithDst(sp.shardCoordinator.SelfId()) - for mbHash := range miniBlockDstMe { - mMiniBlockMeta[mbHash] = metaHash - } - } - - return mMiniBlockMeta, nil + metaBlockCache := sp.dataPool.MetaBlocks() + if metaBlockCache == nil { + return nil, process.ErrNilMetaBlockPool + } + + lastHdr, err := sp.getLastNotarizedHdr(sharding.MetachainShardId) + if err != nil { + return nil, err + } + + mMiniBlockMeta := make(map[string][]byte) + for _, metaHash := range metaHashes { + val, _ := metaBlockCache.Peek(metaHash) + if val == nil { + continue + } + + hdr, ok := val.(*block.MetaBlock) + if !ok { + continue + } + + if hdr.GetRound() > round { + continue + } + if hdr.GetRound() <= lastHdr.GetRound() { + continue + } + if hdr.GetNonce() <= lastHdr.GetNonce() { + continue + } + + miniBlockDstMe := hdr.GetMiniBlockHeadersWithDst(sp.shardCoordinator.SelfId()) + for mbHash := range miniBlockDstMe { + mMiniBlockMeta[mbHash] = metaHash + } + } + + return mMiniBlockMeta, nil } func (sp *shardProcessor) getOrderedMetaBlocks(round uint64) ([]*hashAndHdr, error) { - metaBlockCache := sp.dataPool.MetaBlocks() - if metaBlockCache == nil { - return nil, process.ErrNilMetaBlockPool - } - - lastHdr, err := sp.getLastNotarizedHdr(sharding.MetachainShardId) - if err != nil { - return nil, err - } 
- - orderedMetaBlocks := make([]*hashAndHdr, 0) - for _, key := range metaBlockCache.Keys() { - val, _ := metaBlockCache.Peek(key) - if val == nil { - continue - } - - hdr, ok := val.(*block.MetaBlock) - if !ok { - continue - } - - if hdr.GetRound() > round { - continue - } - if hdr.GetRound() <= lastHdr.GetRound() { - continue - } - if hdr.GetNonce() <= lastHdr.GetNonce() { - continue - } - - orderedMetaBlocks = append(orderedMetaBlocks, &hashAndHdr{hdr: hdr, hash: key}) - } - - sort.Slice(orderedMetaBlocks, func(i, j int) bool { - return orderedMetaBlocks[i].hdr.GetNonce() < orderedMetaBlocks[j].hdr.GetNonce() - }) - - return orderedMetaBlocks, nil + metaBlockCache := sp.dataPool.MetaBlocks() + if metaBlockCache == nil { + return nil, process.ErrNilMetaBlockPool + } + + lastHdr, err := sp.getLastNotarizedHdr(sharding.MetachainShardId) + if err != nil { + return nil, err + } + + orderedMetaBlocks := make([]*hashAndHdr, 0) + for _, key := range metaBlockCache.Keys() { + val, _ := metaBlockCache.Peek(key) + if val == nil { + continue + } + + hdr, ok := val.(*block.MetaBlock) + if !ok { + continue + } + + if hdr.GetRound() > round { + continue + } + if hdr.GetRound() <= lastHdr.GetRound() { + continue + } + if hdr.GetNonce() <= lastHdr.GetNonce() { + continue + } + + orderedMetaBlocks = append(orderedMetaBlocks, &hashAndHdr{hdr: hdr, hash: key}) + } + + sort.Slice(orderedMetaBlocks, func(i, j int) bool { + return orderedMetaBlocks[i].hdr.GetNonce() < orderedMetaBlocks[j].hdr.GetNonce() + }) + + return orderedMetaBlocks, nil } // isMetaHeaderFinal verifies if meta is trully final, in order to not do rollbacks func (sp *shardProcessor) isMetaHeaderFinal(currHdr data.HeaderHandler, sortedHdrs []*hashAndHdr, startPos int) bool { - if currHdr == nil || currHdr.IsInterfaceNil() { - return false - } - if sortedHdrs == nil { - return false - } - - // verify if there are "K" block after current to make this one final - lastVerifiedHdr := currHdr - nextBlocksVerified := 0 - - for i := startPos; i < len(sortedHdrs); i++ { - if nextBlocksVerified >= sp.metaBlockFinality { - return true - } - - // found a header with the next nonce - tmpHdr := sortedHdrs[i].hdr - if tmpHdr.GetNonce() == lastVerifiedHdr.GetNonce()+1 { - err := sp.isHdrConstructionValid(tmpHdr, lastVerifiedHdr) - if err != nil { - continue - } - - lastVerifiedHdr = tmpHdr - nextBlocksVerified += 1 - } - } - - if nextBlocksVerified >= sp.metaBlockFinality { - return true - } - - return false + if currHdr == nil || currHdr.IsInterfaceNil() { + return false + } + if sortedHdrs == nil { + return false + } + + // verify if there are "K" block after current to make this one final + lastVerifiedHdr := currHdr + nextBlocksVerified := 0 + + for i := startPos; i < len(sortedHdrs); i++ { + if nextBlocksVerified >= sp.metaBlockFinality { + return true + } + + // found a header with the next nonce + tmpHdr := sortedHdrs[i].hdr + if tmpHdr.GetNonce() == lastVerifiedHdr.GetNonce()+1 { + err := sp.isHdrConstructionValid(tmpHdr, lastVerifiedHdr) + if err != nil { + continue + } + + lastVerifiedHdr = tmpHdr + nextBlocksVerified += 1 + } + } + + if nextBlocksVerified >= sp.metaBlockFinality { + return true + } + + return false } // full verification through metachain header func (sp *shardProcessor) createAndProcessCrossMiniBlocksDstMe( - noShards uint32, - maxItemsInBlock uint32, - round uint64, - haveTime func() bool, + noShards uint32, + maxItemsInBlock uint32, + round uint64, + haveTime func() bool, ) (block.MiniBlockSlice, [][]byte, uint32, error) { - 
metaBlockCache := sp.dataPool.MetaBlocks() - if metaBlockCache == nil || metaBlockCache.IsInterfaceNil() { - return nil, nil, 0, process.ErrNilMetaBlockPool - } - - miniBlockCache := sp.dataPool.MiniBlocks() - if miniBlockCache == nil || miniBlockCache.IsInterfaceNil() { - return nil, nil, 0, process.ErrNilMiniBlockPool - } - - txPool := sp.dataPool.Transactions() - if txPool == nil || txPool.IsInterfaceNil() { - return nil, nil, 0, process.ErrNilTransactionPool - } - - miniBlocks := make(block.MiniBlockSlice, 0) - nrTxAdded := uint32(0) - - orderedMetaBlocks, err := sp.getOrderedMetaBlocks(round) - if err != nil { - return nil, nil, 0, err - } - - log.Info(fmt.Sprintf("meta blocks ordered: %d\n", len(orderedMetaBlocks))) - - lastMetaHdr, err := sp.getLastNotarizedHdr(sharding.MetachainShardId) - if err != nil { - return nil, nil, 0, err - } - - // do processing in order - usedMetaHdrsHashes := make([][]byte, 0) - for i := 0; i < len(orderedMetaBlocks); i++ { - if !haveTime() { - log.Info(fmt.Sprintf("time is up after putting %d cross txs with destination to current shard\n", nrTxAdded)) - break - } - - itemsAddedInHeader := uint32(len(usedMetaHdrsHashes) + len(miniBlocks)) - if itemsAddedInHeader >= maxItemsInBlock { - log.Info(fmt.Sprintf("%d max records allowed to be added in shard header has been reached\n", maxItemsInBlock)) - break - } - - hdr, ok := orderedMetaBlocks[i].hdr.(*block.MetaBlock) - if !ok { - continue - } - - err = sp.isHdrConstructionValid(hdr, lastMetaHdr) - if err != nil { - continue - } - - isFinal := sp.isMetaHeaderFinal(hdr, orderedMetaBlocks, i+1) - if !isFinal { - continue - } - - if len(hdr.GetMiniBlockHeadersWithDst(sp.shardCoordinator.SelfId())) == 0 { - usedMetaHdrsHashes = append(usedMetaHdrsHashes, orderedMetaBlocks[i].hash) - lastMetaHdr = hdr - continue - } - - itemsAddedInBody := nrTxAdded - if itemsAddedInBody >= maxItemsInBlock { - continue - } - - maxTxSpaceRemained := int32(maxItemsInBlock) - int32(itemsAddedInBody) - maxMbSpaceRemained := int32(maxItemsInBlock) - int32(itemsAddedInHeader) - 1 - - if maxTxSpaceRemained > 0 && maxMbSpaceRemained > 0 { - currMBProcessed, currTxsAdded, hdrProcessFinished := sp.txCoordinator.CreateMbsAndProcessCrossShardTransactionsDstMe( - hdr, - uint32(maxTxSpaceRemained), - uint32(maxMbSpaceRemained), - round, - haveTime) - - // all txs processed, add to processed miniblocks - miniBlocks = append(miniBlocks, currMBProcessed...) 
- nrTxAdded = nrTxAdded + currTxsAdded - - if currTxsAdded > 0 { - usedMetaHdrsHashes = append(usedMetaHdrsHashes, orderedMetaBlocks[i].hash) - } - - if !hdrProcessFinished { - break - } - - lastMetaHdr = hdr - } - } - - sp.mutUsedMetaHdrsHashes.Lock() - sp.usedMetaHdrsHashes[round] = usedMetaHdrsHashes - sp.mutUsedMetaHdrsHashes.Unlock() - - return miniBlocks, usedMetaHdrsHashes, nrTxAdded, nil + metaBlockCache := sp.dataPool.MetaBlocks() + if metaBlockCache == nil || metaBlockCache.IsInterfaceNil() { + return nil, nil, 0, process.ErrNilMetaBlockPool + } + + miniBlockCache := sp.dataPool.MiniBlocks() + if miniBlockCache == nil || miniBlockCache.IsInterfaceNil() { + return nil, nil, 0, process.ErrNilMiniBlockPool + } + + txPool := sp.dataPool.Transactions() + if txPool == nil || txPool.IsInterfaceNil() { + return nil, nil, 0, process.ErrNilTransactionPool + } + + miniBlocks := make(block.MiniBlockSlice, 0) + nrTxAdded := uint32(0) + + orderedMetaBlocks, err := sp.getOrderedMetaBlocks(round) + if err != nil { + return nil, nil, 0, err + } + + log.Info(fmt.Sprintf("meta blocks ordered: %d\n", len(orderedMetaBlocks))) + + lastMetaHdr, err := sp.getLastNotarizedHdr(sharding.MetachainShardId) + if err != nil { + return nil, nil, 0, err + } + + // do processing in order + usedMetaHdrsHashes := make([][]byte, 0) + for i := 0; i < len(orderedMetaBlocks); i++ { + if !haveTime() { + log.Info(fmt.Sprintf("time is up after putting %d cross txs with destination to current shard\n", nrTxAdded)) + break + } + + itemsAddedInHeader := uint32(len(usedMetaHdrsHashes) + len(miniBlocks)) + if itemsAddedInHeader >= maxItemsInBlock { + log.Info(fmt.Sprintf("%d max records allowed to be added in shard header has been reached\n", maxItemsInBlock)) + break + } + + hdr, ok := orderedMetaBlocks[i].hdr.(*block.MetaBlock) + if !ok { + continue + } + + err = sp.isHdrConstructionValid(hdr, lastMetaHdr) + if err != nil { + continue + } + + isFinal := sp.isMetaHeaderFinal(hdr, orderedMetaBlocks, i+1) + if !isFinal { + continue + } + + if len(hdr.GetMiniBlockHeadersWithDst(sp.shardCoordinator.SelfId())) == 0 { + usedMetaHdrsHashes = append(usedMetaHdrsHashes, orderedMetaBlocks[i].hash) + lastMetaHdr = hdr + continue + } + + itemsAddedInBody := nrTxAdded + if itemsAddedInBody >= maxItemsInBlock { + continue + } + + maxTxSpaceRemained := int32(maxItemsInBlock) - int32(itemsAddedInBody) + maxMbSpaceRemained := int32(maxItemsInBlock) - int32(itemsAddedInHeader) - 1 + + if maxTxSpaceRemained > 0 && maxMbSpaceRemained > 0 { + currMBProcessed, currTxsAdded, hdrProcessFinished := sp.txCoordinator.CreateMbsAndProcessCrossShardTransactionsDstMe( + hdr, + uint32(maxTxSpaceRemained), + uint32(maxMbSpaceRemained), + round, + haveTime) + + // all txs processed, add to processed miniblocks + miniBlocks = append(miniBlocks, currMBProcessed...) 
+ nrTxAdded = nrTxAdded + currTxsAdded + + if currTxsAdded > 0 { + usedMetaHdrsHashes = append(usedMetaHdrsHashes, orderedMetaBlocks[i].hash) + } + + if !hdrProcessFinished { + break + } + + lastMetaHdr = hdr + } + } + + sp.mutUsedMetaHdrsHashes.Lock() + sp.usedMetaHdrsHashes[round] = usedMetaHdrsHashes + sp.mutUsedMetaHdrsHashes.Unlock() + + return miniBlocks, usedMetaHdrsHashes, nrTxAdded, nil } func (sp *shardProcessor) createMiniBlocks( - noShards uint32, - maxItemsInBlock uint32, - round uint64, - haveTime func() bool, + noShards uint32, + maxItemsInBlock uint32, + round uint64, + haveTime func() bool, ) (block.Body, error) { - miniBlocks := make(block.Body, 0) + miniBlocks := make(block.Body, 0) - if sp.accounts.JournalLen() != 0 { - return nil, process.ErrAccountStateDirty - } + if sp.accounts.JournalLen() != 0 { + return nil, process.ErrAccountStateDirty + } - if !haveTime() { - log.Info(fmt.Sprintf("time is up after entered in createMiniBlocks method\n")) - return nil, process.ErrTimeIsOut - } + if !haveTime() { + log.Info(fmt.Sprintf("time is up after entered in createMiniBlocks method\n")) + return nil, process.ErrTimeIsOut + } - txPool := sp.dataPool.Transactions() - if txPool == nil { - return nil, process.ErrNilTransactionPool - } + txPool := sp.dataPool.Transactions() + if txPool == nil { + return nil, process.ErrNilTransactionPool + } - destMeMiniBlocks, usedMetaHdrsHashes, txs, err := sp.createAndProcessCrossMiniBlocksDstMe(noShards, maxItemsInBlock, round, haveTime) - if err != nil { - log.Info(err.Error()) - } + destMeMiniBlocks, usedMetaHdrsHashes, txs, err := sp.createAndProcessCrossMiniBlocksDstMe(noShards, maxItemsInBlock, round, haveTime) + if err != nil { + log.Info(err.Error()) + } - log.Debug(fmt.Sprintf("processed %d miniblocks and %d txs with destination in self shard\n", len(destMeMiniBlocks), txs)) + log.Debug(fmt.Sprintf("processed %d miniblocks and %d txs with destination in self shard\n", len(destMeMiniBlocks), txs)) - if len(destMeMiniBlocks) > 0 { - miniBlocks = append(miniBlocks, destMeMiniBlocks...) - } + if len(destMeMiniBlocks) > 0 { + miniBlocks = append(miniBlocks, destMeMiniBlocks...) + } - if !haveTime() { - log.Info(fmt.Sprintf("time is up added %d transactions\n", txs)) - return miniBlocks, nil - } + if !haveTime() { + log.Info(fmt.Sprintf("time is up added %d transactions\n", txs)) + return miniBlocks, nil + } - maxTxSpaceRemained := int32(maxItemsInBlock) - int32(txs) - maxMbSpaceRemained := int32(maxItemsInBlock) - int32(len(destMeMiniBlocks)) - int32(len(usedMetaHdrsHashes)) + maxTxSpaceRemained := int32(maxItemsInBlock) - int32(txs) + maxMbSpaceRemained := int32(maxItemsInBlock) - int32(len(destMeMiniBlocks)) - int32(len(usedMetaHdrsHashes)) - if maxTxSpaceRemained > 0 && maxMbSpaceRemained > 0 { - mbFromMe := sp.txCoordinator.CreateMbsAndProcessTransactionsFromMe( - uint32(maxTxSpaceRemained), - uint32(maxMbSpaceRemained), - round, - haveTime) + if maxTxSpaceRemained > 0 && maxMbSpaceRemained > 0 { + mbFromMe := sp.txCoordinator.CreateMbsAndProcessTransactionsFromMe( + uint32(maxTxSpaceRemained), + uint32(maxMbSpaceRemained), + round, + haveTime) - if len(mbFromMe) > 0 { - miniBlocks = append(miniBlocks, mbFromMe...) - } - } + if len(mbFromMe) > 0 { + miniBlocks = append(miniBlocks, mbFromMe...) 
+ } + } - log.Info(fmt.Sprintf("creating mini blocks has been finished: created %d mini blocks\n", len(miniBlocks))) - return miniBlocks, nil + log.Info(fmt.Sprintf("creating mini blocks has been finished: created %d mini blocks\n", len(miniBlocks))) + return miniBlocks, nil } // CreateBlockHeader creates a miniblock header list given a block body func (sp *shardProcessor) CreateBlockHeader(bodyHandler data.BodyHandler, round uint64, haveTime func() bool) (data.HeaderHandler, error) { - log.Debug(fmt.Sprintf("started creating block header in round %d\n", round)) - header := &block.Header{ - MiniBlockHeaders: make([]block.MiniBlockHeader, 0), - RootHash: sp.getRootHash(), - ShardId: sp.shardCoordinator.SelfId(), - PrevRandSeed: make([]byte, 0), - RandSeed: make([]byte, 0), - } - - defer func() { - go sp.checkAndRequestIfMetaHeadersMissing(round) - }() - - if bodyHandler == nil || bodyHandler.IsInterfaceNil() { - return header, nil - } - - body, ok := bodyHandler.(block.Body) - if !ok { - return nil, process.ErrWrongTypeAssertion - } - - mbLen := len(body) - totalTxCount := 0 - miniBlockHeaders := make([]block.MiniBlockHeader, mbLen) - for i := 0; i < mbLen; i++ { - txCount := len(body[i].TxHashes) - totalTxCount += txCount - mbBytes, err := sp.marshalizer.Marshal(body[i]) - if err != nil { - return nil, err - } - mbHash := sp.hasher.Compute(string(mbBytes)) - - miniBlockHeaders[i] = block.MiniBlockHeader{ - Hash: mbHash, - SenderShardID: body[i].SenderShardID, - ReceiverShardID: body[i].ReceiverShardID, - TxCount: uint32(txCount), - Type: body[i].Type, - } - } - - header.MiniBlockHeaders = miniBlockHeaders - header.TxCount = uint32(totalTxCount) - - sp.appStatusHandler.SetUInt64Value(core.MetricNumTxInBlock, uint64(totalTxCount)) - sp.appStatusHandler.SetUInt64Value(core.MetricNumMiniBlocks, uint64(mbLen)) - - sp.mutUsedMetaHdrsHashes.Lock() - - if usedMetaHdrsHashes, ok := sp.usedMetaHdrsHashes[round]; ok { - header.MetaBlockHashes = usedMetaHdrsHashes - delete(sp.usedMetaHdrsHashes, round) - } - - sp.mutUsedMetaHdrsHashes.Unlock() - - sp.blockSizeThrottler.Add( - round, - core.Max(header.ItemsInBody(), header.ItemsInHeader())) - - return header, nil + log.Debug(fmt.Sprintf("started creating block header in round %d\n", round)) + header := &block.Header{ + MiniBlockHeaders: make([]block.MiniBlockHeader, 0), + RootHash: sp.getRootHash(), + ShardId: sp.shardCoordinator.SelfId(), + PrevRandSeed: make([]byte, 0), + RandSeed: make([]byte, 0), + } + + defer func() { + go sp.checkAndRequestIfMetaHeadersMissing(round) + }() + + if bodyHandler == nil || bodyHandler.IsInterfaceNil() { + return header, nil + } + + body, ok := bodyHandler.(block.Body) + if !ok { + return nil, process.ErrWrongTypeAssertion + } + + mbLen := len(body) + totalTxCount := 0 + miniBlockHeaders := make([]block.MiniBlockHeader, mbLen) + for i := 0; i < mbLen; i++ { + txCount := len(body[i].TxHashes) + totalTxCount += txCount + mbBytes, err := sp.marshalizer.Marshal(body[i]) + if err != nil { + return nil, err + } + mbHash := sp.hasher.Compute(string(mbBytes)) + + miniBlockHeaders[i] = block.MiniBlockHeader{ + Hash: mbHash, + SenderShardID: body[i].SenderShardID, + ReceiverShardID: body[i].ReceiverShardID, + TxCount: uint32(txCount), + Type: body[i].Type, + } + } + + header.MiniBlockHeaders = miniBlockHeaders + header.TxCount = uint32(totalTxCount) + + sp.appStatusHandler.SetUInt64Value(core.MetricNumTxInBlock, uint64(totalTxCount)) + sp.appStatusHandler.SetUInt64Value(core.MetricNumMiniBlocks, uint64(mbLen)) + + 
sp.mutUsedMetaHdrsHashes.Lock() + + if usedMetaHdrsHashes, ok := sp.usedMetaHdrsHashes[round]; ok { + header.MetaBlockHashes = usedMetaHdrsHashes + delete(sp.usedMetaHdrsHashes, round) + } + + sp.mutUsedMetaHdrsHashes.Unlock() + + sp.blockSizeThrottler.Add( + round, + core.Max(header.ItemsInBody(), header.ItemsInHeader())) + + return header, nil } func (sp *shardProcessor) waitForMetaHdrHashes(waitTime time.Duration) error { - select { - case <-sp.chRcvAllMetaHdrs: - return nil - case <-time.After(waitTime): - return process.ErrTimeIsOut - } + select { + case <-sp.chRcvAllMetaHdrs: + return nil + case <-time.After(waitTime): + return process.ErrTimeIsOut + } } // MarshalizedDataToBroadcast prepares underlying data into a marshalized object according to destination func (sp *shardProcessor) MarshalizedDataToBroadcast( - header data.HeaderHandler, - bodyHandler data.BodyHandler, + header data.HeaderHandler, + bodyHandler data.BodyHandler, ) (map[uint32][]byte, map[string][][]byte, error) { - if bodyHandler == nil || bodyHandler.IsInterfaceNil() { - return nil, nil, process.ErrNilMiniBlocks - } + if bodyHandler == nil || bodyHandler.IsInterfaceNil() { + return nil, nil, process.ErrNilMiniBlocks + } - body, ok := bodyHandler.(block.Body) - if !ok { - return nil, nil, process.ErrWrongTypeAssertion - } + body, ok := bodyHandler.(block.Body) + if !ok { + return nil, nil, process.ErrWrongTypeAssertion + } - mrsData := make(map[uint32][]byte) - bodies, mrsTxs := sp.txCoordinator.CreateMarshalizedData(body) + mrsData := make(map[uint32][]byte) + bodies, mrsTxs := sp.txCoordinator.CreateMarshalizedData(body) - for shardId, subsetBlockBody := range bodies { - buff, err := sp.marshalizer.Marshal(subsetBlockBody) - if err != nil { - log.Debug(process.ErrMarshalWithoutSuccess.Error()) - continue - } - mrsData[shardId] = buff - } + for shardId, subsetBlockBody := range bodies { + buff, err := sp.marshalizer.Marshal(subsetBlockBody) + if err != nil { + log.Debug(process.ErrMarshalWithoutSuccess.Error()) + continue + } + mrsData[shardId] = buff + } - return mrsData, mrsTxs, nil + return mrsData, mrsTxs, nil } // DecodeBlockBody method decodes block body from a given byte array func (sp *shardProcessor) DecodeBlockBody(dta []byte) data.BodyHandler { - if dta == nil { - return nil - } + if dta == nil { + return nil + } - var body block.Body + var body block.Body - err := sp.marshalizer.Unmarshal(&body, dta) - if err != nil { - log.Error(err.Error()) - return nil - } + err := sp.marshalizer.Unmarshal(&body, dta) + if err != nil { + log.Error(err.Error()) + return nil + } - return body + return body } // DecodeBlockHeader method decodes block header from a given byte array func (sp *shardProcessor) DecodeBlockHeader(dta []byte) data.HeaderHandler { - if dta == nil { - return nil - } + if dta == nil { + return nil + } - var header block.Header + var header block.Header - err := sp.marshalizer.Unmarshal(&header, dta) - if err != nil { - log.Error(err.Error()) - return nil - } + err := sp.marshalizer.Unmarshal(&header, dta) + if err != nil { + log.Error(err.Error()) + return nil + } - return &header + return &header } // IsInterfaceNil returns true if there is no value under the interface func (sp *shardProcessor) IsInterfaceNil() bool { - if sp == nil { - return true - } - return false + if sp == nil { + return true + } + return false } diff --git a/process/block/shardblock_test.go b/process/block/shardblock_test.go index e7cf1065e76..d141f5ed92d 100644 --- a/process/block/shardblock_test.go +++ 
b/process/block/shardblock_test.go @@ -1,4883 +1,4883 @@ package block_test import ( - "bytes" - "errors" - "fmt" - "reflect" - "sync" - "sync/atomic" - "testing" - "time" - - "github.com/ElrondNetwork/elrond-go/core" - "github.com/ElrondNetwork/elrond-go/core/indexer" - "github.com/ElrondNetwork/elrond-go/data" - "github.com/ElrondNetwork/elrond-go/data/block" - "github.com/ElrondNetwork/elrond-go/data/blockchain" - "github.com/ElrondNetwork/elrond-go/data/smartContractResult" - "github.com/ElrondNetwork/elrond-go/data/transaction" - "github.com/ElrondNetwork/elrond-go/dataRetriever" - "github.com/ElrondNetwork/elrond-go/hashing" - "github.com/ElrondNetwork/elrond-go/integrationTests" - "github.com/ElrondNetwork/elrond-go/marshal" - "github.com/ElrondNetwork/elrond-go/process" - blproc "github.com/ElrondNetwork/elrond-go/process/block" - "github.com/ElrondNetwork/elrond-go/process/coordinator" - "github.com/ElrondNetwork/elrond-go/process/factory/shard" - "github.com/ElrondNetwork/elrond-go/process/mock" - "github.com/ElrondNetwork/elrond-go/sharding" - "github.com/ElrondNetwork/elrond-go/storage" - "github.com/stretchr/testify/assert" + "bytes" + "errors" + "fmt" + "reflect" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/core/indexer" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/blockchain" + "github.com/ElrondNetwork/elrond-go/data/smartContractResult" + "github.com/ElrondNetwork/elrond-go/data/transaction" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/integrationTests" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/process" + blproc "github.com/ElrondNetwork/elrond-go/process/block" + "github.com/ElrondNetwork/elrond-go/process/coordinator" + "github.com/ElrondNetwork/elrond-go/process/factory/shard" + "github.com/ElrondNetwork/elrond-go/process/mock" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/storage" + "github.com/stretchr/testify/assert" ) //------- NewShardProcessor func initAccountsMock() *mock.AccountsStub { - rootHashCalled := func() ([]byte, error) { - return []byte("rootHash"), nil - } - return &mock.AccountsStub{ - RootHashCalled: rootHashCalled, - } + rootHashCalled := func() ([]byte, error) { + return []byte("rootHash"), nil + } + return &mock.AccountsStub{ + RootHashCalled: rootHashCalled, + } } func initBasicTestData() (*mock.PoolsHolderMock, *blockchain.BlockChain, []byte, block.Body, [][]byte, *mock.HasherMock, *mock.MarshalizerMock, error, []byte) { - tdp := mock.NewPoolsHolderMock() - txHash := []byte("tx_hash1") - randSeed := []byte("rand seed") - tdp.Transactions().AddData(txHash, &transaction.Transaction{}, process.ShardCacherIdentifier(1, 0)) - blkc := &blockchain.BlockChain{ - CurrentBlockHeader: &block.Header{ - Round: 1, - Nonce: 1, - RandSeed: randSeed, - }, - } - rootHash := []byte("rootHash") - body := make(block.Body, 0) - txHashes := make([][]byte, 0) - txHashes = append(txHashes, txHash) - miniblock := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 1, - TxHashes: txHashes, - } - body = append(body, &miniblock) - hasher := &mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} - mbbytes, _ := marshalizer.Marshal(miniblock) - mbHash := hasher.Compute(string(mbbytes)) - return tdp, blkc, rootHash, body, 
txHashes, hasher, marshalizer, nil, mbHash + tdp := mock.NewPoolsHolderMock() + txHash := []byte("tx_hash1") + randSeed := []byte("rand seed") + tdp.Transactions().AddData(txHash, &transaction.Transaction{}, process.ShardCacherIdentifier(1, 0)) + blkc := &blockchain.BlockChain{ + CurrentBlockHeader: &block.Header{ + Round: 1, + Nonce: 1, + RandSeed: randSeed, + }, + } + rootHash := []byte("rootHash") + body := make(block.Body, 0) + txHashes := make([][]byte, 0) + txHashes = append(txHashes, txHash) + miniblock := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 1, + TxHashes: txHashes, + } + body = append(body, &miniblock) + hasher := &mock.HasherMock{} + marshalizer := &mock.MarshalizerMock{} + mbbytes, _ := marshalizer.Marshal(miniblock) + mbHash := hasher.Compute(string(mbbytes)) + return tdp, blkc, rootHash, body, txHashes, hasher, marshalizer, nil, mbHash } func initBlockHeader(prevHash []byte, prevRandSeed []byte, rootHash []byte, mbHdrs []block.MiniBlockHeader) block.Header { - hdr := block.Header{ - Nonce: 2, - Round: 2, - PrevHash: prevHash, - PrevRandSeed: prevRandSeed, - Signature: []byte("signature"), - PubKeysBitmap: []byte("00110"), - ShardId: 0, - RootHash: rootHash, - MiniBlockHeaders: mbHdrs, - } - return hdr + hdr := block.Header{ + Nonce: 2, + Round: 2, + PrevHash: prevHash, + PrevRandSeed: prevRandSeed, + Signature: []byte("signature"), + PubKeysBitmap: []byte("00110"), + ShardId: 0, + RootHash: rootHash, + MiniBlockHeaders: mbHdrs, + } + return hdr } type methodFlags struct { - revertToSnapshotCalled bool - rootHashCalled bool + revertToSnapshotCalled bool + rootHashCalled bool } func defaultShardProcessor() (process.BlockProcessor, *methodFlags, error) { - // set accounts not dirty - flags := &methodFlags{} - journalLen := func() int { return 0 } - revertToSnapshot := func(snapshot int) error { - flags.revertToSnapshotCalled = true - return nil - } - rootHashCalled := func() ([]byte, error) { - flags.rootHashCalled = true - return []byte("rootHash"), nil - } - - accStub := initAccountsMock() - accStub.JournalLenCalled = journalLen - accStub.RevertToSnapshotCalled = revertToSnapshot - accStub.RootHashCalled = rootHashCalled - - tdp := initDataPool([]byte("tx_hash1")) - sp, err := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - accStub, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{ - ProbableHighestNonceCalled: func() uint64 { - return 0 - }, - GetHighestFinalBlockNonceCalled: func() uint64 { - return 0 - }, - }, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - return sp, flags, err + // set accounts not dirty + flags := &methodFlags{} + journalLen := func() int { return 0 } + revertToSnapshot := func(snapshot int) error { + flags.revertToSnapshotCalled = true + return nil + } + rootHashCalled := func() ([]byte, error) { + flags.rootHashCalled = true + return []byte("rootHash"), nil + } + + accStub := initAccountsMock() + accStub.JournalLenCalled = journalLen + accStub.RevertToSnapshotCalled = revertToSnapshot + accStub.RootHashCalled = rootHashCalled + + tdp := initDataPool([]byte("tx_hash1")) + sp, err := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + 
&mock.MarshalizerMock{}, + accStub, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{ + ProbableHighestNonceCalled: func() uint64 { + return 0 + }, + GetHighestFinalBlockNonceCalled: func() uint64 { + return 0 + }, + }, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + return sp, flags, err } //------- NewBlockProcessor func TestNewBlockProcessor_NilDataPoolShouldErr(t *testing.T) { - t.Parallel() - sp, err := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - nil, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - assert.Equal(t, process.ErrNilDataPoolHolder, err) - assert.Nil(t, sp) + t.Parallel() + sp, err := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + nil, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + assert.Equal(t, process.ErrNilDataPoolHolder, err) + assert.Nil(t, sp) } func TestNewShardProcessor_NilStoreShouldErr(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - sp, err := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - nil, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - assert.Equal(t, process.ErrNilStorage, err) - assert.Nil(t, sp) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + sp, err := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + nil, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + assert.Equal(t, process.ErrNilStorage, err) + assert.Nil(t, sp) } func TestNewShardProcessor_NilHasherShouldErr(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - sp, err := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - nil, - &mock.MarshalizerMock{}, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - 
&mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - assert.Equal(t, process.ErrNilHasher, err) - assert.Nil(t, sp) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + sp, err := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + nil, + &mock.MarshalizerMock{}, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + assert.Equal(t, process.ErrNilHasher, err) + assert.Nil(t, sp) } func TestNewShardProcessor_NilMarshalizerShouldWork(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - sp, err := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - nil, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - assert.Equal(t, process.ErrNilMarshalizer, err) - assert.Nil(t, sp) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + sp, err := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + nil, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + assert.Equal(t, process.ErrNilMarshalizer, err) + assert.Nil(t, sp) } func TestNewShardProcessor_NilAccountsAdapterShouldErr(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - sp, err := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - nil, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - assert.Equal(t, process.ErrNilAccountsAdapter, err) - assert.Nil(t, sp) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + sp, err := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + nil, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + assert.Equal(t, 
process.ErrNilAccountsAdapter, err) + assert.Nil(t, sp) } func TestNewShardProcessor_NilShardCoordinatorShouldErr(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - sp, err := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - initAccountsMock(), - nil, - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - assert.Equal(t, process.ErrNilShardCoordinator, err) - assert.Nil(t, sp) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + sp, err := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + initAccountsMock(), + nil, + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + assert.Equal(t, process.ErrNilShardCoordinator, err) + assert.Nil(t, sp) } func TestNewShardProcessor_NilForkDetectorShouldErr(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - sp, err := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - nil, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - assert.Equal(t, process.ErrNilForkDetector, err) - assert.Nil(t, sp) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + sp, err := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + nil, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + assert.Equal(t, process.ErrNilForkDetector, err) + assert.Nil(t, sp) } func TestNewShardProcessor_NilBlocksTrackerShouldErr(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - sp, err := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - nil, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - assert.Equal(t, process.ErrNilBlocksTracker, err) - assert.Nil(t, sp) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + sp, err := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + 
&mock.HasherStub{}, + &mock.MarshalizerMock{}, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + nil, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + assert.Equal(t, process.ErrNilBlocksTracker, err) + assert.Nil(t, sp) } func TestNewShardProcessor_NilRequestTransactionHandlerShouldErr(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - sp, err := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - nil, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - assert.Equal(t, process.ErrNilRequestHandler, err) - assert.Nil(t, sp) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + sp, err := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + nil, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + assert.Equal(t, process.ErrNilRequestHandler, err) + assert.Nil(t, sp) } func TestNewShardProcessor_NilTransactionPoolShouldErr(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - tdp.TransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { - return nil - } - sp, err := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - assert.Equal(t, process.ErrNilTransactionPool, err) - assert.Nil(t, sp) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + tdp.TransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { + return nil + } + sp, err := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + assert.Equal(t, process.ErrNilTransactionPool, err) + assert.Nil(t, sp) } func TestNewShardProcessor_NilTxCoordinator(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - sp, err := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - 
&mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - nil, - &mock.Uint64ByteSliceConverterMock{}, - ) - assert.Equal(t, process.ErrNilTransactionCoordinator, err) - assert.Nil(t, sp) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + sp, err := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + nil, + &mock.Uint64ByteSliceConverterMock{}, + ) + assert.Equal(t, process.ErrNilTransactionCoordinator, err) + assert.Nil(t, sp) } func TestNewShardProcessor_NilUint64Converter(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - sp, err := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - nil, - ) - assert.Equal(t, process.ErrNilUint64Converter, err) - assert.Nil(t, sp) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + sp, err := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + nil, + ) + assert.Equal(t, process.ErrNilUint64Converter, err) + assert.Nil(t, sp) } func TestNewShardProcessor_OkValsShouldWork(t *testing.T) { - t.Parallel() + t.Parallel() - sp, _, err := defaultShardProcessor() - assert.Nil(t, err) - assert.NotNil(t, sp) + sp, _, err := defaultShardProcessor() + assert.Nil(t, err) + assert.NotNil(t, sp) } //------- ProcessBlock func TestShardProcessor_ProcessBlockWithNilBlockchainShouldErr(t *testing.T) { - t.Parallel() + t.Parallel() - sp, _, _ := defaultShardProcessor() - blk := make(block.Body, 0) - err := sp.ProcessBlock(nil, &block.Header{}, blk, haveTime) - assert.Equal(t, process.ErrNilBlockChain, err) + sp, _, _ := defaultShardProcessor() + blk := make(block.Body, 0) + err := sp.ProcessBlock(nil, &block.Header{}, blk, haveTime) + assert.Equal(t, process.ErrNilBlockChain, err) } func TestShardProcessor_ProcessBlockWithNilHeaderShouldErr(t *testing.T) { - t.Parallel() + t.Parallel() - sp, _, _ := defaultShardProcessor() - body := make(block.Body, 0) - err := sp.ProcessBlock(&blockchain.BlockChain{}, nil, body, haveTime) - assert.Equal(t, process.ErrNilBlockHeader, err) + sp, _, _ := defaultShardProcessor() + body := make(block.Body, 0) + err := 
sp.ProcessBlock(&blockchain.BlockChain{}, nil, body, haveTime) + assert.Equal(t, process.ErrNilBlockHeader, err) } func TestShardProcessor_ProcessBlockWithNilBlockBodyShouldErr(t *testing.T) { - t.Parallel() + t.Parallel() - sp, _, _ := defaultShardProcessor() - err := sp.ProcessBlock(&blockchain.BlockChain{}, &block.Header{}, nil, haveTime) - assert.Equal(t, process.ErrNilBlockBody, err) + sp, _, _ := defaultShardProcessor() + err := sp.ProcessBlock(&blockchain.BlockChain{}, &block.Header{}, nil, haveTime) + assert.Equal(t, process.ErrNilBlockBody, err) } func TestShardProcessor_ProcessBlockWithNilHaveTimeFuncShouldErr(t *testing.T) { - t.Parallel() + t.Parallel() - sp, _, _ := defaultShardProcessor() - blk := make(block.Body, 0) - err := sp.ProcessBlock(&blockchain.BlockChain{}, &block.Header{}, blk, nil) - assert.Equal(t, process.ErrNilHaveTimeHandler, err) + sp, _, _ := defaultShardProcessor() + blk := make(block.Body, 0) + err := sp.ProcessBlock(&blockchain.BlockChain{}, &block.Header{}, blk, nil) + assert.Equal(t, process.ErrNilHaveTimeHandler, err) } func TestShardProcessor_ProcessWithDirtyAccountShouldErr(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - // set accounts dirty - journalLen := func() int { return 3 } - revToSnapshot := func(snapshot int) error { return nil } - blkc := &blockchain.BlockChain{} - hdr := block.Header{ - Nonce: 1, - PubKeysBitmap: []byte("0100101"), - PrevHash: []byte(""), - PrevRandSeed: []byte("rand seed"), - Signature: []byte("signature"), - RootHash: []byte("roothash"), - } - body := make(block.Body, 0) - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{ - JournalLenCalled: journalLen, - RevertToSnapshotCalled: revToSnapshot, - }, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - // should return err - err := sp.ProcessBlock(blkc, &hdr, body, haveTime) - assert.NotNil(t, err) - assert.Equal(t, process.ErrAccountStateDirty, err) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + // set accounts dirty + journalLen := func() int { return 3 } + revToSnapshot := func(snapshot int) error { return nil } + blkc := &blockchain.BlockChain{} + hdr := block.Header{ + Nonce: 1, + PubKeysBitmap: []byte("0100101"), + PrevHash: []byte(""), + PrevRandSeed: []byte("rand seed"), + Signature: []byte("signature"), + RootHash: []byte("roothash"), + } + body := make(block.Body, 0) + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.AccountsStub{ + JournalLenCalled: journalLen, + RevertToSnapshotCalled: revToSnapshot, + }, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + // should return err + err := sp.ProcessBlock(blkc, &hdr, body, haveTime) + assert.NotNil(t, err) + assert.Equal(t, process.ErrAccountStateDirty, err) } func 
TestShardProcessor_ProcessBlockHeaderBodyMismatchShouldErr(t *testing.T) { - t.Parallel() - - txHash := []byte("tx_hash1") - blkc := &blockchain.BlockChain{} - hdr := block.Header{ - Nonce: 1, - PrevHash: []byte(""), - PrevRandSeed: []byte("rand seed"), - Signature: []byte("signature"), - PubKeysBitmap: []byte("00110"), - ShardId: 0, - RootHash: []byte("rootHash"), - } - body := make(block.Body, 0) - txHashes := make([][]byte, 0) - txHashes = append(txHashes, txHash) - miniblock := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 0, - TxHashes: txHashes, - } - body = append(body, &miniblock) - - sp, _, _ := defaultShardProcessor() - - // should return err - err := sp.ProcessBlock(blkc, &hdr, body, haveTime) - assert.Equal(t, process.ErrHeaderBodyMismatch, err) + t.Parallel() + + txHash := []byte("tx_hash1") + blkc := &blockchain.BlockChain{} + hdr := block.Header{ + Nonce: 1, + PrevHash: []byte(""), + PrevRandSeed: []byte("rand seed"), + Signature: []byte("signature"), + PubKeysBitmap: []byte("00110"), + ShardId: 0, + RootHash: []byte("rootHash"), + } + body := make(block.Body, 0) + txHashes := make([][]byte, 0) + txHashes = append(txHashes, txHash) + miniblock := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 0, + TxHashes: txHashes, + } + body = append(body, &miniblock) + + sp, _, _ := defaultShardProcessor() + + // should return err + err := sp.ProcessBlock(blkc, &hdr, body, haveTime) + assert.Equal(t, process.ErrHeaderBodyMismatch, err) } func TestShardProcessor_ProcessBlockWithInvalidTransactionShouldErr(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - txHash := []byte("tx_hash1") - blkc := &blockchain.BlockChain{} - - body := make(block.Body, 0) - txHashes := make([][]byte, 0) - txHashes = append(txHashes, txHash) - miniblock := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 0, - TxHashes: txHashes, - } - body = append(body, &miniblock) - - hasher := &mock.HasherStub{} - marshalizer := &mock.MarshalizerMock{} - - mbbytes, _ := marshalizer.Marshal(miniblock) - mbHash := hasher.Compute(string(mbbytes)) - mbHdr := block.MiniBlockHeader{ - SenderShardID: 0, - ReceiverShardID: 0, - TxCount: uint32(len(txHashes)), - Hash: mbHash} - mbHdrs := make([]block.MiniBlockHeader, 0) - mbHdrs = append(mbHdrs, mbHdr) - - hdr := block.Header{ - Nonce: 1, - PrevHash: []byte(""), - PrevRandSeed: []byte("rand seed"), - Signature: []byte("signature"), - PubKeysBitmap: []byte("00110"), - ShardId: 0, - RootHash: []byte("rootHash"), - MiniBlockHeaders: mbHdrs, - } - // set accounts not dirty - journalLen := func() int { return 0 } - revertToSnapshot := func(snapshot int) error { return nil } - rootHashCalled := func() ([]byte, error) { - return []byte("rootHash"), nil - } - - accounts := &mock.AccountsStub{ - JournalLenCalled: journalLen, - RevertToSnapshotCalled: revertToSnapshot, - RootHashCalled: rootHashCalled, - } - factory, _ := shard.NewPreProcessorsContainerFactory( - mock.NewMultiShardsCoordinatorMock(5), - initStore(), - marshalizer, - hasher, - tdp, - &mock.AddressConverterMock{}, - accounts, - &mock.RequestHandlerMock{}, - &mock.TxProcessorMock{ - ProcessTransactionCalled: func(transaction *transaction.Transaction, round uint64) error { - return process.ErrHigherNonceInTransaction - }, - }, - &mock.SCProcessorMock{}, - &mock.SmartContractResultsProcessorMock{}, - &mock.RewardTxProcessorMock{}, - ) - container, _ := factory.Create() - - tc, err := coordinator.NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(3), - accounts, - tdp, - 
&mock.RequestHandlerMock{}, - container, - &mock.InterimProcessorContainerMock{}, - ) - - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - hasher, - marshalizer, - &mock.AccountsStub{ - JournalLenCalled: journalLen, - RevertToSnapshotCalled: revertToSnapshot, - RootHashCalled: rootHashCalled, - }, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - tc, - &mock.Uint64ByteSliceConverterMock{}, - ) - - // should return err - err = sp.ProcessBlock(blkc, &hdr, body, haveTime) - assert.Equal(t, process.ErrHigherNonceInTransaction, err) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + txHash := []byte("tx_hash1") + blkc := &blockchain.BlockChain{} + + body := make(block.Body, 0) + txHashes := make([][]byte, 0) + txHashes = append(txHashes, txHash) + miniblock := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 0, + TxHashes: txHashes, + } + body = append(body, &miniblock) + + hasher := &mock.HasherStub{} + marshalizer := &mock.MarshalizerMock{} + + mbbytes, _ := marshalizer.Marshal(miniblock) + mbHash := hasher.Compute(string(mbbytes)) + mbHdr := block.MiniBlockHeader{ + SenderShardID: 0, + ReceiverShardID: 0, + TxCount: uint32(len(txHashes)), + Hash: mbHash} + mbHdrs := make([]block.MiniBlockHeader, 0) + mbHdrs = append(mbHdrs, mbHdr) + + hdr := block.Header{ + Nonce: 1, + PrevHash: []byte(""), + PrevRandSeed: []byte("rand seed"), + Signature: []byte("signature"), + PubKeysBitmap: []byte("00110"), + ShardId: 0, + RootHash: []byte("rootHash"), + MiniBlockHeaders: mbHdrs, + } + // set accounts not dirty + journalLen := func() int { return 0 } + revertToSnapshot := func(snapshot int) error { return nil } + rootHashCalled := func() ([]byte, error) { + return []byte("rootHash"), nil + } + + accounts := &mock.AccountsStub{ + JournalLenCalled: journalLen, + RevertToSnapshotCalled: revertToSnapshot, + RootHashCalled: rootHashCalled, + } + factory, _ := shard.NewPreProcessorsContainerFactory( + mock.NewMultiShardsCoordinatorMock(5), + initStore(), + marshalizer, + hasher, + tdp, + &mock.AddressConverterMock{}, + accounts, + &mock.RequestHandlerMock{}, + &mock.TxProcessorMock{ + ProcessTransactionCalled: func(transaction *transaction.Transaction, round uint64) error { + return process.ErrHigherNonceInTransaction + }, + }, + &mock.SCProcessorMock{}, + &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + ) + container, _ := factory.Create() + + tc, err := coordinator.NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(3), + accounts, + tdp, + &mock.RequestHandlerMock{}, + container, + &mock.InterimProcessorContainerMock{}, + ) + + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + hasher, + marshalizer, + &mock.AccountsStub{ + JournalLenCalled: journalLen, + RevertToSnapshotCalled: revertToSnapshot, + RootHashCalled: rootHashCalled, + }, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + tc, + &mock.Uint64ByteSliceConverterMock{}, + ) + + // should return err + err = sp.ProcessBlock(blkc, &hdr, body, haveTime) + assert.Equal(t, 
process.ErrHigherNonceInTransaction, err) } func TestShardProcessor_ProcessWithHeaderNotFirstShouldErr(t *testing.T) { - t.Parallel() - - sp, _, _ := defaultShardProcessor() - hdr := &block.Header{ - Nonce: 0, - Round: 1, - PubKeysBitmap: []byte("0100101"), - PrevHash: []byte(""), - PrevRandSeed: []byte("rand seed"), - Signature: []byte("signature"), - RootHash: []byte("root hash"), - } - body := make(block.Body, 0) - blkc := &blockchain.BlockChain{} - err := sp.ProcessBlock(blkc, hdr, body, haveTime) - assert.Equal(t, process.ErrWrongNonceInBlock, err) + t.Parallel() + + sp, _, _ := defaultShardProcessor() + hdr := &block.Header{ + Nonce: 0, + Round: 1, + PubKeysBitmap: []byte("0100101"), + PrevHash: []byte(""), + PrevRandSeed: []byte("rand seed"), + Signature: []byte("signature"), + RootHash: []byte("root hash"), + } + body := make(block.Body, 0) + blkc := &blockchain.BlockChain{} + err := sp.ProcessBlock(blkc, hdr, body, haveTime) + assert.Equal(t, process.ErrWrongNonceInBlock, err) } func TestShardProcessor_ProcessWithHeaderNotCorrectNonceShouldErr(t *testing.T) { - t.Parallel() - - sp, _, _ := defaultShardProcessor() - hdr := &block.Header{ - Nonce: 0, - Round: 1, - PubKeysBitmap: []byte("0100101"), - PrevHash: []byte(""), - PrevRandSeed: []byte("rand seed"), - Signature: []byte("signature"), - RootHash: []byte("root hash"), - } - body := make(block.Body, 0) - blkc := &blockchain.BlockChain{} - err := sp.ProcessBlock(blkc, hdr, body, haveTime) - assert.Equal(t, process.ErrWrongNonceInBlock, err) + t.Parallel() + + sp, _, _ := defaultShardProcessor() + hdr := &block.Header{ + Nonce: 0, + Round: 1, + PubKeysBitmap: []byte("0100101"), + PrevHash: []byte(""), + PrevRandSeed: []byte("rand seed"), + Signature: []byte("signature"), + RootHash: []byte("root hash"), + } + body := make(block.Body, 0) + blkc := &blockchain.BlockChain{} + err := sp.ProcessBlock(blkc, hdr, body, haveTime) + assert.Equal(t, process.ErrWrongNonceInBlock, err) } func TestShardProcessor_ProcessWithHeaderNotCorrectPrevHashShouldErr(t *testing.T) { - t.Parallel() - - randSeed := []byte("rand seed") - sp, _, _ := defaultShardProcessor() - hdr := &block.Header{ - Nonce: 1, - Round: 1, - PubKeysBitmap: []byte("0100101"), - PrevHash: []byte("zzz"), - PrevRandSeed: randSeed, - Signature: []byte("signature"), - RootHash: []byte("root hash"), - } - body := make(block.Body, 0) - blkc := &blockchain.BlockChain{ - CurrentBlockHeader: &block.Header{ - Nonce: 0, - RandSeed: randSeed, - }, - } - err := sp.ProcessBlock(blkc, hdr, body, haveTime) - assert.Equal(t, process.ErrBlockHashDoesNotMatch, err) + t.Parallel() + + randSeed := []byte("rand seed") + sp, _, _ := defaultShardProcessor() + hdr := &block.Header{ + Nonce: 1, + Round: 1, + PubKeysBitmap: []byte("0100101"), + PrevHash: []byte("zzz"), + PrevRandSeed: randSeed, + Signature: []byte("signature"), + RootHash: []byte("root hash"), + } + body := make(block.Body, 0) + blkc := &blockchain.BlockChain{ + CurrentBlockHeader: &block.Header{ + Nonce: 0, + RandSeed: randSeed, + }, + } + err := sp.ProcessBlock(blkc, hdr, body, haveTime) + assert.Equal(t, process.ErrBlockHashDoesNotMatch, err) } func TestShardProcessor_ProcessBlockWithErrOnProcessBlockTransactionsCallShouldRevertState(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - txHash := []byte("tx_hash1") - randSeed := []byte("rand seed") - blkc := &blockchain.BlockChain{ - CurrentBlockHeader: &block.Header{ - Nonce: 0, - RandSeed: randSeed, - }, - } - body := make(block.Body, 0) - txHashes := 
make([][]byte, 0) - txHashes = append(txHashes, txHash) - miniblock := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 0, - TxHashes: txHashes, - } - body = append(body, &miniblock) - - hasher := &mock.HasherStub{} - marshalizer := &mock.MarshalizerMock{} - - mbbytes, _ := marshalizer.Marshal(miniblock) - mbHash := hasher.Compute(string(mbbytes)) - mbHdr := block.MiniBlockHeader{ - SenderShardID: 0, - ReceiverShardID: 0, - TxCount: uint32(len(txHashes)), - Hash: mbHash} - mbHdrs := make([]block.MiniBlockHeader, 0) - mbHdrs = append(mbHdrs, mbHdr) - - hdr := block.Header{ - Round: 1, - Nonce: 1, - PrevHash: []byte(""), - PrevRandSeed: randSeed, - Signature: []byte("signature"), - PubKeysBitmap: []byte("00110"), - ShardId: 0, - RootHash: []byte("rootHash"), - MiniBlockHeaders: mbHdrs, - } - - // set accounts not dirty - journalLen := func() int { return 0 } - wasCalled := false - revertToSnapshot := func(snapshot int) error { - wasCalled = true - return nil - } - rootHashCalled := func() ([]byte, error) { - return []byte("rootHash"), nil - } - - err := errors.New("process block transaction error") - txProcess := func(transaction *transaction.Transaction, round uint64) error { - return err - } - - shardCoordinator := mock.NewMultiShardsCoordinatorMock(3) - tpm := &mock.TxProcessorMock{ProcessTransactionCalled: txProcess} - store := &mock.ChainStorerMock{} - accounts := &mock.AccountsStub{ - JournalLenCalled: journalLen, - RevertToSnapshotCalled: revertToSnapshot, - RootHashCalled: rootHashCalled, - } - factory, _ := shard.NewPreProcessorsContainerFactory( - mock.NewMultiShardsCoordinatorMock(3), - store, - marshalizer, - hasher, - tdp, - &mock.AddressConverterMock{}, - accounts, - &mock.RequestHandlerMock{}, - tpm, - &mock.SCProcessorMock{}, - &mock.SmartContractResultsProcessorMock{}, - &mock.RewardTxProcessorMock{}, - ) - container, _ := factory.Create() - - tc, _ := coordinator.NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(3), - accounts, - tdp, - &mock.RequestHandlerMock{}, - container, - &mock.InterimProcessorContainerMock{}, - ) - - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - store, - hasher, - &mock.MarshalizerMock{}, - accounts, - shardCoordinator, - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{ - ProbableHighestNonceCalled: func() uint64 { - return 0 - }, - GetHighestFinalBlockNonceCalled: func() uint64 { - return 0 - }, - }, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - tc, - &mock.Uint64ByteSliceConverterMock{}, - ) - - // should return err - err2 := sp.ProcessBlock(blkc, &hdr, body, haveTime) - assert.Equal(t, err, err2) - assert.True(t, wasCalled) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + txHash := []byte("tx_hash1") + randSeed := []byte("rand seed") + blkc := &blockchain.BlockChain{ + CurrentBlockHeader: &block.Header{ + Nonce: 0, + RandSeed: randSeed, + }, + } + body := make(block.Body, 0) + txHashes := make([][]byte, 0) + txHashes = append(txHashes, txHash) + miniblock := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 0, + TxHashes: txHashes, + } + body = append(body, &miniblock) + + hasher := &mock.HasherStub{} + marshalizer := &mock.MarshalizerMock{} + + mbbytes, _ := marshalizer.Marshal(miniblock) + mbHash := hasher.Compute(string(mbbytes)) + mbHdr := block.MiniBlockHeader{ + SenderShardID: 0, + ReceiverShardID: 0, + TxCount: uint32(len(txHashes)), + Hash: mbHash} + 
mbHdrs := make([]block.MiniBlockHeader, 0) + mbHdrs = append(mbHdrs, mbHdr) + + hdr := block.Header{ + Round: 1, + Nonce: 1, + PrevHash: []byte(""), + PrevRandSeed: randSeed, + Signature: []byte("signature"), + PubKeysBitmap: []byte("00110"), + ShardId: 0, + RootHash: []byte("rootHash"), + MiniBlockHeaders: mbHdrs, + } + + // set accounts not dirty + journalLen := func() int { return 0 } + wasCalled := false + revertToSnapshot := func(snapshot int) error { + wasCalled = true + return nil + } + rootHashCalled := func() ([]byte, error) { + return []byte("rootHash"), nil + } + + err := errors.New("process block transaction error") + txProcess := func(transaction *transaction.Transaction, round uint64) error { + return err + } + + shardCoordinator := mock.NewMultiShardsCoordinatorMock(3) + tpm := &mock.TxProcessorMock{ProcessTransactionCalled: txProcess} + store := &mock.ChainStorerMock{} + accounts := &mock.AccountsStub{ + JournalLenCalled: journalLen, + RevertToSnapshotCalled: revertToSnapshot, + RootHashCalled: rootHashCalled, + } + factory, _ := shard.NewPreProcessorsContainerFactory( + mock.NewMultiShardsCoordinatorMock(3), + store, + marshalizer, + hasher, + tdp, + &mock.AddressConverterMock{}, + accounts, + &mock.RequestHandlerMock{}, + tpm, + &mock.SCProcessorMock{}, + &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + ) + container, _ := factory.Create() + + tc, _ := coordinator.NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(3), + accounts, + tdp, + &mock.RequestHandlerMock{}, + container, + &mock.InterimProcessorContainerMock{}, + ) + + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + store, + hasher, + &mock.MarshalizerMock{}, + accounts, + shardCoordinator, + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{ + ProbableHighestNonceCalled: func() uint64 { + return 0 + }, + GetHighestFinalBlockNonceCalled: func() uint64 { + return 0 + }, + }, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + tc, + &mock.Uint64ByteSliceConverterMock{}, + ) + + // should return err + err2 := sp.ProcessBlock(blkc, &hdr, body, haveTime) + assert.Equal(t, err, err2) + assert.True(t, wasCalled) } func TestShardProcessor_ProcessBlockWithErrOnVerifyStateRootCallShouldRevertState(t *testing.T) { - t.Parallel() - - randSeed := []byte("rand seed") - txHash := []byte("tx_hash1") - blkc := &blockchain.BlockChain{ - CurrentBlockHeader: &block.Header{ - Nonce: 0, - RandSeed: randSeed, - }, - } - body := make(block.Body, 0) - txHashes := make([][]byte, 0) - txHashes = append(txHashes, txHash) - miniblock := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 0, - TxHashes: txHashes, - } - body = append(body, &miniblock) - - hasher := &mock.HasherStub{} - marshalizer := &mock.MarshalizerMock{} - - mbbytes, _ := marshalizer.Marshal(miniblock) - mbHash := hasher.Compute(string(mbbytes)) - mbHdr := block.MiniBlockHeader{ - SenderShardID: 0, - ReceiverShardID: 0, - TxCount: uint32(len(txHashes)), - Hash: mbHash} - mbHdrs := make([]block.MiniBlockHeader, 0) - mbHdrs = append(mbHdrs, mbHdr) - - hdr := block.Header{ - Round: 1, - Nonce: 1, - PrevHash: []byte(""), - PrevRandSeed: randSeed, - Signature: []byte("signature"), - PubKeysBitmap: []byte("00110"), - ShardId: 0, - RootHash: []byte("rootHashX"), - MiniBlockHeaders: mbHdrs, - } - - sp, flags, _ := defaultShardProcessor() - // should return err - err := sp.ProcessBlock(blkc, &hdr, 
body, haveTime) - assert.Equal(t, process.ErrRootStateMissmatch, err) - assert.True(t, flags.revertToSnapshotCalled) + t.Parallel() + + randSeed := []byte("rand seed") + txHash := []byte("tx_hash1") + blkc := &blockchain.BlockChain{ + CurrentBlockHeader: &block.Header{ + Nonce: 0, + RandSeed: randSeed, + }, + } + body := make(block.Body, 0) + txHashes := make([][]byte, 0) + txHashes = append(txHashes, txHash) + miniblock := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 0, + TxHashes: txHashes, + } + body = append(body, &miniblock) + + hasher := &mock.HasherStub{} + marshalizer := &mock.MarshalizerMock{} + + mbbytes, _ := marshalizer.Marshal(miniblock) + mbHash := hasher.Compute(string(mbbytes)) + mbHdr := block.MiniBlockHeader{ + SenderShardID: 0, + ReceiverShardID: 0, + TxCount: uint32(len(txHashes)), + Hash: mbHash} + mbHdrs := make([]block.MiniBlockHeader, 0) + mbHdrs = append(mbHdrs, mbHdr) + + hdr := block.Header{ + Round: 1, + Nonce: 1, + PrevHash: []byte(""), + PrevRandSeed: randSeed, + Signature: []byte("signature"), + PubKeysBitmap: []byte("00110"), + ShardId: 0, + RootHash: []byte("rootHashX"), + MiniBlockHeaders: mbHdrs, + } + + sp, flags, _ := defaultShardProcessor() + // should return err + err := sp.ProcessBlock(blkc, &hdr, body, haveTime) + assert.Equal(t, process.ErrRootStateMissmatch, err) + assert.True(t, flags.revertToSnapshotCalled) } func TestShardProcessor_ProcessBlockOnlyIntraShardShouldPass(t *testing.T) { - t.Parallel() - - randSeed := []byte("rand seed") - txHash := []byte("tx_hash1") - blkc := &blockchain.BlockChain{ - CurrentBlockHeader: &block.Header{ - Nonce: 0, - RandSeed: randSeed, - }, - } - rootHash := []byte("rootHash") - body := make(block.Body, 0) - txHashes := make([][]byte, 0) - txHashes = append(txHashes, txHash) - miniblock := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 0, - TxHashes: txHashes, - } - body = append(body, &miniblock) - - hasher := &mock.HasherStub{} - marshalizer := &mock.MarshalizerMock{} - - mbbytes, _ := marshalizer.Marshal(miniblock) - mbHash := hasher.Compute(string(mbbytes)) - mbHdr := block.MiniBlockHeader{ - SenderShardID: 0, - ReceiverShardID: 0, - TxCount: uint32(len(txHashes)), - Hash: mbHash} - mbHdrs := make([]block.MiniBlockHeader, 0) - mbHdrs = append(mbHdrs, mbHdr) - - hdr := block.Header{ - Round: 1, - Nonce: 1, - PrevHash: []byte(""), - PrevRandSeed: randSeed, - Signature: []byte("signature"), - PubKeysBitmap: []byte("00110"), - ShardId: 0, - RootHash: rootHash, - MiniBlockHeaders: mbHdrs, - } - - sp, flags, _ := defaultShardProcessor() - - // should return err - err := sp.ProcessBlock(blkc, &hdr, body, haveTime) - assert.Nil(t, err) - assert.False(t, flags.revertToSnapshotCalled) + t.Parallel() + + randSeed := []byte("rand seed") + txHash := []byte("tx_hash1") + blkc := &blockchain.BlockChain{ + CurrentBlockHeader: &block.Header{ + Nonce: 0, + RandSeed: randSeed, + }, + } + rootHash := []byte("rootHash") + body := make(block.Body, 0) + txHashes := make([][]byte, 0) + txHashes = append(txHashes, txHash) + miniblock := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 0, + TxHashes: txHashes, + } + body = append(body, &miniblock) + + hasher := &mock.HasherStub{} + marshalizer := &mock.MarshalizerMock{} + + mbbytes, _ := marshalizer.Marshal(miniblock) + mbHash := hasher.Compute(string(mbbytes)) + mbHdr := block.MiniBlockHeader{ + SenderShardID: 0, + ReceiverShardID: 0, + TxCount: uint32(len(txHashes)), + Hash: mbHash} + mbHdrs := make([]block.MiniBlockHeader, 0) + mbHdrs = append(mbHdrs, 
mbHdr) + + hdr := block.Header{ + Round: 1, + Nonce: 1, + PrevHash: []byte(""), + PrevRandSeed: randSeed, + Signature: []byte("signature"), + PubKeysBitmap: []byte("00110"), + ShardId: 0, + RootHash: rootHash, + MiniBlockHeaders: mbHdrs, + } + + sp, flags, _ := defaultShardProcessor() + + // should return err + err := sp.ProcessBlock(blkc, &hdr, body, haveTime) + assert.Nil(t, err) + assert.False(t, flags.revertToSnapshotCalled) } func TestShardProcessor_ProcessBlockCrossShardWithoutMetaShouldFail(t *testing.T) { - t.Parallel() - - randSeed := []byte("rand seed") - tdp := initDataPool([]byte("tx_hash1")) - txHash := []byte("tx_hash1") - blkc := &blockchain.BlockChain{ - CurrentBlockHeader: &block.Header{ - Nonce: 0, - RandSeed: randSeed, - }, - } - rootHash := []byte("rootHash") - body := make(block.Body, 0) - txHashes := make([][]byte, 0) - txHashes = append(txHashes, txHash) - miniblock := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 1, - TxHashes: txHashes, - } - body = append(body, &miniblock) - - shardCoordinator := mock.NewMultiShardsCoordinatorMock(3) - tx := &transaction.Transaction{} - tdp.Transactions().AddData(txHash, tx, shardCoordinator.CommunicationIdentifier(0)) - - hasher := &mock.HasherStub{} - marshalizer := &mock.MarshalizerMock{} - - mbbytes, _ := marshalizer.Marshal(miniblock) - mbHash := hasher.Compute(string(mbbytes)) - mbHdr := block.MiniBlockHeader{ - ReceiverShardID: 0, - SenderShardID: 1, - TxCount: uint32(len(txHashes)), - Hash: mbHash} - mbHdrs := make([]block.MiniBlockHeader, 0) - mbHdrs = append(mbHdrs, mbHdr) - - hdr := block.Header{ - Round: 1, - Nonce: 1, - PrevHash: []byte(""), - PrevRandSeed: randSeed, - Signature: []byte("signature"), - PubKeysBitmap: []byte("00110"), - ShardId: 0, - RootHash: rootHash, - MiniBlockHeaders: mbHdrs, - } - // set accounts not dirty - journalLen := func() int { return 0 } - wasCalled := false - revertToSnapshot := func(snapshot int) error { - wasCalled = true - return nil - } - rootHashCalled := func() ([]byte, error) { - return rootHash, nil - } - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{ - JournalLenCalled: journalLen, - RevertToSnapshotCalled: revertToSnapshot, - RootHashCalled: rootHashCalled, - }, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - // should return err - err := sp.ProcessBlock(blkc, &hdr, body, haveTime) - assert.Equal(t, process.ErrCrossShardMBWithoutConfirmationFromMeta, err) - assert.False(t, wasCalled) + t.Parallel() + + randSeed := []byte("rand seed") + tdp := initDataPool([]byte("tx_hash1")) + txHash := []byte("tx_hash1") + blkc := &blockchain.BlockChain{ + CurrentBlockHeader: &block.Header{ + Nonce: 0, + RandSeed: randSeed, + }, + } + rootHash := []byte("rootHash") + body := make(block.Body, 0) + txHashes := make([][]byte, 0) + txHashes = append(txHashes, txHash) + miniblock := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 1, + TxHashes: txHashes, + } + body = append(body, &miniblock) + + shardCoordinator := mock.NewMultiShardsCoordinatorMock(3) + tx := &transaction.Transaction{} + tdp.Transactions().AddData(txHash, tx, 
shardCoordinator.CommunicationIdentifier(0)) + + hasher := &mock.HasherStub{} + marshalizer := &mock.MarshalizerMock{} + + mbbytes, _ := marshalizer.Marshal(miniblock) + mbHash := hasher.Compute(string(mbbytes)) + mbHdr := block.MiniBlockHeader{ + ReceiverShardID: 0, + SenderShardID: 1, + TxCount: uint32(len(txHashes)), + Hash: mbHash} + mbHdrs := make([]block.MiniBlockHeader, 0) + mbHdrs = append(mbHdrs, mbHdr) + + hdr := block.Header{ + Round: 1, + Nonce: 1, + PrevHash: []byte(""), + PrevRandSeed: randSeed, + Signature: []byte("signature"), + PubKeysBitmap: []byte("00110"), + ShardId: 0, + RootHash: rootHash, + MiniBlockHeaders: mbHdrs, + } + // set accounts not dirty + journalLen := func() int { return 0 } + wasCalled := false + revertToSnapshot := func(snapshot int) error { + wasCalled = true + return nil + } + rootHashCalled := func() ([]byte, error) { + return rootHash, nil + } + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.AccountsStub{ + JournalLenCalled: journalLen, + RevertToSnapshotCalled: revertToSnapshot, + RootHashCalled: rootHashCalled, + }, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + // should return err + err := sp.ProcessBlock(blkc, &hdr, body, haveTime) + assert.Equal(t, process.ErrCrossShardMBWithoutConfirmationFromMeta, err) + assert.False(t, wasCalled) } func TestShardProcessor_ProcessBlockCrossShardWithMetaShouldPass(t *testing.T) { - t.Parallel() - - tdp, blkc, rootHash, body, txHashes, hasher, marshalizer, _, mbHash := initBasicTestData() - mbHdr := block.MiniBlockHeader{ - ReceiverShardID: 0, - SenderShardID: 1, - TxCount: uint32(len(txHashes)), - Hash: mbHash} - mbHdrs := make([]block.MiniBlockHeader, 0) - mbHdrs = append(mbHdrs, mbHdr) - - randSeed := []byte("rand seed") - lastHdr := blkc.GetCurrentBlockHeader() - prevHash, _ := core.CalculateHash(marshalizer, hasher, lastHdr) - hdr := initBlockHeader(prevHash, randSeed, rootHash, mbHdrs) - - shardMiniBlock := block.ShardMiniBlockHeader{ - ReceiverShardId: mbHdr.ReceiverShardID, - SenderShardId: mbHdr.SenderShardID, - TxCount: mbHdr.TxCount, - Hash: mbHdr.Hash, - } - shardMiniblockHdrs := make([]block.ShardMiniBlockHeader, 0) - shardMiniblockHdrs = append(shardMiniblockHdrs, shardMiniBlock) - shardHeader := block.ShardData{ - ShardMiniBlockHeaders: shardMiniblockHdrs, - } - shardHdrs := make([]block.ShardData, 0) - shardHdrs = append(shardHdrs, shardHeader) - - meta := block.MetaBlock{ - Nonce: 1, - ShardInfo: shardHdrs, - RandSeed: randSeed, - } - metaBytes, _ := marshalizer.Marshal(meta) - metaHash := hasher.Compute(string(metaBytes)) - - tdp.MetaBlocks().Put(metaHash, meta) - - meta = block.MetaBlock{ - Nonce: 2, - ShardInfo: make([]block.ShardData, 0), - PrevRandSeed: randSeed, - } - metaBytes, _ = marshalizer.Marshal(meta) - metaHash = hasher.Compute(string(metaBytes)) - - tdp.MetaBlocks().Put(metaHash, meta) - - // set accounts not dirty - journalLen := func() int { return 0 } - wasCalled := false - revertToSnapshot := func(snapshot int) error { - wasCalled = true - return nil - } - rootHashCalled := func() ([]byte, error) { - return rootHash, nil - } - sp, _ := blproc.NewShardProcessor( - 
&mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - hasher, - marshalizer, - &mock.AccountsStub{ - JournalLenCalled: journalLen, - RevertToSnapshotCalled: revertToSnapshot, - RootHashCalled: rootHashCalled, - }, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - // should return err - err := sp.ProcessBlock(blkc, &hdr, body, haveTime) - assert.Equal(t, process.ErrCrossShardMBWithoutConfirmationFromMeta, err) - assert.False(t, wasCalled) + t.Parallel() + + tdp, blkc, rootHash, body, txHashes, hasher, marshalizer, _, mbHash := initBasicTestData() + mbHdr := block.MiniBlockHeader{ + ReceiverShardID: 0, + SenderShardID: 1, + TxCount: uint32(len(txHashes)), + Hash: mbHash} + mbHdrs := make([]block.MiniBlockHeader, 0) + mbHdrs = append(mbHdrs, mbHdr) + + randSeed := []byte("rand seed") + lastHdr := blkc.GetCurrentBlockHeader() + prevHash, _ := core.CalculateHash(marshalizer, hasher, lastHdr) + hdr := initBlockHeader(prevHash, randSeed, rootHash, mbHdrs) + + shardMiniBlock := block.ShardMiniBlockHeader{ + ReceiverShardId: mbHdr.ReceiverShardID, + SenderShardId: mbHdr.SenderShardID, + TxCount: mbHdr.TxCount, + Hash: mbHdr.Hash, + } + shardMiniblockHdrs := make([]block.ShardMiniBlockHeader, 0) + shardMiniblockHdrs = append(shardMiniblockHdrs, shardMiniBlock) + shardHeader := block.ShardData{ + ShardMiniBlockHeaders: shardMiniblockHdrs, + } + shardHdrs := make([]block.ShardData, 0) + shardHdrs = append(shardHdrs, shardHeader) + + meta := block.MetaBlock{ + Nonce: 1, + ShardInfo: shardHdrs, + RandSeed: randSeed, + } + metaBytes, _ := marshalizer.Marshal(meta) + metaHash := hasher.Compute(string(metaBytes)) + + tdp.MetaBlocks().Put(metaHash, meta) + + meta = block.MetaBlock{ + Nonce: 2, + ShardInfo: make([]block.ShardData, 0), + PrevRandSeed: randSeed, + } + metaBytes, _ = marshalizer.Marshal(meta) + metaHash = hasher.Compute(string(metaBytes)) + + tdp.MetaBlocks().Put(metaHash, meta) + + // set accounts not dirty + journalLen := func() int { return 0 } + wasCalled := false + revertToSnapshot := func(snapshot int) error { + wasCalled = true + return nil + } + rootHashCalled := func() ([]byte, error) { + return rootHash, nil + } + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + hasher, + marshalizer, + &mock.AccountsStub{ + JournalLenCalled: journalLen, + RevertToSnapshotCalled: revertToSnapshot, + RootHashCalled: rootHashCalled, + }, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + // should return err + err := sp.ProcessBlock(blkc, &hdr, body, haveTime) + assert.Equal(t, process.ErrCrossShardMBWithoutConfirmationFromMeta, err) + assert.False(t, wasCalled) } func TestShardProcessor_ProcessBlockHaveTimeLessThanZeroShouldErr(t *testing.T) { - t.Parallel() - txHash := []byte("tx_hash1") - tdp := initDataPool(txHash) - - randSeed := []byte("rand seed") - blkc := &blockchain.BlockChain{ - CurrentBlockHeader: &block.Header{ - Nonce: 1, - RandSeed: randSeed, 
- }, - } - rootHash := []byte("rootHash") - body := make(block.Body, 0) - txHashes := make([][]byte, 0) - txHashes = append(txHashes, txHash) - miniblock := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 0, - TxHashes: txHashes, - } - body = append(body, &miniblock) - - hasher := &mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} - - mbbytes, _ := marshalizer.Marshal(miniblock) - mbHash := hasher.Compute(string(mbbytes)) - mbHdr := block.MiniBlockHeader{ - SenderShardID: 0, - ReceiverShardID: 0, - TxCount: uint32(len(txHashes)), - Hash: mbHash} - mbHdrs := make([]block.MiniBlockHeader, 0) - mbHdrs = append(mbHdrs, mbHdr) - - currHdr := blkc.GetCurrentBlockHeader() - preHash, _ := core.CalculateHash(marshalizer, hasher, currHdr) - hdr := block.Header{ - Round: 2, - Nonce: 2, - PrevHash: preHash, - PrevRandSeed: randSeed, - Signature: []byte("signature"), - PubKeysBitmap: []byte("00110"), - ShardId: 0, - RootHash: rootHash, - MiniBlockHeaders: mbHdrs, - } - genesisBlocks := createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)) - sp, _ := blproc.NewShardProcessorEmptyWith3shards(tdp, genesisBlocks) - haveTimeLessThanZero := func() time.Duration { - return -1 * time.Millisecond - } - - // should return err - err := sp.ProcessBlock(blkc, &hdr, body, haveTimeLessThanZero) - assert.Equal(t, process.ErrTimeIsOut, err) + t.Parallel() + txHash := []byte("tx_hash1") + tdp := initDataPool(txHash) + + randSeed := []byte("rand seed") + blkc := &blockchain.BlockChain{ + CurrentBlockHeader: &block.Header{ + Nonce: 1, + RandSeed: randSeed, + }, + } + rootHash := []byte("rootHash") + body := make(block.Body, 0) + txHashes := make([][]byte, 0) + txHashes = append(txHashes, txHash) + miniblock := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 0, + TxHashes: txHashes, + } + body = append(body, &miniblock) + + hasher := &mock.HasherMock{} + marshalizer := &mock.MarshalizerMock{} + + mbbytes, _ := marshalizer.Marshal(miniblock) + mbHash := hasher.Compute(string(mbbytes)) + mbHdr := block.MiniBlockHeader{ + SenderShardID: 0, + ReceiverShardID: 0, + TxCount: uint32(len(txHashes)), + Hash: mbHash} + mbHdrs := make([]block.MiniBlockHeader, 0) + mbHdrs = append(mbHdrs, mbHdr) + + currHdr := blkc.GetCurrentBlockHeader() + preHash, _ := core.CalculateHash(marshalizer, hasher, currHdr) + hdr := block.Header{ + Round: 2, + Nonce: 2, + PrevHash: preHash, + PrevRandSeed: randSeed, + Signature: []byte("signature"), + PubKeysBitmap: []byte("00110"), + ShardId: 0, + RootHash: rootHash, + MiniBlockHeaders: mbHdrs, + } + genesisBlocks := createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)) + sp, _ := blproc.NewShardProcessorEmptyWith3shards(tdp, genesisBlocks) + haveTimeLessThanZero := func() time.Duration { + return -1 * time.Millisecond + } + + // should return err + err := sp.ProcessBlock(blkc, &hdr, body, haveTimeLessThanZero) + assert.Equal(t, process.ErrTimeIsOut, err) } func TestShardProcessor_ProcessBlockWithMissingMetaHdrShouldErr(t *testing.T) { - t.Parallel() - - tdp, blkc, rootHash, body, txHashes, hasher, marshalizer, _, mbHash := initBasicTestData() - mbHdr := block.MiniBlockHeader{ - ReceiverShardID: 0, - SenderShardID: 1, - TxCount: uint32(len(txHashes)), - Hash: mbHash} - mbHdrs := make([]block.MiniBlockHeader, 0) - mbHdrs = append(mbHdrs, mbHdr) - - randSeed := []byte("rand seed") - lastHdr := blkc.GetCurrentBlockHeader() - prevHash, _ := core.CalculateHash(marshalizer, hasher, lastHdr) - hdr := initBlockHeader(prevHash, randSeed, rootHash, mbHdrs) - - shardMiniBlock := 
block.ShardMiniBlockHeader{ - ReceiverShardId: mbHdr.ReceiverShardID, - SenderShardId: mbHdr.SenderShardID, - TxCount: mbHdr.TxCount, - Hash: mbHdr.Hash, - } - shardMiniblockHdrs := make([]block.ShardMiniBlockHeader, 0) - shardMiniblockHdrs = append(shardMiniblockHdrs, shardMiniBlock) - shardHeader := block.ShardData{ - ShardMiniBlockHeaders: shardMiniblockHdrs, - } - shardHdrs := make([]block.ShardData, 0) - shardHdrs = append(shardHdrs, shardHeader) - - meta := block.MetaBlock{ - Nonce: 1, - ShardInfo: shardHdrs, - RandSeed: randSeed, - } - metaBytes, _ := marshalizer.Marshal(meta) - metaHash := hasher.Compute(string(metaBytes)) - - hdr.MetaBlockHashes = append(hdr.MetaBlockHashes, metaHash) - tdp.MetaBlocks().Put(metaHash, meta) - - meta = block.MetaBlock{ - Nonce: 2, - ShardInfo: make([]block.ShardData, 0), - PrevRandSeed: randSeed, - } - metaBytes, _ = marshalizer.Marshal(meta) - metaHash = hasher.Compute(string(metaBytes)) - - hdr.MetaBlockHashes = append(hdr.MetaBlockHashes, metaHash) - tdp.MetaBlocks().Put(metaHash, meta) - - // set accounts not dirty - journalLen := func() int { return 0 } - revertToSnapshot := func(snapshot int) error { - return nil - } - rootHashCalled := func() ([]byte, error) { - return rootHash, nil - } - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - hasher, - marshalizer, - &mock.AccountsStub{ - JournalLenCalled: journalLen, - RevertToSnapshotCalled: revertToSnapshot, - RootHashCalled: rootHashCalled, - }, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - // should return err - err := sp.ProcessBlock(blkc, &hdr, body, haveTime) - assert.Equal(t, process.ErrTimeIsOut, err) + t.Parallel() + + tdp, blkc, rootHash, body, txHashes, hasher, marshalizer, _, mbHash := initBasicTestData() + mbHdr := block.MiniBlockHeader{ + ReceiverShardID: 0, + SenderShardID: 1, + TxCount: uint32(len(txHashes)), + Hash: mbHash} + mbHdrs := make([]block.MiniBlockHeader, 0) + mbHdrs = append(mbHdrs, mbHdr) + + randSeed := []byte("rand seed") + lastHdr := blkc.GetCurrentBlockHeader() + prevHash, _ := core.CalculateHash(marshalizer, hasher, lastHdr) + hdr := initBlockHeader(prevHash, randSeed, rootHash, mbHdrs) + + shardMiniBlock := block.ShardMiniBlockHeader{ + ReceiverShardId: mbHdr.ReceiverShardID, + SenderShardId: mbHdr.SenderShardID, + TxCount: mbHdr.TxCount, + Hash: mbHdr.Hash, + } + shardMiniblockHdrs := make([]block.ShardMiniBlockHeader, 0) + shardMiniblockHdrs = append(shardMiniblockHdrs, shardMiniBlock) + shardHeader := block.ShardData{ + ShardMiniBlockHeaders: shardMiniblockHdrs, + } + shardHdrs := make([]block.ShardData, 0) + shardHdrs = append(shardHdrs, shardHeader) + + meta := block.MetaBlock{ + Nonce: 1, + ShardInfo: shardHdrs, + RandSeed: randSeed, + } + metaBytes, _ := marshalizer.Marshal(meta) + metaHash := hasher.Compute(string(metaBytes)) + + hdr.MetaBlockHashes = append(hdr.MetaBlockHashes, metaHash) + tdp.MetaBlocks().Put(metaHash, meta) + + meta = block.MetaBlock{ + Nonce: 2, + ShardInfo: make([]block.ShardData, 0), + PrevRandSeed: randSeed, + } + metaBytes, _ = marshalizer.Marshal(meta) + metaHash = hasher.Compute(string(metaBytes)) + + hdr.MetaBlockHashes = append(hdr.MetaBlockHashes, metaHash) + 
tdp.MetaBlocks().Put(metaHash, meta) + + // set accounts not dirty + journalLen := func() int { return 0 } + revertToSnapshot := func(snapshot int) error { + return nil + } + rootHashCalled := func() ([]byte, error) { + return rootHash, nil + } + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + hasher, + marshalizer, + &mock.AccountsStub{ + JournalLenCalled: journalLen, + RevertToSnapshotCalled: revertToSnapshot, + RootHashCalled: rootHashCalled, + }, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + // should return err + err := sp.ProcessBlock(blkc, &hdr, body, haveTime) + assert.Equal(t, process.ErrTimeIsOut, err) } func TestShardProcessor_ProcessBlockWithWrongMiniBlockHeaderShouldErr(t *testing.T) { - t.Parallel() - - txHash := []byte("tx_hash1") - tdp := initDataPool(txHash) - randSeed := []byte("rand seed") - blkc := &blockchain.BlockChain{ - CurrentBlockHeader: &block.Header{ - Nonce: 1, - RandSeed: randSeed, - }, - } - rootHash := []byte("rootHash") - body := make(block.Body, 0) - txHashes := make([][]byte, 0) - txHashes = append(txHashes, txHash) - miniblock := block.MiniBlock{ - ReceiverShardID: 1, - SenderShardID: 0, - TxHashes: txHashes, - } - body = append(body, &miniblock) - - hasher := &mock.HasherStub{} - marshalizer := &mock.MarshalizerMock{} - - mbbytes, _ := marshalizer.Marshal(miniblock) - mbHash := hasher.Compute(string(mbbytes)) - mbHdr := block.MiniBlockHeader{ - SenderShardID: 1, - ReceiverShardID: 0, - TxCount: uint32(len(txHashes)), - Hash: mbHash, - } - mbHdrs := make([]block.MiniBlockHeader, 0) - mbHdrs = append(mbHdrs, mbHdr) - - lastHdr := blkc.GetCurrentBlockHeader() - prevHash, _ := core.CalculateHash(marshalizer, hasher, lastHdr) - hdr := initBlockHeader(prevHash, randSeed, rootHash, mbHdrs) - - rootHashCalled := func() ([]byte, error) { - return rootHash, nil - } - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{ - RootHashCalled: rootHashCalled, - }, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - // should return err - err := sp.ProcessBlock(blkc, &hdr, body, haveTime) - assert.Equal(t, process.ErrHeaderBodyMismatch, err) + t.Parallel() + + txHash := []byte("tx_hash1") + tdp := initDataPool(txHash) + randSeed := []byte("rand seed") + blkc := &blockchain.BlockChain{ + CurrentBlockHeader: &block.Header{ + Nonce: 1, + RandSeed: randSeed, + }, + } + rootHash := []byte("rootHash") + body := make(block.Body, 0) + txHashes := make([][]byte, 0) + txHashes = append(txHashes, txHash) + miniblock := block.MiniBlock{ + ReceiverShardID: 1, + SenderShardID: 0, + TxHashes: txHashes, + } + body = append(body, &miniblock) + + hasher := &mock.HasherStub{} + marshalizer := &mock.MarshalizerMock{} + + mbbytes, _ := marshalizer.Marshal(miniblock) + mbHash := hasher.Compute(string(mbbytes)) + mbHdr := 
block.MiniBlockHeader{ + SenderShardID: 1, + ReceiverShardID: 0, + TxCount: uint32(len(txHashes)), + Hash: mbHash, + } + mbHdrs := make([]block.MiniBlockHeader, 0) + mbHdrs = append(mbHdrs, mbHdr) + + lastHdr := blkc.GetCurrentBlockHeader() + prevHash, _ := core.CalculateHash(marshalizer, hasher, lastHdr) + hdr := initBlockHeader(prevHash, randSeed, rootHash, mbHdrs) + + rootHashCalled := func() ([]byte, error) { + return rootHash, nil + } + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.AccountsStub{ + RootHashCalled: rootHashCalled, + }, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + // should return err + err := sp.ProcessBlock(blkc, &hdr, body, haveTime) + assert.Equal(t, process.ErrHeaderBodyMismatch, err) } //------- checkAndRequestIfMetaHeadersMissing func TestShardProcessor_CheckAndRequestIfMetaHeadersMissingShouldErr(t *testing.T) { - t.Parallel() - - hdrNoncesRequestCalled := int32(0) - tdp, blkc, rootHash, body, txHashes, hasher, marshalizer, _, mbHash := initBasicTestData() - mbHdr := block.MiniBlockHeader{ - ReceiverShardID: 0, - SenderShardID: 1, - TxCount: uint32(len(txHashes)), - Hash: mbHash, - } - mbHdrs := make([]block.MiniBlockHeader, 0) - mbHdrs = append(mbHdrs, mbHdr) - - lastHdr := blkc.GetCurrentBlockHeader() - prevHash, _ := core.CalculateHash(marshalizer, hasher, lastHdr) - randSeed := []byte("rand seed") - - hdr := initBlockHeader(prevHash, randSeed, rootHash, mbHdrs) - - shardMiniBlock := block.ShardMiniBlockHeader{ - ReceiverShardId: mbHdr.ReceiverShardID, - SenderShardId: mbHdr.SenderShardID, - TxCount: mbHdr.TxCount, - Hash: mbHdr.Hash, - } - shardMiniblockHdrs := make([]block.ShardMiniBlockHeader, 0) - shardMiniblockHdrs = append(shardMiniblockHdrs, shardMiniBlock) - shardHeader := block.ShardData{ - ShardMiniBlockHeaders: shardMiniblockHdrs, - } - shardHdrs := make([]block.ShardData, 0) - shardHdrs = append(shardHdrs, shardHeader) - - meta := &block.MetaBlock{ - Nonce: 1, - ShardInfo: shardHdrs, - Round: 1, - RandSeed: randSeed, - } - metaBytes, _ := marshalizer.Marshal(meta) - metaHash := hasher.Compute(string(metaBytes)) - - hdr.MetaBlockHashes = append(hdr.MetaBlockHashes, metaHash) - tdp.MetaBlocks().Put(metaHash, meta) - - meta = &block.MetaBlock{ - Nonce: 2, - ShardInfo: make([]block.ShardData, 0), - Round: 2, - PrevRandSeed: randSeed, - } - metaBytes, _ = marshalizer.Marshal(meta) - metaHash = hasher.Compute(string(metaBytes)) - - tdp.MetaBlocks().Put(metaHash, meta) - - // set accounts not dirty - journalLen := func() int { return 0 } - revertToSnapshot := func(snapshot int) error { - return nil - } - rootHashCalled := func() ([]byte, error) { - return rootHash, nil - } - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - hasher, - marshalizer, - &mock.AccountsStub{ - JournalLenCalled: journalLen, - RevertToSnapshotCalled: revertToSnapshot, - RootHashCalled: rootHashCalled, - }, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - 
createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{ - RequestHeaderHandlerByNonceCalled: func(destShardID uint32, nonce uint64) { - atomic.AddInt32(&hdrNoncesRequestCalled, 1) - }, - }, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - err := sp.ProcessBlock(blkc, &hdr, body, haveTime) - - sp.CheckAndRequestIfMetaHeadersMissing(2) - time.Sleep(100 * time.Millisecond) - assert.Equal(t, int32(1), atomic.LoadInt32(&hdrNoncesRequestCalled)) - assert.Equal(t, err, process.ErrTimeIsOut) + t.Parallel() + + hdrNoncesRequestCalled := int32(0) + tdp, blkc, rootHash, body, txHashes, hasher, marshalizer, _, mbHash := initBasicTestData() + mbHdr := block.MiniBlockHeader{ + ReceiverShardID: 0, + SenderShardID: 1, + TxCount: uint32(len(txHashes)), + Hash: mbHash, + } + mbHdrs := make([]block.MiniBlockHeader, 0) + mbHdrs = append(mbHdrs, mbHdr) + + lastHdr := blkc.GetCurrentBlockHeader() + prevHash, _ := core.CalculateHash(marshalizer, hasher, lastHdr) + randSeed := []byte("rand seed") + + hdr := initBlockHeader(prevHash, randSeed, rootHash, mbHdrs) + + shardMiniBlock := block.ShardMiniBlockHeader{ + ReceiverShardId: mbHdr.ReceiverShardID, + SenderShardId: mbHdr.SenderShardID, + TxCount: mbHdr.TxCount, + Hash: mbHdr.Hash, + } + shardMiniblockHdrs := make([]block.ShardMiniBlockHeader, 0) + shardMiniblockHdrs = append(shardMiniblockHdrs, shardMiniBlock) + shardHeader := block.ShardData{ + ShardMiniBlockHeaders: shardMiniblockHdrs, + } + shardHdrs := make([]block.ShardData, 0) + shardHdrs = append(shardHdrs, shardHeader) + + meta := &block.MetaBlock{ + Nonce: 1, + ShardInfo: shardHdrs, + Round: 1, + RandSeed: randSeed, + } + metaBytes, _ := marshalizer.Marshal(meta) + metaHash := hasher.Compute(string(metaBytes)) + + hdr.MetaBlockHashes = append(hdr.MetaBlockHashes, metaHash) + tdp.MetaBlocks().Put(metaHash, meta) + + meta = &block.MetaBlock{ + Nonce: 2, + ShardInfo: make([]block.ShardData, 0), + Round: 2, + PrevRandSeed: randSeed, + } + metaBytes, _ = marshalizer.Marshal(meta) + metaHash = hasher.Compute(string(metaBytes)) + + tdp.MetaBlocks().Put(metaHash, meta) + + // set accounts not dirty + journalLen := func() int { return 0 } + revertToSnapshot := func(snapshot int) error { + return nil + } + rootHashCalled := func() ([]byte, error) { + return rootHash, nil + } + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + hasher, + marshalizer, + &mock.AccountsStub{ + JournalLenCalled: journalLen, + RevertToSnapshotCalled: revertToSnapshot, + RootHashCalled: rootHashCalled, + }, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{ + RequestHeaderHandlerByNonceCalled: func(destShardID uint32, nonce uint64) { + atomic.AddInt32(&hdrNoncesRequestCalled, 1) + }, + }, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + err := sp.ProcessBlock(blkc, &hdr, body, haveTime) + + sp.CheckAndRequestIfMetaHeadersMissing(2) + time.Sleep(100 * time.Millisecond) + assert.Equal(t, int32(1), atomic.LoadInt32(&hdrNoncesRequestCalled)) + assert.Equal(t, err, process.ErrTimeIsOut) } //-------- isMetaHeaderFinal func TestShardProcessor_IsMetaHeaderFinalShouldPass(t *testing.T) { - t.Parallel() - - tdp := mock.NewPoolsHolderMock() - txHash := []byte("tx_hash1") - randSeed := 
[]byte("rand seed") - tdp.Transactions().AddData(txHash, &transaction.Transaction{}, process.ShardCacherIdentifier(1, 0)) - blkc := &blockchain.BlockChain{ - CurrentBlockHeader: &block.Header{ - Nonce: 1, - RandSeed: randSeed, - }, - } - rootHash := []byte("rootHash") - body := make(block.Body, 0) - txHashes := make([][]byte, 0) - txHashes = append(txHashes, txHash) - miniblock := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 1, - TxHashes: txHashes, - } - body = append(body, &miniblock) - - hasher := &mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} - - mbbytes, _ := marshalizer.Marshal(miniblock) - mbHash := hasher.Compute(string(mbbytes)) - mbHdr := block.MiniBlockHeader{ - ReceiverShardID: 0, - SenderShardID: 1, - TxCount: uint32(len(txHashes)), - Hash: mbHash, - } - mbHdrs := make([]block.MiniBlockHeader, 0) - mbHdrs = append(mbHdrs, mbHdr) - - lastHdr := blkc.GetCurrentBlockHeader() - prevHash, _ := core.CalculateHash(marshalizer, hasher, lastHdr) - hdr := initBlockHeader(prevHash, randSeed, rootHash, mbHdrs) - - shardMiniBlock := block.ShardMiniBlockHeader{ - ReceiverShardId: mbHdr.ReceiverShardID, - SenderShardId: mbHdr.SenderShardID, - TxCount: mbHdr.TxCount, - Hash: mbHdr.Hash, - } - shardMiniblockHdrs := make([]block.ShardMiniBlockHeader, 0) - shardMiniblockHdrs = append(shardMiniblockHdrs, shardMiniBlock) - shardHeader := block.ShardData{ - ShardMiniBlockHeaders: shardMiniblockHdrs, - } - shardHdrs := make([]block.ShardData, 0) - shardHdrs = append(shardHdrs, shardHeader) - - meta := &block.MetaBlock{ - Nonce: 1, - ShardInfo: shardHdrs, - Round: 1, - RandSeed: randSeed, - } - metaBytes, _ := marshalizer.Marshal(meta) - metaHash := hasher.Compute(string(metaBytes)) - - hdr.MetaBlockHashes = append(hdr.MetaBlockHashes, metaHash) - tdp.MetaBlocks().Put(metaHash, meta) - - meta = &block.MetaBlock{ - Nonce: 2, - ShardInfo: make([]block.ShardData, 0), - Round: 2, - PrevHash: metaHash, - PrevRandSeed: randSeed, - } - metaBytes, _ = marshalizer.Marshal(meta) - metaHash = hasher.Compute(string(metaBytes)) - tdp.MetaBlocks().Put(metaHash, meta) - - genesisBlocks := createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)) - sp, _ := blproc.NewShardProcessorEmptyWith3shards(tdp, genesisBlocks) - - err := sp.ProcessBlock(blkc, &hdr, body, haveTime) - assert.Equal(t, process.ErrTimeIsOut, err) - res := sp.IsMetaHeaderFinal(&hdr, nil, 0) - assert.False(t, res) - res = sp.IsMetaHeaderFinal(nil, nil, 0) - assert.False(t, res) - - meta = &block.MetaBlock{ - Nonce: 1, - ShardInfo: shardHdrs, - Round: 1, - RandSeed: randSeed, - } - ordered, _ := sp.GetOrderedMetaBlocks(3) - res = sp.IsMetaHeaderFinal(meta, ordered, 0) - assert.True(t, res) + t.Parallel() + + tdp := mock.NewPoolsHolderMock() + txHash := []byte("tx_hash1") + randSeed := []byte("rand seed") + tdp.Transactions().AddData(txHash, &transaction.Transaction{}, process.ShardCacherIdentifier(1, 0)) + blkc := &blockchain.BlockChain{ + CurrentBlockHeader: &block.Header{ + Nonce: 1, + RandSeed: randSeed, + }, + } + rootHash := []byte("rootHash") + body := make(block.Body, 0) + txHashes := make([][]byte, 0) + txHashes = append(txHashes, txHash) + miniblock := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 1, + TxHashes: txHashes, + } + body = append(body, &miniblock) + + hasher := &mock.HasherMock{} + marshalizer := &mock.MarshalizerMock{} + + mbbytes, _ := marshalizer.Marshal(miniblock) + mbHash := hasher.Compute(string(mbbytes)) + mbHdr := block.MiniBlockHeader{ + ReceiverShardID: 0, + SenderShardID: 1, + 
TxCount: uint32(len(txHashes)), + Hash: mbHash, + } + mbHdrs := make([]block.MiniBlockHeader, 0) + mbHdrs = append(mbHdrs, mbHdr) + + lastHdr := blkc.GetCurrentBlockHeader() + prevHash, _ := core.CalculateHash(marshalizer, hasher, lastHdr) + hdr := initBlockHeader(prevHash, randSeed, rootHash, mbHdrs) + + shardMiniBlock := block.ShardMiniBlockHeader{ + ReceiverShardId: mbHdr.ReceiverShardID, + SenderShardId: mbHdr.SenderShardID, + TxCount: mbHdr.TxCount, + Hash: mbHdr.Hash, + } + shardMiniblockHdrs := make([]block.ShardMiniBlockHeader, 0) + shardMiniblockHdrs = append(shardMiniblockHdrs, shardMiniBlock) + shardHeader := block.ShardData{ + ShardMiniBlockHeaders: shardMiniblockHdrs, + } + shardHdrs := make([]block.ShardData, 0) + shardHdrs = append(shardHdrs, shardHeader) + + meta := &block.MetaBlock{ + Nonce: 1, + ShardInfo: shardHdrs, + Round: 1, + RandSeed: randSeed, + } + metaBytes, _ := marshalizer.Marshal(meta) + metaHash := hasher.Compute(string(metaBytes)) + + hdr.MetaBlockHashes = append(hdr.MetaBlockHashes, metaHash) + tdp.MetaBlocks().Put(metaHash, meta) + + meta = &block.MetaBlock{ + Nonce: 2, + ShardInfo: make([]block.ShardData, 0), + Round: 2, + PrevHash: metaHash, + PrevRandSeed: randSeed, + } + metaBytes, _ = marshalizer.Marshal(meta) + metaHash = hasher.Compute(string(metaBytes)) + tdp.MetaBlocks().Put(metaHash, meta) + + genesisBlocks := createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)) + sp, _ := blproc.NewShardProcessorEmptyWith3shards(tdp, genesisBlocks) + + err := sp.ProcessBlock(blkc, &hdr, body, haveTime) + assert.Equal(t, process.ErrTimeIsOut, err) + res := sp.IsMetaHeaderFinal(&hdr, nil, 0) + assert.False(t, res) + res = sp.IsMetaHeaderFinal(nil, nil, 0) + assert.False(t, res) + + meta = &block.MetaBlock{ + Nonce: 1, + ShardInfo: shardHdrs, + Round: 1, + RandSeed: randSeed, + } + ordered, _ := sp.GetOrderedMetaBlocks(3) + res = sp.IsMetaHeaderFinal(meta, ordered, 0) + assert.True(t, res) } //-------- requestFinalMissingHeaders func TestShardProcessor_RequestFinalMissingHeaders(t *testing.T) { - t.Parallel() - - tdp := mock.NewPoolsHolderMock() - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{}, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - sp.SetCurrHighestMetaHdrNonce(1) - res := sp.RequestFinalMissingHeaders() - assert.Equal(t, res > 0, true) + t.Parallel() + + tdp := mock.NewPoolsHolderMock() + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.AccountsStub{}, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + sp.SetCurrHighestMetaHdrNonce(1) + res := sp.RequestFinalMissingHeaders() + assert.Equal(t, res > 0, true) } //--------- verifyIncludedMetaBlocksFinality func TestShardProcessor_CheckMetaHeadersValidityAndFinalityShouldPass(t 
*testing.T) { - t.Parallel() - - tdp := mock.NewPoolsHolderMock() - txHash := []byte("tx_hash1") - tdp.Transactions().AddData(txHash, &transaction.Transaction{}, process.ShardCacherIdentifier(1, 0)) - rootHash := []byte("rootHash") - body := make(block.Body, 0) - txHashes := make([][]byte, 0) - txHashes = append(txHashes, txHash) - miniblock := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 1, - TxHashes: txHashes, - } - body = append(body, &miniblock) - - hasher := &mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} - - mbbytes, _ := marshalizer.Marshal(miniblock) - mbHash := hasher.Compute(string(mbbytes)) - mbHdr := block.MiniBlockHeader{ - ReceiverShardID: 0, - SenderShardID: 1, - TxCount: uint32(len(txHashes)), - Hash: mbHash} - mbHdrs := make([]block.MiniBlockHeader, 0) - mbHdrs = append(mbHdrs, mbHdr) - - genesisBlocks := createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)) - lastHdr := genesisBlocks[0] - prevHash, _ := core.CalculateHash(marshalizer, hasher, lastHdr) - randSeed := []byte("rand seed") - hdr := initBlockHeader(prevHash, randSeed, rootHash, mbHdrs) - - shardMiniBlock := block.ShardMiniBlockHeader{ - ReceiverShardId: mbHdr.ReceiverShardID, - SenderShardId: mbHdr.SenderShardID, - TxCount: mbHdr.TxCount, - Hash: mbHdr.Hash, - } - shardMiniblockHdrs := make([]block.ShardMiniBlockHeader, 0) - shardMiniblockHdrs = append(shardMiniblockHdrs, shardMiniBlock) - shardHeader := block.ShardData{ - ShardMiniBlockHeaders: shardMiniblockHdrs, - } - shardHdrs := make([]block.ShardData, 0) - shardHdrs = append(shardHdrs, shardHeader) - - prevMeta := genesisBlocks[sharding.MetachainShardId] - prevHash, _ = core.CalculateHash(marshalizer, hasher, prevMeta) - meta := &block.MetaBlock{ - Nonce: 1, - ShardInfo: shardHdrs, - Round: 1, - PrevHash: prevHash, - PrevRandSeed: prevMeta.GetRandSeed(), - } - metaBytes, _ := marshalizer.Marshal(meta) - metaHash := hasher.Compute(string(metaBytes)) - hdr.MetaBlockHashes = append(hdr.MetaBlockHashes, metaHash) - - tdp.MetaBlocks().Put(metaHash, meta) - - prevHash, _ = core.CalculateHash(marshalizer, hasher, meta) - meta = &block.MetaBlock{ - Nonce: 2, - ShardInfo: make([]block.ShardData, 0), - Round: 2, - PrevHash: prevHash, - } - metaBytes, _ = marshalizer.Marshal(meta) - metaHash = hasher.Compute(string(metaBytes)) - - tdp.MetaBlocks().Put(metaHash, meta) - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - hasher, - marshalizer, - &mock.AccountsStub{}, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - genesisBlocks, - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - hdr.Round = 4 - - err := sp.CheckMetaHeadersValidityAndFinality(&hdr) - assert.Nil(t, err) + t.Parallel() + + tdp := mock.NewPoolsHolderMock() + txHash := []byte("tx_hash1") + tdp.Transactions().AddData(txHash, &transaction.Transaction{}, process.ShardCacherIdentifier(1, 0)) + rootHash := []byte("rootHash") + body := make(block.Body, 0) + txHashes := make([][]byte, 0) + txHashes = append(txHashes, txHash) + miniblock := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 1, + TxHashes: txHashes, + } + body = append(body, &miniblock) + + hasher := &mock.HasherMock{} + marshalizer := &mock.MarshalizerMock{} + + mbbytes, _ := marshalizer.Marshal(miniblock) + mbHash := hasher.Compute(string(mbbytes)) + mbHdr := 
block.MiniBlockHeader{ + ReceiverShardID: 0, + SenderShardID: 1, + TxCount: uint32(len(txHashes)), + Hash: mbHash} + mbHdrs := make([]block.MiniBlockHeader, 0) + mbHdrs = append(mbHdrs, mbHdr) + + genesisBlocks := createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)) + lastHdr := genesisBlocks[0] + prevHash, _ := core.CalculateHash(marshalizer, hasher, lastHdr) + randSeed := []byte("rand seed") + hdr := initBlockHeader(prevHash, randSeed, rootHash, mbHdrs) + + shardMiniBlock := block.ShardMiniBlockHeader{ + ReceiverShardId: mbHdr.ReceiverShardID, + SenderShardId: mbHdr.SenderShardID, + TxCount: mbHdr.TxCount, + Hash: mbHdr.Hash, + } + shardMiniblockHdrs := make([]block.ShardMiniBlockHeader, 0) + shardMiniblockHdrs = append(shardMiniblockHdrs, shardMiniBlock) + shardHeader := block.ShardData{ + ShardMiniBlockHeaders: shardMiniblockHdrs, + } + shardHdrs := make([]block.ShardData, 0) + shardHdrs = append(shardHdrs, shardHeader) + + prevMeta := genesisBlocks[sharding.MetachainShardId] + prevHash, _ = core.CalculateHash(marshalizer, hasher, prevMeta) + meta := &block.MetaBlock{ + Nonce: 1, + ShardInfo: shardHdrs, + Round: 1, + PrevHash: prevHash, + PrevRandSeed: prevMeta.GetRandSeed(), + } + metaBytes, _ := marshalizer.Marshal(meta) + metaHash := hasher.Compute(string(metaBytes)) + hdr.MetaBlockHashes = append(hdr.MetaBlockHashes, metaHash) + + tdp.MetaBlocks().Put(metaHash, meta) + + prevHash, _ = core.CalculateHash(marshalizer, hasher, meta) + meta = &block.MetaBlock{ + Nonce: 2, + ShardInfo: make([]block.ShardData, 0), + Round: 2, + PrevHash: prevHash, + } + metaBytes, _ = marshalizer.Marshal(meta) + metaHash = hasher.Compute(string(metaBytes)) + + tdp.MetaBlocks().Put(metaHash, meta) + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + hasher, + marshalizer, + &mock.AccountsStub{}, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + genesisBlocks, + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + hdr.Round = 4 + + err := sp.CheckMetaHeadersValidityAndFinality(&hdr) + assert.Nil(t, err) } func TestShardProcessor_CheckMetaHeadersValidityAndFinalityShouldErr(t *testing.T) { - t.Parallel() + t.Parallel() - mbHdrs := make([]block.MiniBlockHeader, 0) - rootHash := []byte("rootHash") - txHash := []byte("txhash1") - txHashes := make([][]byte, 0) - txHashes = append(txHashes, txHash) + mbHdrs := make([]block.MiniBlockHeader, 0) + rootHash := []byte("rootHash") + txHash := []byte("txhash1") + txHashes := make([][]byte, 0) + txHashes = append(txHashes, txHash) - tdp := mock.NewPoolsHolderMock() - genesisBlocks := createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)) - sp, _ := blproc.NewShardProcessorEmptyWith3shards(tdp, genesisBlocks) + tdp := mock.NewPoolsHolderMock() + genesisBlocks := createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)) + sp, _ := blproc.NewShardProcessorEmptyWith3shards(tdp, genesisBlocks) - lastHdr := genesisBlocks[0] - prevHash, _ := core.CalculateHash(&mock.MarshalizerMock{}, &mock.HasherMock{}, lastHdr) + lastHdr := genesisBlocks[0] + prevHash, _ := core.CalculateHash(&mock.MarshalizerMock{}, &mock.HasherMock{}, lastHdr) - randSeed := []byte("rand seed") - hdr := initBlockHeader(prevHash, randSeed, rootHash, mbHdrs) + randSeed := []byte("rand seed") + hdr := initBlockHeader(prevHash, randSeed, rootHash, mbHdrs) - 
hdr.MetaBlockHashes = append(hdr.MetaBlockHashes, []byte("meta")) - hdr.Round = 0 - err := sp.CheckMetaHeadersValidityAndFinality(&hdr) - assert.Equal(t, err, process.ErrNilMetaBlockHeader) + hdr.MetaBlockHashes = append(hdr.MetaBlockHashes, []byte("meta")) + hdr.Round = 0 + err := sp.CheckMetaHeadersValidityAndFinality(&hdr) + assert.Equal(t, err, process.ErrNilMetaBlockHeader) } //------- CommitBlock func TestShardProcessor_CommitBlockNilBlockchainShouldErr(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - accounts := &mock.AccountsStub{} - accounts.RevertToSnapshotCalled = func(snapshot int) error { - return nil - } - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - accounts, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - blk := make(block.Body, 0) - - err := sp.CommitBlock(nil, &block.Header{}, blk) - assert.Equal(t, process.ErrNilBlockChain, err) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + accounts := &mock.AccountsStub{} + accounts.RevertToSnapshotCalled = func(snapshot int) error { + return nil + } + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + accounts, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + blk := make(block.Body, 0) + + err := sp.CommitBlock(nil, &block.Header{}, blk) + assert.Equal(t, process.ErrNilBlockChain, err) } func TestShardProcessor_CommitBlockMarshalizerFailForHeaderShouldErr(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - rootHash := []byte("root hash to be tested") - accounts := &mock.AccountsStub{ - RootHashCalled: func() ([]byte, error) { - return rootHash, nil - }, - RevertToSnapshotCalled: func(snapshot int) error { - return nil - }, - } - errMarshalizer := errors.New("failure") - hdr := &block.Header{ - Nonce: 1, - Round: 1, - PubKeysBitmap: []byte("0100101"), - Signature: []byte("signature"), - RootHash: rootHash, - } - body := make(block.Body, 0) - marshalizer := &mock.MarshalizerStub{ - MarshalCalled: func(obj interface{}) (i []byte, e error) { - if reflect.DeepEqual(obj, hdr) { - return nil, errMarshalizer - } - - return []byte("obj"), nil - }, - } - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - marshalizer, - accounts, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - blkc := createTestBlockchain() - - err := sp.CommitBlock(blkc, hdr, body) - assert.Equal(t, errMarshalizer, err) + t.Parallel() + tdp := 
initDataPool([]byte("tx_hash1")) + rootHash := []byte("root hash to be tested") + accounts := &mock.AccountsStub{ + RootHashCalled: func() ([]byte, error) { + return rootHash, nil + }, + RevertToSnapshotCalled: func(snapshot int) error { + return nil + }, + } + errMarshalizer := errors.New("failure") + hdr := &block.Header{ + Nonce: 1, + Round: 1, + PubKeysBitmap: []byte("0100101"), + Signature: []byte("signature"), + RootHash: rootHash, + } + body := make(block.Body, 0) + marshalizer := &mock.MarshalizerStub{ + MarshalCalled: func(obj interface{}) (i []byte, e error) { + if reflect.DeepEqual(obj, hdr) { + return nil, errMarshalizer + } + + return []byte("obj"), nil + }, + } + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + marshalizer, + accounts, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + blkc := createTestBlockchain() + + err := sp.CommitBlock(blkc, hdr, body) + assert.Equal(t, errMarshalizer, err) } func TestShardProcessor_CommitBlockStorageFailsForHeaderShouldErr(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - errPersister := errors.New("failure") - wasCalled := false - rootHash := []byte("root hash to be tested") - accounts := &mock.AccountsStub{ - CommitCalled: func() ([]byte, error) { - return nil, nil - }, - RootHashCalled: func() ([]byte, error) { - return rootHash, nil - }, - RevertToSnapshotCalled: func(snapshot int) error { - return nil - }, - } - hdr := &block.Header{ - Nonce: 1, - Round: 1, - PubKeysBitmap: []byte("0100101"), - Signature: []byte("signature"), - RootHash: rootHash, - } - body := make(block.Body, 0) - hdrUnit := &mock.StorerStub{ - PutCalled: func(key, data []byte) error { - wasCalled = true - return errPersister - }, - HasCalled: func(key []byte) error { - return nil - }, - } - store := initStore() - store.AddStorer(dataRetriever.BlockHeaderUnit, hdrUnit) - - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - store, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - accounts, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{ - AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeader data.HeaderHandler, finalHeaderHash []byte) error { - return nil - }, - }, - &mock.BlocksTrackerMock{ - AddBlockCalled: func(headerHandler data.HeaderHandler) { - }, - UnnotarisedBlocksCalled: func() []data.HeaderHandler { - return make([]data.HeaderHandler, 0) - }, - }, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - blkc, _ := blockchain.NewBlockChain( - generateTestCache(), - ) - - _ = blkc.SetAppStatusHandler(&mock.AppStatusHandlerStub{ - SetUInt64ValueHandler: func(key string, value uint64) {}, - }) - - err := sp.CommitBlock(blkc, hdr, body) - assert.True(t, wasCalled) - assert.Nil(t, err) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + errPersister := errors.New("failure") + wasCalled := false + rootHash := []byte("root hash to be tested") + accounts := &mock.AccountsStub{ + 
CommitCalled: func() ([]byte, error) { + return nil, nil + }, + RootHashCalled: func() ([]byte, error) { + return rootHash, nil + }, + RevertToSnapshotCalled: func(snapshot int) error { + return nil + }, + } + hdr := &block.Header{ + Nonce: 1, + Round: 1, + PubKeysBitmap: []byte("0100101"), + Signature: []byte("signature"), + RootHash: rootHash, + } + body := make(block.Body, 0) + hdrUnit := &mock.StorerStub{ + PutCalled: func(key, data []byte) error { + wasCalled = true + return errPersister + }, + HasCalled: func(key []byte) error { + return nil + }, + } + store := initStore() + store.AddStorer(dataRetriever.BlockHeaderUnit, hdrUnit) + + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + store, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + accounts, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{ + AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeader data.HeaderHandler, finalHeaderHash []byte) error { + return nil + }, + }, + &mock.BlocksTrackerMock{ + AddBlockCalled: func(headerHandler data.HeaderHandler) { + }, + UnnotarisedBlocksCalled: func() []data.HeaderHandler { + return make([]data.HeaderHandler, 0) + }, + }, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + blkc, _ := blockchain.NewBlockChain( + generateTestCache(), + ) + + _ = blkc.SetAppStatusHandler(&mock.AppStatusHandlerStub{ + SetUInt64ValueHandler: func(key string, value uint64) {}, + }) + + err := sp.CommitBlock(blkc, hdr, body) + assert.True(t, wasCalled) + assert.Nil(t, err) } func TestShardProcessor_CommitBlockStorageFailsForBodyShouldWork(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - wasCalled := false - errPersister := errors.New("failure") - rootHash := []byte("root hash to be tested") - accounts := &mock.AccountsStub{ - RootHashCalled: func() ([]byte, error) { - return rootHash, nil - }, - CommitCalled: func() (i []byte, e error) { - return nil, nil - }, - RevertToSnapshotCalled: func(snapshot int) error { - return nil - }, - } - hdr := &block.Header{ - Nonce: 1, - Round: 1, - PubKeysBitmap: []byte("0100101"), - Signature: []byte("signature"), - RootHash: rootHash, - } - mb := block.MiniBlock{} - body := make(block.Body, 0) - body = append(body, &mb) - - miniBlockUnit := &mock.StorerStub{ - PutCalled: func(key, data []byte) error { - wasCalled = true - return errPersister - }, - } - store := initStore() - store.AddStorer(dataRetriever.MiniBlockUnit, miniBlockUnit) - - sp, err := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - store, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - accounts, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{ - AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeader data.HeaderHandler, finalHeaderHash []byte) error { - return nil - }, - }, - &mock.BlocksTrackerMock{ - AddBlockCalled: func(headerHandler data.HeaderHandler) { - }, - UnnotarisedBlocksCalled: func() []data.HeaderHandler { - return make([]data.HeaderHandler, 0) - }, - }, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - 
- assert.Nil(t, err) - - blkc, _ := blockchain.NewBlockChain( - generateTestCache(), - ) - - _ = blkc.SetAppStatusHandler(&mock.AppStatusHandlerStub{ - SetUInt64ValueHandler: func(key string, value uint64) {}, - }) - - err = sp.CommitBlock(blkc, hdr, body) - - assert.Nil(t, err) - assert.True(t, wasCalled) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + wasCalled := false + errPersister := errors.New("failure") + rootHash := []byte("root hash to be tested") + accounts := &mock.AccountsStub{ + RootHashCalled: func() ([]byte, error) { + return rootHash, nil + }, + CommitCalled: func() (i []byte, e error) { + return nil, nil + }, + RevertToSnapshotCalled: func(snapshot int) error { + return nil + }, + } + hdr := &block.Header{ + Nonce: 1, + Round: 1, + PubKeysBitmap: []byte("0100101"), + Signature: []byte("signature"), + RootHash: rootHash, + } + mb := block.MiniBlock{} + body := make(block.Body, 0) + body = append(body, &mb) + + miniBlockUnit := &mock.StorerStub{ + PutCalled: func(key, data []byte) error { + wasCalled = true + return errPersister + }, + } + store := initStore() + store.AddStorer(dataRetriever.MiniBlockUnit, miniBlockUnit) + + sp, err := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + store, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + accounts, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{ + AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeader data.HeaderHandler, finalHeaderHash []byte) error { + return nil + }, + }, + &mock.BlocksTrackerMock{ + AddBlockCalled: func(headerHandler data.HeaderHandler) { + }, + UnnotarisedBlocksCalled: func() []data.HeaderHandler { + return make([]data.HeaderHandler, 0) + }, + }, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + assert.Nil(t, err) + + blkc, _ := blockchain.NewBlockChain( + generateTestCache(), + ) + + _ = blkc.SetAppStatusHandler(&mock.AppStatusHandlerStub{ + SetUInt64ValueHandler: func(key string, value uint64) {}, + }) + + err = sp.CommitBlock(blkc, hdr, body) + + assert.Nil(t, err) + assert.True(t, wasCalled) } func TestShardProcessor_CommitBlockNilNoncesDataPoolShouldErr(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - rootHash := []byte("root hash to be tested") - accounts := &mock.AccountsStub{ - RootHashCalled: func() ([]byte, error) { - return rootHash, nil - }, - RevertToSnapshotCalled: func(snapshot int) error { - return nil - }, - } - hdr := &block.Header{ - Nonce: 1, - Round: 1, - PubKeysBitmap: []byte("0100101"), - Signature: []byte("signature"), - RootHash: rootHash, - } - body := make(block.Body, 0) - store := initStore() - - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - store, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - accounts, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - tdp.HeadersNoncesCalled = func() dataRetriever.Uint64SyncMapCacher { - return nil - } - blkc := createTestBlockchain() - err := sp.CommitBlock(blkc, hdr, body) - - assert.Equal(t, 
process.ErrNilDataPoolHolder, err) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + rootHash := []byte("root hash to be tested") + accounts := &mock.AccountsStub{ + RootHashCalled: func() ([]byte, error) { + return rootHash, nil + }, + RevertToSnapshotCalled: func(snapshot int) error { + return nil + }, + } + hdr := &block.Header{ + Nonce: 1, + Round: 1, + PubKeysBitmap: []byte("0100101"), + Signature: []byte("signature"), + RootHash: rootHash, + } + body := make(block.Body, 0) + store := initStore() + + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + store, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + accounts, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + tdp.HeadersNoncesCalled = func() dataRetriever.Uint64SyncMapCacher { + return nil + } + blkc := createTestBlockchain() + err := sp.CommitBlock(blkc, hdr, body) + + assert.Equal(t, process.ErrNilDataPoolHolder, err) } func TestShardProcessor_CommitBlockNoTxInPoolShouldErr(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - - txCache := &mock.CacherStub{ - PeekCalled: func(key []byte) (value interface{}, ok bool) { - return nil, false - }, - LenCalled: func() int { - return 0 - }, - } - tdp.TransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{ - ShardDataStoreCalled: func(id string) (c storage.Cacher) { - return txCache - }, - RemoveSetOfDataFromPoolCalled: func(keys [][]byte, id string) { - }, - SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { - if reflect.DeepEqual(key, []byte("tx1_hash")) { - return &transaction.Transaction{Nonce: 10}, true - } - return nil, false - }, - RegisterHandlerCalled: func(i func(key []byte)) { - - }, - } - } - - txHash := []byte("txHash") - rootHash := []byte("root hash") - hdrHash := []byte("header hash") - hdr := &block.Header{ - Nonce: 1, - Round: 1, - PubKeysBitmap: []byte("0100101"), - Signature: []byte("signature"), - RootHash: rootHash, - } - mb := block.MiniBlock{ - TxHashes: [][]byte{txHash}, - } - body := block.Body{&mb} - accounts := &mock.AccountsStub{ - CommitCalled: func() (i []byte, e error) { - return rootHash, nil - }, - RootHashCalled: func() ([]byte, error) { - return rootHash, nil - }, - RevertToSnapshotCalled: func(snapshot int) error { - return nil - }, - } - fd := &mock.ForkDetectorMock{ - AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeader data.HeaderHandler, finalHeaderHash []byte) error { - return nil - }, - } - hasher := &mock.HasherStub{} - hasher.ComputeCalled = func(s string) []byte { - return hdrHash - } - store := initStore() - - factory, _ := shard.NewPreProcessorsContainerFactory( - mock.NewMultiShardsCoordinatorMock(3), - initStore(), - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - tdp, - &mock.AddressConverterMock{}, - initAccountsMock(), - &mock.RequestHandlerMock{}, - &mock.TxProcessorMock{}, - &mock.SCProcessorMock{}, - &mock.SmartContractResultsProcessorMock{}, - &mock.RewardTxProcessorMock{}, - ) - container, _ := factory.Create() - - tc, err := coordinator.NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(3), - initAccountsMock(), - tdp, - &mock.RequestHandlerMock{}, - 
container, - &mock.InterimProcessorContainerMock{}, - ) - - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - store, - hasher, - &mock.MarshalizerMock{}, - accounts, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - fd, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - tc, - &mock.Uint64ByteSliceConverterMock{}, - ) - - blkc := createTestBlockchain() - - err = sp.CommitBlock(blkc, hdr, body) - assert.Equal(t, process.ErrMissingTransaction, err) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + + txCache := &mock.CacherStub{ + PeekCalled: func(key []byte) (value interface{}, ok bool) { + return nil, false + }, + LenCalled: func() int { + return 0 + }, + } + tdp.TransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{ + ShardDataStoreCalled: func(id string) (c storage.Cacher) { + return txCache + }, + RemoveSetOfDataFromPoolCalled: func(keys [][]byte, id string) { + }, + SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { + if reflect.DeepEqual(key, []byte("tx1_hash")) { + return &transaction.Transaction{Nonce: 10}, true + } + return nil, false + }, + RegisterHandlerCalled: func(i func(key []byte)) { + + }, + } + } + + txHash := []byte("txHash") + rootHash := []byte("root hash") + hdrHash := []byte("header hash") + hdr := &block.Header{ + Nonce: 1, + Round: 1, + PubKeysBitmap: []byte("0100101"), + Signature: []byte("signature"), + RootHash: rootHash, + } + mb := block.MiniBlock{ + TxHashes: [][]byte{txHash}, + } + body := block.Body{&mb} + accounts := &mock.AccountsStub{ + CommitCalled: func() (i []byte, e error) { + return rootHash, nil + }, + RootHashCalled: func() ([]byte, error) { + return rootHash, nil + }, + RevertToSnapshotCalled: func(snapshot int) error { + return nil + }, + } + fd := &mock.ForkDetectorMock{ + AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeader data.HeaderHandler, finalHeaderHash []byte) error { + return nil + }, + } + hasher := &mock.HasherStub{} + hasher.ComputeCalled = func(s string) []byte { + return hdrHash + } + store := initStore() + + factory, _ := shard.NewPreProcessorsContainerFactory( + mock.NewMultiShardsCoordinatorMock(3), + initStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + tdp, + &mock.AddressConverterMock{}, + initAccountsMock(), + &mock.RequestHandlerMock{}, + &mock.TxProcessorMock{}, + &mock.SCProcessorMock{}, + &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + ) + container, _ := factory.Create() + + tc, err := coordinator.NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(3), + initAccountsMock(), + tdp, + &mock.RequestHandlerMock{}, + container, + &mock.InterimProcessorContainerMock{}, + ) + + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + store, + hasher, + &mock.MarshalizerMock{}, + accounts, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + fd, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + tc, + &mock.Uint64ByteSliceConverterMock{}, + ) + + blkc := createTestBlockchain() + + err = sp.CommitBlock(blkc, hdr, body) + assert.Equal(t, process.ErrMissingTransaction, err) } func TestShardProcessor_CommitBlockOkValsShouldWork(t *testing.T) { - 
t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - txHash := []byte("tx_hash1") - - rootHash := []byte("root hash") - hdrHash := []byte("header hash") - randSeed := []byte("rand seed") - - prevHdr := &block.Header{ - Nonce: 0, - Round: 0, - PubKeysBitmap: rootHash, - PrevHash: hdrHash, - Signature: rootHash, - RootHash: rootHash, - RandSeed: randSeed, - } - - hdr := &block.Header{ - Nonce: 1, - Round: 1, - PubKeysBitmap: rootHash, - PrevHash: hdrHash, - Signature: rootHash, - RootHash: rootHash, - PrevRandSeed: randSeed, - } - mb := block.MiniBlock{ - TxHashes: [][]byte{txHash}, - } - body := block.Body{&mb} - - mbHdr := block.MiniBlockHeader{ - TxCount: uint32(len(mb.TxHashes)), - Hash: hdrHash, - } - mbHdrs := make([]block.MiniBlockHeader, 0) - mbHdrs = append(mbHdrs, mbHdr) - hdr.MiniBlockHeaders = mbHdrs - - accounts := &mock.AccountsStub{ - CommitCalled: func() (i []byte, e error) { - return rootHash, nil - }, - RootHashCalled: func() ([]byte, error) { - return rootHash, nil - }, - } - forkDetectorAddCalled := false - fd := &mock.ForkDetectorMock{ - AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeader data.HeaderHandler, finalHeaderHash []byte) error { - if header == hdr { - forkDetectorAddCalled = true - return nil - } - - return errors.New("should have not got here") - }, - } - hasher := &mock.HasherStub{} - hasher.ComputeCalled = func(s string) []byte { - return hdrHash - } - store := initStore() - - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - store, - hasher, - &mock.MarshalizerMock{}, - accounts, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - fd, - &mock.BlocksTrackerMock{ - AddBlockCalled: func(headerHandler data.HeaderHandler) { - }, - UnnotarisedBlocksCalled: func() []data.HeaderHandler { - return make([]data.HeaderHandler, 0) - }, - }, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - blkc := createTestBlockchain() - blkc.GetCurrentBlockHeaderCalled = func() data.HeaderHandler { - return prevHdr - } - blkc.GetCurrentBlockHeaderHashCalled = func() []byte { - return hdrHash - } - err := sp.ProcessBlock(blkc, hdr, body, haveTime) - assert.Nil(t, err) - err = sp.CommitBlock(blkc, hdr, body) - assert.Nil(t, err) - assert.True(t, forkDetectorAddCalled) - assert.Equal(t, hdrHash, blkc.GetCurrentBlockHeaderHash()) - //this should sleep as there is an async call to display current header and block in CommitBlock - time.Sleep(time.Second) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + txHash := []byte("tx_hash1") + + rootHash := []byte("root hash") + hdrHash := []byte("header hash") + randSeed := []byte("rand seed") + + prevHdr := &block.Header{ + Nonce: 0, + Round: 0, + PubKeysBitmap: rootHash, + PrevHash: hdrHash, + Signature: rootHash, + RootHash: rootHash, + RandSeed: randSeed, + } + + hdr := &block.Header{ + Nonce: 1, + Round: 1, + PubKeysBitmap: rootHash, + PrevHash: hdrHash, + Signature: rootHash, + RootHash: rootHash, + PrevRandSeed: randSeed, + } + mb := block.MiniBlock{ + TxHashes: [][]byte{txHash}, + } + body := block.Body{&mb} + + mbHdr := block.MiniBlockHeader{ + TxCount: uint32(len(mb.TxHashes)), + Hash: hdrHash, + } + mbHdrs := make([]block.MiniBlockHeader, 0) + mbHdrs = append(mbHdrs, mbHdr) + hdr.MiniBlockHeaders = mbHdrs + + accounts := &mock.AccountsStub{ + CommitCalled: 
func() (i []byte, e error) { + return rootHash, nil + }, + RootHashCalled: func() ([]byte, error) { + return rootHash, nil + }, + } + forkDetectorAddCalled := false + fd := &mock.ForkDetectorMock{ + AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeader data.HeaderHandler, finalHeaderHash []byte) error { + if header == hdr { + forkDetectorAddCalled = true + return nil + } + + return errors.New("should have not got here") + }, + } + hasher := &mock.HasherStub{} + hasher.ComputeCalled = func(s string) []byte { + return hdrHash + } + store := initStore() + + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + store, + hasher, + &mock.MarshalizerMock{}, + accounts, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + fd, + &mock.BlocksTrackerMock{ + AddBlockCalled: func(headerHandler data.HeaderHandler) { + }, + UnnotarisedBlocksCalled: func() []data.HeaderHandler { + return make([]data.HeaderHandler, 0) + }, + }, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + blkc := createTestBlockchain() + blkc.GetCurrentBlockHeaderCalled = func() data.HeaderHandler { + return prevHdr + } + blkc.GetCurrentBlockHeaderHashCalled = func() []byte { + return hdrHash + } + err := sp.ProcessBlock(blkc, hdr, body, haveTime) + assert.Nil(t, err) + err = sp.CommitBlock(blkc, hdr, body) + assert.Nil(t, err) + assert.True(t, forkDetectorAddCalled) + assert.Equal(t, hdrHash, blkc.GetCurrentBlockHeaderHash()) + //this should sleep as there is an async call to display current header and block in CommitBlock + time.Sleep(time.Second) } func TestShardProcessor_CommitBlockCallsIndexerMethods(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - txHash := []byte("tx_hash1") - - rootHash := []byte("root hash") - hdrHash := []byte("header hash") - randSeed := []byte("rand seed") - - prevHdr := &block.Header{ - Nonce: 0, - Round: 0, - PubKeysBitmap: rootHash, - PrevHash: hdrHash, - Signature: rootHash, - RootHash: rootHash, - RandSeed: randSeed, - } - - hdr := &block.Header{ - Nonce: 1, - Round: 1, - PubKeysBitmap: rootHash, - PrevHash: hdrHash, - Signature: rootHash, - RootHash: rootHash, - PrevRandSeed: randSeed, - } - mb := block.MiniBlock{ - TxHashes: [][]byte{txHash}, - } - body := block.Body{&mb} - - mbHdr := block.MiniBlockHeader{ - TxCount: uint32(len(mb.TxHashes)), - Hash: hdrHash, - } - mbHdrs := make([]block.MiniBlockHeader, 0) - mbHdrs = append(mbHdrs, mbHdr) - hdr.MiniBlockHeaders = mbHdrs - - accounts := &mock.AccountsStub{ - CommitCalled: func() (i []byte, e error) { - return rootHash, nil - }, - RootHashCalled: func() ([]byte, error) { - return rootHash, nil - }, - } - fd := &mock.ForkDetectorMock{ - AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeader data.HeaderHandler, finalHeaderHash []byte) error { - return nil - }, - } - hasher := &mock.HasherStub{} - hasher.ComputeCalled = func(s string) []byte { - return hdrHash - } - store := initStore() - - var saveBlockCalled map[string]data.TransactionHandler - saveBlockCalledMutex := sync.Mutex{} - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{ - IndexerCalled: func() indexer.Indexer { - return &mock.IndexerMock{ - SaveBlockCalled: func(body data.BodyHandler, header data.HeaderHandler, txPool 
map[string]data.TransactionHandler) { - saveBlockCalledMutex.Lock() - saveBlockCalled = txPool - saveBlockCalledMutex.Unlock() - }, - } - }, - }, - tdp, - store, - hasher, - &mock.MarshalizerMock{}, - accounts, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - fd, - &mock.BlocksTrackerMock{ - AddBlockCalled: func(headerHandler data.HeaderHandler) { - }, - UnnotarisedBlocksCalled: func() []data.HeaderHandler { - return make([]data.HeaderHandler, 0) - }, - }, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{ - GetAllCurrentUsedTxsCalled: func(blockType block.Type) map[string]data.TransactionHandler { - switch blockType { - case block.TxBlock: - return map[string]data.TransactionHandler{ - "tx_1": &transaction.Transaction{Nonce: 1}, - "tx_2": &transaction.Transaction{Nonce: 2}, - } - case block.SmartContractResultBlock: - return map[string]data.TransactionHandler{ - "utx_1": &smartContractResult.SmartContractResult{Nonce: 1}, - "utx_2": &smartContractResult.SmartContractResult{Nonce: 2}, - } - default: - return nil - } - }, - }, - &mock.Uint64ByteSliceConverterMock{}, - ) - - blkc := createTestBlockchain() - blkc.GetCurrentBlockHeaderCalled = func() data.HeaderHandler { - return prevHdr - } - blkc.GetCurrentBlockHeaderHashCalled = func() []byte { - return hdrHash - } - err := sp.ProcessBlock(blkc, hdr, body, haveTime) - assert.Nil(t, err) - err = sp.CommitBlock(blkc, hdr, body) - assert.Nil(t, err) - - // Wait for the index block go routine to start - time.Sleep(time.Second * 2) - - saveBlockCalledMutex.Lock() - wasCalled := saveBlockCalled - saveBlockCalledMutex.Unlock() - - assert.Equal(t, 4, len(wasCalled)) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + txHash := []byte("tx_hash1") + + rootHash := []byte("root hash") + hdrHash := []byte("header hash") + randSeed := []byte("rand seed") + + prevHdr := &block.Header{ + Nonce: 0, + Round: 0, + PubKeysBitmap: rootHash, + PrevHash: hdrHash, + Signature: rootHash, + RootHash: rootHash, + RandSeed: randSeed, + } + + hdr := &block.Header{ + Nonce: 1, + Round: 1, + PubKeysBitmap: rootHash, + PrevHash: hdrHash, + Signature: rootHash, + RootHash: rootHash, + PrevRandSeed: randSeed, + } + mb := block.MiniBlock{ + TxHashes: [][]byte{txHash}, + } + body := block.Body{&mb} + + mbHdr := block.MiniBlockHeader{ + TxCount: uint32(len(mb.TxHashes)), + Hash: hdrHash, + } + mbHdrs := make([]block.MiniBlockHeader, 0) + mbHdrs = append(mbHdrs, mbHdr) + hdr.MiniBlockHeaders = mbHdrs + + accounts := &mock.AccountsStub{ + CommitCalled: func() (i []byte, e error) { + return rootHash, nil + }, + RootHashCalled: func() ([]byte, error) { + return rootHash, nil + }, + } + fd := &mock.ForkDetectorMock{ + AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeader data.HeaderHandler, finalHeaderHash []byte) error { + return nil + }, + } + hasher := &mock.HasherStub{} + hasher.ComputeCalled = func(s string) []byte { + return hdrHash + } + store := initStore() + + var saveBlockCalled map[string]data.TransactionHandler + saveBlockCalledMutex := sync.Mutex{} + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{ + IndexerCalled: func() indexer.Indexer { + return &mock.IndexerMock{ + SaveBlockCalled: func(body data.BodyHandler, header data.HeaderHandler, txPool map[string]data.TransactionHandler) { + saveBlockCalledMutex.Lock() + saveBlockCalled = txPool + 
saveBlockCalledMutex.Unlock() + }, + } + }, + }, + tdp, + store, + hasher, + &mock.MarshalizerMock{}, + accounts, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + fd, + &mock.BlocksTrackerMock{ + AddBlockCalled: func(headerHandler data.HeaderHandler) { + }, + UnnotarisedBlocksCalled: func() []data.HeaderHandler { + return make([]data.HeaderHandler, 0) + }, + }, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{ + GetAllCurrentUsedTxsCalled: func(blockType block.Type) map[string]data.TransactionHandler { + switch blockType { + case block.TxBlock: + return map[string]data.TransactionHandler{ + "tx_1": &transaction.Transaction{Nonce: 1}, + "tx_2": &transaction.Transaction{Nonce: 2}, + } + case block.SmartContractResultBlock: + return map[string]data.TransactionHandler{ + "utx_1": &smartContractResult.SmartContractResult{Nonce: 1}, + "utx_2": &smartContractResult.SmartContractResult{Nonce: 2}, + } + default: + return nil + } + }, + }, + &mock.Uint64ByteSliceConverterMock{}, + ) + + blkc := createTestBlockchain() + blkc.GetCurrentBlockHeaderCalled = func() data.HeaderHandler { + return prevHdr + } + blkc.GetCurrentBlockHeaderHashCalled = func() []byte { + return hdrHash + } + err := sp.ProcessBlock(blkc, hdr, body, haveTime) + assert.Nil(t, err) + err = sp.CommitBlock(blkc, hdr, body) + assert.Nil(t, err) + + // Wait for the index block go routine to start + time.Sleep(time.Second * 2) + + saveBlockCalledMutex.Lock() + wasCalled := saveBlockCalled + saveBlockCalledMutex.Unlock() + + assert.Equal(t, 4, len(wasCalled)) } func TestShardProcessor_CreateTxBlockBodyWithDirtyAccStateShouldErr(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - journalLen := func() int { return 3 } - revToSnapshot := func(snapshot int) error { return nil } - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{ - JournalLenCalled: journalLen, - RevertToSnapshotCalled: revToSnapshot, - }, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - bl, err := sp.CreateBlockBody(0, func() bool { return true }) - // nil block - assert.Nil(t, bl) - // error - assert.Equal(t, process.ErrAccountStateDirty, err) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + journalLen := func() int { return 3 } + revToSnapshot := func(snapshot int) error { return nil } + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.AccountsStub{ + JournalLenCalled: journalLen, + RevertToSnapshotCalled: revToSnapshot, + }, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + bl, err := sp.CreateBlockBody(0, func() bool { return true }) + // nil block + assert.Nil(t, bl) + // error + 
assert.Equal(t, process.ErrAccountStateDirty, err) } func TestShardProcessor_CreateTxBlockBodyWithNoTimeShouldEmptyBlock(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - journalLen := func() int { return 0 } - rootHashfunc := func() ([]byte, error) { - return []byte("roothash"), nil - } - revToSnapshot := func(snapshot int) error { return nil } - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{ - JournalLenCalled: journalLen, - RootHashCalled: rootHashfunc, - RevertToSnapshotCalled: revToSnapshot, - }, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - haveTime := func() bool { - return false - } - bl, err := sp.CreateBlockBody(0, haveTime) - // no error - assert.Equal(t, process.ErrTimeIsOut, err) - // no miniblocks - assert.Nil(t, bl) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + journalLen := func() int { return 0 } + rootHashfunc := func() ([]byte, error) { + return []byte("roothash"), nil + } + revToSnapshot := func(snapshot int) error { return nil } + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.AccountsStub{ + JournalLenCalled: journalLen, + RootHashCalled: rootHashfunc, + RevertToSnapshotCalled: revToSnapshot, + }, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + haveTime := func() bool { + return false + } + bl, err := sp.CreateBlockBody(0, haveTime) + // no error + assert.Equal(t, process.ErrTimeIsOut, err) + // no miniblocks + assert.Nil(t, bl) } func TestShardProcessor_CreateTxBlockBodyOK(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - journalLen := func() int { return 0 } - rootHashfunc := func() ([]byte, error) { - return []byte("roothash"), nil - } - haveTime := func() bool { - return true - } - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{ - JournalLenCalled: journalLen, - RootHashCalled: rootHashfunc, - }, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - blk, err := sp.CreateBlockBody(0, haveTime) - assert.NotNil(t, blk) - assert.Nil(t, err) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + journalLen := func() int { return 0 } + rootHashfunc := func() ([]byte, error) { + return []byte("roothash"), nil + } + haveTime := func() bool { + return true + } + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + 
&mock.MarshalizerMock{}, + &mock.AccountsStub{ + JournalLenCalled: journalLen, + RootHashCalled: rootHashfunc, + }, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + blk, err := sp.CreateBlockBody(0, haveTime) + assert.NotNil(t, blk) + assert.Nil(t, err) } //------- ComputeNewNoncePrevHash func TestNode_ComputeNewNoncePrevHashShouldWork(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - marshalizer := &mock.MarshalizerStub{} - hasher := &mock.HasherStub{} - be, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - initStore(), - hasher, - marshalizer, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - hdr, txBlock := createTestHdrTxBlockBody() - marshalizer.MarshalCalled = func(obj interface{}) (bytes []byte, e error) { - if hdr == obj { - return []byte("hdrHeaderMarshalized"), nil - } - if reflect.DeepEqual(txBlock, obj) { - return []byte("txBlockBodyMarshalized"), nil - } - return nil, nil - } - hasher.ComputeCalled = func(s string) []byte { - if s == "hdrHeaderMarshalized" { - return []byte("hdr hash") - } - if s == "txBlockBodyMarshalized" { - return []byte("tx block body hash") - } - return nil - } - _, err := be.ComputeHeaderHash(hdr) - assert.Nil(t, err) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + marshalizer := &mock.MarshalizerStub{} + hasher := &mock.HasherStub{} + be, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + initStore(), + hasher, + marshalizer, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + hdr, txBlock := createTestHdrTxBlockBody() + marshalizer.MarshalCalled = func(obj interface{}) (bytes []byte, e error) { + if hdr == obj { + return []byte("hdrHeaderMarshalized"), nil + } + if reflect.DeepEqual(txBlock, obj) { + return []byte("txBlockBodyMarshalized"), nil + } + return nil, nil + } + hasher.ComputeCalled = func(s string) []byte { + if s == "hdrHeaderMarshalized" { + return []byte("hdr hash") + } + if s == "txBlockBodyMarshalized" { + return []byte("tx block body hash") + } + return nil + } + _, err := be.ComputeHeaderHash(hdr) + assert.Nil(t, err) } func createTestHdrTxBlockBody() (*block.Header, block.Body) { - hasher := mock.HasherMock{} - hdr := &block.Header{ - Nonce: 1, - ShardId: 2, - Epoch: 3, - Round: 4, - TimeStamp: uint64(11223344), - PrevHash: hasher.Compute("prev hash"), - PubKeysBitmap: []byte{255, 0, 128}, - Signature: hasher.Compute("signature"), - RootHash: hasher.Compute("root hash"), - } - txBlock := block.Body{ - { - ReceiverShardID: 0, - SenderShardID: 0, - TxHashes: [][]byte{ - hasher.Compute("txHash_0_1"), - hasher.Compute("txHash_0_2"), - }, - }, - { - 
ReceiverShardID: 1, - SenderShardID: 0, - TxHashes: [][]byte{ - hasher.Compute("txHash_1_1"), - hasher.Compute("txHash_1_2"), - }, - }, - { - ReceiverShardID: 2, - SenderShardID: 0, - TxHashes: [][]byte{ - hasher.Compute("txHash_2_1"), - }, - }, - { - ReceiverShardID: 3, - SenderShardID: 0, - TxHashes: make([][]byte, 0), - }, - } - return hdr, txBlock + hasher := mock.HasherMock{} + hdr := &block.Header{ + Nonce: 1, + ShardId: 2, + Epoch: 3, + Round: 4, + TimeStamp: uint64(11223344), + PrevHash: hasher.Compute("prev hash"), + PubKeysBitmap: []byte{255, 0, 128}, + Signature: hasher.Compute("signature"), + RootHash: hasher.Compute("root hash"), + } + txBlock := block.Body{ + { + ReceiverShardID: 0, + SenderShardID: 0, + TxHashes: [][]byte{ + hasher.Compute("txHash_0_1"), + hasher.Compute("txHash_0_2"), + }, + }, + { + ReceiverShardID: 1, + SenderShardID: 0, + TxHashes: [][]byte{ + hasher.Compute("txHash_1_1"), + hasher.Compute("txHash_1_2"), + }, + }, + { + ReceiverShardID: 2, + SenderShardID: 0, + TxHashes: [][]byte{ + hasher.Compute("txHash_2_1"), + }, + }, + { + ReceiverShardID: 3, + SenderShardID: 0, + TxHashes: make([][]byte, 0), + }, + } + return hdr, txBlock } //------- ComputeNewNoncePrevHash func TestShardProcessor_DisplayLogInfo(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - hasher := mock.HasherMock{} - hdr, txBlock := createTestHdrTxBlockBody() - shardCoordinator := mock.NewMultiShardsCoordinatorMock(3) - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - initStore(), - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - initAccountsMock(), - shardCoordinator, - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(shardCoordinator), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - assert.NotNil(t, sp) - hdr.PrevHash = hasher.Compute("prev hash") - sp.DisplayLogInfo(hdr, txBlock, []byte("tx_hash1"), shardCoordinator.NumberOfShards(), shardCoordinator.SelfId(), tdp) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + hasher := mock.HasherMock{} + hdr, txBlock := createTestHdrTxBlockBody() + shardCoordinator := mock.NewMultiShardsCoordinatorMock(3) + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + initStore(), + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + initAccountsMock(), + shardCoordinator, + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(shardCoordinator), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + assert.NotNil(t, sp) + hdr.PrevHash = hasher.Compute("prev hash") + sp.DisplayLogInfo(hdr, txBlock, []byte("tx_hash1"), shardCoordinator.NumberOfShards(), shardCoordinator.SelfId(), tdp) } func TestBlockProcessor_CreateBlockHeaderShouldNotReturnNil(t *testing.T) { - t.Parallel() - bp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - initDataPool([]byte("tx_hash1")), - initStore(), - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - 
&mock.Uint64ByteSliceConverterMock{}, - ) - mbHeaders, err := bp.CreateBlockHeader(nil, 0, func() bool { - return true - }) - assert.Nil(t, err) - assert.NotNil(t, mbHeaders) - assert.Equal(t, 0, len(mbHeaders.(*block.Header).MiniBlockHeaders)) + t.Parallel() + bp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + initDataPool([]byte("tx_hash1")), + initStore(), + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + mbHeaders, err := bp.CreateBlockHeader(nil, 0, func() bool { + return true + }) + assert.Nil(t, err) + assert.NotNil(t, mbHeaders) + assert.Equal(t, 0, len(mbHeaders.(*block.Header).MiniBlockHeaders)) } func TestShardProcessor_CreateBlockHeaderShouldErrWhenMarshalizerErrors(t *testing.T) { - t.Parallel() - bp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - initDataPool([]byte("tx_hash1")), - initStore(), - &mock.HasherStub{}, - &mock.MarshalizerMock{Fail: true}, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - body := block.Body{ - { - ReceiverShardID: 1, - SenderShardID: 0, - TxHashes: make([][]byte, 0), - }, - { - ReceiverShardID: 2, - SenderShardID: 0, - TxHashes: make([][]byte, 0), - }, - { - ReceiverShardID: 3, - SenderShardID: 0, - TxHashes: make([][]byte, 0), - }, - } - mbHeaders, err := bp.CreateBlockHeader(body, 0, func() bool { - return true - }) - assert.NotNil(t, err) - assert.Nil(t, mbHeaders) + t.Parallel() + bp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + initDataPool([]byte("tx_hash1")), + initStore(), + &mock.HasherStub{}, + &mock.MarshalizerMock{Fail: true}, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + body := block.Body{ + { + ReceiverShardID: 1, + SenderShardID: 0, + TxHashes: make([][]byte, 0), + }, + { + ReceiverShardID: 2, + SenderShardID: 0, + TxHashes: make([][]byte, 0), + }, + { + ReceiverShardID: 3, + SenderShardID: 0, + TxHashes: make([][]byte, 0), + }, + } + mbHeaders, err := bp.CreateBlockHeader(body, 0, func() bool { + return true + }) + assert.NotNil(t, err) + assert.Nil(t, mbHeaders) } func TestShardProcessor_CreateBlockHeaderReturnsOK(t *testing.T) { - t.Parallel() - bp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - initDataPool([]byte("tx_hash1")), - initStore(), - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - 
&mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - body := block.Body{ - { - ReceiverShardID: 1, - SenderShardID: 0, - TxHashes: make([][]byte, 0), - }, - { - ReceiverShardID: 2, - SenderShardID: 0, - TxHashes: make([][]byte, 0), - }, - { - ReceiverShardID: 3, - SenderShardID: 0, - TxHashes: make([][]byte, 0), - }, - } - mbHeaders, err := bp.CreateBlockHeader(body, 0, func() bool { - return true - }) - assert.Nil(t, err) - assert.Equal(t, len(body), len(mbHeaders.(*block.Header).MiniBlockHeaders)) + t.Parallel() + bp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + initDataPool([]byte("tx_hash1")), + initStore(), + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + body := block.Body{ + { + ReceiverShardID: 1, + SenderShardID: 0, + TxHashes: make([][]byte, 0), + }, + { + ReceiverShardID: 2, + SenderShardID: 0, + TxHashes: make([][]byte, 0), + }, + { + ReceiverShardID: 3, + SenderShardID: 0, + TxHashes: make([][]byte, 0), + }, + } + mbHeaders, err := bp.CreateBlockHeader(body, 0, func() bool { + return true + }) + assert.Nil(t, err) + assert.Equal(t, len(body), len(mbHeaders.(*block.Header).MiniBlockHeaders)) } func TestShardProcessor_CommitBlockShouldRevertAccountStateWhenErr(t *testing.T) { - t.Parallel() - // set accounts dirty - journalEntries := 3 - revToSnapshot := func(snapshot int) error { - journalEntries = 0 - return nil - } - bp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - initDataPool([]byte("tx_hash1")), - initStore(), - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{ - RevertToSnapshotCalled: revToSnapshot, - }, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - err := bp.CommitBlock(nil, nil, nil) - assert.NotNil(t, err) - assert.Equal(t, 0, journalEntries) + t.Parallel() + // set accounts dirty + journalEntries := 3 + revToSnapshot := func(snapshot int) error { + journalEntries = 0 + return nil + } + bp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + initDataPool([]byte("tx_hash1")), + initStore(), + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.AccountsStub{ + RevertToSnapshotCalled: revToSnapshot, + }, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + err := bp.CommitBlock(nil, nil, nil) + assert.NotNil(t, err) + assert.Equal(t, 0, journalEntries) } func TestShardProcessor_MarshalizedDataToBroadcastShouldWork(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - txHash0 := []byte("txHash0") - mb0 := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 0, - TxHashes: [][]byte{txHash0}, - } - txHash1 := 
[]byte("txHash1") - mb1 := block.MiniBlock{ - ReceiverShardID: 1, - SenderShardID: 0, - TxHashes: [][]byte{txHash1}, - } - body := make(block.Body, 0) - body = append(body, &mb0) - body = append(body, &mb1) - body = append(body, &mb0) - body = append(body, &mb1) - marshalizer := &mock.MarshalizerMock{ - Fail: false, - } - - factory, _ := shard.NewPreProcessorsContainerFactory( - mock.NewMultiShardsCoordinatorMock(3), - initStore(), - marshalizer, - &mock.HasherMock{}, - tdp, - &mock.AddressConverterMock{}, - initAccountsMock(), - &mock.RequestHandlerMock{}, - &mock.TxProcessorMock{}, - &mock.SCProcessorMock{}, - &mock.SmartContractResultsProcessorMock{}, - &mock.RewardTxProcessorMock{}, - ) - container, _ := factory.Create() - - tc, err := coordinator.NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(3), - initAccountsMock(), - tdp, - &mock.RequestHandlerMock{}, - container, - &mock.InterimProcessorContainerMock{}, - ) - - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - initStore(), - &mock.HasherStub{}, - marshalizer, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - tc, - &mock.Uint64ByteSliceConverterMock{}, - ) - msh, mstx, err := sp.MarshalizedDataToBroadcast(&block.Header{}, body) - assert.Nil(t, err) - assert.NotNil(t, msh) - assert.NotNil(t, mstx) - _, found := msh[0] - assert.False(t, found) - - expectedBody := make(block.Body, 0) - err = marshalizer.Unmarshal(&expectedBody, msh[1]) - assert.Nil(t, err) - assert.Equal(t, len(expectedBody), 2) - assert.Equal(t, &mb1, expectedBody[0]) - assert.Equal(t, &mb1, expectedBody[1]) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + txHash0 := []byte("txHash0") + mb0 := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 0, + TxHashes: [][]byte{txHash0}, + } + txHash1 := []byte("txHash1") + mb1 := block.MiniBlock{ + ReceiverShardID: 1, + SenderShardID: 0, + TxHashes: [][]byte{txHash1}, + } + body := make(block.Body, 0) + body = append(body, &mb0) + body = append(body, &mb1) + body = append(body, &mb0) + body = append(body, &mb1) + marshalizer := &mock.MarshalizerMock{ + Fail: false, + } + + factory, _ := shard.NewPreProcessorsContainerFactory( + mock.NewMultiShardsCoordinatorMock(3), + initStore(), + marshalizer, + &mock.HasherMock{}, + tdp, + &mock.AddressConverterMock{}, + initAccountsMock(), + &mock.RequestHandlerMock{}, + &mock.TxProcessorMock{}, + &mock.SCProcessorMock{}, + &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + ) + container, _ := factory.Create() + + tc, err := coordinator.NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(3), + initAccountsMock(), + tdp, + &mock.RequestHandlerMock{}, + container, + &mock.InterimProcessorContainerMock{}, + ) + + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + initStore(), + &mock.HasherStub{}, + marshalizer, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + tc, + &mock.Uint64ByteSliceConverterMock{}, + ) + msh, mstx, err := sp.MarshalizedDataToBroadcast(&block.Header{}, body) + assert.Nil(t, err) + 
assert.NotNil(t, msh) + assert.NotNil(t, mstx) + _, found := msh[0] + assert.False(t, found) + + expectedBody := make(block.Body, 0) + err = marshalizer.Unmarshal(&expectedBody, msh[1]) + assert.Nil(t, err) + assert.Equal(t, len(expectedBody), 2) + assert.Equal(t, &mb1, expectedBody[0]) + assert.Equal(t, &mb1, expectedBody[1]) } func TestShardProcessor_MarshalizedDataWrongType(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - marshalizer := &mock.MarshalizerMock{ - Fail: false, - } - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - initStore(), - &mock.HasherStub{}, - marshalizer, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - wr := &wrongBody{} - msh, mstx, err := sp.MarshalizedDataToBroadcast(&block.Header{}, wr) - assert.Equal(t, process.ErrWrongTypeAssertion, err) - assert.Nil(t, msh) - assert.Nil(t, mstx) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + marshalizer := &mock.MarshalizerMock{ + Fail: false, + } + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + initStore(), + &mock.HasherStub{}, + marshalizer, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + wr := &wrongBody{} + msh, mstx, err := sp.MarshalizedDataToBroadcast(&block.Header{}, wr) + assert.Equal(t, process.ErrWrongTypeAssertion, err) + assert.Nil(t, msh) + assert.Nil(t, mstx) } func TestShardProcessor_MarshalizedDataNilInput(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - marshalizer := &mock.MarshalizerMock{ - Fail: false, - } - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - initStore(), - &mock.HasherStub{}, - marshalizer, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - msh, mstx, err := sp.MarshalizedDataToBroadcast(nil, nil) - assert.Equal(t, process.ErrNilMiniBlocks, err) - assert.Nil(t, msh) - assert.Nil(t, mstx) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + marshalizer := &mock.MarshalizerMock{ + Fail: false, + } + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + initStore(), + &mock.HasherStub{}, + marshalizer, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + msh, mstx, err := sp.MarshalizedDataToBroadcast(nil, nil) + assert.Equal(t, process.ErrNilMiniBlocks, err) + assert.Nil(t, 
msh) + assert.Nil(t, mstx) } func TestShardProcessor_MarshalizedDataMarshalWithoutSuccess(t *testing.T) { - t.Parallel() - wasCalled := false - tdp := initDataPool([]byte("tx_hash1")) - txHash0 := []byte("txHash0") - mb0 := block.MiniBlock{ - ReceiverShardID: 1, - SenderShardID: 0, - TxHashes: [][]byte{txHash0}, - } - body := make(block.Body, 0) - body = append(body, &mb0) - marshalizer := &mock.MarshalizerStub{ - MarshalCalled: func(obj interface{}) ([]byte, error) { - wasCalled = true - return nil, process.ErrMarshalWithoutSuccess - }, - } - - factory, _ := shard.NewPreProcessorsContainerFactory( - mock.NewMultiShardsCoordinatorMock(3), - initStore(), - marshalizer, - &mock.HasherMock{}, - tdp, - &mock.AddressConverterMock{}, - initAccountsMock(), - &mock.RequestHandlerMock{}, - &mock.TxProcessorMock{}, - &mock.SCProcessorMock{}, - &mock.SmartContractResultsProcessorMock{}, - &mock.RewardTxProcessorMock{}, - ) - container, _ := factory.Create() - - tc, err := coordinator.NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(3), - initAccountsMock(), - tdp, - &mock.RequestHandlerMock{}, - container, - &mock.InterimProcessorContainerMock{}, - ) - - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - initStore(), - &mock.HasherStub{}, - marshalizer, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - tc, - &mock.Uint64ByteSliceConverterMock{}, - ) - - msh, mstx, err := sp.MarshalizedDataToBroadcast(&block.Header{}, body) - assert.Nil(t, err) - assert.True(t, wasCalled) - assert.Equal(t, 0, len(msh)) - assert.Equal(t, 0, len(mstx)) + t.Parallel() + wasCalled := false + tdp := initDataPool([]byte("tx_hash1")) + txHash0 := []byte("txHash0") + mb0 := block.MiniBlock{ + ReceiverShardID: 1, + SenderShardID: 0, + TxHashes: [][]byte{txHash0}, + } + body := make(block.Body, 0) + body = append(body, &mb0) + marshalizer := &mock.MarshalizerStub{ + MarshalCalled: func(obj interface{}) ([]byte, error) { + wasCalled = true + return nil, process.ErrMarshalWithoutSuccess + }, + } + + factory, _ := shard.NewPreProcessorsContainerFactory( + mock.NewMultiShardsCoordinatorMock(3), + initStore(), + marshalizer, + &mock.HasherMock{}, + tdp, + &mock.AddressConverterMock{}, + initAccountsMock(), + &mock.RequestHandlerMock{}, + &mock.TxProcessorMock{}, + &mock.SCProcessorMock{}, + &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + ) + container, _ := factory.Create() + + tc, err := coordinator.NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(3), + initAccountsMock(), + tdp, + &mock.RequestHandlerMock{}, + container, + &mock.InterimProcessorContainerMock{}, + ) + + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + initStore(), + &mock.HasherStub{}, + marshalizer, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + tc, + &mock.Uint64ByteSliceConverterMock{}, + ) + + msh, mstx, err := sp.MarshalizedDataToBroadcast(&block.Header{}, body) + assert.Nil(t, err) + assert.True(t, wasCalled) + assert.Equal(t, 0, len(msh)) + assert.Equal(t, 0, len(mstx)) } //------- 
receivedMetaBlock func TestShardProcessor_ReceivedMetaBlockShouldRequestMissingMiniBlocks(t *testing.T) { - t.Parallel() - - hasher := mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} - dataPool := mock.NewPoolsHolderMock() - - //we will have a metablock that will return 3 miniblock hashes - //1 miniblock hash will be in cache - //2 will be requested on network - - miniBlockHash1 := []byte("miniblock hash 1 found in cache") - miniBlockHash2 := []byte("miniblock hash 2") - miniBlockHash3 := []byte("miniblock hash 3") - - metaBlock := mock.HeaderHandlerStub{ - GetMiniBlockHeadersWithDstCalled: func(destId uint32) map[string]uint32 { - return map[string]uint32{ - string(miniBlockHash1): 0, - string(miniBlockHash2): 0, - string(miniBlockHash3): 0, - } - }, - } - - //put this metaBlock inside datapool - metaBlockHash := []byte("metablock hash") - dataPool.MetaBlocks().Put(metaBlockHash, &metaBlock) - //put the existing miniblock inside datapool - dataPool.MiniBlocks().Put(miniBlockHash1, &block.MiniBlock{}) - - miniBlockHash1Requested := int32(0) - miniBlockHash2Requested := int32(0) - miniBlockHash3Requested := int32(0) - - requestHandler := &mock.RequestHandlerMock{RequestMiniBlockHandlerCalled: func(destShardID uint32, miniblockHash []byte) { - if bytes.Equal(miniBlockHash1, miniblockHash) { - atomic.AddInt32(&miniBlockHash1Requested, 1) - } - if bytes.Equal(miniBlockHash2, miniblockHash) { - atomic.AddInt32(&miniBlockHash2Requested, 1) - } - if bytes.Equal(miniBlockHash3, miniblockHash) { - atomic.AddInt32(&miniBlockHash3Requested, 1) - } - }} - - tc, _ := coordinator.NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(3), - initAccountsMock(), - dataPool, - requestHandler, - &mock.PreProcessorContainerMock{}, - &mock.InterimProcessorContainerMock{}, - ) - - bp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - dataPool, - initStore(), - hasher, - marshalizer, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - requestHandler, - tc, - &mock.Uint64ByteSliceConverterMock{}, - ) - bp.ReceivedMetaBlock(metaBlockHash) - - //we have to wait to be sure txHash1Requested is not incremented by a late call - time.Sleep(time.Second) - - assert.Equal(t, int32(0), atomic.LoadInt32(&miniBlockHash1Requested)) - assert.Equal(t, int32(1), atomic.LoadInt32(&miniBlockHash2Requested)) - assert.Equal(t, int32(1), atomic.LoadInt32(&miniBlockHash2Requested)) + t.Parallel() + + hasher := mock.HasherMock{} + marshalizer := &mock.MarshalizerMock{} + dataPool := mock.NewPoolsHolderMock() + + //we will have a metablock that will return 3 miniblock hashes + //1 miniblock hash will be in cache + //2 will be requested on network + + miniBlockHash1 := []byte("miniblock hash 1 found in cache") + miniBlockHash2 := []byte("miniblock hash 2") + miniBlockHash3 := []byte("miniblock hash 3") + + metaBlock := mock.HeaderHandlerStub{ + GetMiniBlockHeadersWithDstCalled: func(destId uint32) map[string]uint32 { + return map[string]uint32{ + string(miniBlockHash1): 0, + string(miniBlockHash2): 0, + string(miniBlockHash3): 0, + } + }, + } + + //put this metaBlock inside datapool + metaBlockHash := []byte("metablock hash") + dataPool.MetaBlocks().Put(metaBlockHash, &metaBlock) + //put the existing miniblock inside datapool + dataPool.MiniBlocks().Put(miniBlockHash1, &block.MiniBlock{}) + + 
miniBlockHash1Requested := int32(0) + miniBlockHash2Requested := int32(0) + miniBlockHash3Requested := int32(0) + + requestHandler := &mock.RequestHandlerMock{RequestMiniBlockHandlerCalled: func(destShardID uint32, miniblockHash []byte) { + if bytes.Equal(miniBlockHash1, miniblockHash) { + atomic.AddInt32(&miniBlockHash1Requested, 1) + } + if bytes.Equal(miniBlockHash2, miniblockHash) { + atomic.AddInt32(&miniBlockHash2Requested, 1) + } + if bytes.Equal(miniBlockHash3, miniblockHash) { + atomic.AddInt32(&miniBlockHash3Requested, 1) + } + }} + + tc, _ := coordinator.NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(3), + initAccountsMock(), + dataPool, + requestHandler, + &mock.PreProcessorContainerMock{}, + &mock.InterimProcessorContainerMock{}, + ) + + bp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + dataPool, + initStore(), + hasher, + marshalizer, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + requestHandler, + tc, + &mock.Uint64ByteSliceConverterMock{}, + ) + bp.ReceivedMetaBlock(metaBlockHash) + + //we have to wait to be sure txHash1Requested is not incremented by a late call + time.Sleep(time.Second) + + assert.Equal(t, int32(0), atomic.LoadInt32(&miniBlockHash1Requested)) + assert.Equal(t, int32(1), atomic.LoadInt32(&miniBlockHash2Requested)) + assert.Equal(t, int32(1), atomic.LoadInt32(&miniBlockHash2Requested)) } //--------- receivedMetaBlockNoMissingMiniBlocks func TestShardProcessor_ReceivedMetaBlockNoMissingMiniBlocksShouldPass(t *testing.T) { - t.Parallel() - - hasher := mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} - dataPool := mock.NewPoolsHolderMock() - - //we will have a metablock that will return 3 miniblock hashes - //1 miniblock hash will be in cache - //2 will be requested on network - - miniBlockHash1 := []byte("miniblock hash 1 found in cache") - - metaBlock := mock.HeaderHandlerStub{ - GetMiniBlockHeadersWithDstCalled: func(destId uint32) map[string]uint32 { - return map[string]uint32{ - string(miniBlockHash1): 0, - } - }, - } - - //put this metaBlock inside datapool - metaBlockHash := []byte("metablock hash") - dataPool.MetaBlocks().Put(metaBlockHash, &metaBlock) - //put the existing miniblock inside datapool - dataPool.MiniBlocks().Put(miniBlockHash1, &block.MiniBlock{}) - - noOfMissingMiniBlocks := int32(0) - - requestHandler := &mock.RequestHandlerMock{RequestMiniBlockHandlerCalled: func(destShardID uint32, miniblockHash []byte) { - atomic.AddInt32(&noOfMissingMiniBlocks, 1) - }} - - tc, _ := coordinator.NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(3), - initAccountsMock(), - dataPool, - requestHandler, - &mock.PreProcessorContainerMock{}, - &mock.InterimProcessorContainerMock{}, - ) - - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - dataPool, - initStore(), - hasher, - marshalizer, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - requestHandler, - tc, - &mock.Uint64ByteSliceConverterMock{}, - ) - sp.ReceivedMetaBlock(metaBlockHash) - assert.Equal(t, int32(0), atomic.LoadInt32(&noOfMissingMiniBlocks)) + t.Parallel() + + hasher := mock.HasherMock{} + marshalizer := 
&mock.MarshalizerMock{} + dataPool := mock.NewPoolsHolderMock() + + //we will have a metablock that will return 3 miniblock hashes + //1 miniblock hash will be in cache + //2 will be requested on network + + miniBlockHash1 := []byte("miniblock hash 1 found in cache") + + metaBlock := mock.HeaderHandlerStub{ + GetMiniBlockHeadersWithDstCalled: func(destId uint32) map[string]uint32 { + return map[string]uint32{ + string(miniBlockHash1): 0, + } + }, + } + + //put this metaBlock inside datapool + metaBlockHash := []byte("metablock hash") + dataPool.MetaBlocks().Put(metaBlockHash, &metaBlock) + //put the existing miniblock inside datapool + dataPool.MiniBlocks().Put(miniBlockHash1, &block.MiniBlock{}) + + noOfMissingMiniBlocks := int32(0) + + requestHandler := &mock.RequestHandlerMock{RequestMiniBlockHandlerCalled: func(destShardID uint32, miniblockHash []byte) { + atomic.AddInt32(&noOfMissingMiniBlocks, 1) + }} + + tc, _ := coordinator.NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(3), + initAccountsMock(), + dataPool, + requestHandler, + &mock.PreProcessorContainerMock{}, + &mock.InterimProcessorContainerMock{}, + ) + + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + dataPool, + initStore(), + hasher, + marshalizer, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + requestHandler, + tc, + &mock.Uint64ByteSliceConverterMock{}, + ) + sp.ReceivedMetaBlock(metaBlockHash) + assert.Equal(t, int32(0), atomic.LoadInt32(&noOfMissingMiniBlocks)) } //--------- createAndProcessCrossMiniBlocksDstMe func TestShardProcessor_CreateAndProcessCrossMiniBlocksDstMe(t *testing.T) { - t.Parallel() - - tdp := mock.NewPoolsHolderMock() - txHash := []byte("tx_hash1") - tdp.Transactions().AddData(txHash, &transaction.Transaction{}, process.ShardCacherIdentifier(1, 0)) - body := make(block.Body, 0) - txHashes := make([][]byte, 0) - txHashes = append(txHashes, txHash) - miniblock := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 1, - TxHashes: txHashes, - } - body = append(body, &miniblock) - - hasher := &mock.HasherStub{} - marshalizer := &mock.MarshalizerMock{} - - mbbytes, _ := marshalizer.Marshal(miniblock) - mbHash := hasher.Compute(string(mbbytes)) - mbHdr := block.MiniBlockHeader{ - ReceiverShardID: 0, - SenderShardID: 1, - TxCount: uint32(len(txHashes)), - Hash: mbHash} - mbHdrs := make([]block.MiniBlockHeader, 0) - mbHdrs = append(mbHdrs, mbHdr) - - shardMiniBlock := block.ShardMiniBlockHeader{ - ReceiverShardId: mbHdr.ReceiverShardID, - SenderShardId: mbHdr.SenderShardID, - TxCount: mbHdr.TxCount, - Hash: mbHdr.Hash, - } - shardMiniblockHdrs := make([]block.ShardMiniBlockHeader, 0) - shardMiniblockHdrs = append(shardMiniblockHdrs, shardMiniBlock) - shardHeader := block.ShardData{ - ShardMiniBlockHeaders: shardMiniblockHdrs, - } - shardHdrs := make([]block.ShardData, 0) - shardHdrs = append(shardHdrs, shardHeader) - - meta := &block.MetaBlock{ - Nonce: 1, - ShardInfo: make([]block.ShardData, 0), - Round: 1, - PrevRandSeed: []byte("roothash"), - } - metaBytes, _ := marshalizer.Marshal(meta) - metaHash := hasher.Compute(string(metaBytes)) - - tdp.MetaBlocks().Put(metaHash, meta) - - haveTimeTrue := func() bool { - return true - } - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - 
&mock.MarshalizerMock{}, - &mock.AccountsStub{}, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - miniBlockSlice, usedMetaHdrsHashes, noOfTxs, err := sp.CreateAndProcessCrossMiniBlocksDstMe(3, 2, 2, haveTimeTrue) - assert.Equal(t, err == nil, true) - assert.Equal(t, len(miniBlockSlice) == 0, true) - assert.Equal(t, len(usedMetaHdrsHashes) == 0, true) - assert.Equal(t, noOfTxs, uint32(0)) + t.Parallel() + + tdp := mock.NewPoolsHolderMock() + txHash := []byte("tx_hash1") + tdp.Transactions().AddData(txHash, &transaction.Transaction{}, process.ShardCacherIdentifier(1, 0)) + body := make(block.Body, 0) + txHashes := make([][]byte, 0) + txHashes = append(txHashes, txHash) + miniblock := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 1, + TxHashes: txHashes, + } + body = append(body, &miniblock) + + hasher := &mock.HasherStub{} + marshalizer := &mock.MarshalizerMock{} + + mbbytes, _ := marshalizer.Marshal(miniblock) + mbHash := hasher.Compute(string(mbbytes)) + mbHdr := block.MiniBlockHeader{ + ReceiverShardID: 0, + SenderShardID: 1, + TxCount: uint32(len(txHashes)), + Hash: mbHash} + mbHdrs := make([]block.MiniBlockHeader, 0) + mbHdrs = append(mbHdrs, mbHdr) + + shardMiniBlock := block.ShardMiniBlockHeader{ + ReceiverShardId: mbHdr.ReceiverShardID, + SenderShardId: mbHdr.SenderShardID, + TxCount: mbHdr.TxCount, + Hash: mbHdr.Hash, + } + shardMiniblockHdrs := make([]block.ShardMiniBlockHeader, 0) + shardMiniblockHdrs = append(shardMiniblockHdrs, shardMiniBlock) + shardHeader := block.ShardData{ + ShardMiniBlockHeaders: shardMiniblockHdrs, + } + shardHdrs := make([]block.ShardData, 0) + shardHdrs = append(shardHdrs, shardHeader) + + meta := &block.MetaBlock{ + Nonce: 1, + ShardInfo: make([]block.ShardData, 0), + Round: 1, + PrevRandSeed: []byte("roothash"), + } + metaBytes, _ := marshalizer.Marshal(meta) + metaHash := hasher.Compute(string(metaBytes)) + + tdp.MetaBlocks().Put(metaHash, meta) + + haveTimeTrue := func() bool { + return true + } + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.AccountsStub{}, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + miniBlockSlice, usedMetaHdrsHashes, noOfTxs, err := sp.CreateAndProcessCrossMiniBlocksDstMe(3, 2, 2, haveTimeTrue) + assert.Equal(t, err == nil, true) + assert.Equal(t, len(miniBlockSlice) == 0, true) + assert.Equal(t, len(usedMetaHdrsHashes) == 0, true) + assert.Equal(t, noOfTxs, uint32(0)) } func TestShardProcessor_NewShardProcessorWrongTypeOfStartHeaderShouldErrWrongTypeAssertion(t *testing.T) { - t.Parallel() - - tdp := mock.NewPoolsHolderMock() - txHash := []byte(nil) - tdp.Transactions().AddData(txHash, &transaction.Transaction{}, process.ShardCacherIdentifier(1, 0)) - - startHeaders := createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)) - - startHeaders[sharding.MetachainShardId] = &block.Header{} - - sp, err := 
blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{}, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - startHeaders, - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - assert.Nil(t, sp) - assert.Equal(t, process.ErrWrongTypeAssertion, err) + t.Parallel() + + tdp := mock.NewPoolsHolderMock() + txHash := []byte(nil) + tdp.Transactions().AddData(txHash, &transaction.Transaction{}, process.ShardCacherIdentifier(1, 0)) + + startHeaders := createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)) + + startHeaders[sharding.MetachainShardId] = &block.Header{} + + sp, err := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.AccountsStub{}, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + startHeaders, + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + assert.Nil(t, sp) + assert.Equal(t, process.ErrWrongTypeAssertion, err) } func TestShardProcessor_CreateAndProcessCrossMiniBlocksDstMeProcessPartOfMiniBlocksInMetaBlock(t *testing.T) { - t.Parallel() - - haveTimeTrue := func() bool { - return true - } - tdp := mock.NewPoolsHolderMock() - destShardId := uint32(2) - - hasher := &mock.HasherStub{} - marshalizer := &mock.MarshalizerMock{} - miniblocks := make([]*block.MiniBlock, 6) - - txHash := []byte("txhash") - txHashes := make([][]byte, 0) - txHashes = append(txHashes, txHash) - - miniblock1 := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 1, - TxHashes: txHashes, - } - miniblock2 := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 2, - TxHashes: txHashes, - } - - miniBlocks := make([]block.MiniBlock, 0) - miniBlocks = append(miniBlocks, miniblock1, miniblock2) - - destShards := []uint32{1, 3, 4} - for i := 0; i < 6; i++ { - miniblocks[i], _ = createDummyMiniBlock(fmt.Sprintf("tx hash %d", i), marshalizer, hasher, destShardId, destShards[i/2]) - } - - //put 2 metablocks in pool - meta := &block.MetaBlock{ - Nonce: 1, - ShardInfo: createShardData(hasher, marshalizer, miniBlocks), - Round: 1, - PrevRandSeed: []byte("roothash"), - } - - mb1Hash := []byte("meta block 1") - tdp.MetaBlocks().Put( - mb1Hash, - meta, - ) - - meta = &block.MetaBlock{ - Nonce: 2, - ShardInfo: createShardData(hasher, marshalizer, miniBlocks), - Round: 2, - } - - mb2Hash := []byte("meta block 2") - tdp.MetaBlocks().Put( - mb2Hash, - meta, - ) - - meta = &block.MetaBlock{ - Nonce: 3, - ShardInfo: make([]block.ShardData, 0), - Round: 3, - PrevRandSeed: []byte("roothash"), - } - - mb3Hash := []byte("meta block 3") - tdp.MetaBlocks().Put( - mb3Hash, - meta, - ) - - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{}, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - 
&mock.Uint64ByteSliceConverterMock{}, - ) - - miniBlocksReturned, usedMetaHdrsHashes, nrTxAdded, err := sp.CreateAndProcessCrossMiniBlocksDstMe(3, 2, 2, haveTimeTrue) - - assert.Equal(t, 0, len(miniBlocksReturned)) - assert.Equal(t, 0, len(usedMetaHdrsHashes)) - assert.Equal(t, uint32(0), nrTxAdded) - assert.Nil(t, err) + t.Parallel() + + haveTimeTrue := func() bool { + return true + } + tdp := mock.NewPoolsHolderMock() + destShardId := uint32(2) + + hasher := &mock.HasherStub{} + marshalizer := &mock.MarshalizerMock{} + miniblocks := make([]*block.MiniBlock, 6) + + txHash := []byte("txhash") + txHashes := make([][]byte, 0) + txHashes = append(txHashes, txHash) + + miniblock1 := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 1, + TxHashes: txHashes, + } + miniblock2 := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 2, + TxHashes: txHashes, + } + + miniBlocks := make([]block.MiniBlock, 0) + miniBlocks = append(miniBlocks, miniblock1, miniblock2) + + destShards := []uint32{1, 3, 4} + for i := 0; i < 6; i++ { + miniblocks[i], _ = createDummyMiniBlock(fmt.Sprintf("tx hash %d", i), marshalizer, hasher, destShardId, destShards[i/2]) + } + + //put 2 metablocks in pool + meta := &block.MetaBlock{ + Nonce: 1, + ShardInfo: createShardData(hasher, marshalizer, miniBlocks), + Round: 1, + PrevRandSeed: []byte("roothash"), + } + + mb1Hash := []byte("meta block 1") + tdp.MetaBlocks().Put( + mb1Hash, + meta, + ) + + meta = &block.MetaBlock{ + Nonce: 2, + ShardInfo: createShardData(hasher, marshalizer, miniBlocks), + Round: 2, + } + + mb2Hash := []byte("meta block 2") + tdp.MetaBlocks().Put( + mb2Hash, + meta, + ) + + meta = &block.MetaBlock{ + Nonce: 3, + ShardInfo: make([]block.ShardData, 0), + Round: 3, + PrevRandSeed: []byte("roothash"), + } + + mb3Hash := []byte("meta block 3") + tdp.MetaBlocks().Put( + mb3Hash, + meta, + ) + + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.AccountsStub{}, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + miniBlocksReturned, usedMetaHdrsHashes, nrTxAdded, err := sp.CreateAndProcessCrossMiniBlocksDstMe(3, 2, 2, haveTimeTrue) + + assert.Equal(t, 0, len(miniBlocksReturned)) + assert.Equal(t, 0, len(usedMetaHdrsHashes)) + assert.Equal(t, uint32(0), nrTxAdded) + assert.Nil(t, err) } //------- createMiniBlocks func TestShardProcessor_CreateMiniBlocksShouldWorkWithIntraShardTxs(t *testing.T) { - t.Parallel() - - hasher := mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} - dataPool := mock.NewPoolsHolderMock() - - //we will have a 3 txs in pool - - txHash1 := []byte("tx hash 1") - txHash2 := []byte("tx hash 2") - txHash3 := []byte("tx hash 3") - - senderShardId := uint32(0) - receiverShardId := uint32(0) - - tx1Nonce := uint64(45) - tx2Nonce := uint64(46) - tx3Nonce := uint64(47) - - //put the existing tx inside datapool - cacheId := process.ShardCacherIdentifier(senderShardId, receiverShardId) - dataPool.Transactions().AddData(txHash1, &transaction.Transaction{ - Nonce: tx1Nonce, - Data: string(txHash1), - }, cacheId) - dataPool.Transactions().AddData(txHash2, &transaction.Transaction{ - Nonce: tx2Nonce, - Data: string(txHash2), - }, cacheId) - 
dataPool.Transactions().AddData(txHash3, &transaction.Transaction{ - Nonce: tx3Nonce, - Data: string(txHash3), - }, cacheId) - - tx1ExecutionResult := uint64(0) - tx2ExecutionResult := uint64(0) - tx3ExecutionResult := uint64(0) - - txProcessorMock := &mock.TxProcessorMock{ - ProcessTransactionCalled: func(transaction *transaction.Transaction, round uint64) error { - //execution, in this context, means moving the tx nonce to itx corresponding execution result variable - if transaction.Data == string(txHash1) { - tx1ExecutionResult = transaction.Nonce - } - if transaction.Data == string(txHash2) { - tx2ExecutionResult = transaction.Nonce - } - if transaction.Data == string(txHash3) { - tx3ExecutionResult = transaction.Nonce - } - - return nil - }, - } - shardCoordinator := mock.NewMultiShardsCoordinatorMock(3) - accntAdapter := &mock.AccountsStub{ - RevertToSnapshotCalled: func(snapshot int) error { - assert.Fail(t, "revert should have not been called") - return nil - }, - JournalLenCalled: func() int { - return 0 - }, - } - - factory, _ := shard.NewPreProcessorsContainerFactory( - shardCoordinator, - initStore(), - marshalizer, - hasher, - dataPool, - &mock.AddressConverterMock{}, - accntAdapter, - &mock.RequestHandlerMock{}, - txProcessorMock, - &mock.SCProcessorMock{}, - &mock.SmartContractResultsProcessorMock{}, - &mock.RewardTxProcessorMock{}, - ) - container, _ := factory.Create() - - tc, err := coordinator.NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(3), - accntAdapter, - dataPool, - &mock.RequestHandlerMock{}, - container, - &mock.InterimProcessorContainerMock{}, - ) - - bp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - dataPool, - initStore(), - hasher, - marshalizer, - accntAdapter, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - tc, - &mock.Uint64ByteSliceConverterMock{}, - ) - - blockBody, err := bp.CreateMiniBlocks(1, 15000, 0, func() bool { return true }) - - assert.Nil(t, err) - //testing execution - assert.Equal(t, tx1Nonce, tx1ExecutionResult) - assert.Equal(t, tx2Nonce, tx2ExecutionResult) - assert.Equal(t, tx3Nonce, tx3ExecutionResult) - //one miniblock output - assert.Equal(t, 1, len(blockBody)) - //miniblock should have 3 txs - assert.Equal(t, 3, len(blockBody[0].TxHashes)) - //testing all 3 hashes are present in block body - assert.True(t, isInTxHashes(txHash1, blockBody[0].TxHashes)) - assert.True(t, isInTxHashes(txHash2, blockBody[0].TxHashes)) - assert.True(t, isInTxHashes(txHash3, blockBody[0].TxHashes)) + t.Parallel() + + hasher := mock.HasherMock{} + marshalizer := &mock.MarshalizerMock{} + dataPool := mock.NewPoolsHolderMock() + + //we will have a 3 txs in pool + + txHash1 := []byte("tx hash 1") + txHash2 := []byte("tx hash 2") + txHash3 := []byte("tx hash 3") + + senderShardId := uint32(0) + receiverShardId := uint32(0) + + tx1Nonce := uint64(45) + tx2Nonce := uint64(46) + tx3Nonce := uint64(47) + + //put the existing tx inside datapool + cacheId := process.ShardCacherIdentifier(senderShardId, receiverShardId) + dataPool.Transactions().AddData(txHash1, &transaction.Transaction{ + Nonce: tx1Nonce, + Data: string(txHash1), + }, cacheId) + dataPool.Transactions().AddData(txHash2, &transaction.Transaction{ + Nonce: tx2Nonce, + Data: string(txHash2), + }, cacheId) + dataPool.Transactions().AddData(txHash3, 
&transaction.Transaction{ + Nonce: tx3Nonce, + Data: string(txHash3), + }, cacheId) + + tx1ExecutionResult := uint64(0) + tx2ExecutionResult := uint64(0) + tx3ExecutionResult := uint64(0) + + txProcessorMock := &mock.TxProcessorMock{ + ProcessTransactionCalled: func(transaction *transaction.Transaction, round uint64) error { + //execution, in this context, means moving the tx nonce to itx corresponding execution result variable + if transaction.Data == string(txHash1) { + tx1ExecutionResult = transaction.Nonce + } + if transaction.Data == string(txHash2) { + tx2ExecutionResult = transaction.Nonce + } + if transaction.Data == string(txHash3) { + tx3ExecutionResult = transaction.Nonce + } + + return nil + }, + } + shardCoordinator := mock.NewMultiShardsCoordinatorMock(3) + accntAdapter := &mock.AccountsStub{ + RevertToSnapshotCalled: func(snapshot int) error { + assert.Fail(t, "revert should have not been called") + return nil + }, + JournalLenCalled: func() int { + return 0 + }, + } + + factory, _ := shard.NewPreProcessorsContainerFactory( + shardCoordinator, + initStore(), + marshalizer, + hasher, + dataPool, + &mock.AddressConverterMock{}, + accntAdapter, + &mock.RequestHandlerMock{}, + txProcessorMock, + &mock.SCProcessorMock{}, + &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + ) + container, _ := factory.Create() + + tc, err := coordinator.NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(3), + accntAdapter, + dataPool, + &mock.RequestHandlerMock{}, + container, + &mock.InterimProcessorContainerMock{}, + ) + + bp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + dataPool, + initStore(), + hasher, + marshalizer, + accntAdapter, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + tc, + &mock.Uint64ByteSliceConverterMock{}, + ) + + blockBody, err := bp.CreateMiniBlocks(1, 15000, 0, func() bool { return true }) + + assert.Nil(t, err) + //testing execution + assert.Equal(t, tx1Nonce, tx1ExecutionResult) + assert.Equal(t, tx2Nonce, tx2ExecutionResult) + assert.Equal(t, tx3Nonce, tx3ExecutionResult) + //one miniblock output + assert.Equal(t, 1, len(blockBody)) + //miniblock should have 3 txs + assert.Equal(t, 3, len(blockBody[0].TxHashes)) + //testing all 3 hashes are present in block body + assert.True(t, isInTxHashes(txHash1, blockBody[0].TxHashes)) + assert.True(t, isInTxHashes(txHash2, blockBody[0].TxHashes)) + assert.True(t, isInTxHashes(txHash3, blockBody[0].TxHashes)) } func TestShardProcessor_GetProcessedMetaBlockFromPoolShouldWork(t *testing.T) { - t.Parallel() - - //we have 3 metablocks in pool each containing 2 miniblocks. 
- //blockbody will have 2 + 1 miniblocks from 2 out of the 3 metablocks - //The test should remove only one metablock - - destShardId := uint32(2) - - hasher := mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} - dataPool := mock.NewPoolsHolderMock() - - miniblocks := make([]*block.MiniBlock, 6) - miniblockHashes := make([][]byte, 6) - - destShards := []uint32{1, 3, 4} - for i := 0; i < 6; i++ { - mb, hash := createDummyMiniBlock(fmt.Sprintf("tx hash %d", i), marshalizer, hasher, destShardId, destShards[i/2]) - miniblocks[i] = mb - miniblockHashes[i] = hash - } - - //put 3 metablocks in pool - mb1Hash := []byte("meta block 1") - dataPool.MetaBlocks().Put( - mb1Hash, - createDummyMetaBlock(destShardId, destShards[0], miniblockHashes[0], miniblockHashes[1]), - ) - mb2Hash := []byte("meta block 2") - dataPool.MetaBlocks().Put( - mb2Hash, - createDummyMetaBlock(destShardId, destShards[1], miniblockHashes[2], miniblockHashes[3]), - ) - mb3Hash := []byte("meta block 3") - dataPool.MetaBlocks().Put( - mb3Hash, - createDummyMetaBlock(destShardId, destShards[2], miniblockHashes[4], miniblockHashes[5]), - ) - - shardCoordinator := mock.NewMultipleShardsCoordinatorMock() - shardCoordinator.CurrentShard = destShardId - shardCoordinator.SetNoShards(destShardId + 1) - - bp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - dataPool, - &mock.ChainStorerMock{}, - hasher, - marshalizer, - initAccountsMock(), - shardCoordinator, - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{ - GetHighestFinalBlockNonceCalled: func() uint64 { - return 0 - }, - }, - &mock.BlocksTrackerMock{ - RemoveNotarisedBlocksCalled: func(headerHandler data.HeaderHandler) error { - return nil - }, - }, - createGenesisBlocks(shardCoordinator), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - //create block body with first 3 miniblocks from miniblocks var - blockBody := block.Body{miniblocks[0], miniblocks[1], miniblocks[2]} - - hashes := make([][]byte, 0) - hashes = append(hashes, mb1Hash) - hashes = append(hashes, mb2Hash) - hashes = append(hashes, mb3Hash) - blockHeader := &block.Header{MetaBlockHashes: hashes} - - _, err := bp.GetProcessedMetaBlocksFromPool(blockBody, blockHeader) - - assert.Nil(t, err) - //check WasMiniBlockProcessed for remaining metablocks - metaBlock2Recov, _ := dataPool.MetaBlocks().Get(mb2Hash) - assert.True(t, (metaBlock2Recov.(data.HeaderHandler)).GetMiniBlockProcessed(miniblockHashes[2])) - assert.False(t, (metaBlock2Recov.(data.HeaderHandler)).GetMiniBlockProcessed(miniblockHashes[3])) - - metaBlock3Recov, _ := dataPool.MetaBlocks().Get(mb3Hash) - assert.False(t, (metaBlock3Recov.(data.HeaderHandler)).GetMiniBlockProcessed(miniblockHashes[4])) - assert.False(t, (metaBlock3Recov.(data.HeaderHandler)).GetMiniBlockProcessed(miniblockHashes[5])) + t.Parallel() + + //we have 3 metablocks in pool each containing 2 miniblocks. 
+ //blockbody will have 2 + 1 miniblocks from 2 out of the 3 metablocks + //The test should remove only one metablock + + destShardId := uint32(2) + + hasher := mock.HasherMock{} + marshalizer := &mock.MarshalizerMock{} + dataPool := mock.NewPoolsHolderMock() + + miniblocks := make([]*block.MiniBlock, 6) + miniblockHashes := make([][]byte, 6) + + destShards := []uint32{1, 3, 4} + for i := 0; i < 6; i++ { + mb, hash := createDummyMiniBlock(fmt.Sprintf("tx hash %d", i), marshalizer, hasher, destShardId, destShards[i/2]) + miniblocks[i] = mb + miniblockHashes[i] = hash + } + + //put 3 metablocks in pool + mb1Hash := []byte("meta block 1") + dataPool.MetaBlocks().Put( + mb1Hash, + createDummyMetaBlock(destShardId, destShards[0], miniblockHashes[0], miniblockHashes[1]), + ) + mb2Hash := []byte("meta block 2") + dataPool.MetaBlocks().Put( + mb2Hash, + createDummyMetaBlock(destShardId, destShards[1], miniblockHashes[2], miniblockHashes[3]), + ) + mb3Hash := []byte("meta block 3") + dataPool.MetaBlocks().Put( + mb3Hash, + createDummyMetaBlock(destShardId, destShards[2], miniblockHashes[4], miniblockHashes[5]), + ) + + shardCoordinator := mock.NewMultipleShardsCoordinatorMock() + shardCoordinator.CurrentShard = destShardId + shardCoordinator.SetNoShards(destShardId + 1) + + bp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + dataPool, + &mock.ChainStorerMock{}, + hasher, + marshalizer, + initAccountsMock(), + shardCoordinator, + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{ + GetHighestFinalBlockNonceCalled: func() uint64 { + return 0 + }, + }, + &mock.BlocksTrackerMock{ + RemoveNotarisedBlocksCalled: func(headerHandler data.HeaderHandler) error { + return nil + }, + }, + createGenesisBlocks(shardCoordinator), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + //create block body with first 3 miniblocks from miniblocks var + blockBody := block.Body{miniblocks[0], miniblocks[1], miniblocks[2]} + + hashes := make([][]byte, 0) + hashes = append(hashes, mb1Hash) + hashes = append(hashes, mb2Hash) + hashes = append(hashes, mb3Hash) + blockHeader := &block.Header{MetaBlockHashes: hashes} + + _, err := bp.GetProcessedMetaBlocksFromPool(blockBody, blockHeader) + + assert.Nil(t, err) + //check WasMiniBlockProcessed for remaining metablocks + metaBlock2Recov, _ := dataPool.MetaBlocks().Get(mb2Hash) + assert.True(t, (metaBlock2Recov.(data.HeaderHandler)).GetMiniBlockProcessed(miniblockHashes[2])) + assert.False(t, (metaBlock2Recov.(data.HeaderHandler)).GetMiniBlockProcessed(miniblockHashes[3])) + + metaBlock3Recov, _ := dataPool.MetaBlocks().Get(mb3Hash) + assert.False(t, (metaBlock3Recov.(data.HeaderHandler)).GetMiniBlockProcessed(miniblockHashes[4])) + assert.False(t, (metaBlock3Recov.(data.HeaderHandler)).GetMiniBlockProcessed(miniblockHashes[5])) } func TestBlockProcessor_RestoreBlockIntoPoolsShouldErrNilBlockHeader(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - - be, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - initStore(), - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - 
&mock.Uint64ByteSliceConverterMock{}, - ) - err := be.RestoreBlockIntoPools(nil, nil) - assert.NotNil(t, err) - assert.Equal(t, process.ErrNilBlockHeader, err) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + + be, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + initStore(), + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + err := be.RestoreBlockIntoPools(nil, nil) + assert.NotNil(t, err) + assert.Equal(t, process.ErrNilBlockHeader, err) } func TestBlockProcessor_RestoreBlockIntoPoolsShouldErrNilTxBlockBody(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - initStore(), - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - err := sp.RestoreBlockIntoPools(&block.Header{}, nil) - assert.NotNil(t, err) - assert.Equal(t, err, process.ErrNilTxBlockBody) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + initStore(), + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + err := sp.RestoreBlockIntoPools(&block.Header{}, nil) + assert.NotNil(t, err) + assert.Equal(t, err, process.ErrNilTxBlockBody) } func TestShardProcessor_RestoreBlockIntoPoolsShouldWork(t *testing.T) { - t.Parallel() - - txHash := []byte("tx hash 1") - - dataPool := mock.NewPoolsHolderMock() - marshalizerMock := &mock.MarshalizerMock{} - hasherMock := &mock.HasherStub{} - - body := make(block.Body, 0) - tx := transaction.Transaction{Nonce: 1} - buffTx, _ := marshalizerMock.Marshal(tx) - - store := &mock.ChainStorerMock{ - GetAllCalled: func(unitType dataRetriever.UnitType, keys [][]byte) (map[string][]byte, error) { - m := make(map[string][]byte, 0) - m[string(txHash)] = buffTx - return m, nil - }, - GetStorerCalled: func(unitType dataRetriever.UnitType) storage.Storer { - return &mock.StorerStub{ - RemoveCalled: func(key []byte) error { - return nil - }, - } - }, - } - - factory, _ := shard.NewPreProcessorsContainerFactory( - mock.NewMultiShardsCoordinatorMock(3), - store, - marshalizerMock, - hasherMock, - dataPool, - &mock.AddressConverterMock{}, - initAccountsMock(), - &mock.RequestHandlerMock{}, - &mock.TxProcessorMock{}, - &mock.SCProcessorMock{}, - &mock.SmartContractResultsProcessorMock{}, - &mock.RewardTxProcessorMock{}, - ) - container, _ := factory.Create() - - tc, err := coordinator.NewTransactionCoordinator( - 
mock.NewMultiShardsCoordinatorMock(3), - initAccountsMock(), - dataPool, - &mock.RequestHandlerMock{}, - container, - &mock.InterimProcessorContainerMock{}, - ) - - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - dataPool, - store, - hasherMock, - marshalizerMock, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - tc, - &mock.Uint64ByteSliceConverterMock{}, - ) - - txHashes := make([][]byte, 0) - txHashes = append(txHashes, txHash) - miniblock := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 1, - TxHashes: txHashes, - } - body = append(body, &miniblock) - - miniblockHash := []byte("mini block hash 1") - hasherMock.ComputeCalled = func(s string) []byte { - return miniblockHash - } - - metablockHash := []byte("meta block hash 1") - metablockHeader := createDummyMetaBlock(0, 1, miniblockHash) - metablockHeader.SetMiniBlockProcessed(metablockHash, true) - dataPool.MetaBlocks().Put( - metablockHash, - metablockHeader, - ) - - err = sp.RestoreBlockIntoPools(&block.Header{}, body) - - miniblockFromPool, _ := dataPool.MiniBlocks().Get(miniblockHash) - txFromPool, _ := dataPool.Transactions().SearchFirstData(txHash) - metablockFromPool, _ := dataPool.MetaBlocks().Get(metablockHash) - metablock := metablockFromPool.(*block.MetaBlock) - assert.Nil(t, err) - assert.Equal(t, &miniblock, miniblockFromPool) - assert.Equal(t, &tx, txFromPool) - assert.Equal(t, false, metablock.GetMiniBlockProcessed(miniblockHash)) + t.Parallel() + + txHash := []byte("tx hash 1") + + dataPool := mock.NewPoolsHolderMock() + marshalizerMock := &mock.MarshalizerMock{} + hasherMock := &mock.HasherStub{} + + body := make(block.Body, 0) + tx := transaction.Transaction{Nonce: 1} + buffTx, _ := marshalizerMock.Marshal(tx) + + store := &mock.ChainStorerMock{ + GetAllCalled: func(unitType dataRetriever.UnitType, keys [][]byte) (map[string][]byte, error) { + m := make(map[string][]byte, 0) + m[string(txHash)] = buffTx + return m, nil + }, + GetStorerCalled: func(unitType dataRetriever.UnitType) storage.Storer { + return &mock.StorerStub{ + RemoveCalled: func(key []byte) error { + return nil + }, + } + }, + } + + factory, _ := shard.NewPreProcessorsContainerFactory( + mock.NewMultiShardsCoordinatorMock(3), + store, + marshalizerMock, + hasherMock, + dataPool, + &mock.AddressConverterMock{}, + initAccountsMock(), + &mock.RequestHandlerMock{}, + &mock.TxProcessorMock{}, + &mock.SCProcessorMock{}, + &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + ) + container, _ := factory.Create() + + tc, err := coordinator.NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(3), + initAccountsMock(), + dataPool, + &mock.RequestHandlerMock{}, + container, + &mock.InterimProcessorContainerMock{}, + ) + + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + dataPool, + store, + hasherMock, + marshalizerMock, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + tc, + &mock.Uint64ByteSliceConverterMock{}, + ) + + txHashes := make([][]byte, 0) + txHashes = append(txHashes, txHash) + miniblock := block.MiniBlock{ + 
ReceiverShardID: 0, + SenderShardID: 1, + TxHashes: txHashes, + } + body = append(body, &miniblock) + + miniblockHash := []byte("mini block hash 1") + hasherMock.ComputeCalled = func(s string) []byte { + return miniblockHash + } + + metablockHash := []byte("meta block hash 1") + metablockHeader := createDummyMetaBlock(0, 1, miniblockHash) + metablockHeader.SetMiniBlockProcessed(metablockHash, true) + dataPool.MetaBlocks().Put( + metablockHash, + metablockHeader, + ) + + err = sp.RestoreBlockIntoPools(&block.Header{}, body) + + miniblockFromPool, _ := dataPool.MiniBlocks().Get(miniblockHash) + txFromPool, _ := dataPool.Transactions().SearchFirstData(txHash) + metablockFromPool, _ := dataPool.MetaBlocks().Get(metablockHash) + metablock := metablockFromPool.(*block.MetaBlock) + assert.Nil(t, err) + assert.Equal(t, &miniblock, miniblockFromPool) + assert.Equal(t, &tx, txFromPool) + assert.Equal(t, false, metablock.GetMiniBlockProcessed(miniblockHash)) } func TestShardProcessor_DecodeBlockBody(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - marshalizerMock := &mock.MarshalizerMock{} - sp, err := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - marshalizerMock, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - body := make(block.Body, 0) - body = append(body, &block.MiniBlock{ReceiverShardID: 69}) - message, err := marshalizerMock.Marshal(body) - assert.Nil(t, err) - - dcdBlk := sp.DecodeBlockBody(nil) - assert.Nil(t, dcdBlk) - - dcdBlk = sp.DecodeBlockBody(message) - assert.Equal(t, body, dcdBlk) - assert.Equal(t, uint32(69), body[0].ReceiverShardID) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + marshalizerMock := &mock.MarshalizerMock{} + sp, err := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + marshalizerMock, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + body := make(block.Body, 0) + body = append(body, &block.MiniBlock{ReceiverShardID: 69}) + message, err := marshalizerMock.Marshal(body) + assert.Nil(t, err) + + dcdBlk := sp.DecodeBlockBody(nil) + assert.Nil(t, dcdBlk) + + dcdBlk = sp.DecodeBlockBody(message) + assert.Equal(t, body, dcdBlk) + assert.Equal(t, uint32(69), body[0].ReceiverShardID) } func TestShardProcessor_DecodeBlockHeader(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - marshalizerMock := &mock.MarshalizerMock{} - sp, err := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - marshalizerMock, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - 
&mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - hdr := &block.Header{} - hdr.Nonce = 1 - hdr.TimeStamp = uint64(0) - hdr.Signature = []byte("A") - message, err := marshalizerMock.Marshal(hdr) - assert.Nil(t, err) - - message, err = marshalizerMock.Marshal(hdr) - assert.Nil(t, err) - - dcdHdr := sp.DecodeBlockHeader(nil) - assert.Nil(t, dcdHdr) - - dcdHdr = sp.DecodeBlockHeader(message) - assert.Equal(t, hdr, dcdHdr) - assert.Equal(t, []byte("A"), dcdHdr.GetSignature()) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + marshalizerMock := &mock.MarshalizerMock{} + sp, err := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + marshalizerMock, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + hdr := &block.Header{} + hdr.Nonce = 1 + hdr.TimeStamp = uint64(0) + hdr.Signature = []byte("A") + message, err := marshalizerMock.Marshal(hdr) + assert.Nil(t, err) + + message, err = marshalizerMock.Marshal(hdr) + assert.Nil(t, err) + + dcdHdr := sp.DecodeBlockHeader(nil) + assert.Nil(t, dcdHdr) + + dcdHdr = sp.DecodeBlockHeader(message) + assert.Equal(t, hdr, dcdHdr) + assert.Equal(t, []byte("A"), dcdHdr.GetSignature()) } func TestShardProcessor_IsHdrConstructionValid(t *testing.T) { - t.Parallel() - - hasher := mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} - dataPool := initDataPool([]byte("tx_hash1")) - - shardNr := uint32(5) - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - dataPool, - &mock.ChainStorerMock{}, - hasher, - marshalizer, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(shardNr), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(shardNr)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - prevRandSeed := []byte("prevrand") - currRandSeed := []byte("currrand") - notarizedHdrs := sp.NotarizedHdrs() - lastHdr := &block.MetaBlock{Round: 9, - Nonce: 44, - RandSeed: prevRandSeed} - notarizedHdrs[sharding.MetachainShardId] = append(notarizedHdrs[sharding.MetachainShardId], lastHdr) - - //put the existing headers inside datapool - - //header shard 0 - prevHash, _ := sp.ComputeHeaderHash(sp.LastNotarizedHdrForShard(sharding.MetachainShardId).(*block.MetaBlock)) - prevHdr := &block.MetaBlock{ - Round: 10, - Nonce: 45, - PrevRandSeed: prevRandSeed, - RandSeed: currRandSeed, - PrevHash: prevHash, - RootHash: []byte("prevRootHash")} - - prevHash, _ = sp.ComputeHeaderHash(prevHdr) - currHdr := &block.MetaBlock{ - Round: 11, - Nonce: 46, - PrevRandSeed: currRandSeed, - RandSeed: []byte("nextrand"), - PrevHash: prevHash, - RootHash: []byte("currRootHash")} - - err := sp.IsHdrConstructionValid(nil, prevHdr) - assert.Equal(t, err, process.ErrNilBlockHeader) - - err = sp.IsHdrConstructionValid(currHdr, nil) - assert.Equal(t, err, process.ErrNilBlockHeader) - - currHdr.Nonce = 0 - err = sp.IsHdrConstructionValid(currHdr, prevHdr) - assert.Equal(t, err, process.ErrWrongNonceInBlock) - - currHdr.Nonce = 0 - prevHdr.Nonce = 0 - err = 
sp.IsHdrConstructionValid(currHdr, prevHdr) - assert.Equal(t, err, process.ErrRootStateMissmatch) - - currHdr.Nonce = 0 - prevHdr.Nonce = 0 - prevHdr.RootHash = nil - err = sp.IsHdrConstructionValid(currHdr, prevHdr) - assert.Nil(t, err) - - currHdr.Nonce = 46 - prevHdr.Nonce = 45 - prevHdr.Round = currHdr.Round + 1 - err = sp.IsHdrConstructionValid(currHdr, prevHdr) - assert.Equal(t, err, process.ErrLowerRoundInOtherChainBlock) - - prevHdr.Round = currHdr.Round - 1 - currHdr.Nonce = prevHdr.Nonce + 2 - err = sp.IsHdrConstructionValid(currHdr, prevHdr) - assert.Equal(t, err, process.ErrWrongNonceInBlock) - - currHdr.Nonce = prevHdr.Nonce + 1 - prevHdr.RandSeed = []byte("randomwrong") - err = sp.IsHdrConstructionValid(currHdr, prevHdr) - assert.Equal(t, err, process.ErrRandSeedMismatch) - - prevHdr.RandSeed = currRandSeed - currHdr.PrevHash = []byte("wronghash") - err = sp.IsHdrConstructionValid(currHdr, prevHdr) - assert.Equal(t, err, process.ErrHashDoesNotMatchInOtherChainBlock) - - currHdr.PrevHash = prevHash - prevHdr.RootHash = []byte("prevRootHash") - err = sp.IsHdrConstructionValid(currHdr, prevHdr) - assert.Nil(t, err) + t.Parallel() + + hasher := mock.HasherMock{} + marshalizer := &mock.MarshalizerMock{} + dataPool := initDataPool([]byte("tx_hash1")) + + shardNr := uint32(5) + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + dataPool, + &mock.ChainStorerMock{}, + hasher, + marshalizer, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(shardNr), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(shardNr)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + prevRandSeed := []byte("prevrand") + currRandSeed := []byte("currrand") + notarizedHdrs := sp.NotarizedHdrs() + lastHdr := &block.MetaBlock{Round: 9, + Nonce: 44, + RandSeed: prevRandSeed} + notarizedHdrs[sharding.MetachainShardId] = append(notarizedHdrs[sharding.MetachainShardId], lastHdr) + + //put the existing headers inside datapool + + //header shard 0 + prevHash, _ := sp.ComputeHeaderHash(sp.LastNotarizedHdrForShard(sharding.MetachainShardId).(*block.MetaBlock)) + prevHdr := &block.MetaBlock{ + Round: 10, + Nonce: 45, + PrevRandSeed: prevRandSeed, + RandSeed: currRandSeed, + PrevHash: prevHash, + RootHash: []byte("prevRootHash")} + + prevHash, _ = sp.ComputeHeaderHash(prevHdr) + currHdr := &block.MetaBlock{ + Round: 11, + Nonce: 46, + PrevRandSeed: currRandSeed, + RandSeed: []byte("nextrand"), + PrevHash: prevHash, + RootHash: []byte("currRootHash")} + + err := sp.IsHdrConstructionValid(nil, prevHdr) + assert.Equal(t, err, process.ErrNilBlockHeader) + + err = sp.IsHdrConstructionValid(currHdr, nil) + assert.Equal(t, err, process.ErrNilBlockHeader) + + currHdr.Nonce = 0 + err = sp.IsHdrConstructionValid(currHdr, prevHdr) + assert.Equal(t, err, process.ErrWrongNonceInBlock) + + currHdr.Nonce = 0 + prevHdr.Nonce = 0 + err = sp.IsHdrConstructionValid(currHdr, prevHdr) + assert.Equal(t, err, process.ErrRootStateMissmatch) + + currHdr.Nonce = 0 + prevHdr.Nonce = 0 + prevHdr.RootHash = nil + err = sp.IsHdrConstructionValid(currHdr, prevHdr) + assert.Nil(t, err) + + currHdr.Nonce = 46 + prevHdr.Nonce = 45 + prevHdr.Round = currHdr.Round + 1 + err = sp.IsHdrConstructionValid(currHdr, prevHdr) + assert.Equal(t, err, process.ErrLowerRoundInOtherChainBlock) + + prevHdr.Round = currHdr.Round - 1 + currHdr.Nonce = 
prevHdr.Nonce + 2 + err = sp.IsHdrConstructionValid(currHdr, prevHdr) + assert.Equal(t, err, process.ErrWrongNonceInBlock) + + currHdr.Nonce = prevHdr.Nonce + 1 + prevHdr.RandSeed = []byte("randomwrong") + err = sp.IsHdrConstructionValid(currHdr, prevHdr) + assert.Equal(t, err, process.ErrRandSeedMismatch) + + prevHdr.RandSeed = currRandSeed + currHdr.PrevHash = []byte("wronghash") + err = sp.IsHdrConstructionValid(currHdr, prevHdr) + assert.Equal(t, err, process.ErrHashDoesNotMatchInOtherChainBlock) + + currHdr.PrevHash = prevHash + prevHdr.RootHash = []byte("prevRootHash") + err = sp.IsHdrConstructionValid(currHdr, prevHdr) + assert.Nil(t, err) } func TestShardProcessor_RemoveAndSaveLastNotarizedMetaHdrNoDstMB(t *testing.T) { - t.Parallel() - - hasher := mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} - dataPool := mock.NewPoolsHolderMock() - forkDetector := &mock.ForkDetectorMock{} - highNonce := uint64(500) - forkDetector.GetHighestFinalBlockNonceCalled = func() uint64 { - return highNonce - } - - putCalledNr := 0 - store := &mock.ChainStorerMock{ - PutCalled: func(unitType dataRetriever.UnitType, key []byte, value []byte) error { - putCalledNr++ - return nil - }, - } - - shardNr := uint32(5) - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - dataPool, - store, - hasher, - marshalizer, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(shardNr), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - forkDetector, - &mock.BlocksTrackerMock{ - RemoveNotarisedBlocksCalled: func(headerHandler data.HeaderHandler) error { - return nil - }, - UnnotarisedBlocksCalled: func() []data.HeaderHandler { - return make([]data.HeaderHandler, 0) - }, - }, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(shardNr)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - prevRandSeed := []byte("prevrand") - currRandSeed := []byte("currrand") - notarizedHdrs := sp.NotarizedHdrs() - firstNonce := uint64(44) - - lastHdr := &block.MetaBlock{Round: 9, - Nonce: firstNonce, - RandSeed: prevRandSeed} - notarizedHdrs[sharding.MetachainShardId] = append(notarizedHdrs[sharding.MetachainShardId], lastHdr) - - //header shard 0 - prevHash, _ := sp.ComputeHeaderHash(sp.LastNotarizedHdrForShard(sharding.MetachainShardId).(*block.MetaBlock)) - prevHdr := &block.MetaBlock{ - Round: 10, - Nonce: 45, - PrevRandSeed: prevRandSeed, - RandSeed: currRandSeed, - PrevHash: prevHash, - RootHash: []byte("prevRootHash")} - - prevHash, _ = sp.ComputeHeaderHash(prevHdr) - currHdr := &block.MetaBlock{ - Round: 11, - Nonce: 46, - PrevRandSeed: currRandSeed, - RandSeed: []byte("nextrand"), - PrevHash: prevHash, - RootHash: []byte("currRootHash")} - currHash, _ := sp.ComputeHeaderHash(currHdr) - prevHash, _ = sp.ComputeHeaderHash(prevHdr) - - shardHdr := &block.Header{Round: 15} - shardBlock := block.Body{} - - blockHeader := &block.Header{} - - // test header not in pool and defer called - processedMetaHdrs, err := sp.GetProcessedMetaBlocksFromPool(shardBlock, blockHeader) - assert.Nil(t, err) - - err = sp.SaveLastNotarizedHeader(sharding.MetachainShardId, processedMetaHdrs) - assert.Nil(t, err) - - err = sp.RemoveProcessedMetablocksFromPool(processedMetaHdrs) - assert.Nil(t, err) - assert.Equal(t, 0, putCalledNr) - - notarizedHdrs = sp.NotarizedHdrs() - assert.Equal(t, firstNonce, sp.LastNotarizedHdrForShard(sharding.MetachainShardId).GetNonce()) - assert.Equal(t, 0, len(processedMetaHdrs)) - - // wrong header type 
in pool and defer called - dataPool.MetaBlocks().Put(currHash, shardHdr) - - hashes := make([][]byte, 0) - hashes = append(hashes, currHash) - blockHeader = &block.Header{MetaBlockHashes: hashes} - - processedMetaHdrs, err = sp.GetProcessedMetaBlocksFromPool(shardBlock, blockHeader) - assert.Equal(t, nil, err) - - err = sp.SaveLastNotarizedHeader(sharding.MetachainShardId, processedMetaHdrs) - assert.Nil(t, err) - - err = sp.RemoveProcessedMetablocksFromPool(processedMetaHdrs) - assert.Nil(t, err) - assert.Equal(t, 0, putCalledNr) - - notarizedHdrs = sp.NotarizedHdrs() - assert.Equal(t, firstNonce, sp.LastNotarizedHdrForShard(sharding.MetachainShardId).GetNonce()) - - // put headers in pool - dataPool.MetaBlocks().Put(currHash, currHdr) - dataPool.MetaBlocks().Put(prevHash, prevHdr) - - hashes = make([][]byte, 0) - hashes = append(hashes, currHash) - hashes = append(hashes, prevHash) - blockHeader = &block.Header{MetaBlockHashes: hashes} - - processedMetaHdrs, err = sp.GetProcessedMetaBlocksFromPool(shardBlock, blockHeader) - assert.Nil(t, err) - - err = sp.SaveLastNotarizedHeader(sharding.MetachainShardId, processedMetaHdrs) - assert.Nil(t, err) - - err = sp.RemoveProcessedMetablocksFromPool(processedMetaHdrs) - assert.Nil(t, err) - assert.Equal(t, 4, putCalledNr) - - assert.Equal(t, currHdr, sp.LastNotarizedHdrForShard(sharding.MetachainShardId)) + t.Parallel() + + hasher := mock.HasherMock{} + marshalizer := &mock.MarshalizerMock{} + dataPool := mock.NewPoolsHolderMock() + forkDetector := &mock.ForkDetectorMock{} + highNonce := uint64(500) + forkDetector.GetHighestFinalBlockNonceCalled = func() uint64 { + return highNonce + } + + putCalledNr := 0 + store := &mock.ChainStorerMock{ + PutCalled: func(unitType dataRetriever.UnitType, key []byte, value []byte) error { + putCalledNr++ + return nil + }, + } + + shardNr := uint32(5) + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + dataPool, + store, + hasher, + marshalizer, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(shardNr), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + forkDetector, + &mock.BlocksTrackerMock{ + RemoveNotarisedBlocksCalled: func(headerHandler data.HeaderHandler) error { + return nil + }, + UnnotarisedBlocksCalled: func() []data.HeaderHandler { + return make([]data.HeaderHandler, 0) + }, + }, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(shardNr)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + prevRandSeed := []byte("prevrand") + currRandSeed := []byte("currrand") + notarizedHdrs := sp.NotarizedHdrs() + firstNonce := uint64(44) + + lastHdr := &block.MetaBlock{Round: 9, + Nonce: firstNonce, + RandSeed: prevRandSeed} + notarizedHdrs[sharding.MetachainShardId] = append(notarizedHdrs[sharding.MetachainShardId], lastHdr) + + //header shard 0 + prevHash, _ := sp.ComputeHeaderHash(sp.LastNotarizedHdrForShard(sharding.MetachainShardId).(*block.MetaBlock)) + prevHdr := &block.MetaBlock{ + Round: 10, + Nonce: 45, + PrevRandSeed: prevRandSeed, + RandSeed: currRandSeed, + PrevHash: prevHash, + RootHash: []byte("prevRootHash")} + + prevHash, _ = sp.ComputeHeaderHash(prevHdr) + currHdr := &block.MetaBlock{ + Round: 11, + Nonce: 46, + PrevRandSeed: currRandSeed, + RandSeed: []byte("nextrand"), + PrevHash: prevHash, + RootHash: []byte("currRootHash")} + currHash, _ := sp.ComputeHeaderHash(currHdr) + prevHash, _ = sp.ComputeHeaderHash(prevHdr) + + shardHdr := &block.Header{Round: 15} + shardBlock := 
block.Body{} + + blockHeader := &block.Header{} + + // test header not in pool and defer called + processedMetaHdrs, err := sp.GetProcessedMetaBlocksFromPool(shardBlock, blockHeader) + assert.Nil(t, err) + + err = sp.SaveLastNotarizedHeader(sharding.MetachainShardId, processedMetaHdrs) + assert.Nil(t, err) + + err = sp.RemoveProcessedMetablocksFromPool(processedMetaHdrs) + assert.Nil(t, err) + assert.Equal(t, 0, putCalledNr) + + notarizedHdrs = sp.NotarizedHdrs() + assert.Equal(t, firstNonce, sp.LastNotarizedHdrForShard(sharding.MetachainShardId).GetNonce()) + assert.Equal(t, 0, len(processedMetaHdrs)) + + // wrong header type in pool and defer called + dataPool.MetaBlocks().Put(currHash, shardHdr) + + hashes := make([][]byte, 0) + hashes = append(hashes, currHash) + blockHeader = &block.Header{MetaBlockHashes: hashes} + + processedMetaHdrs, err = sp.GetProcessedMetaBlocksFromPool(shardBlock, blockHeader) + assert.Equal(t, nil, err) + + err = sp.SaveLastNotarizedHeader(sharding.MetachainShardId, processedMetaHdrs) + assert.Nil(t, err) + + err = sp.RemoveProcessedMetablocksFromPool(processedMetaHdrs) + assert.Nil(t, err) + assert.Equal(t, 0, putCalledNr) + + notarizedHdrs = sp.NotarizedHdrs() + assert.Equal(t, firstNonce, sp.LastNotarizedHdrForShard(sharding.MetachainShardId).GetNonce()) + + // put headers in pool + dataPool.MetaBlocks().Put(currHash, currHdr) + dataPool.MetaBlocks().Put(prevHash, prevHdr) + + hashes = make([][]byte, 0) + hashes = append(hashes, currHash) + hashes = append(hashes, prevHash) + blockHeader = &block.Header{MetaBlockHashes: hashes} + + processedMetaHdrs, err = sp.GetProcessedMetaBlocksFromPool(shardBlock, blockHeader) + assert.Nil(t, err) + + err = sp.SaveLastNotarizedHeader(sharding.MetachainShardId, processedMetaHdrs) + assert.Nil(t, err) + + err = sp.RemoveProcessedMetablocksFromPool(processedMetaHdrs) + assert.Nil(t, err) + assert.Equal(t, 4, putCalledNr) + + assert.Equal(t, currHdr, sp.LastNotarizedHdrForShard(sharding.MetachainShardId)) } func createShardData(hasher hashing.Hasher, marshalizer marshal.Marshalizer, miniBlocks []block.MiniBlock) []block.ShardData { - shardData := make([]block.ShardData, len(miniBlocks)) - for i := 0; i < len(miniBlocks); i++ { - marshaled, _ := marshalizer.Marshal(miniBlocks[i]) - hashed := hasher.Compute(string(marshaled)) - - shardMBHeader := block.ShardMiniBlockHeader{ - ReceiverShardId: miniBlocks[i].ReceiverShardID, - SenderShardId: miniBlocks[i].SenderShardID, - TxCount: uint32(len(miniBlocks[i].TxHashes)), - Hash: hashed, - } - shardMBHeaders := make([]block.ShardMiniBlockHeader, 0) - shardMBHeaders = append(shardMBHeaders, shardMBHeader) - - shardData[0].ShardId = miniBlocks[i].SenderShardID - shardData[0].TxCount = 10 - shardData[0].HeaderHash = []byte("headerHash") - shardData[0].ShardMiniBlockHeaders = shardMBHeaders - } - - return shardData + shardData := make([]block.ShardData, len(miniBlocks)) + for i := 0; i < len(miniBlocks); i++ { + marshaled, _ := marshalizer.Marshal(miniBlocks[i]) + hashed := hasher.Compute(string(marshaled)) + + shardMBHeader := block.ShardMiniBlockHeader{ + ReceiverShardId: miniBlocks[i].ReceiverShardID, + SenderShardId: miniBlocks[i].SenderShardID, + TxCount: uint32(len(miniBlocks[i].TxHashes)), + Hash: hashed, + } + shardMBHeaders := make([]block.ShardMiniBlockHeader, 0) + shardMBHeaders = append(shardMBHeaders, shardMBHeader) + + shardData[0].ShardId = miniBlocks[i].SenderShardID + shardData[0].TxCount = 10 + shardData[0].HeaderHash = []byte("headerHash") + 
shardData[0].ShardMiniBlockHeaders = shardMBHeaders + } + + return shardData } func TestShardProcessor_RemoveAndSaveLastNotarizedMetaHdrNotAllMBFinished(t *testing.T) { - t.Parallel() - - hasher := mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} - dataPool := mock.NewPoolsHolderMock() - forkDetector := &mock.ForkDetectorMock{} - highNonce := uint64(500) - forkDetector.GetHighestFinalBlockNonceCalled = func() uint64 { - return highNonce - } - - putCalledNr := 0 - store := &mock.ChainStorerMock{ - PutCalled: func(unitType dataRetriever.UnitType, key []byte, value []byte) error { - putCalledNr++ - return nil - }, - } - - shardNr := uint32(5) - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - dataPool, - store, - hasher, - marshalizer, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(shardNr), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - forkDetector, - &mock.BlocksTrackerMock{ - RemoveNotarisedBlocksCalled: func(headerHandler data.HeaderHandler) error { - return nil - }, - UnnotarisedBlocksCalled: func() []data.HeaderHandler { - return make([]data.HeaderHandler, 0) - }, - }, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(shardNr)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - prevRandSeed := []byte("prevrand") - currRandSeed := []byte("currrand") - notarizedHdrs := sp.NotarizedHdrs() - firstNonce := uint64(44) - - lastHdr := &block.MetaBlock{Round: 9, - Nonce: firstNonce, - RandSeed: prevRandSeed} - notarizedHdrs[sharding.MetachainShardId] = append(notarizedHdrs[sharding.MetachainShardId], lastHdr) - - shardBlock := make(block.Body, 0) - txHash := []byte("txhash") - txHashes := make([][]byte, 0) - txHashes = append(txHashes, txHash) - miniblock1 := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 1, - TxHashes: txHashes, - } - miniblock2 := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 2, - TxHashes: txHashes, - } - miniblock3 := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 3, - TxHashes: txHashes, - } - miniblock4 := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 4, - TxHashes: txHashes, - } - shardBlock = append(shardBlock, &miniblock1, &miniblock2, &miniblock3) - - miniBlocks := make([]block.MiniBlock, 0) - miniBlocks = append(miniBlocks, miniblock1, miniblock2) - //header shard 0 - prevHash, _ := sp.ComputeHeaderHash(sp.LastNotarizedHdrForShard(sharding.MetachainShardId).(*block.MetaBlock)) - prevHdr := &block.MetaBlock{ - Round: 10, - Nonce: 45, - PrevRandSeed: prevRandSeed, - RandSeed: currRandSeed, - PrevHash: prevHash, - RootHash: []byte("prevRootHash"), - ShardInfo: createShardData(hasher, marshalizer, miniBlocks)} - - miniBlocks = make([]block.MiniBlock, 0) - miniBlocks = append(miniBlocks, miniblock3, miniblock4) - prevHash, _ = sp.ComputeHeaderHash(prevHdr) - currHdr := &block.MetaBlock{ - Round: 11, - Nonce: 46, - PrevRandSeed: currRandSeed, - RandSeed: []byte("nextrand"), - PrevHash: prevHash, - RootHash: []byte("currRootHash"), - ShardInfo: createShardData(hasher, marshalizer, miniBlocks)} - currHash, _ := sp.ComputeHeaderHash(currHdr) - prevHash, _ = sp.ComputeHeaderHash(prevHdr) - - // put headers in pool - dataPool.MetaBlocks().Put(currHash, currHdr) - dataPool.MetaBlocks().Put(prevHash, prevHdr) - - hashes := make([][]byte, 0) - hashes = append(hashes, currHash) - hashes = append(hashes, prevHash) - blockHeader := &block.Header{MetaBlockHashes: hashes} - - processedMetaHdrs, err := 
sp.GetProcessedMetaBlocksFromPool(shardBlock, blockHeader) - assert.Nil(t, err) - - err = sp.SaveLastNotarizedHeader(sharding.MetachainShardId, processedMetaHdrs) - assert.Nil(t, err) - - err = sp.RemoveProcessedMetablocksFromPool(processedMetaHdrs) - assert.Nil(t, err) - assert.Equal(t, 2, putCalledNr) - - assert.Equal(t, prevHdr, sp.LastNotarizedHdrForShard(sharding.MetachainShardId)) + t.Parallel() + + hasher := mock.HasherMock{} + marshalizer := &mock.MarshalizerMock{} + dataPool := mock.NewPoolsHolderMock() + forkDetector := &mock.ForkDetectorMock{} + highNonce := uint64(500) + forkDetector.GetHighestFinalBlockNonceCalled = func() uint64 { + return highNonce + } + + putCalledNr := 0 + store := &mock.ChainStorerMock{ + PutCalled: func(unitType dataRetriever.UnitType, key []byte, value []byte) error { + putCalledNr++ + return nil + }, + } + + shardNr := uint32(5) + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + dataPool, + store, + hasher, + marshalizer, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(shardNr), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + forkDetector, + &mock.BlocksTrackerMock{ + RemoveNotarisedBlocksCalled: func(headerHandler data.HeaderHandler) error { + return nil + }, + UnnotarisedBlocksCalled: func() []data.HeaderHandler { + return make([]data.HeaderHandler, 0) + }, + }, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(shardNr)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + prevRandSeed := []byte("prevrand") + currRandSeed := []byte("currrand") + notarizedHdrs := sp.NotarizedHdrs() + firstNonce := uint64(44) + + lastHdr := &block.MetaBlock{Round: 9, + Nonce: firstNonce, + RandSeed: prevRandSeed} + notarizedHdrs[sharding.MetachainShardId] = append(notarizedHdrs[sharding.MetachainShardId], lastHdr) + + shardBlock := make(block.Body, 0) + txHash := []byte("txhash") + txHashes := make([][]byte, 0) + txHashes = append(txHashes, txHash) + miniblock1 := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 1, + TxHashes: txHashes, + } + miniblock2 := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 2, + TxHashes: txHashes, + } + miniblock3 := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 3, + TxHashes: txHashes, + } + miniblock4 := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 4, + TxHashes: txHashes, + } + shardBlock = append(shardBlock, &miniblock1, &miniblock2, &miniblock3) + + miniBlocks := make([]block.MiniBlock, 0) + miniBlocks = append(miniBlocks, miniblock1, miniblock2) + //header shard 0 + prevHash, _ := sp.ComputeHeaderHash(sp.LastNotarizedHdrForShard(sharding.MetachainShardId).(*block.MetaBlock)) + prevHdr := &block.MetaBlock{ + Round: 10, + Nonce: 45, + PrevRandSeed: prevRandSeed, + RandSeed: currRandSeed, + PrevHash: prevHash, + RootHash: []byte("prevRootHash"), + ShardInfo: createShardData(hasher, marshalizer, miniBlocks)} + + miniBlocks = make([]block.MiniBlock, 0) + miniBlocks = append(miniBlocks, miniblock3, miniblock4) + prevHash, _ = sp.ComputeHeaderHash(prevHdr) + currHdr := &block.MetaBlock{ + Round: 11, + Nonce: 46, + PrevRandSeed: currRandSeed, + RandSeed: []byte("nextrand"), + PrevHash: prevHash, + RootHash: []byte("currRootHash"), + ShardInfo: createShardData(hasher, marshalizer, miniBlocks)} + currHash, _ := sp.ComputeHeaderHash(currHdr) + prevHash, _ = sp.ComputeHeaderHash(prevHdr) + + // put headers in pool + dataPool.MetaBlocks().Put(currHash, currHdr) + 
dataPool.MetaBlocks().Put(prevHash, prevHdr) + + hashes := make([][]byte, 0) + hashes = append(hashes, currHash) + hashes = append(hashes, prevHash) + blockHeader := &block.Header{MetaBlockHashes: hashes} + + processedMetaHdrs, err := sp.GetProcessedMetaBlocksFromPool(shardBlock, blockHeader) + assert.Nil(t, err) + + err = sp.SaveLastNotarizedHeader(sharding.MetachainShardId, processedMetaHdrs) + assert.Nil(t, err) + + err = sp.RemoveProcessedMetablocksFromPool(processedMetaHdrs) + assert.Nil(t, err) + assert.Equal(t, 2, putCalledNr) + + assert.Equal(t, prevHdr, sp.LastNotarizedHdrForShard(sharding.MetachainShardId)) } func TestShardProcessor_RemoveAndSaveLastNotarizedMetaHdrAllMBFinished(t *testing.T) { - t.Parallel() - - hasher := mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} - dataPool := mock.NewPoolsHolderMock() - forkDetector := &mock.ForkDetectorMock{} - highNonce := uint64(500) - forkDetector.GetHighestFinalBlockNonceCalled = func() uint64 { - return highNonce - } - putCalledNr := 0 - store := &mock.ChainStorerMock{ - PutCalled: func(unitType dataRetriever.UnitType, key []byte, value []byte) error { - putCalledNr++ - return nil - }, - } - - shardNr := uint32(5) - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - dataPool, - store, - hasher, - marshalizer, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(shardNr), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - forkDetector, - &mock.BlocksTrackerMock{ - RemoveNotarisedBlocksCalled: func(headerHandler data.HeaderHandler) error { - return nil - }, - UnnotarisedBlocksCalled: func() []data.HeaderHandler { - return make([]data.HeaderHandler, 0) - }, - }, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(shardNr)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - prevRandSeed := []byte("prevrand") - currRandSeed := []byte("currrand") - notarizedHdrs := sp.NotarizedHdrs() - firstNonce := uint64(44) - - lastHdr := &block.MetaBlock{Round: 9, - Nonce: firstNonce, - RandSeed: prevRandSeed} - notarizedHdrs[sharding.MetachainShardId] = append(notarizedHdrs[sharding.MetachainShardId], lastHdr) - - shardBlock := make(block.Body, 0) - txHash := []byte("txhash") - txHashes := make([][]byte, 0) - txHashes = append(txHashes, txHash) - miniblock1 := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 1, - TxHashes: txHashes, - } - miniblock2 := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 2, - TxHashes: txHashes, - } - miniblock3 := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 3, - TxHashes: txHashes, - } - miniblock4 := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 4, - TxHashes: txHashes, - } - shardBlock = append(shardBlock, &miniblock1, &miniblock2, &miniblock3, &miniblock4) - - miniBlocks := make([]block.MiniBlock, 0) - miniBlocks = append(miniBlocks, miniblock1, miniblock2) - //header shard 0 - prevHash, _ := sp.ComputeHeaderHash(sp.LastNotarizedHdrForShard(sharding.MetachainShardId).(*block.MetaBlock)) - prevHdr := &block.MetaBlock{ - Round: 10, - Nonce: 45, - PrevRandSeed: prevRandSeed, - RandSeed: currRandSeed, - PrevHash: prevHash, - RootHash: []byte("prevRootHash"), - ShardInfo: createShardData(hasher, marshalizer, miniBlocks)} - - miniBlocks = make([]block.MiniBlock, 0) - miniBlocks = append(miniBlocks, miniblock3, miniblock4) - prevHash, _ = sp.ComputeHeaderHash(prevHdr) - currHdr := &block.MetaBlock{ - Round: 11, - Nonce: 46, - PrevRandSeed: currRandSeed, - 
RandSeed: []byte("nextrand"), - PrevHash: prevHash, - RootHash: []byte("currRootHash"), - ShardInfo: createShardData(hasher, marshalizer, miniBlocks)} - currHash, _ := sp.ComputeHeaderHash(currHdr) - prevHash, _ = sp.ComputeHeaderHash(prevHdr) - - // put headers in pool - dataPool.MetaBlocks().Put(currHash, currHdr) - dataPool.MetaBlocks().Put(prevHash, prevHdr) - dataPool.MetaBlocks().Put([]byte("shouldNotRemove"), &block.MetaBlock{ - Round: 12, - PrevRandSeed: []byte("nextrand"), - PrevHash: currHash, - Nonce: 47}) - - hashes := make([][]byte, 0) - hashes = append(hashes, currHash) - hashes = append(hashes, prevHash) - blockHeader := &block.Header{MetaBlockHashes: hashes} - - processedMetaHdrs, err := sp.GetProcessedMetaBlocksFromPool(shardBlock, blockHeader) - assert.Nil(t, err) - assert.Equal(t, 2, len(processedMetaHdrs)) - - err = sp.SaveLastNotarizedHeader(sharding.MetachainShardId, processedMetaHdrs) - assert.Nil(t, err) - - err = sp.RemoveProcessedMetablocksFromPool(processedMetaHdrs) - assert.Nil(t, err) - assert.Equal(t, 4, putCalledNr) - - assert.Equal(t, currHdr, sp.LastNotarizedHdrForShard(sharding.MetachainShardId)) + t.Parallel() + + hasher := mock.HasherMock{} + marshalizer := &mock.MarshalizerMock{} + dataPool := mock.NewPoolsHolderMock() + forkDetector := &mock.ForkDetectorMock{} + highNonce := uint64(500) + forkDetector.GetHighestFinalBlockNonceCalled = func() uint64 { + return highNonce + } + putCalledNr := 0 + store := &mock.ChainStorerMock{ + PutCalled: func(unitType dataRetriever.UnitType, key []byte, value []byte) error { + putCalledNr++ + return nil + }, + } + + shardNr := uint32(5) + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + dataPool, + store, + hasher, + marshalizer, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(shardNr), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + forkDetector, + &mock.BlocksTrackerMock{ + RemoveNotarisedBlocksCalled: func(headerHandler data.HeaderHandler) error { + return nil + }, + UnnotarisedBlocksCalled: func() []data.HeaderHandler { + return make([]data.HeaderHandler, 0) + }, + }, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(shardNr)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + prevRandSeed := []byte("prevrand") + currRandSeed := []byte("currrand") + notarizedHdrs := sp.NotarizedHdrs() + firstNonce := uint64(44) + + lastHdr := &block.MetaBlock{Round: 9, + Nonce: firstNonce, + RandSeed: prevRandSeed} + notarizedHdrs[sharding.MetachainShardId] = append(notarizedHdrs[sharding.MetachainShardId], lastHdr) + + shardBlock := make(block.Body, 0) + txHash := []byte("txhash") + txHashes := make([][]byte, 0) + txHashes = append(txHashes, txHash) + miniblock1 := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 1, + TxHashes: txHashes, + } + miniblock2 := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 2, + TxHashes: txHashes, + } + miniblock3 := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 3, + TxHashes: txHashes, + } + miniblock4 := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 4, + TxHashes: txHashes, + } + shardBlock = append(shardBlock, &miniblock1, &miniblock2, &miniblock3, &miniblock4) + + miniBlocks := make([]block.MiniBlock, 0) + miniBlocks = append(miniBlocks, miniblock1, miniblock2) + //header shard 0 + prevHash, _ := sp.ComputeHeaderHash(sp.LastNotarizedHdrForShard(sharding.MetachainShardId).(*block.MetaBlock)) + prevHdr := &block.MetaBlock{ + Round: 10, 
+ Nonce: 45, + PrevRandSeed: prevRandSeed, + RandSeed: currRandSeed, + PrevHash: prevHash, + RootHash: []byte("prevRootHash"), + ShardInfo: createShardData(hasher, marshalizer, miniBlocks)} + + miniBlocks = make([]block.MiniBlock, 0) + miniBlocks = append(miniBlocks, miniblock3, miniblock4) + prevHash, _ = sp.ComputeHeaderHash(prevHdr) + currHdr := &block.MetaBlock{ + Round: 11, + Nonce: 46, + PrevRandSeed: currRandSeed, + RandSeed: []byte("nextrand"), + PrevHash: prevHash, + RootHash: []byte("currRootHash"), + ShardInfo: createShardData(hasher, marshalizer, miniBlocks)} + currHash, _ := sp.ComputeHeaderHash(currHdr) + prevHash, _ = sp.ComputeHeaderHash(prevHdr) + + // put headers in pool + dataPool.MetaBlocks().Put(currHash, currHdr) + dataPool.MetaBlocks().Put(prevHash, prevHdr) + dataPool.MetaBlocks().Put([]byte("shouldNotRemove"), &block.MetaBlock{ + Round: 12, + PrevRandSeed: []byte("nextrand"), + PrevHash: currHash, + Nonce: 47}) + + hashes := make([][]byte, 0) + hashes = append(hashes, currHash) + hashes = append(hashes, prevHash) + blockHeader := &block.Header{MetaBlockHashes: hashes} + + processedMetaHdrs, err := sp.GetProcessedMetaBlocksFromPool(shardBlock, blockHeader) + assert.Nil(t, err) + assert.Equal(t, 2, len(processedMetaHdrs)) + + err = sp.SaveLastNotarizedHeader(sharding.MetachainShardId, processedMetaHdrs) + assert.Nil(t, err) + + err = sp.RemoveProcessedMetablocksFromPool(processedMetaHdrs) + assert.Nil(t, err) + assert.Equal(t, 4, putCalledNr) + + assert.Equal(t, currHdr, sp.LastNotarizedHdrForShard(sharding.MetachainShardId)) } func createOneHeaderOneBody() (*block.Header, block.Body) { - txHash := []byte("tx_hash1") - rootHash := []byte("rootHash") - body := make(block.Body, 0) - txHashes := make([][]byte, 0) - txHashes = append(txHashes, txHash) - miniblock := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 1, - TxHashes: txHashes, - } - body = append(body, &miniblock) - - hasher := &mock.HasherStub{} - marshalizer := &mock.MarshalizerMock{} - - mbbytes, _ := marshalizer.Marshal(miniblock) - mbHash := hasher.Compute(string(mbbytes)) - mbHdr := block.MiniBlockHeader{ - ReceiverShardID: 0, - SenderShardID: 1, - TxCount: uint32(len(txHashes)), - Hash: mbHash} - mbHdrs := make([]block.MiniBlockHeader, 0) - mbHdrs = append(mbHdrs, mbHdr) - - hdr := &block.Header{ - Nonce: 1, - PrevHash: []byte(""), - Signature: []byte("signature"), - PubKeysBitmap: []byte("00110"), - ShardId: 0, - RootHash: rootHash, - MiniBlockHeaders: mbHdrs, - } - - return hdr, body + txHash := []byte("tx_hash1") + rootHash := []byte("rootHash") + body := make(block.Body, 0) + txHashes := make([][]byte, 0) + txHashes = append(txHashes, txHash) + miniblock := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 1, + TxHashes: txHashes, + } + body = append(body, &miniblock) + + hasher := &mock.HasherStub{} + marshalizer := &mock.MarshalizerMock{} + + mbbytes, _ := marshalizer.Marshal(miniblock) + mbHash := hasher.Compute(string(mbbytes)) + mbHdr := block.MiniBlockHeader{ + ReceiverShardID: 0, + SenderShardID: 1, + TxCount: uint32(len(txHashes)), + Hash: mbHash} + mbHdrs := make([]block.MiniBlockHeader, 0) + mbHdrs = append(mbHdrs, mbHdr) + + hdr := &block.Header{ + Nonce: 1, + PrevHash: []byte(""), + Signature: []byte("signature"), + PubKeysBitmap: []byte("00110"), + ShardId: 0, + RootHash: rootHash, + MiniBlockHeaders: mbHdrs, + } + + return hdr, body } func TestShardProcessor_CheckHeaderBodyCorrelationReceiverMissmatch(t *testing.T) { - t.Parallel() - - hdr, body := 
createOneHeaderOneBody() - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - initDataPool([]byte("tx_hash1")), - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{}, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - hdr.MiniBlockHeaders[0].ReceiverShardID = body[0].ReceiverShardID + 1 - err := sp.CheckHeaderBodyCorrelation(hdr, body) - assert.Equal(t, process.ErrHeaderBodyMismatch, err) + t.Parallel() + + hdr, body := createOneHeaderOneBody() + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + initDataPool([]byte("tx_hash1")), + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.AccountsStub{}, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + hdr.MiniBlockHeaders[0].ReceiverShardID = body[0].ReceiverShardID + 1 + err := sp.CheckHeaderBodyCorrelation(hdr, body) + assert.Equal(t, process.ErrHeaderBodyMismatch, err) } func TestShardProcessor_CheckHeaderBodyCorrelationSenderMissmatch(t *testing.T) { - t.Parallel() - - hdr, body := createOneHeaderOneBody() - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - initDataPool([]byte("tx_hash1")), - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{}, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - hdr.MiniBlockHeaders[0].SenderShardID = body[0].SenderShardID + 1 - err := sp.CheckHeaderBodyCorrelation(hdr, body) - assert.Equal(t, process.ErrHeaderBodyMismatch, err) + t.Parallel() + + hdr, body := createOneHeaderOneBody() + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + initDataPool([]byte("tx_hash1")), + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.AccountsStub{}, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + hdr.MiniBlockHeaders[0].SenderShardID = body[0].SenderShardID + 1 + err := sp.CheckHeaderBodyCorrelation(hdr, body) + assert.Equal(t, process.ErrHeaderBodyMismatch, err) } func TestShardProcessor_CheckHeaderBodyCorrelationTxCountMissmatch(t *testing.T) { - t.Parallel() - - hdr, body := createOneHeaderOneBody() - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - initDataPool([]byte("tx_hash1")), - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{}, - 
mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - hdr.MiniBlockHeaders[0].TxCount = uint32(len(body[0].TxHashes) + 1) - err := sp.CheckHeaderBodyCorrelation(hdr, body) - assert.Equal(t, process.ErrHeaderBodyMismatch, err) + t.Parallel() + + hdr, body := createOneHeaderOneBody() + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + initDataPool([]byte("tx_hash1")), + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.AccountsStub{}, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + hdr.MiniBlockHeaders[0].TxCount = uint32(len(body[0].TxHashes) + 1) + err := sp.CheckHeaderBodyCorrelation(hdr, body) + assert.Equal(t, process.ErrHeaderBodyMismatch, err) } func TestShardProcessor_CheckHeaderBodyCorrelationHashMissmatch(t *testing.T) { - t.Parallel() - - hdr, body := createOneHeaderOneBody() - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - initDataPool([]byte("tx_hash1")), - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{}, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - hdr.MiniBlockHeaders[0].Hash = []byte("wrongHash") - err := sp.CheckHeaderBodyCorrelation(hdr, body) - assert.Equal(t, process.ErrHeaderBodyMismatch, err) + t.Parallel() + + hdr, body := createOneHeaderOneBody() + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + initDataPool([]byte("tx_hash1")), + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.AccountsStub{}, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + hdr.MiniBlockHeaders[0].Hash = []byte("wrongHash") + err := sp.CheckHeaderBodyCorrelation(hdr, body) + assert.Equal(t, process.ErrHeaderBodyMismatch, err) } func TestShardProcessor_CheckHeaderBodyCorrelationShouldPass(t *testing.T) { - t.Parallel() - - hdr, body := createOneHeaderOneBody() - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - initDataPool([]byte("tx_hash1")), - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{}, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - 
&mock.Uint64ByteSliceConverterMock{}, - ) - - err := sp.CheckHeaderBodyCorrelation(hdr, body) - assert.Nil(t, err) + t.Parallel() + + hdr, body := createOneHeaderOneBody() + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + initDataPool([]byte("tx_hash1")), + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.AccountsStub{}, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + err := sp.CheckHeaderBodyCorrelation(hdr, body) + assert.Nil(t, err) } func TestShardProcessor_restoreMetaBlockIntoPoolShouldPass(t *testing.T) { - t.Parallel() - - marshalizer := &mock.MarshalizerMock{} - - poolFake := mock.NewPoolsHolderMock() - - metaBlock := block.MetaBlock{ - Nonce: 1, - ShardInfo: make([]block.ShardData, 0), - } - - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - poolFake, - &mock.ChainStorerMock{ - GetCalled: func(unitType dataRetriever.UnitType, key []byte) ([]byte, error) { - return marshalizer.Marshal(&metaBlock) - }, - GetStorerCalled: func(unitType dataRetriever.UnitType) storage.Storer { - return &mock.StorerStub{ - RemoveCalled: func(key []byte) error { - return nil - }, - } - }, - }, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{}, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - miniblockHashes := make(map[string]uint32, 0) - - meta := block.MetaBlock{ - Nonce: 1, - ShardInfo: make([]block.ShardData, 0), - } - hasher := &mock.HasherStub{} - - metaBytes, _ := marshalizer.Marshal(meta) - hasher.ComputeCalled = func(s string) []byte { - return []byte("cool") - } - metaHash := hasher.Compute(string(metaBytes)) - metablockHashes := make([][]byte, 0) - metablockHashes = append(metablockHashes, metaHash) - - metaBlockRestored, ok := poolFake.MetaBlocks().Get(metaHash) - - assert.Equal(t, nil, metaBlockRestored) - assert.False(t, ok) - - err := sp.RestoreMetaBlockIntoPool(miniblockHashes, metablockHashes) - - metaBlockRestored, _ = poolFake.MetaBlocks().Get(metaHash) - - assert.Equal(t, &metaBlock, metaBlockRestored) - assert.Nil(t, err) + t.Parallel() + + marshalizer := &mock.MarshalizerMock{} + + poolFake := mock.NewPoolsHolderMock() + + metaBlock := block.MetaBlock{ + Nonce: 1, + ShardInfo: make([]block.ShardData, 0), + } + + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + poolFake, + &mock.ChainStorerMock{ + GetCalled: func(unitType dataRetriever.UnitType, key []byte) ([]byte, error) { + return marshalizer.Marshal(&metaBlock) + }, + GetStorerCalled: func(unitType dataRetriever.UnitType) storage.Storer { + return &mock.StorerStub{ + RemoveCalled: func(key []byte) error { + return nil + }, + } + }, + }, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.AccountsStub{}, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + 
createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + miniblockHashes := make(map[string]uint32, 0) + + meta := block.MetaBlock{ + Nonce: 1, + ShardInfo: make([]block.ShardData, 0), + } + hasher := &mock.HasherStub{} + + metaBytes, _ := marshalizer.Marshal(meta) + hasher.ComputeCalled = func(s string) []byte { + return []byte("cool") + } + metaHash := hasher.Compute(string(metaBytes)) + metablockHashes := make([][]byte, 0) + metablockHashes = append(metablockHashes, metaHash) + + metaBlockRestored, ok := poolFake.MetaBlocks().Get(metaHash) + + assert.Equal(t, nil, metaBlockRestored) + assert.False(t, ok) + + err := sp.RestoreMetaBlockIntoPool(miniblockHashes, metablockHashes) + + metaBlockRestored, _ = poolFake.MetaBlocks().Get(metaHash) + + assert.Equal(t, &metaBlock, metaBlockRestored) + assert.Nil(t, err) } func TestShardPreprocessor_getAllMiniBlockDstMeFromMetaShouldPass(t *testing.T) { - t.Parallel() - - marshalizer := &mock.MarshalizerMock{} - - txHash := []byte("tx_hash1") - txHashes := make([][]byte, 0) - txHashes = append(txHashes, txHash) - miniblock := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 1, - TxHashes: txHashes, - } - hasher := &mock.HasherStub{} - - mbbytes, _ := marshalizer.Marshal(miniblock) - mbHash := hasher.Compute(string(mbbytes)) - - shardMiniBlock := block.ShardMiniBlockHeader{ - ReceiverShardId: 0, - SenderShardId: 2, - TxCount: uint32(len(txHashes)), - Hash: mbHash, - } - shardMiniblockHdrs := make([]block.ShardMiniBlockHeader, 0) - shardMiniblockHdrs = append(shardMiniblockHdrs, shardMiniBlock) - shardHeader := block.ShardData{ - ShardId: 1, - ShardMiniBlockHeaders: shardMiniblockHdrs, - } - shardHdrs := make([]block.ShardData, 0) - shardHdrs = append(shardHdrs, shardHeader) - - idp := initDataPool([]byte("tx_hash1")) - idp.MetaBlocksCalled = func() storage.Cacher { - return &mock.CacherStub{ - GetCalled: func(key []byte) (value interface{}, ok bool) { - if reflect.DeepEqual(key, []byte("tx1_hash")) { - return &transaction.Transaction{Nonce: 10}, true - } - return nil, false - }, - KeysCalled: func() [][]byte { - return nil - }, - LenCalled: func() int { - return 0 - }, - PeekCalled: func(key []byte) (value interface{}, ok bool) { - return &block.MetaBlock{ - Nonce: 1, - Round: 1, - ShardInfo: shardHdrs, - }, true - }, - PutCalled: func(key []byte, value interface{}) (evicted bool) { - return true - }, - RegisterHandlerCalled: func(i func(key []byte)) {}, - } - } - - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - idp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{}, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - meta := block.MetaBlock{ - Nonce: 0, - ShardInfo: make([]block.ShardData, 0), - } - - metaBytes, _ := marshalizer.Marshal(meta) - hasher.ComputeCalled = func(s string) []byte { - return []byte("cool") - } - metaHash := hasher.Compute(string(metaBytes)) - metablockHashes := make([][]byte, 0) - metablockHashes = append(metablockHashes, metaHash) - - orderedMetaBlocks, err := sp.GetAllMiniBlockDstMeFromMeta(1, metablockHashes) - - assert.Equal(t, 1, 
len(orderedMetaBlocks)) - assert.Equal(t, orderedMetaBlocks[""], metaHash) - assert.Nil(t, err) + t.Parallel() + + marshalizer := &mock.MarshalizerMock{} + + txHash := []byte("tx_hash1") + txHashes := make([][]byte, 0) + txHashes = append(txHashes, txHash) + miniblock := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 1, + TxHashes: txHashes, + } + hasher := &mock.HasherStub{} + + mbbytes, _ := marshalizer.Marshal(miniblock) + mbHash := hasher.Compute(string(mbbytes)) + + shardMiniBlock := block.ShardMiniBlockHeader{ + ReceiverShardId: 0, + SenderShardId: 2, + TxCount: uint32(len(txHashes)), + Hash: mbHash, + } + shardMiniblockHdrs := make([]block.ShardMiniBlockHeader, 0) + shardMiniblockHdrs = append(shardMiniblockHdrs, shardMiniBlock) + shardHeader := block.ShardData{ + ShardId: 1, + ShardMiniBlockHeaders: shardMiniblockHdrs, + } + shardHdrs := make([]block.ShardData, 0) + shardHdrs = append(shardHdrs, shardHeader) + + idp := initDataPool([]byte("tx_hash1")) + idp.MetaBlocksCalled = func() storage.Cacher { + return &mock.CacherStub{ + GetCalled: func(key []byte) (value interface{}, ok bool) { + if reflect.DeepEqual(key, []byte("tx1_hash")) { + return &transaction.Transaction{Nonce: 10}, true + } + return nil, false + }, + KeysCalled: func() [][]byte { + return nil + }, + LenCalled: func() int { + return 0 + }, + PeekCalled: func(key []byte) (value interface{}, ok bool) { + return &block.MetaBlock{ + Nonce: 1, + Round: 1, + ShardInfo: shardHdrs, + }, true + }, + PutCalled: func(key []byte, value interface{}) (evicted bool) { + return true + }, + RegisterHandlerCalled: func(i func(key []byte)) {}, + } + } + + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + idp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.AccountsStub{}, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + meta := block.MetaBlock{ + Nonce: 0, + ShardInfo: make([]block.ShardData, 0), + } + + metaBytes, _ := marshalizer.Marshal(meta) + hasher.ComputeCalled = func(s string) []byte { + return []byte("cool") + } + metaHash := hasher.Compute(string(metaBytes)) + metablockHashes := make([][]byte, 0) + metablockHashes = append(metablockHashes, metaHash) + + orderedMetaBlocks, err := sp.GetAllMiniBlockDstMeFromMeta(1, metablockHashes) + + assert.Equal(t, 1, len(orderedMetaBlocks)) + assert.Equal(t, orderedMetaBlocks[""], metaHash) + assert.Nil(t, err) } func TestShardProcessor_GetHighestHdrForOwnShardFromMetachainNothingToProcess(t *testing.T) { - t.Parallel() - - dataPool := initDataPool([]byte("tx_hash1")) - - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - dataPool, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{}, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - hdr, _, err := sp.GetHighestHdrForOwnShardFromMetachain(0) - - assert.Nil(t, err) - assert.NotNil(t, hdr) - assert.Equal(t, uint64(0), hdr.GetNonce()) 
+ t.Parallel() + + dataPool := initDataPool([]byte("tx_hash1")) + + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + dataPool, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.AccountsStub{}, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + hdr, _, err := sp.GetHighestHdrForOwnShardFromMetachain(0) + + assert.Nil(t, err) + assert.NotNil(t, hdr) + assert.Equal(t, uint64(0), hdr.GetNonce()) } func TestShardProcessor_GetHighestHdrForOwnShardFromMetachaiMetaHdrsWithoutOwnHdr(t *testing.T) { - t.Parallel() - - dataPool := integrationTests.CreateTestShardDataPool(nil, 3) - store := initStore() - hasher := &mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} - genesisBlocks := createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)) - - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - dataPool, - store, - hasher, - marshalizer, - &mock.AccountsStub{}, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - genesisBlocks, - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - shardInfo := make([]block.ShardData, 0) - shardInfo = append(shardInfo, block.ShardData{HeaderHash: []byte("hash"), ShardId: 1}) - _ = dataPool.Headers().Put([]byte("hash"), &block.Header{ShardId: 0, Nonce: 1}) - - prevMetaHdr := genesisBlocks[sharding.MetachainShardId] - prevHash, _ := core.CalculateHash(marshalizer, hasher, prevMetaHdr) - currMetaHdr := &block.MetaBlock{ - Nonce: 1, - Epoch: 0, - Round: 1, - PrevHash: prevHash, - PrevRandSeed: prevMetaHdr.GetRandSeed(), - RandSeed: prevMetaHdr.GetRandSeed(), - ShardInfo: shardInfo, - } - currHash, _ := core.CalculateHash(marshalizer, hasher, currMetaHdr) - _ = dataPool.MetaBlocks().Put(currHash, currMetaHdr) - - prevMetaHdr = currMetaHdr - prevHash, _ = core.CalculateHash(marshalizer, hasher, prevMetaHdr) - currMetaHdr = &block.MetaBlock{ - Nonce: 2, - Epoch: 0, - Round: 2, - PrevHash: prevHash, - PrevRandSeed: prevMetaHdr.GetRandSeed(), - RandSeed: prevMetaHdr.GetRandSeed(), - ShardInfo: shardInfo, - } - currHash, _ = core.CalculateHash(marshalizer, hasher, currMetaHdr) - _ = dataPool.MetaBlocks().Put(currHash, currMetaHdr) - - hdr, _, err := sp.GetHighestHdrForOwnShardFromMetachain(4) - - assert.Nil(t, err) - assert.NotNil(t, hdr) - assert.Equal(t, uint64(0), hdr.GetNonce()) + t.Parallel() + + dataPool := integrationTests.CreateTestShardDataPool(nil, 3) + store := initStore() + hasher := &mock.HasherMock{} + marshalizer := &mock.MarshalizerMock{} + genesisBlocks := createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)) + + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + dataPool, + store, + hasher, + marshalizer, + &mock.AccountsStub{}, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + genesisBlocks, + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + shardInfo := make([]block.ShardData, 0) + shardInfo = 
append(shardInfo, block.ShardData{HeaderHash: []byte("hash"), ShardId: 1}) + _ = dataPool.Headers().Put([]byte("hash"), &block.Header{ShardId: 0, Nonce: 1}) + + prevMetaHdr := genesisBlocks[sharding.MetachainShardId] + prevHash, _ := core.CalculateHash(marshalizer, hasher, prevMetaHdr) + currMetaHdr := &block.MetaBlock{ + Nonce: 1, + Epoch: 0, + Round: 1, + PrevHash: prevHash, + PrevRandSeed: prevMetaHdr.GetRandSeed(), + RandSeed: prevMetaHdr.GetRandSeed(), + ShardInfo: shardInfo, + } + currHash, _ := core.CalculateHash(marshalizer, hasher, currMetaHdr) + _ = dataPool.MetaBlocks().Put(currHash, currMetaHdr) + + prevMetaHdr = currMetaHdr + prevHash, _ = core.CalculateHash(marshalizer, hasher, prevMetaHdr) + currMetaHdr = &block.MetaBlock{ + Nonce: 2, + Epoch: 0, + Round: 2, + PrevHash: prevHash, + PrevRandSeed: prevMetaHdr.GetRandSeed(), + RandSeed: prevMetaHdr.GetRandSeed(), + ShardInfo: shardInfo, + } + currHash, _ = core.CalculateHash(marshalizer, hasher, currMetaHdr) + _ = dataPool.MetaBlocks().Put(currHash, currMetaHdr) + + hdr, _, err := sp.GetHighestHdrForOwnShardFromMetachain(4) + + assert.Nil(t, err) + assert.NotNil(t, hdr) + assert.Equal(t, uint64(0), hdr.GetNonce()) } func TestShardProcessor_GetHighestHdrForOwnShardFromMetachaiMetaHdrsWithOwnHdrButNotStored(t *testing.T) { - t.Parallel() - - dataPool := integrationTests.CreateTestShardDataPool(nil, 3) - store := initStore() - hasher := &mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} - genesisBlocks := createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)) - - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - dataPool, - store, - hasher, - marshalizer, - &mock.AccountsStub{}, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - genesisBlocks, - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - shardInfo := make([]block.ShardData, 0) - shardInfo = append(shardInfo, block.ShardData{HeaderHash: []byte("hash"), ShardId: 0}) - - prevMetaHdr := genesisBlocks[sharding.MetachainShardId] - prevHash, _ := core.CalculateHash(marshalizer, hasher, prevMetaHdr) - currMetaHdr := &block.MetaBlock{ - Nonce: 1, - Epoch: 0, - Round: 1, - PrevHash: prevHash, - PrevRandSeed: prevMetaHdr.GetRandSeed(), - RandSeed: prevMetaHdr.GetRandSeed(), - ShardInfo: shardInfo, - } - currHash, _ := core.CalculateHash(marshalizer, hasher, currMetaHdr) - _ = dataPool.MetaBlocks().Put(currHash, currMetaHdr) - - prevMetaHdr = currMetaHdr - prevHash, _ = core.CalculateHash(marshalizer, hasher, prevMetaHdr) - currMetaHdr = &block.MetaBlock{ - Nonce: 2, - Epoch: 0, - Round: 2, - PrevHash: prevHash, - PrevRandSeed: prevMetaHdr.GetRandSeed(), - RandSeed: prevMetaHdr.GetRandSeed(), - ShardInfo: shardInfo, - } - currHash, _ = core.CalculateHash(marshalizer, hasher, currMetaHdr) - _ = dataPool.MetaBlocks().Put(currHash, currMetaHdr) - - hdr, _, err := sp.GetHighestHdrForOwnShardFromMetachain(4) - - assert.Nil(t, err) - assert.NotNil(t, hdr) - assert.Equal(t, uint64(0), hdr.GetNonce()) + t.Parallel() + + dataPool := integrationTests.CreateTestShardDataPool(nil, 3) + store := initStore() + hasher := &mock.HasherMock{} + marshalizer := &mock.MarshalizerMock{} + genesisBlocks := createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)) + + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + dataPool, + store, + hasher, + marshalizer, + 
&mock.AccountsStub{}, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + genesisBlocks, + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + shardInfo := make([]block.ShardData, 0) + shardInfo = append(shardInfo, block.ShardData{HeaderHash: []byte("hash"), ShardId: 0}) + + prevMetaHdr := genesisBlocks[sharding.MetachainShardId] + prevHash, _ := core.CalculateHash(marshalizer, hasher, prevMetaHdr) + currMetaHdr := &block.MetaBlock{ + Nonce: 1, + Epoch: 0, + Round: 1, + PrevHash: prevHash, + PrevRandSeed: prevMetaHdr.GetRandSeed(), + RandSeed: prevMetaHdr.GetRandSeed(), + ShardInfo: shardInfo, + } + currHash, _ := core.CalculateHash(marshalizer, hasher, currMetaHdr) + _ = dataPool.MetaBlocks().Put(currHash, currMetaHdr) + + prevMetaHdr = currMetaHdr + prevHash, _ = core.CalculateHash(marshalizer, hasher, prevMetaHdr) + currMetaHdr = &block.MetaBlock{ + Nonce: 2, + Epoch: 0, + Round: 2, + PrevHash: prevHash, + PrevRandSeed: prevMetaHdr.GetRandSeed(), + RandSeed: prevMetaHdr.GetRandSeed(), + ShardInfo: shardInfo, + } + currHash, _ = core.CalculateHash(marshalizer, hasher, currMetaHdr) + _ = dataPool.MetaBlocks().Put(currHash, currMetaHdr) + + hdr, _, err := sp.GetHighestHdrForOwnShardFromMetachain(4) + + assert.Nil(t, err) + assert.NotNil(t, hdr) + assert.Equal(t, uint64(0), hdr.GetNonce()) } func TestShardProcessor_GetHighestHdrForOwnShardFromMetachaiMetaHdrsWithOwnHdrStored(t *testing.T) { - t.Parallel() - - dataPool := integrationTests.CreateTestShardDataPool(nil, 3) - store := initStore() - hasher := &mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} - genesisBlocks := createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)) - - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - dataPool, - store, - hasher, - marshalizer, - &mock.AccountsStub{}, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - genesisBlocks, - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - ownHdr := &block.Header{ - Nonce: 1, - Round: 1, - } - ownHash, _ := core.CalculateHash(marshalizer, hasher, ownHdr) - _ = dataPool.Headers().Put(ownHash, ownHdr) - - shardInfo := make([]block.ShardData, 0) - shardInfo = append(shardInfo, block.ShardData{HeaderHash: ownHash, ShardId: 0}) - - prevMetaHdr := genesisBlocks[sharding.MetachainShardId] - prevHash, _ := core.CalculateHash(marshalizer, hasher, prevMetaHdr) - currMetaHdr := &block.MetaBlock{ - Nonce: 1, - Epoch: 0, - Round: 1, - PrevHash: prevHash, - PrevRandSeed: prevMetaHdr.GetRandSeed(), - RandSeed: prevMetaHdr.GetRandSeed(), - ShardInfo: shardInfo, - } - currHash, _ := core.CalculateHash(marshalizer, hasher, currMetaHdr) - _ = dataPool.MetaBlocks().Put(currHash, currMetaHdr) - - ownHdr = &block.Header{ - Nonce: 2, - Round: 2, - } - ownHash, _ = core.CalculateHash(marshalizer, hasher, ownHdr) - mrsOwnHdr, _ := marshalizer.Marshal(ownHdr) - _ = store.Put(dataRetriever.BlockHeaderUnit, ownHash, mrsOwnHdr) - - shardInfo = make([]block.ShardData, 0) - shardInfo = append(shardInfo, block.ShardData{HeaderHash: ownHash, ShardId: 0}) - - prevMetaHdr = currMetaHdr - prevHash, _ = core.CalculateHash(marshalizer, hasher, prevMetaHdr) - currMetaHdr = &block.MetaBlock{ - Nonce: 2, - Epoch: 0, - 
Round: 2, - PrevHash: prevHash, - PrevRandSeed: prevMetaHdr.GetRandSeed(), - RandSeed: prevMetaHdr.GetRandSeed(), - ShardInfo: shardInfo, - } - currHash, _ = core.CalculateHash(marshalizer, hasher, currMetaHdr) - _ = dataPool.MetaBlocks().Put(currHash, currMetaHdr) - - prevMetaHdr = currMetaHdr - prevHash, _ = core.CalculateHash(marshalizer, hasher, prevMetaHdr) - currMetaHdr = &block.MetaBlock{ - Nonce: 3, - Epoch: 0, - Round: 3, - PrevHash: prevHash, - PrevRandSeed: prevMetaHdr.GetRandSeed(), - RandSeed: prevMetaHdr.GetRandSeed(), - } - currHash, _ = core.CalculateHash(marshalizer, hasher, currMetaHdr) - _ = dataPool.MetaBlocks().Put(currHash, currMetaHdr) - - hdr, _, err := sp.GetHighestHdrForOwnShardFromMetachain(4) - - assert.Nil(t, err) - assert.NotNil(t, hdr) - assert.Equal(t, ownHdr.GetNonce(), hdr.GetNonce()) + t.Parallel() + + dataPool := integrationTests.CreateTestShardDataPool(nil, 3) + store := initStore() + hasher := &mock.HasherMock{} + marshalizer := &mock.MarshalizerMock{} + genesisBlocks := createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)) + + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + dataPool, + store, + hasher, + marshalizer, + &mock.AccountsStub{}, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + genesisBlocks, + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + ownHdr := &block.Header{ + Nonce: 1, + Round: 1, + } + ownHash, _ := core.CalculateHash(marshalizer, hasher, ownHdr) + _ = dataPool.Headers().Put(ownHash, ownHdr) + + shardInfo := make([]block.ShardData, 0) + shardInfo = append(shardInfo, block.ShardData{HeaderHash: ownHash, ShardId: 0}) + + prevMetaHdr := genesisBlocks[sharding.MetachainShardId] + prevHash, _ := core.CalculateHash(marshalizer, hasher, prevMetaHdr) + currMetaHdr := &block.MetaBlock{ + Nonce: 1, + Epoch: 0, + Round: 1, + PrevHash: prevHash, + PrevRandSeed: prevMetaHdr.GetRandSeed(), + RandSeed: prevMetaHdr.GetRandSeed(), + ShardInfo: shardInfo, + } + currHash, _ := core.CalculateHash(marshalizer, hasher, currMetaHdr) + _ = dataPool.MetaBlocks().Put(currHash, currMetaHdr) + + ownHdr = &block.Header{ + Nonce: 2, + Round: 2, + } + ownHash, _ = core.CalculateHash(marshalizer, hasher, ownHdr) + mrsOwnHdr, _ := marshalizer.Marshal(ownHdr) + _ = store.Put(dataRetriever.BlockHeaderUnit, ownHash, mrsOwnHdr) + + shardInfo = make([]block.ShardData, 0) + shardInfo = append(shardInfo, block.ShardData{HeaderHash: ownHash, ShardId: 0}) + + prevMetaHdr = currMetaHdr + prevHash, _ = core.CalculateHash(marshalizer, hasher, prevMetaHdr) + currMetaHdr = &block.MetaBlock{ + Nonce: 2, + Epoch: 0, + Round: 2, + PrevHash: prevHash, + PrevRandSeed: prevMetaHdr.GetRandSeed(), + RandSeed: prevMetaHdr.GetRandSeed(), + ShardInfo: shardInfo, + } + currHash, _ = core.CalculateHash(marshalizer, hasher, currMetaHdr) + _ = dataPool.MetaBlocks().Put(currHash, currMetaHdr) + + prevMetaHdr = currMetaHdr + prevHash, _ = core.CalculateHash(marshalizer, hasher, prevMetaHdr) + currMetaHdr = &block.MetaBlock{ + Nonce: 3, + Epoch: 0, + Round: 3, + PrevHash: prevHash, + PrevRandSeed: prevMetaHdr.GetRandSeed(), + RandSeed: prevMetaHdr.GetRandSeed(), + } + currHash, _ = core.CalculateHash(marshalizer, hasher, currMetaHdr) + _ = dataPool.MetaBlocks().Put(currHash, currMetaHdr) + + hdr, _, err := sp.GetHighestHdrForOwnShardFromMetachain(4) + + assert.Nil(t, err) + 
assert.NotNil(t, hdr) + assert.Equal(t, ownHdr.GetNonce(), hdr.GetNonce()) } diff --git a/process/coordinator/process.go b/process/coordinator/process.go index d8d8c15b749..fb2c460da86 100644 --- a/process/coordinator/process.go +++ b/process/coordinator/process.go @@ -1,829 +1,829 @@ package coordinator import ( - "fmt" - "sort" - "sync" - "time" - - "github.com/ElrondNetwork/elrond-go/core/logger" - "github.com/ElrondNetwork/elrond-go/data" - "github.com/ElrondNetwork/elrond-go/data/block" - "github.com/ElrondNetwork/elrond-go/data/state" - "github.com/ElrondNetwork/elrond-go/dataRetriever" - "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/process/factory" - "github.com/ElrondNetwork/elrond-go/sharding" - "github.com/ElrondNetwork/elrond-go/storage" + "fmt" + "sort" + "sync" + "time" + + "github.com/ElrondNetwork/elrond-go/core/logger" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/factory" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/storage" ) type transactionCoordinator struct { - shardCoordinator sharding.Coordinator - accounts state.AccountsAdapter - miniBlockPool storage.Cacher + shardCoordinator sharding.Coordinator + accounts state.AccountsAdapter + miniBlockPool storage.Cacher - mutPreProcessor sync.RWMutex - txPreProcessors map[block.Type]process.PreProcessor - keysTxPreProcs []block.Type + mutPreProcessor sync.RWMutex + txPreProcessors map[block.Type]process.PreProcessor + keysTxPreProcs []block.Type - mutInterimProcessors sync.RWMutex - interimProcessors map[block.Type]process.IntermediateTransactionHandler - keysInterimProcs []block.Type + mutInterimProcessors sync.RWMutex + interimProcessors map[block.Type]process.IntermediateTransactionHandler + keysInterimProcs []block.Type - mutRequestedTxs sync.RWMutex - requestedTxs map[block.Type]int + mutRequestedTxs sync.RWMutex + requestedTxs map[block.Type]int - onRequestMiniBlock func(shardId uint32, mbHash []byte) + onRequestMiniBlock func(shardId uint32, mbHash []byte) } var log = logger.DefaultLogger() // NewTransactionCoordinator creates a transaction coordinator to run and coordinate preprocessors and processors func NewTransactionCoordinator( - shardCoordinator sharding.Coordinator, - accounts state.AccountsAdapter, - dataPool dataRetriever.PoolsHolder, - requestHandler process.RequestHandler, - preProcessors process.PreProcessorsContainer, - interProcessors process.IntermediateProcessorContainer, + shardCoordinator sharding.Coordinator, + accounts state.AccountsAdapter, + dataPool dataRetriever.PoolsHolder, + requestHandler process.RequestHandler, + preProcessors process.PreProcessorsContainer, + interProcessors process.IntermediateProcessorContainer, ) (*transactionCoordinator, error) { - if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { - return nil, process.ErrNilShardCoordinator - } - if accounts == nil || accounts.IsInterfaceNil() { - return nil, process.ErrNilAccountsAdapter - } - if dataPool == nil || dataPool.IsInterfaceNil() { - return nil, process.ErrNilDataPoolHolder - } - if requestHandler == nil || requestHandler.IsInterfaceNil() { - return nil, process.ErrNilRequestHandler - } - if interProcessors == nil || interProcessors.IsInterfaceNil() { - return nil, 
process.ErrNilIntermediateProcessorContainer - } - if preProcessors == nil || preProcessors.IsInterfaceNil() { - return nil, process.ErrNilPreProcessorsContainer - } - - tc := &transactionCoordinator{ - shardCoordinator: shardCoordinator, - accounts: accounts, - } - - tc.miniBlockPool = dataPool.MiniBlocks() - if tc.miniBlockPool == nil || tc.miniBlockPool.IsInterfaceNil() { - return nil, process.ErrNilMiniBlockPool - } - - tc.miniBlockPool.RegisterHandler(tc.receivedMiniBlock) - - tc.onRequestMiniBlock = requestHandler.RequestMiniBlock - tc.requestedTxs = make(map[block.Type]int) - tc.txPreProcessors = make(map[block.Type]process.PreProcessor) - tc.interimProcessors = make(map[block.Type]process.IntermediateTransactionHandler) - - tc.keysTxPreProcs = preProcessors.Keys() - sort.Slice(tc.keysTxPreProcs, func(i, j int) bool { - return tc.keysTxPreProcs[i] < tc.keysTxPreProcs[j] - }) - for _, value := range tc.keysTxPreProcs { - preProc, err := preProcessors.Get(value) - if err != nil { - return nil, err - } - tc.txPreProcessors[value] = preProc - } - - tc.keysInterimProcs = interProcessors.Keys() - sort.Slice(tc.keysInterimProcs, func(i, j int) bool { - return tc.keysInterimProcs[i] < tc.keysInterimProcs[j] - }) - for _, value := range tc.keysInterimProcs { - interProc, err := interProcessors.Get(value) - if err != nil { - return nil, err - } - tc.interimProcessors[value] = interProc - } - - return tc, nil + if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { + return nil, process.ErrNilShardCoordinator + } + if accounts == nil || accounts.IsInterfaceNil() { + return nil, process.ErrNilAccountsAdapter + } + if dataPool == nil || dataPool.IsInterfaceNil() { + return nil, process.ErrNilDataPoolHolder + } + if requestHandler == nil || requestHandler.IsInterfaceNil() { + return nil, process.ErrNilRequestHandler + } + if interProcessors == nil || interProcessors.IsInterfaceNil() { + return nil, process.ErrNilIntermediateProcessorContainer + } + if preProcessors == nil || preProcessors.IsInterfaceNil() { + return nil, process.ErrNilPreProcessorsContainer + } + + tc := &transactionCoordinator{ + shardCoordinator: shardCoordinator, + accounts: accounts, + } + + tc.miniBlockPool = dataPool.MiniBlocks() + if tc.miniBlockPool == nil || tc.miniBlockPool.IsInterfaceNil() { + return nil, process.ErrNilMiniBlockPool + } + + tc.miniBlockPool.RegisterHandler(tc.receivedMiniBlock) + + tc.onRequestMiniBlock = requestHandler.RequestMiniBlock + tc.requestedTxs = make(map[block.Type]int) + tc.txPreProcessors = make(map[block.Type]process.PreProcessor) + tc.interimProcessors = make(map[block.Type]process.IntermediateTransactionHandler) + + tc.keysTxPreProcs = preProcessors.Keys() + sort.Slice(tc.keysTxPreProcs, func(i, j int) bool { + return tc.keysTxPreProcs[i] < tc.keysTxPreProcs[j] + }) + for _, value := range tc.keysTxPreProcs { + preProc, err := preProcessors.Get(value) + if err != nil { + return nil, err + } + tc.txPreProcessors[value] = preProc + } + + tc.keysInterimProcs = interProcessors.Keys() + sort.Slice(tc.keysInterimProcs, func(i, j int) bool { + return tc.keysInterimProcs[i] < tc.keysInterimProcs[j] + }) + for _, value := range tc.keysInterimProcs { + interProc, err := interProcessors.Get(value) + if err != nil { + return nil, err + } + tc.interimProcessors[value] = interProc + } + + return tc, nil } // separateBodyByType creates a map of bodies according to type func (tc *transactionCoordinator) separateBodyByType(body block.Body) map[block.Type]block.Body { - separatedBodies := 
make(map[block.Type]block.Body) + separatedBodies := make(map[block.Type]block.Body) - for i := 0; i < len(body); i++ { - mb := body[i] + for i := 0; i < len(body); i++ { + mb := body[i] - if separatedBodies[mb.Type] == nil { - separatedBodies[mb.Type] = block.Body{} - } + if separatedBodies[mb.Type] == nil { + separatedBodies[mb.Type] = block.Body{} + } - separatedBodies[mb.Type] = append(separatedBodies[mb.Type], mb) - } + separatedBodies[mb.Type] = append(separatedBodies[mb.Type], mb) + } - return separatedBodies + return separatedBodies } // initRequestedTxs init the requested txs number func (tc *transactionCoordinator) initRequestedTxs() { - tc.mutRequestedTxs.Lock() - tc.requestedTxs = make(map[block.Type]int) - tc.mutRequestedTxs.Unlock() + tc.mutRequestedTxs.Lock() + tc.requestedTxs = make(map[block.Type]int) + tc.mutRequestedTxs.Unlock() } // RequestBlockTransactions verifies missing transaction and requests them func (tc *transactionCoordinator) RequestBlockTransactions(body block.Body) { - separatedBodies := tc.separateBodyByType(body) + separatedBodies := tc.separateBodyByType(body) - tc.initRequestedTxs() + tc.initRequestedTxs() - wg := sync.WaitGroup{} - wg.Add(len(separatedBodies)) + wg := sync.WaitGroup{} + wg.Add(len(separatedBodies)) - for key, value := range separatedBodies { - go func(blockType block.Type, blockBody block.Body) { - preproc := tc.getPreProcessor(blockType) - if preproc == nil { - wg.Done() - return - } - requestedTxs := preproc.RequestBlockTransactions(blockBody) + for key, value := range separatedBodies { + go func(blockType block.Type, blockBody block.Body) { + preproc := tc.getPreProcessor(blockType) + if preproc == nil { + wg.Done() + return + } + requestedTxs := preproc.RequestBlockTransactions(blockBody) - tc.mutRequestedTxs.Lock() - tc.requestedTxs[blockType] = requestedTxs - tc.mutRequestedTxs.Unlock() + tc.mutRequestedTxs.Lock() + tc.requestedTxs[blockType] = requestedTxs + tc.mutRequestedTxs.Unlock() - wg.Done() - }(key, value) - } + wg.Done() + }(key, value) + } - wg.Wait() + wg.Wait() } // IsDataPreparedForProcessing verifies if all the needed data is prepared func (tc *transactionCoordinator) IsDataPreparedForProcessing(haveTime func() time.Duration) error { - var errFound error - errMutex := sync.Mutex{} + var errFound error + errMutex := sync.Mutex{} - wg := sync.WaitGroup{} + wg := sync.WaitGroup{} - tc.mutRequestedTxs.RLock() - wg.Add(len(tc.requestedTxs)) + tc.mutRequestedTxs.RLock() + wg.Add(len(tc.requestedTxs)) - for key, value := range tc.requestedTxs { - go func(blockType block.Type, requestedTxs int) { - preproc := tc.getPreProcessor(blockType) - if preproc == nil { - wg.Done() + for key, value := range tc.requestedTxs { + go func(blockType block.Type, requestedTxs int) { + preproc := tc.getPreProcessor(blockType) + if preproc == nil { + wg.Done() - return - } + return + } - err := preproc.IsDataPrepared(requestedTxs, haveTime) - if err != nil { - log.Debug(err.Error()) + err := preproc.IsDataPrepared(requestedTxs, haveTime) + if err != nil { + log.Debug(err.Error()) - errMutex.Lock() - errFound = err - errMutex.Unlock() - } - wg.Done() - }(key, value) - } + errMutex.Lock() + errFound = err + errMutex.Unlock() + } + wg.Done() + }(key, value) + } - tc.mutRequestedTxs.RUnlock() - wg.Wait() + tc.mutRequestedTxs.RUnlock() + wg.Wait() - return errFound + return errFound } // SaveBlockDataToStorage saves the data from block body into storage units func (tc *transactionCoordinator) SaveBlockDataToStorage(body block.Body) error { - 
separatedBodies := tc.separateBodyByType(body) - - var errFound error - errMutex := sync.Mutex{} - - wg := sync.WaitGroup{} - // Length of body types + another go routine for the intermediate transactions - wg.Add(len(separatedBodies)) - - for key, value := range separatedBodies { - go func(blockType block.Type, blockBody block.Body) { - preproc := tc.getPreProcessor(blockType) - if preproc == nil { - wg.Done() - return - } - - err := preproc.SaveTxBlockToStorage(blockBody) - if err != nil { - log.Debug(err.Error()) - - errMutex.Lock() - errFound = err - errMutex.Unlock() - } - - wg.Done() - }(key, value) - } - - wg.Wait() - - intermediatePreprocSC := tc.getInterimProcessor(block.SmartContractResultBlock) - if intermediatePreprocSC == nil { - return errFound - } - - err := intermediatePreprocSC.SaveCurrentIntermediateTxToStorage() - if err != nil { - log.Debug(err.Error()) - - errMutex.Lock() - errFound = err - errMutex.Unlock() - } - - intermediatePreproc := tc.getInterimProcessor(block.RewardsBlock) - if intermediatePreproc == nil { - return errFound - } - - err = intermediatePreproc.SaveCurrentIntermediateTxToStorage() - if err != nil { - log.Debug(err.Error()) - - errMutex.Lock() - errFound = err - errMutex.Unlock() - } - - return errFound + separatedBodies := tc.separateBodyByType(body) + + var errFound error + errMutex := sync.Mutex{} + + wg := sync.WaitGroup{} + // Length of body types + another go routine for the intermediate transactions + wg.Add(len(separatedBodies)) + + for key, value := range separatedBodies { + go func(blockType block.Type, blockBody block.Body) { + preproc := tc.getPreProcessor(blockType) + if preproc == nil { + wg.Done() + return + } + + err := preproc.SaveTxBlockToStorage(blockBody) + if err != nil { + log.Debug(err.Error()) + + errMutex.Lock() + errFound = err + errMutex.Unlock() + } + + wg.Done() + }(key, value) + } + + wg.Wait() + + intermediatePreprocSC := tc.getInterimProcessor(block.SmartContractResultBlock) + if intermediatePreprocSC == nil { + return errFound + } + + err := intermediatePreprocSC.SaveCurrentIntermediateTxToStorage() + if err != nil { + log.Debug(err.Error()) + + errMutex.Lock() + errFound = err + errMutex.Unlock() + } + + intermediatePreproc := tc.getInterimProcessor(block.RewardsBlock) + if intermediatePreproc == nil { + return errFound + } + + err = intermediatePreproc.SaveCurrentIntermediateTxToStorage() + if err != nil { + log.Debug(err.Error()) + + errMutex.Lock() + errFound = err + errMutex.Unlock() + } + + return errFound } // RestoreBlockDataFromStorage restores block data from storage to pool func (tc *transactionCoordinator) RestoreBlockDataFromStorage(body block.Body) (int, map[int][][]byte, error) { - separatedBodies := tc.separateBodyByType(body) + separatedBodies := tc.separateBodyByType(body) - var errFound error - localMutex := sync.Mutex{} - totalRestoredTx := 0 - restoredMbHashes := make(map[int][][]byte) + var errFound error + localMutex := sync.Mutex{} + totalRestoredTx := 0 + restoredMbHashes := make(map[int][][]byte) - wg := sync.WaitGroup{} - wg.Add(len(separatedBodies)) + wg := sync.WaitGroup{} + wg.Add(len(separatedBodies)) - for key, value := range separatedBodies { - go func(blockType block.Type, blockBody block.Body) { - restoredMbs := make(map[int][]byte) + for key, value := range separatedBodies { + go func(blockType block.Type, blockBody block.Body) { + restoredMbs := make(map[int][]byte) - preproc := tc.getPreProcessor(blockType) - if preproc == nil { - wg.Done() - return - } + preproc := 
tc.getPreProcessor(blockType) + if preproc == nil { + wg.Done() + return + } - restoredTxs, restoredMbs, err := preproc.RestoreTxBlockIntoPools(blockBody, tc.miniBlockPool) - if err != nil { - log.Debug(err.Error()) + restoredTxs, restoredMbs, err := preproc.RestoreTxBlockIntoPools(blockBody, tc.miniBlockPool) + if err != nil { + log.Debug(err.Error()) - localMutex.Lock() - errFound = err - localMutex.Unlock() - } + localMutex.Lock() + errFound = err + localMutex.Unlock() + } - localMutex.Lock() - totalRestoredTx += restoredTxs + localMutex.Lock() + totalRestoredTx += restoredTxs - for shId, mbHash := range restoredMbs { - restoredMbHashes[shId] = append(restoredMbHashes[shId], mbHash) - } + for shId, mbHash := range restoredMbs { + restoredMbHashes[shId] = append(restoredMbHashes[shId], mbHash) + } - localMutex.Unlock() + localMutex.Unlock() - wg.Done() - }(key, value) - } + wg.Done() + }(key, value) + } - wg.Wait() + wg.Wait() - return totalRestoredTx, restoredMbHashes, errFound + return totalRestoredTx, restoredMbHashes, errFound } // RemoveBlockDataFromPool deletes block data from pools func (tc *transactionCoordinator) RemoveBlockDataFromPool(body block.Body) error { - separatedBodies := tc.separateBodyByType(body) + separatedBodies := tc.separateBodyByType(body) - var errFound error - errMutex := sync.Mutex{} + var errFound error + errMutex := sync.Mutex{} - wg := sync.WaitGroup{} - wg.Add(len(separatedBodies)) + wg := sync.WaitGroup{} + wg.Add(len(separatedBodies)) - for key, value := range separatedBodies { - go func(blockType block.Type, blockBody block.Body) { - preproc := tc.getPreProcessor(blockType) - if preproc == nil || preproc.IsInterfaceNil() { - wg.Done() - return - } + for key, value := range separatedBodies { + go func(blockType block.Type, blockBody block.Body) { + preproc := tc.getPreProcessor(blockType) + if preproc == nil || preproc.IsInterfaceNil() { + wg.Done() + return + } - err := preproc.RemoveTxBlockFromPools(blockBody, tc.miniBlockPool) - if err != nil { - log.Debug(err.Error()) + err := preproc.RemoveTxBlockFromPools(blockBody, tc.miniBlockPool) + if err != nil { + log.Debug(err.Error()) - errMutex.Lock() - errFound = err - errMutex.Unlock() - } - wg.Done() - }(key, value) - } + errMutex.Lock() + errFound = err + errMutex.Unlock() + } + wg.Done() + }(key, value) + } - wg.Wait() + wg.Wait() - return errFound + return errFound } // ProcessBlockTransaction processes transactions and updates state tries func (tc *transactionCoordinator) ProcessBlockTransaction( - body block.Body, - round uint64, - haveTime func() time.Duration, + body block.Body, + round uint64, + haveTime func() time.Duration, ) error { - separatedBodies := tc.separateBodyByType(body) - - // processing has to be done in order, as the order of different type of transactions over the same account is strict - for _, blockType := range tc.keysTxPreProcs { - if separatedBodies[blockType] == nil { - continue - } - if blockType == block.RewardsBlock { - continue - } - - preproc := tc.getPreProcessor(blockType) - if preproc == nil || preproc.IsInterfaceNil() { - return process.ErrMissingPreProcessor - } - - err := preproc.ProcessBlockTransactions(separatedBodies[blockType], round, haveTime) - if err != nil { - return err - } - } - - // create the reward txs and make them available for processing - mbRewards := tc.createRewardsMiniBlocks() - preproc := tc.getPreProcessor(block.RewardsBlock) - rewardsPreProc, ok := preproc.(process.RewardTransactionPreProcessor) - if !ok { - return 
process.ErrWrongTypeAssertion - } - - rewardsPreProc.AddComputedRewardMiniBlocks(mbRewards) - - err := preproc.ProcessBlockTransactions(separatedBodies[block.RewardsBlock], round, haveTime) - if err != nil { - return err - } - - return nil + separatedBodies := tc.separateBodyByType(body) + + // processing has to be done in order, as the order of different type of transactions over the same account is strict + for _, blockType := range tc.keysTxPreProcs { + if separatedBodies[blockType] == nil { + continue + } + if blockType == block.RewardsBlock { + continue + } + + preproc := tc.getPreProcessor(blockType) + if preproc == nil || preproc.IsInterfaceNil() { + return process.ErrMissingPreProcessor + } + + err := preproc.ProcessBlockTransactions(separatedBodies[blockType], round, haveTime) + if err != nil { + return err + } + } + + // create the reward txs and make them available for processing + mbRewards := tc.createRewardsMiniBlocks() + preproc := tc.getPreProcessor(block.RewardsBlock) + rewardsPreProc, ok := preproc.(process.RewardTransactionPreProcessor) + if !ok { + return process.ErrWrongTypeAssertion + } + + rewardsPreProc.AddComputedRewardMiniBlocks(mbRewards) + + err := preproc.ProcessBlockTransactions(separatedBodies[block.RewardsBlock], round, haveTime) + if err != nil { + return err + } + + return nil } // CreateMbsAndProcessCrossShardTransactionsDstMe creates miniblocks and processes cross shard transaction // with destination of current shard func (tc *transactionCoordinator) CreateMbsAndProcessCrossShardTransactionsDstMe( - hdr data.HeaderHandler, - maxTxRemaining uint32, - maxMbRemaining uint32, - round uint64, - haveTime func() bool, + hdr data.HeaderHandler, + maxTxRemaining uint32, + maxMbRemaining uint32, + round uint64, + haveTime func() bool, ) (block.MiniBlockSlice, uint32, bool) { - miniBlocks := make(block.MiniBlockSlice, 0) - nrTxAdded := uint32(0) - nrMBprocessed := 0 - - if hdr == nil || hdr.IsInterfaceNil() { - return miniBlocks, nrTxAdded, true - } - - crossMiniBlockHashes := hdr.GetMiniBlockHeadersWithDst(tc.shardCoordinator.SelfId()) - for key, senderShardId := range crossMiniBlockHashes { - if !haveTime() { - break - } - - if hdr.GetMiniBlockProcessed([]byte(key)) { - nrMBprocessed++ - continue - } - - miniVal, _ := tc.miniBlockPool.Peek([]byte(key)) - if miniVal == nil { - go tc.onRequestMiniBlock(senderShardId, []byte(key)) - continue - } - - miniBlock, ok := miniVal.(*block.MiniBlock) - if !ok { - continue - } - - preproc := tc.getPreProcessor(miniBlock.Type) - if preproc == nil || preproc.IsInterfaceNil() { - continue - } - - // overflow would happen if processing would continue - txOverFlow := nrTxAdded+uint32(len(miniBlock.TxHashes)) > maxTxRemaining - if txOverFlow { - return miniBlocks, nrTxAdded, false - } - - requestedTxs := preproc.RequestTransactionsForMiniBlock(*miniBlock) - if requestedTxs > 0 { - continue - } - - err := tc.processCompleteMiniBlock(preproc, miniBlock, round, haveTime) - if err != nil { - continue - } - - // all txs processed, add to processed miniblocks - miniBlocks = append(miniBlocks, miniBlock) - nrTxAdded = nrTxAdded + uint32(len(miniBlock.TxHashes)) - nrMBprocessed++ - - mbOverFlow := uint32(len(miniBlocks)) >= maxMbRemaining - if mbOverFlow { - return miniBlocks, nrTxAdded, false - } - } - - allMBsProcessed := nrMBprocessed == len(crossMiniBlockHashes) - return miniBlocks, nrTxAdded, allMBsProcessed + miniBlocks := make(block.MiniBlockSlice, 0) + nrTxAdded := uint32(0) + nrMBprocessed := 0 + + if hdr == nil || 
hdr.IsInterfaceNil() { + return miniBlocks, nrTxAdded, true + } + + crossMiniBlockHashes := hdr.GetMiniBlockHeadersWithDst(tc.shardCoordinator.SelfId()) + for key, senderShardId := range crossMiniBlockHashes { + if !haveTime() { + break + } + + if hdr.GetMiniBlockProcessed([]byte(key)) { + nrMBprocessed++ + continue + } + + miniVal, _ := tc.miniBlockPool.Peek([]byte(key)) + if miniVal == nil { + go tc.onRequestMiniBlock(senderShardId, []byte(key)) + continue + } + + miniBlock, ok := miniVal.(*block.MiniBlock) + if !ok { + continue + } + + preproc := tc.getPreProcessor(miniBlock.Type) + if preproc == nil || preproc.IsInterfaceNil() { + continue + } + + // overflow would happen if processing would continue + txOverFlow := nrTxAdded+uint32(len(miniBlock.TxHashes)) > maxTxRemaining + if txOverFlow { + return miniBlocks, nrTxAdded, false + } + + requestedTxs := preproc.RequestTransactionsForMiniBlock(*miniBlock) + if requestedTxs > 0 { + continue + } + + err := tc.processCompleteMiniBlock(preproc, miniBlock, round, haveTime) + if err != nil { + continue + } + + // all txs processed, add to processed miniblocks + miniBlocks = append(miniBlocks, miniBlock) + nrTxAdded = nrTxAdded + uint32(len(miniBlock.TxHashes)) + nrMBprocessed++ + + mbOverFlow := uint32(len(miniBlocks)) >= maxMbRemaining + if mbOverFlow { + return miniBlocks, nrTxAdded, false + } + } + + allMBsProcessed := nrMBprocessed == len(crossMiniBlockHashes) + return miniBlocks, nrTxAdded, allMBsProcessed } // CreateMbsAndProcessTransactionsFromMe creates miniblocks and processes transactions from pool func (tc *transactionCoordinator) CreateMbsAndProcessTransactionsFromMe( - maxTxSpaceRemained uint32, - maxMbSpaceRemained uint32, - round uint64, - haveTime func() bool, + maxTxSpaceRemained uint32, + maxMbSpaceRemained uint32, + round uint64, + haveTime func() bool, ) block.MiniBlockSlice { - txPreProc := tc.getPreProcessor(block.TxBlock) - if txPreProc == nil || txPreProc.IsInterfaceNil() { - return nil - } - - miniBlocks := make(block.MiniBlockSlice, 0) - txSpaceRemained := int(maxTxSpaceRemained) - - newMBAdded := true - for newMBAdded { - newMBAdded = false - - for shardId := uint32(0); shardId < tc.shardCoordinator.NumberOfShards(); shardId++ { - if txSpaceRemained <= 0 { - break - } - - mbSpaceRemained := int(maxMbSpaceRemained) - len(miniBlocks) - if mbSpaceRemained <= 0 { - break - } - - miniBlock, err := txPreProc.CreateAndProcessMiniBlock( - tc.shardCoordinator.SelfId(), - shardId, - txSpaceRemained, - haveTime, - round) - if err != nil { - continue - } - - if len(miniBlock.TxHashes) > 0 { - txSpaceRemained -= len(miniBlock.TxHashes) - miniBlocks = append(miniBlocks, miniBlock) - newMBAdded = true - } - } - } - - interMBs := tc.processAddedInterimTransactions() - if len(interMBs) > 0 { - miniBlocks = append(miniBlocks, interMBs...) - } - - rewardMb := tc.createRewardsMiniBlocks() - if len(rewardMb) == 0 { - log.Error("could not create reward mini-blocks") - } - - rewardsPreProc := tc.getPreProcessor(block.RewardsBlock) - for _, mb := range rewardMb { - err := tc.processCompleteMiniBlock(rewardsPreProc, mb, round, haveTime) - if err != nil { - log.Error(fmt.Sprintf("could not process created reward miniblock: %s", err.Error())) - } - } - miniBlocks = append(miniBlocks, rewardMb...) 
- - return miniBlocks + txPreProc := tc.getPreProcessor(block.TxBlock) + if txPreProc == nil || txPreProc.IsInterfaceNil() { + return nil + } + + miniBlocks := make(block.MiniBlockSlice, 0) + txSpaceRemained := int(maxTxSpaceRemained) + + newMBAdded := true + for newMBAdded { + newMBAdded = false + + for shardId := uint32(0); shardId < tc.shardCoordinator.NumberOfShards(); shardId++ { + if txSpaceRemained <= 0 { + break + } + + mbSpaceRemained := int(maxMbSpaceRemained) - len(miniBlocks) + if mbSpaceRemained <= 0 { + break + } + + miniBlock, err := txPreProc.CreateAndProcessMiniBlock( + tc.shardCoordinator.SelfId(), + shardId, + txSpaceRemained, + haveTime, + round) + if err != nil { + continue + } + + if len(miniBlock.TxHashes) > 0 { + txSpaceRemained -= len(miniBlock.TxHashes) + miniBlocks = append(miniBlocks, miniBlock) + newMBAdded = true + } + } + } + + interMBs := tc.processAddedInterimTransactions() + if len(interMBs) > 0 { + miniBlocks = append(miniBlocks, interMBs...) + } + + rewardMb := tc.createRewardsMiniBlocks() + if len(rewardMb) == 0 { + log.Error("could not create reward mini-blocks") + } + + rewardsPreProc := tc.getPreProcessor(block.RewardsBlock) + for _, mb := range rewardMb { + err := tc.processCompleteMiniBlock(rewardsPreProc, mb, round, haveTime) + if err != nil { + log.Error(fmt.Sprintf("could not process created reward miniblock: %s", err.Error())) + } + } + miniBlocks = append(miniBlocks, rewardMb...) + + return miniBlocks } func (tc *transactionCoordinator) createRewardsMiniBlocks() block.MiniBlockSlice { - // add rewards transactions to separate miniBlocks - interimProc := tc.getInterimProcessor(block.RewardsBlock) - if interimProc == nil { - return nil - } - - miniBlocks := make(block.MiniBlockSlice, 0) - rewardsMbs := interimProc.CreateAllInterMiniBlocks() - for _, mb := range rewardsMbs { - miniBlocks = append(miniBlocks, mb) - } - - return miniBlocks + // add rewards transactions to separate miniBlocks + interimProc := tc.getInterimProcessor(block.RewardsBlock) + if interimProc == nil { + return nil + } + + miniBlocks := make(block.MiniBlockSlice, 0) + rewardsMbs := interimProc.CreateAllInterMiniBlocks() + for _, mb := range rewardsMbs { + miniBlocks = append(miniBlocks, mb) + } + + return miniBlocks } func (tc *transactionCoordinator) processAddedInterimTransactions() block.MiniBlockSlice { - miniBlocks := make(block.MiniBlockSlice, 0) - - // processing has to be done in order, as the order of different type of transactions over the same account is strict - for _, blockType := range tc.keysInterimProcs { - if blockType == block.RewardsBlock { - // this has to be processed last - continue - } - - interimProc := tc.getInterimProcessor(blockType) - if interimProc == nil { - // this will never be reached as keysInterimProcs are the actual keys from the interimMap - continue - } - - currMbs := interimProc.CreateAllInterMiniBlocks() - for _, value := range currMbs { - miniBlocks = append(miniBlocks, value) - } - } - - return miniBlocks + miniBlocks := make(block.MiniBlockSlice, 0) + + // processing has to be done in order, as the order of different type of transactions over the same account is strict + for _, blockType := range tc.keysInterimProcs { + if blockType == block.RewardsBlock { + // this has to be processed last + continue + } + + interimProc := tc.getInterimProcessor(blockType) + if interimProc == nil { + // this will never be reached as keysInterimProcs are the actual keys from the interimMap + continue + } + + currMbs := 
interimProc.CreateAllInterMiniBlocks() + for _, value := range currMbs { + miniBlocks = append(miniBlocks, value) + } + } + + return miniBlocks } // CreateBlockStarted initializes necessary data for preprocessors at block create or block process func (tc *transactionCoordinator) CreateBlockStarted() { - tc.mutPreProcessor.RLock() - for _, value := range tc.txPreProcessors { - value.CreateBlockStarted() - } - tc.mutPreProcessor.RUnlock() - - tc.mutInterimProcessors.RLock() - for _, value := range tc.interimProcessors { - value.CreateBlockStarted() - } - tc.mutInterimProcessors.RUnlock() + tc.mutPreProcessor.RLock() + for _, value := range tc.txPreProcessors { + value.CreateBlockStarted() + } + tc.mutPreProcessor.RUnlock() + + tc.mutInterimProcessors.RLock() + for _, value := range tc.interimProcessors { + value.CreateBlockStarted() + } + tc.mutInterimProcessors.RUnlock() } func (tc *transactionCoordinator) getPreProcessor(blockType block.Type) process.PreProcessor { - tc.mutPreProcessor.RLock() - preprocessor, exists := tc.txPreProcessors[blockType] - tc.mutPreProcessor.RUnlock() + tc.mutPreProcessor.RLock() + preprocessor, exists := tc.txPreProcessors[blockType] + tc.mutPreProcessor.RUnlock() - if !exists { - return nil - } + if !exists { + return nil + } - return preprocessor + return preprocessor } func (tc *transactionCoordinator) getInterimProcessor(blockType block.Type) process.IntermediateTransactionHandler { - tc.mutInterimProcessors.RLock() - interProcessor, exists := tc.interimProcessors[blockType] - tc.mutInterimProcessors.RUnlock() + tc.mutInterimProcessors.RLock() + interProcessor, exists := tc.interimProcessors[blockType] + tc.mutInterimProcessors.RUnlock() - if !exists { - return nil - } + if !exists { + return nil + } - return interProcessor + return interProcessor } func createBroadcastTopic(shardC sharding.Coordinator, destShId uint32, mbType block.Type) (string, error) { - var baseTopic string - - switch mbType { - case block.TxBlock: - baseTopic = factory.TransactionTopic - case block.PeerBlock: - baseTopic = factory.PeerChBodyTopic - case block.SmartContractResultBlock: - baseTopic = factory.UnsignedTransactionTopic - case block.RewardsBlock: - baseTopic = factory.RewardsTransactionTopic - default: - return "", process.ErrUnknownBlockType - } - - transactionTopic := baseTopic + - shardC.CommunicationIdentifier(destShId) - - return transactionTopic, nil + var baseTopic string + + switch mbType { + case block.TxBlock: + baseTopic = factory.TransactionTopic + case block.PeerBlock: + baseTopic = factory.PeerChBodyTopic + case block.SmartContractResultBlock: + baseTopic = factory.UnsignedTransactionTopic + case block.RewardsBlock: + baseTopic = factory.RewardsTransactionTopic + default: + return "", process.ErrUnknownBlockType + } + + transactionTopic := baseTopic + + shardC.CommunicationIdentifier(destShId) + + return transactionTopic, nil } // CreateMarshalizedData creates marshalized data for broadcasting func (tc *transactionCoordinator) CreateMarshalizedData(body block.Body) (map[uint32]block.MiniBlockSlice, map[string][][]byte) { - mrsTxs := make(map[string][][]byte) - bodies := make(map[uint32]block.MiniBlockSlice) - - for i := 0; i < len(body); i++ { - miniblock := body[i] - receiverShardId := miniblock.ReceiverShardID - if receiverShardId == tc.shardCoordinator.SelfId() { // not taking into account miniblocks for current shard - continue - } - - broadcastTopic, err := createBroadcastTopic(tc.shardCoordinator, receiverShardId, miniblock.Type) - if err != nil { - 
log.Debug(err.Error()) - continue - } - - preproc := tc.getPreProcessor(miniblock.Type) - if preproc == nil || preproc.IsInterfaceNil() { - continue - } - - bodies[receiverShardId] = append(bodies[receiverShardId], miniblock) - - currMrsTxs, err := preproc.CreateMarshalizedData(miniblock.TxHashes) - if err != nil { - log.Debug(err.Error()) - continue - } - - if len(currMrsTxs) > 0 { - mrsTxs[broadcastTopic] = append(mrsTxs[broadcastTopic], currMrsTxs...) - } - - interimProc := tc.getInterimProcessor(miniblock.Type) - if interimProc == nil || interimProc.IsInterfaceNil() { - continue - } - - currMrsInterTxs, err := interimProc.CreateMarshalizedData(miniblock.TxHashes) - if err != nil { - log.Debug(err.Error()) - continue - } - - if len(currMrsInterTxs) > 0 { - mrsTxs[broadcastTopic] = append(mrsTxs[broadcastTopic], currMrsInterTxs...) - } - } - - return bodies, mrsTxs + mrsTxs := make(map[string][][]byte) + bodies := make(map[uint32]block.MiniBlockSlice) + + for i := 0; i < len(body); i++ { + miniblock := body[i] + receiverShardId := miniblock.ReceiverShardID + if receiverShardId == tc.shardCoordinator.SelfId() { // not taking into account miniblocks for current shard + continue + } + + broadcastTopic, err := createBroadcastTopic(tc.shardCoordinator, receiverShardId, miniblock.Type) + if err != nil { + log.Debug(err.Error()) + continue + } + + preproc := tc.getPreProcessor(miniblock.Type) + if preproc == nil || preproc.IsInterfaceNil() { + continue + } + + bodies[receiverShardId] = append(bodies[receiverShardId], miniblock) + + currMrsTxs, err := preproc.CreateMarshalizedData(miniblock.TxHashes) + if err != nil { + log.Debug(err.Error()) + continue + } + + if len(currMrsTxs) > 0 { + mrsTxs[broadcastTopic] = append(mrsTxs[broadcastTopic], currMrsTxs...) + } + + interimProc := tc.getInterimProcessor(miniblock.Type) + if interimProc == nil || interimProc.IsInterfaceNil() { + continue + } + + currMrsInterTxs, err := interimProc.CreateMarshalizedData(miniblock.TxHashes) + if err != nil { + log.Debug(err.Error()) + continue + } + + if len(currMrsInterTxs) > 0 { + mrsTxs[broadcastTopic] = append(mrsTxs[broadcastTopic], currMrsInterTxs...) 
+ } + } + + return bodies, mrsTxs } // GetAllCurrentUsedTxs returns the cached transaction data for current round func (tc *transactionCoordinator) GetAllCurrentUsedTxs(blockType block.Type) map[string]data.TransactionHandler { - txPool := make(map[string]data.TransactionHandler, 0) - interTxPool := make(map[string]data.TransactionHandler, 0) + txPool := make(map[string]data.TransactionHandler, 0) + interTxPool := make(map[string]data.TransactionHandler, 0) - preProc := tc.getPreProcessor(blockType) - if preProc != nil { - txPool = preProc.GetAllCurrentUsedTxs() - } + preProc := tc.getPreProcessor(blockType) + if preProc != nil { + txPool = preProc.GetAllCurrentUsedTxs() + } - interProc := tc.getInterimProcessor(blockType) - if interProc != nil { - interTxPool = interProc.GetAllCurrentFinishedTxs() - } + interProc := tc.getInterimProcessor(blockType) + if interProc != nil { + interTxPool = interProc.GetAllCurrentFinishedTxs() + } - for hash, tx := range interTxPool { - txPool[hash] = tx - } + for hash, tx := range interTxPool { + txPool[hash] = tx + } - return txPool + return txPool } // RequestMiniBlocks request miniblocks if missing func (tc *transactionCoordinator) RequestMiniBlocks(header data.HeaderHandler) { - if header == nil || header.IsInterfaceNil() { - return - } - - crossMiniBlockHashes := header.GetMiniBlockHeadersWithDst(tc.shardCoordinator.SelfId()) - for key, senderShardId := range crossMiniBlockHashes { - obj, _ := tc.miniBlockPool.Peek([]byte(key)) - if obj == nil { - go tc.onRequestMiniBlock(senderShardId, []byte(key)) - } - } + if header == nil || header.IsInterfaceNil() { + return + } + + crossMiniBlockHashes := header.GetMiniBlockHeadersWithDst(tc.shardCoordinator.SelfId()) + for key, senderShardId := range crossMiniBlockHashes { + obj, _ := tc.miniBlockPool.Peek([]byte(key)) + if obj == nil { + go tc.onRequestMiniBlock(senderShardId, []byte(key)) + } + } } // receivedMiniBlock is a callback function when a new miniblock was received // it will further ask for missing transactions func (tc *transactionCoordinator) receivedMiniBlock(miniBlockHash []byte) { - val, ok := tc.miniBlockPool.Peek(miniBlockHash) - if !ok { - return - } - - miniBlock, ok := val.(block.MiniBlock) - if !ok { - return - } - - preproc := tc.getPreProcessor(miniBlock.Type) - if preproc == nil || preproc.IsInterfaceNil() { - return - } - - _ = preproc.RequestTransactionsForMiniBlock(miniBlock) + val, ok := tc.miniBlockPool.Peek(miniBlockHash) + if !ok { + return + } + + miniBlock, ok := val.(block.MiniBlock) + if !ok { + return + } + + preproc := tc.getPreProcessor(miniBlock.Type) + if preproc == nil || preproc.IsInterfaceNil() { + return + } + + _ = preproc.RequestTransactionsForMiniBlock(miniBlock) } // processMiniBlockComplete - all transactions must be processed together, otherwise error func (tc *transactionCoordinator) processCompleteMiniBlock( - preproc process.PreProcessor, - miniBlock *block.MiniBlock, - round uint64, - haveTime func() bool, + preproc process.PreProcessor, + miniBlock *block.MiniBlock, + round uint64, + haveTime func() bool, ) error { - snapshot := tc.accounts.JournalLen() - err := preproc.ProcessMiniBlock(miniBlock, haveTime, round) - if err != nil { - log.Debug(err.Error()) - errAccountState := tc.accounts.RevertToSnapshot(snapshot) - if errAccountState != nil { - // TODO: evaluate if reloading the trie from disk will might solve the problem - log.Error(errAccountState.Error()) - } + snapshot := tc.accounts.JournalLen() + err := preproc.ProcessMiniBlock(miniBlock, 
haveTime, round) + if err != nil { + log.Debug(err.Error()) + errAccountState := tc.accounts.RevertToSnapshot(snapshot) + if errAccountState != nil { + // TODO: evaluate if reloading the trie from disk will might solve the problem + log.Error(errAccountState.Error()) + } - return err - } + return err + } - return nil + return nil } // VerifyCreatedBlockTransactions checks whether the created transactions are the same as the one proposed func (tc *transactionCoordinator) VerifyCreatedBlockTransactions(body block.Body) error { - tc.mutInterimProcessors.RLock() - defer tc.mutInterimProcessors.RUnlock() - errMutex := sync.Mutex{} - var errFound error - // TODO: think if it is good in parallel or it is needed in sequences - wg := sync.WaitGroup{} - wg.Add(len(tc.interimProcessors)) - - for key, interimProc := range tc.interimProcessors { - if key == block.RewardsBlock { - // this has to be processed last - wg.Done() - continue - } - - go func(intermediateProcessor process.IntermediateTransactionHandler) { - err := intermediateProcessor.VerifyInterMiniBlocks(body) - if err != nil { - errMutex.Lock() - errFound = err - errMutex.Unlock() - } - wg.Done() - }(interimProc) - } - - wg.Wait() - - if errFound != nil { - return errFound - } - - interimProc := tc.getInterimProcessor(block.RewardsBlock) - if interimProc == nil { - return nil - } - - return interimProc.VerifyInterMiniBlocks(body) + tc.mutInterimProcessors.RLock() + defer tc.mutInterimProcessors.RUnlock() + errMutex := sync.Mutex{} + var errFound error + // TODO: think if it is good in parallel or it is needed in sequences + wg := sync.WaitGroup{} + wg.Add(len(tc.interimProcessors)) + + for key, interimProc := range tc.interimProcessors { + if key == block.RewardsBlock { + // this has to be processed last + wg.Done() + continue + } + + go func(intermediateProcessor process.IntermediateTransactionHandler) { + err := intermediateProcessor.VerifyInterMiniBlocks(body) + if err != nil { + errMutex.Lock() + errFound = err + errMutex.Unlock() + } + wg.Done() + }(interimProc) + } + + wg.Wait() + + if errFound != nil { + return errFound + } + + interimProc := tc.getInterimProcessor(block.RewardsBlock) + if interimProc == nil { + return nil + } + + return interimProc.VerifyInterMiniBlocks(body) } // IsInterfaceNil returns true if there is no value under the interface func (tc *transactionCoordinator) IsInterfaceNil() bool { - if tc == nil { - return true - } - return false + if tc == nil { + return true + } + return false } diff --git a/process/coordinator/process_test.go b/process/coordinator/process_test.go index 558a0ba9587..6d41da82251 100644 --- a/process/coordinator/process_test.go +++ b/process/coordinator/process_test.go @@ -1,1865 +1,1865 @@ package coordinator import ( - "bytes" - "encoding/hex" - "errors" - "math/big" - "reflect" - "sync" - "sync/atomic" - "testing" - "time" - - "github.com/ElrondNetwork/elrond-go/core" - "github.com/ElrondNetwork/elrond-go/data" - "github.com/ElrondNetwork/elrond-go/data/block" - "github.com/ElrondNetwork/elrond-go/data/rewardTx" - "github.com/ElrondNetwork/elrond-go/data/smartContractResult" - "github.com/ElrondNetwork/elrond-go/data/state" - "github.com/ElrondNetwork/elrond-go/data/transaction" - "github.com/ElrondNetwork/elrond-go/dataRetriever" - "github.com/ElrondNetwork/elrond-go/dataRetriever/shardedData" - "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/process/factory" - "github.com/ElrondNetwork/elrond-go/process/factory/shard" - 
"github.com/ElrondNetwork/elrond-go/process/mock" - "github.com/ElrondNetwork/elrond-go/storage" - "github.com/ElrondNetwork/elrond-go/storage/memorydb" - "github.com/ElrondNetwork/elrond-go/storage/storageUnit" - "github.com/stretchr/testify/assert" + "bytes" + "encoding/hex" + "errors" + "math/big" + "reflect" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" + "github.com/ElrondNetwork/elrond-go/data/smartContractResult" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/data/transaction" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/dataRetriever/shardedData" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/factory" + "github.com/ElrondNetwork/elrond-go/process/factory/shard" + "github.com/ElrondNetwork/elrond-go/process/mock" + "github.com/ElrondNetwork/elrond-go/storage" + "github.com/ElrondNetwork/elrond-go/storage/memorydb" + "github.com/ElrondNetwork/elrond-go/storage/storageUnit" + "github.com/stretchr/testify/assert" ) func createShardedDataChacherNotifier( - handler data.TransactionHandler, - testHash []byte, + handler data.TransactionHandler, + testHash []byte, ) (func() dataRetriever.ShardedDataCacherNotifier ) { - return func() dataRetriever.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{ - RegisterHandlerCalled: func(i func(key []byte)) {}, - ShardDataStoreCalled: func(id string) (c storage.Cacher) { - return &mock.CacherStub{ - PeekCalled: func(key []byte) (value interface{}, ok bool) { - if reflect.DeepEqual(key, testHash) { - return handler, true - } - return nil, false - }, - KeysCalled: func() [][]byte { - return [][]byte{[]byte("key1"), []byte("key2")} - }, - LenCalled: func() int { - return 0 - }, - } - }, - RemoveSetOfDataFromPoolCalled: func(keys [][]byte, id string) {}, - SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { - if reflect.DeepEqual(key, []byte("tx1_hash")) { - return handler, true - } - return nil, false - }, - AddDataCalled: func(key []byte, data interface{}, cacheId string) { - }, - } - } + return func() dataRetriever.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{ + RegisterHandlerCalled: func(i func(key []byte)) {}, + ShardDataStoreCalled: func(id string) (c storage.Cacher) { + return &mock.CacherStub{ + PeekCalled: func(key []byte) (value interface{}, ok bool) { + if reflect.DeepEqual(key, testHash) { + return handler, true + } + return nil, false + }, + KeysCalled: func() [][]byte { + return [][]byte{[]byte("key1"), []byte("key2")} + }, + LenCalled: func() int { + return 0 + }, + } + }, + RemoveSetOfDataFromPoolCalled: func(keys [][]byte, id string) {}, + SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { + if reflect.DeepEqual(key, []byte("tx1_hash")) { + return handler, true + } + return nil, false + }, + AddDataCalled: func(key []byte, data interface{}, cacheId string) { + }, + } + } } func initDataPool(testHash []byte) *mock.PoolsHolderStub { - tx := &transaction.Transaction{Nonce: 10} - sc := &smartContractResult.SmartContractResult{Nonce: 10, SndAddr: []byte("0"), RcvAddr: []byte("1")} - rTx := &rewardTx.RewardTx{Epoch: 0, Round: 1, RcvAddr: []byte("1")} - - txCalled := createShardedDataChacherNotifier(tx, testHash) - unsignedTxHandler := 
createShardedDataChacherNotifier(sc, testHash) - rewardTxCalled := createShardedDataChacherNotifier(rTx, testHash) - - sdp := &mock.PoolsHolderStub{ - TransactionsCalled: txCalled, - UnsignedTransactionsCalled: unsignedTxHandler, - RewardTransactionsCalled: rewardTxCalled, - HeadersNoncesCalled: func() dataRetriever.Uint64SyncMapCacher { - return &mock.Uint64SyncMapCacherStub{ - MergeCalled: func(u uint64, hashMap dataRetriever.ShardIdHashMap) {}, - HasCalled: func(nonce uint64, shardId uint32) bool { - return true - }, - } - }, - MetaBlocksCalled: func() storage.Cacher { - return &mock.CacherStub{ - GetCalled: func(key []byte) (value interface{}, ok bool) { - if reflect.DeepEqual(key, []byte("tx1_hash")) { - return &transaction.Transaction{Nonce: 10}, true - } - return nil, false - }, - KeysCalled: func() [][]byte { - return nil - }, - LenCalled: func() int { - return 0 - }, - PeekCalled: func(key []byte) (value interface{}, ok bool) { - if reflect.DeepEqual(key, []byte("tx1_hash")) { - return &transaction.Transaction{Nonce: 10}, true - } - return nil, false - }, - RegisterHandlerCalled: func(i func(key []byte)) {}, - } - }, - MiniBlocksCalled: func() storage.Cacher { - cs := &mock.CacherStub{} - cs.RegisterHandlerCalled = func(i func(key []byte)) { - } - cs.GetCalled = func(key []byte) (value interface{}, ok bool) { - if bytes.Equal([]byte("bbb"), key) { - return make(block.MiniBlockSlice, 0), true - } - - return nil, false - } - cs.PeekCalled = func(key []byte) (value interface{}, ok bool) { - if bytes.Equal([]byte("bbb"), key) { - return make(block.MiniBlockSlice, 0), true - } - - return nil, false - } - cs.RegisterHandlerCalled = func(i func(key []byte)) {} - cs.RemoveCalled = func(key []byte) {} - cs.PutCalled = func(key []byte, value interface{}) (evicted bool) { - return false - } - return cs - }, - HeadersCalled: func() storage.Cacher { - cs := &mock.CacherStub{} - cs.RegisterHandlerCalled = func(i func(key []byte)) { - } - return cs - }, - } - return sdp + tx := &transaction.Transaction{Nonce: 10} + sc := &smartContractResult.SmartContractResult{Nonce: 10, SndAddr: []byte("0"), RcvAddr: []byte("1")} + rTx := &rewardTx.RewardTx{Epoch: 0, Round: 1, RcvAddr: []byte("1")} + + txCalled := createShardedDataChacherNotifier(tx, testHash) + unsignedTxHandler := createShardedDataChacherNotifier(sc, testHash) + rewardTxCalled := createShardedDataChacherNotifier(rTx, testHash) + + sdp := &mock.PoolsHolderStub{ + TransactionsCalled: txCalled, + UnsignedTransactionsCalled: unsignedTxHandler, + RewardTransactionsCalled: rewardTxCalled, + HeadersNoncesCalled: func() dataRetriever.Uint64SyncMapCacher { + return &mock.Uint64SyncMapCacherStub{ + MergeCalled: func(u uint64, hashMap dataRetriever.ShardIdHashMap) {}, + HasCalled: func(nonce uint64, shardId uint32) bool { + return true + }, + } + }, + MetaBlocksCalled: func() storage.Cacher { + return &mock.CacherStub{ + GetCalled: func(key []byte) (value interface{}, ok bool) { + if reflect.DeepEqual(key, []byte("tx1_hash")) { + return &transaction.Transaction{Nonce: 10}, true + } + return nil, false + }, + KeysCalled: func() [][]byte { + return nil + }, + LenCalled: func() int { + return 0 + }, + PeekCalled: func(key []byte) (value interface{}, ok bool) { + if reflect.DeepEqual(key, []byte("tx1_hash")) { + return &transaction.Transaction{Nonce: 10}, true + } + return nil, false + }, + RegisterHandlerCalled: func(i func(key []byte)) {}, + } + }, + MiniBlocksCalled: func() storage.Cacher { + cs := &mock.CacherStub{} + cs.RegisterHandlerCalled = func(i 
func(key []byte)) { + } + cs.GetCalled = func(key []byte) (value interface{}, ok bool) { + if bytes.Equal([]byte("bbb"), key) { + return make(block.MiniBlockSlice, 0), true + } + + return nil, false + } + cs.PeekCalled = func(key []byte) (value interface{}, ok bool) { + if bytes.Equal([]byte("bbb"), key) { + return make(block.MiniBlockSlice, 0), true + } + + return nil, false + } + cs.RegisterHandlerCalled = func(i func(key []byte)) {} + cs.RemoveCalled = func(key []byte) {} + cs.PutCalled = func(key []byte, value interface{}) (evicted bool) { + return false + } + return cs + }, + HeadersCalled: func() storage.Cacher { + cs := &mock.CacherStub{} + cs.RegisterHandlerCalled = func(i func(key []byte)) { + } + return cs + }, + } + return sdp } func containsHash(txHashes [][]byte, hash []byte) bool { - for _, txHash := range txHashes { - if bytes.Equal(hash, txHash) { - return true - } - } - return false + for _, txHash := range txHashes { + if bytes.Equal(hash, txHash) { + return true + } + } + return false } func initStore() *dataRetriever.ChainStorer { - store := dataRetriever.NewChainStorer() - store.AddStorer(dataRetriever.TransactionUnit, generateTestUnit()) - store.AddStorer(dataRetriever.MiniBlockUnit, generateTestUnit()) - store.AddStorer(dataRetriever.MetaBlockUnit, generateTestUnit()) - store.AddStorer(dataRetriever.PeerChangesUnit, generateTestUnit()) - store.AddStorer(dataRetriever.BlockHeaderUnit, generateTestUnit()) - store.AddStorer(dataRetriever.ShardHdrNonceHashDataUnit, generateTestUnit()) - store.AddStorer(dataRetriever.MetaHdrNonceHashDataUnit, generateTestUnit()) - return store + store := dataRetriever.NewChainStorer() + store.AddStorer(dataRetriever.TransactionUnit, generateTestUnit()) + store.AddStorer(dataRetriever.MiniBlockUnit, generateTestUnit()) + store.AddStorer(dataRetriever.MetaBlockUnit, generateTestUnit()) + store.AddStorer(dataRetriever.PeerChangesUnit, generateTestUnit()) + store.AddStorer(dataRetriever.BlockHeaderUnit, generateTestUnit()) + store.AddStorer(dataRetriever.ShardHdrNonceHashDataUnit, generateTestUnit()) + store.AddStorer(dataRetriever.MetaHdrNonceHashDataUnit, generateTestUnit()) + return store } func generateTestCache() storage.Cacher { - cache, _ := storageUnit.NewCache(storageUnit.LRUCache, 1000, 1) - return cache + cache, _ := storageUnit.NewCache(storageUnit.LRUCache, 1000, 1) + return cache } func generateTestUnit() storage.Storer { - memDB, _ := memorydb.New() + memDB, _ := memorydb.New() - storer, _ := storageUnit.NewStorageUnit( - generateTestCache(), - memDB, - ) + storer, _ := storageUnit.NewStorageUnit( + generateTestCache(), + memDB, + ) - return storer + return storer } func initAccountsMock() *mock.AccountsStub { - rootHashCalled := func() ([]byte, error) { - return []byte("rootHash"), nil - } - return &mock.AccountsStub{ - RootHashCalled: rootHashCalled, - } + rootHashCalled := func() ([]byte, error) { + return []byte("rootHash"), nil + } + return &mock.AccountsStub{ + RootHashCalled: rootHashCalled, + } } func TestNewTransactionCoordinator_NilShardCoordinator(t *testing.T) { - t.Parallel() - - tc, err := NewTransactionCoordinator( - nil, - &mock.AccountsStub{}, - mock.NewPoolsHolderMock(), - &mock.RequestHandlerMock{}, - &mock.PreProcessorContainerMock{}, - &mock.InterimProcessorContainerMock{}, - ) - - assert.Nil(t, tc) - assert.Equal(t, process.ErrNilShardCoordinator, err) + t.Parallel() + + tc, err := NewTransactionCoordinator( + nil, + &mock.AccountsStub{}, + mock.NewPoolsHolderMock(), + &mock.RequestHandlerMock{}, + 
&mock.PreProcessorContainerMock{}, + &mock.InterimProcessorContainerMock{}, + ) + + assert.Nil(t, tc) + assert.Equal(t, process.ErrNilShardCoordinator, err) } func TestNewTransactionCoordinator_NilAccountsStub(t *testing.T) { - t.Parallel() - - tc, err := NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(5), - nil, - mock.NewPoolsHolderMock(), - &mock.RequestHandlerMock{}, - &mock.PreProcessorContainerMock{}, - &mock.InterimProcessorContainerMock{}, - ) - - assert.Nil(t, tc) - assert.Equal(t, process.ErrNilAccountsAdapter, err) + t.Parallel() + + tc, err := NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(5), + nil, + mock.NewPoolsHolderMock(), + &mock.RequestHandlerMock{}, + &mock.PreProcessorContainerMock{}, + &mock.InterimProcessorContainerMock{}, + ) + + assert.Nil(t, tc) + assert.Equal(t, process.ErrNilAccountsAdapter, err) } func TestNewTransactionCoordinator_NilDataPool(t *testing.T) { - t.Parallel() - - tc, err := NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(5), - &mock.AccountsStub{}, - nil, - &mock.RequestHandlerMock{}, - &mock.PreProcessorContainerMock{}, - &mock.InterimProcessorContainerMock{}, - ) - - assert.Nil(t, tc) - assert.Equal(t, process.ErrNilDataPoolHolder, err) + t.Parallel() + + tc, err := NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(5), + &mock.AccountsStub{}, + nil, + &mock.RequestHandlerMock{}, + &mock.PreProcessorContainerMock{}, + &mock.InterimProcessorContainerMock{}, + ) + + assert.Nil(t, tc) + assert.Equal(t, process.ErrNilDataPoolHolder, err) } func TestNewTransactionCoordinator_NilRequestHandler(t *testing.T) { - t.Parallel() - - tc, err := NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(5), - &mock.AccountsStub{}, - mock.NewPoolsHolderMock(), - nil, - &mock.PreProcessorContainerMock{}, - &mock.InterimProcessorContainerMock{}, - ) - - assert.Nil(t, tc) - assert.Equal(t, process.ErrNilRequestHandler, err) + t.Parallel() + + tc, err := NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(5), + &mock.AccountsStub{}, + mock.NewPoolsHolderMock(), + nil, + &mock.PreProcessorContainerMock{}, + &mock.InterimProcessorContainerMock{}, + ) + + assert.Nil(t, tc) + assert.Equal(t, process.ErrNilRequestHandler, err) } func TestNewTransactionCoordinator_NilHasher(t *testing.T) { - t.Parallel() - - tc, err := NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(5), - &mock.AccountsStub{}, - mock.NewPoolsHolderMock(), - &mock.RequestHandlerMock{}, - nil, - &mock.InterimProcessorContainerMock{}, - ) - - assert.Nil(t, tc) - assert.Equal(t, process.ErrNilPreProcessorsContainer, err) + t.Parallel() + + tc, err := NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(5), + &mock.AccountsStub{}, + mock.NewPoolsHolderMock(), + &mock.RequestHandlerMock{}, + nil, + &mock.InterimProcessorContainerMock{}, + ) + + assert.Nil(t, tc) + assert.Equal(t, process.ErrNilPreProcessorsContainer, err) } func TestNewTransactionCoordinator_NilMarshalizer(t *testing.T) { - t.Parallel() - - tc, err := NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(5), - &mock.AccountsStub{}, - mock.NewPoolsHolderMock(), - &mock.RequestHandlerMock{}, - &mock.PreProcessorContainerMock{}, - nil, - ) - - assert.Nil(t, tc) - assert.Equal(t, process.ErrNilIntermediateProcessorContainer, err) + t.Parallel() + + tc, err := NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(5), + &mock.AccountsStub{}, + mock.NewPoolsHolderMock(), + &mock.RequestHandlerMock{}, + 
&mock.PreProcessorContainerMock{}, + nil, + ) + + assert.Nil(t, tc) + assert.Equal(t, process.ErrNilIntermediateProcessorContainer, err) } func TestNewTransactionCoordinator_OK(t *testing.T) { - t.Parallel() - - tc, err := NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(5), - &mock.AccountsStub{}, - mock.NewPoolsHolderMock(), - &mock.RequestHandlerMock{}, - &mock.PreProcessorContainerMock{}, - &mock.InterimProcessorContainerMock{}, - ) - - assert.Nil(t, err) - assert.NotNil(t, tc) + t.Parallel() + + tc, err := NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(5), + &mock.AccountsStub{}, + mock.NewPoolsHolderMock(), + &mock.RequestHandlerMock{}, + &mock.PreProcessorContainerMock{}, + &mock.InterimProcessorContainerMock{}, + ) + + assert.Nil(t, err) + assert.NotNil(t, tc) } func TestTransactionCoordinator_SeparateBodyNil(t *testing.T) { - t.Parallel() - - tc, err := NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(5), - &mock.AccountsStub{}, - mock.NewPoolsHolderMock(), - &mock.RequestHandlerMock{}, - &mock.PreProcessorContainerMock{}, - &mock.InterimProcessorContainerMock{}, - ) - assert.Nil(t, err) - assert.NotNil(t, tc) - - separated := tc.separateBodyByType(nil) - assert.Equal(t, 0, len(separated)) + t.Parallel() + + tc, err := NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(5), + &mock.AccountsStub{}, + mock.NewPoolsHolderMock(), + &mock.RequestHandlerMock{}, + &mock.PreProcessorContainerMock{}, + &mock.InterimProcessorContainerMock{}, + ) + assert.Nil(t, err) + assert.NotNil(t, tc) + + separated := tc.separateBodyByType(nil) + assert.Equal(t, 0, len(separated)) } func TestTransactionCoordinator_SeparateBody(t *testing.T) { - t.Parallel() - - tc, err := NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(5), - &mock.AccountsStub{}, - mock.NewPoolsHolderMock(), - &mock.RequestHandlerMock{}, - &mock.PreProcessorContainerMock{}, - &mock.InterimProcessorContainerMock{}, - ) - assert.Nil(t, err) - assert.NotNil(t, tc) - - body := block.Body{} - body = append(body, &block.MiniBlock{Type: block.TxBlock}) - body = append(body, &block.MiniBlock{Type: block.TxBlock}) - body = append(body, &block.MiniBlock{Type: block.TxBlock}) - body = append(body, &block.MiniBlock{Type: block.SmartContractResultBlock}) - body = append(body, &block.MiniBlock{Type: block.SmartContractResultBlock}) - body = append(body, &block.MiniBlock{Type: block.SmartContractResultBlock}) - body = append(body, &block.MiniBlock{Type: block.SmartContractResultBlock}) - - separated := tc.separateBodyByType(body) - assert.Equal(t, 2, len(separated)) - assert.Equal(t, 3, len(separated[block.TxBlock])) - assert.Equal(t, 4, len(separated[block.SmartContractResultBlock])) + t.Parallel() + + tc, err := NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(5), + &mock.AccountsStub{}, + mock.NewPoolsHolderMock(), + &mock.RequestHandlerMock{}, + &mock.PreProcessorContainerMock{}, + &mock.InterimProcessorContainerMock{}, + ) + assert.Nil(t, err) + assert.NotNil(t, tc) + + body := block.Body{} + body = append(body, &block.MiniBlock{Type: block.TxBlock}) + body = append(body, &block.MiniBlock{Type: block.TxBlock}) + body = append(body, &block.MiniBlock{Type: block.TxBlock}) + body = append(body, &block.MiniBlock{Type: block.SmartContractResultBlock}) + body = append(body, &block.MiniBlock{Type: block.SmartContractResultBlock}) + body = append(body, &block.MiniBlock{Type: block.SmartContractResultBlock}) + body = append(body, &block.MiniBlock{Type: 
block.SmartContractResultBlock}) + + separated := tc.separateBodyByType(body) + assert.Equal(t, 2, len(separated)) + assert.Equal(t, 3, len(separated[block.TxBlock])) + assert.Equal(t, 4, len(separated[block.SmartContractResultBlock])) } func createPreProcessorContainer() process.PreProcessorsContainer { - preFactory, _ := shard.NewPreProcessorsContainerFactory( - mock.NewMultiShardsCoordinatorMock(5), - initStore(), - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - initDataPool([]byte("tx_hash0")), - &mock.AddressConverterMock{}, - &mock.AccountsStub{}, - &mock.RequestHandlerMock{}, - &mock.TxProcessorMock{ - ProcessTransactionCalled: func(transaction *transaction.Transaction, round uint64) error { - return nil - }, - }, - &mock.SCProcessorMock{}, - &mock.SmartContractResultsProcessorMock{}, - &mock.RewardTxProcessorMock{}, - ) - container, _ := preFactory.Create() - - return container + preFactory, _ := shard.NewPreProcessorsContainerFactory( + mock.NewMultiShardsCoordinatorMock(5), + initStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + initDataPool([]byte("tx_hash0")), + &mock.AddressConverterMock{}, + &mock.AccountsStub{}, + &mock.RequestHandlerMock{}, + &mock.TxProcessorMock{ + ProcessTransactionCalled: func(transaction *transaction.Transaction, round uint64) error { + return nil + }, + }, + &mock.SCProcessorMock{}, + &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + ) + container, _ := preFactory.Create() + + return container } func createInterimProcessorContainer() process.IntermediateProcessorContainer { - preFactory, _ := shard.NewIntermediateProcessorsContainerFactory( - mock.NewMultiShardsCoordinatorMock(5), - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - &mock.AddressConverterMock{}, - &mock.SpecialAddressHandlerMock{}, - initStore(), - initDataPool([]byte("test_hash1")), - ) - container, _ := preFactory.Create() - - return container + preFactory, _ := shard.NewIntermediateProcessorsContainerFactory( + mock.NewMultiShardsCoordinatorMock(5), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + &mock.AddressConverterMock{}, + &mock.SpecialAddressHandlerMock{}, + initStore(), + initDataPool([]byte("test_hash1")), + ) + container, _ := preFactory.Create() + + return container } func createPreProcessorContainerWithDataPool(dataPool dataRetriever.PoolsHolder) process.PreProcessorsContainer { - preFactory, _ := shard.NewPreProcessorsContainerFactory( - mock.NewMultiShardsCoordinatorMock(5), - initStore(), - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - dataPool, - &mock.AddressConverterMock{}, - &mock.AccountsStub{}, - &mock.RequestHandlerMock{}, - &mock.TxProcessorMock{ - ProcessTransactionCalled: func(transaction *transaction.Transaction, round uint64) error { - return nil - }, - }, - &mock.SCProcessorMock{}, - &mock.SmartContractResultsProcessorMock{}, - &mock.RewardTxProcessorMock{}, - ) - container, _ := preFactory.Create() - - return container + preFactory, _ := shard.NewPreProcessorsContainerFactory( + mock.NewMultiShardsCoordinatorMock(5), + initStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + dataPool, + &mock.AddressConverterMock{}, + &mock.AccountsStub{}, + &mock.RequestHandlerMock{}, + &mock.TxProcessorMock{ + ProcessTransactionCalled: func(transaction *transaction.Transaction, round uint64) error { + return nil + }, + }, + &mock.SCProcessorMock{}, + &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + ) + container, _ := preFactory.Create() + + return container } func 
TestTransactionCoordinator_CreateBlockStarted(t *testing.T) { - t.Parallel() - - tc, err := NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(5), - &mock.AccountsStub{}, - mock.NewPoolsHolderMock(), - &mock.RequestHandlerMock{}, - createPreProcessorContainer(), - &mock.InterimProcessorContainerMock{}, - ) - assert.Nil(t, err) - assert.NotNil(t, tc) - - tc.CreateBlockStarted() - - tc.mutPreProcessor.Lock() - for _, value := range tc.txPreProcessors { - txs := value.GetAllCurrentUsedTxs() - assert.Equal(t, 0, len(txs)) - } - tc.mutPreProcessor.Unlock() + t.Parallel() + + tc, err := NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(5), + &mock.AccountsStub{}, + mock.NewPoolsHolderMock(), + &mock.RequestHandlerMock{}, + createPreProcessorContainer(), + &mock.InterimProcessorContainerMock{}, + ) + assert.Nil(t, err) + assert.NotNil(t, tc) + + tc.CreateBlockStarted() + + tc.mutPreProcessor.Lock() + for _, value := range tc.txPreProcessors { + txs := value.GetAllCurrentUsedTxs() + assert.Equal(t, 0, len(txs)) + } + tc.mutPreProcessor.Unlock() } func TestTransactionCoordinator_CreateMarshalizedDataNilBody(t *testing.T) { - t.Parallel() - - tc, err := NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(5), - &mock.AccountsStub{}, - mock.NewPoolsHolderMock(), - &mock.RequestHandlerMock{}, - createPreProcessorContainer(), - &mock.InterimProcessorContainerMock{}, - ) - assert.Nil(t, err) - assert.NotNil(t, tc) - - mrBody, mrTxs := tc.CreateMarshalizedData(nil) - assert.Equal(t, 0, len(mrTxs)) - assert.Equal(t, 0, len(mrBody)) + t.Parallel() + + tc, err := NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(5), + &mock.AccountsStub{}, + mock.NewPoolsHolderMock(), + &mock.RequestHandlerMock{}, + createPreProcessorContainer(), + &mock.InterimProcessorContainerMock{}, + ) + assert.Nil(t, err) + assert.NotNil(t, tc) + + mrBody, mrTxs := tc.CreateMarshalizedData(nil) + assert.Equal(t, 0, len(mrTxs)) + assert.Equal(t, 0, len(mrBody)) } func createMiniBlockWithOneTx(sndId, dstId uint32, blockType block.Type, txHash []byte) *block.MiniBlock { - txHashes := make([][]byte, 0) - txHashes = append(txHashes, txHash) + txHashes := make([][]byte, 0) + txHashes = append(txHashes, txHash) - return &block.MiniBlock{Type: blockType, SenderShardID: sndId, ReceiverShardID: dstId, TxHashes: txHashes} + return &block.MiniBlock{Type: blockType, SenderShardID: sndId, ReceiverShardID: dstId, TxHashes: txHashes} } func createTestBody() block.Body { - body := block.Body{} + body := block.Body{} - body = append(body, createMiniBlockWithOneTx(0, 1, block.TxBlock, []byte("tx_hash1"))) - body = append(body, createMiniBlockWithOneTx(0, 1, block.TxBlock, []byte("tx_hash2"))) - body = append(body, createMiniBlockWithOneTx(0, 1, block.TxBlock, []byte("tx_hash3"))) - body = append(body, createMiniBlockWithOneTx(0, 1, block.SmartContractResultBlock, []byte("tx_hash4"))) - body = append(body, createMiniBlockWithOneTx(0, 1, block.SmartContractResultBlock, []byte("tx_hash5"))) - body = append(body, createMiniBlockWithOneTx(0, 1, block.SmartContractResultBlock, []byte("tx_hash6"))) + body = append(body, createMiniBlockWithOneTx(0, 1, block.TxBlock, []byte("tx_hash1"))) + body = append(body, createMiniBlockWithOneTx(0, 1, block.TxBlock, []byte("tx_hash2"))) + body = append(body, createMiniBlockWithOneTx(0, 1, block.TxBlock, []byte("tx_hash3"))) + body = append(body, createMiniBlockWithOneTx(0, 1, block.SmartContractResultBlock, []byte("tx_hash4"))) + body = append(body, 
createMiniBlockWithOneTx(0, 1, block.SmartContractResultBlock, []byte("tx_hash5"))) + body = append(body, createMiniBlockWithOneTx(0, 1, block.SmartContractResultBlock, []byte("tx_hash6"))) - return body + return body } func TestTransactionCoordinator_CreateMarshalizedData(t *testing.T) { - t.Parallel() - - tc, err := NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(5), - &mock.AccountsStub{}, - mock.NewPoolsHolderMock(), - &mock.RequestHandlerMock{}, - createPreProcessorContainer(), - &mock.InterimProcessorContainerMock{}, - ) - assert.Nil(t, err) - assert.NotNil(t, tc) - - mrBody, mrTxs := tc.CreateMarshalizedData(createTestBody()) - assert.Equal(t, 0, len(mrTxs)) - assert.Equal(t, 1, len(mrBody)) - assert.Equal(t, len(createTestBody()), len(mrBody[1])) + t.Parallel() + + tc, err := NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(5), + &mock.AccountsStub{}, + mock.NewPoolsHolderMock(), + &mock.RequestHandlerMock{}, + createPreProcessorContainer(), + &mock.InterimProcessorContainerMock{}, + ) + assert.Nil(t, err) + assert.NotNil(t, tc) + + mrBody, mrTxs := tc.CreateMarshalizedData(createTestBody()) + assert.Equal(t, 0, len(mrTxs)) + assert.Equal(t, 1, len(mrBody)) + assert.Equal(t, len(createTestBody()), len(mrBody[1])) } func TestTransactionCoordinator_CreateMarshalizedDataWithTxsAndScr(t *testing.T) { - t.Parallel() - - interimContainer := createInterimProcessorContainer() - tc, err := NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(5), - &mock.AccountsStub{}, - mock.NewPoolsHolderMock(), - &mock.RequestHandlerMock{}, - createPreProcessorContainer(), - interimContainer, - ) - assert.Nil(t, err) - assert.NotNil(t, tc) - - scrs := make([]data.TransactionHandler, 0) - body := block.Body{} - body = append(body, createMiniBlockWithOneTx(0, 1, block.TxBlock, []byte("tx_hash1"))) - - scr := &smartContractResult.SmartContractResult{SndAddr: []byte("snd"), RcvAddr: []byte("rcv"), Value: big.NewInt(99)} - scrHash, _ := core.CalculateHash(&mock.MarshalizerMock{}, &mock.HasherMock{}, scr) - scrs = append(scrs, scr) - body = append(body, createMiniBlockWithOneTx(0, 1, block.SmartContractResultBlock, scrHash)) - - scr = &smartContractResult.SmartContractResult{SndAddr: []byte("snd"), RcvAddr: []byte("rcv"), Value: big.NewInt(199)} - scrHash, _ = core.CalculateHash(&mock.MarshalizerMock{}, &mock.HasherMock{}, scr) - scrs = append(scrs, scr) - body = append(body, createMiniBlockWithOneTx(0, 1, block.SmartContractResultBlock, scrHash)) - - scr = &smartContractResult.SmartContractResult{SndAddr: []byte("snd"), RcvAddr: []byte("rcv"), Value: big.NewInt(299)} - scrHash, _ = core.CalculateHash(&mock.MarshalizerMock{}, &mock.HasherMock{}, scr) - scrs = append(scrs, scr) - body = append(body, createMiniBlockWithOneTx(0, 1, block.SmartContractResultBlock, scrHash)) - - scrInterimProc, _ := interimContainer.Get(block.SmartContractResultBlock) - _ = scrInterimProc.AddIntermediateTransactions(scrs) - - mrBody, mrTxs := tc.CreateMarshalizedData(body) - assert.Equal(t, 1, len(mrTxs)) - - marshalizer := &mock.MarshalizerMock{} - topic := factory.UnsignedTransactionTopic + "_0_1" - assert.Equal(t, len(scrs), len(mrTxs[topic])) - for i := 0; i < len(mrTxs[topic]); i++ { - unMrsScr := &smartContractResult.SmartContractResult{} - _ = marshalizer.Unmarshal(unMrsScr, mrTxs[topic][i]) - - assert.Equal(t, unMrsScr, scrs[i]) - } - - assert.Equal(t, 1, len(mrBody)) + t.Parallel() + + interimContainer := createInterimProcessorContainer() + tc, err := NewTransactionCoordinator( + 
mock.NewMultiShardsCoordinatorMock(5), + &mock.AccountsStub{}, + mock.NewPoolsHolderMock(), + &mock.RequestHandlerMock{}, + createPreProcessorContainer(), + interimContainer, + ) + assert.Nil(t, err) + assert.NotNil(t, tc) + + scrs := make([]data.TransactionHandler, 0) + body := block.Body{} + body = append(body, createMiniBlockWithOneTx(0, 1, block.TxBlock, []byte("tx_hash1"))) + + scr := &smartContractResult.SmartContractResult{SndAddr: []byte("snd"), RcvAddr: []byte("rcv"), Value: big.NewInt(99)} + scrHash, _ := core.CalculateHash(&mock.MarshalizerMock{}, &mock.HasherMock{}, scr) + scrs = append(scrs, scr) + body = append(body, createMiniBlockWithOneTx(0, 1, block.SmartContractResultBlock, scrHash)) + + scr = &smartContractResult.SmartContractResult{SndAddr: []byte("snd"), RcvAddr: []byte("rcv"), Value: big.NewInt(199)} + scrHash, _ = core.CalculateHash(&mock.MarshalizerMock{}, &mock.HasherMock{}, scr) + scrs = append(scrs, scr) + body = append(body, createMiniBlockWithOneTx(0, 1, block.SmartContractResultBlock, scrHash)) + + scr = &smartContractResult.SmartContractResult{SndAddr: []byte("snd"), RcvAddr: []byte("rcv"), Value: big.NewInt(299)} + scrHash, _ = core.CalculateHash(&mock.MarshalizerMock{}, &mock.HasherMock{}, scr) + scrs = append(scrs, scr) + body = append(body, createMiniBlockWithOneTx(0, 1, block.SmartContractResultBlock, scrHash)) + + scrInterimProc, _ := interimContainer.Get(block.SmartContractResultBlock) + _ = scrInterimProc.AddIntermediateTransactions(scrs) + + mrBody, mrTxs := tc.CreateMarshalizedData(body) + assert.Equal(t, 1, len(mrTxs)) + + marshalizer := &mock.MarshalizerMock{} + topic := factory.UnsignedTransactionTopic + "_0_1" + assert.Equal(t, len(scrs), len(mrTxs[topic])) + for i := 0; i < len(mrTxs[topic]); i++ { + unMrsScr := &smartContractResult.SmartContractResult{} + _ = marshalizer.Unmarshal(unMrsScr, mrTxs[topic][i]) + + assert.Equal(t, unMrsScr, scrs[i]) + } + + assert.Equal(t, 1, len(mrBody)) } func TestTransactionCoordinator_CreateMbsAndProcessCrossShardTransactionsDstMeNilHeader(t *testing.T) { - t.Parallel() - - tc, err := NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(5), - &mock.AccountsStub{}, - mock.NewPoolsHolderMock(), - &mock.RequestHandlerMock{}, - createPreProcessorContainer(), - &mock.InterimProcessorContainerMock{}, - ) - assert.Nil(t, err) - assert.NotNil(t, tc) - - maxTxRemaining := uint32(15000) - maxMbRemaining := uint32(15000) - haveTime := func() bool { - return true - } - mbs, txs, finalized := tc.CreateMbsAndProcessCrossShardTransactionsDstMe(nil, maxTxRemaining, maxMbRemaining, 10, haveTime) - - assert.Equal(t, 0, len(mbs)) - assert.Equal(t, uint32(0), txs) - assert.True(t, finalized) + t.Parallel() + + tc, err := NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(5), + &mock.AccountsStub{}, + mock.NewPoolsHolderMock(), + &mock.RequestHandlerMock{}, + createPreProcessorContainer(), + &mock.InterimProcessorContainerMock{}, + ) + assert.Nil(t, err) + assert.NotNil(t, tc) + + maxTxRemaining := uint32(15000) + maxMbRemaining := uint32(15000) + haveTime := func() bool { + return true + } + mbs, txs, finalized := tc.CreateMbsAndProcessCrossShardTransactionsDstMe(nil, maxTxRemaining, maxMbRemaining, 10, haveTime) + + assert.Equal(t, 0, len(mbs)) + assert.Equal(t, uint32(0), txs) + assert.True(t, finalized) } func createTestMetablock() *block.MetaBlock { - meta := &block.MetaBlock{} + meta := &block.MetaBlock{} - meta.ShardInfo = make([]block.ShardData, 0) + meta.ShardInfo = make([]block.ShardData, 0) - 
shardMbs := make([]block.ShardMiniBlockHeader, 0) - shardMbs = append(shardMbs, block.ShardMiniBlockHeader{Hash: []byte("mb0"), SenderShardId: 0, ReceiverShardId: 0, TxCount: 1}) - shardMbs = append(shardMbs, block.ShardMiniBlockHeader{Hash: []byte("mb1"), SenderShardId: 0, ReceiverShardId: 1, TxCount: 1}) - shardData := block.ShardData{ShardId: 0, HeaderHash: []byte("header0"), TxCount: 2, ShardMiniBlockHeaders: shardMbs} + shardMbs := make([]block.ShardMiniBlockHeader, 0) + shardMbs = append(shardMbs, block.ShardMiniBlockHeader{Hash: []byte("mb0"), SenderShardId: 0, ReceiverShardId: 0, TxCount: 1}) + shardMbs = append(shardMbs, block.ShardMiniBlockHeader{Hash: []byte("mb1"), SenderShardId: 0, ReceiverShardId: 1, TxCount: 1}) + shardData := block.ShardData{ShardId: 0, HeaderHash: []byte("header0"), TxCount: 2, ShardMiniBlockHeaders: shardMbs} - meta.ShardInfo = append(meta.ShardInfo, shardData) + meta.ShardInfo = append(meta.ShardInfo, shardData) - shardMbs = make([]block.ShardMiniBlockHeader, 0) - shardMbs = append(shardMbs, block.ShardMiniBlockHeader{Hash: []byte("mb2"), SenderShardId: 1, ReceiverShardId: 0, TxCount: 1}) - shardMbs = append(shardMbs, block.ShardMiniBlockHeader{Hash: []byte("mb3"), SenderShardId: 1, ReceiverShardId: 1, TxCount: 1}) - shardData = block.ShardData{ShardId: 1, HeaderHash: []byte("header0"), TxCount: 2, ShardMiniBlockHeaders: shardMbs} + shardMbs = make([]block.ShardMiniBlockHeader, 0) + shardMbs = append(shardMbs, block.ShardMiniBlockHeader{Hash: []byte("mb2"), SenderShardId: 1, ReceiverShardId: 0, TxCount: 1}) + shardMbs = append(shardMbs, block.ShardMiniBlockHeader{Hash: []byte("mb3"), SenderShardId: 1, ReceiverShardId: 1, TxCount: 1}) + shardData = block.ShardData{ShardId: 1, HeaderHash: []byte("header0"), TxCount: 2, ShardMiniBlockHeaders: shardMbs} - meta.ShardInfo = append(meta.ShardInfo, shardData) + meta.ShardInfo = append(meta.ShardInfo, shardData) - return meta + return meta } func TestTransactionCoordinator_CreateMbsAndProcessCrossShardTransactionsDstMeNoTime(t *testing.T) { - t.Parallel() - - tc, err := NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(5), - &mock.AccountsStub{}, - mock.NewPoolsHolderMock(), - &mock.RequestHandlerMock{}, - createPreProcessorContainer(), - &mock.InterimProcessorContainerMock{}, - ) - assert.Nil(t, err) - assert.NotNil(t, tc) - - maxTxRemaining := uint32(15000) - maxMbRemaining := uint32(15000) - haveTime := func() bool { - return false - } - mbs, txs, finalized := tc.CreateMbsAndProcessCrossShardTransactionsDstMe(createTestMetablock(), maxTxRemaining, maxMbRemaining, 10, haveTime) - - assert.Equal(t, 0, len(mbs)) - assert.Equal(t, uint32(0), txs) - assert.False(t, finalized) + t.Parallel() + + tc, err := NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(5), + &mock.AccountsStub{}, + mock.NewPoolsHolderMock(), + &mock.RequestHandlerMock{}, + createPreProcessorContainer(), + &mock.InterimProcessorContainerMock{}, + ) + assert.Nil(t, err) + assert.NotNil(t, tc) + + maxTxRemaining := uint32(15000) + maxMbRemaining := uint32(15000) + haveTime := func() bool { + return false + } + mbs, txs, finalized := tc.CreateMbsAndProcessCrossShardTransactionsDstMe(createTestMetablock(), maxTxRemaining, maxMbRemaining, 10, haveTime) + + assert.Equal(t, 0, len(mbs)) + assert.Equal(t, uint32(0), txs) + assert.False(t, finalized) } func TestTransactionCoordinator_CreateMbsAndProcessCrossShardTransactionsNothingInPool(t *testing.T) { - t.Parallel() - - tc, err := NewTransactionCoordinator( - 
mock.NewMultiShardsCoordinatorMock(5), - &mock.AccountsStub{}, - mock.NewPoolsHolderMock(), - &mock.RequestHandlerMock{}, - createPreProcessorContainer(), - &mock.InterimProcessorContainerMock{}, - ) - assert.Nil(t, err) - assert.NotNil(t, tc) - - maxTxRemaining := uint32(15000) - maxMbRemaining := uint32(15000) - haveTime := func() bool { - return true - } - mbs, txs, finalized := tc.CreateMbsAndProcessCrossShardTransactionsDstMe(createTestMetablock(), maxTxRemaining, maxMbRemaining, 10, haveTime) - - assert.Equal(t, 0, len(mbs)) - assert.Equal(t, uint32(0), txs) - assert.False(t, finalized) + t.Parallel() + + tc, err := NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(5), + &mock.AccountsStub{}, + mock.NewPoolsHolderMock(), + &mock.RequestHandlerMock{}, + createPreProcessorContainer(), + &mock.InterimProcessorContainerMock{}, + ) + assert.Nil(t, err) + assert.NotNil(t, tc) + + maxTxRemaining := uint32(15000) + maxMbRemaining := uint32(15000) + haveTime := func() bool { + return true + } + mbs, txs, finalized := tc.CreateMbsAndProcessCrossShardTransactionsDstMe(createTestMetablock(), maxTxRemaining, maxMbRemaining, 10, haveTime) + + assert.Equal(t, 0, len(mbs)) + assert.Equal(t, uint32(0), txs) + assert.False(t, finalized) } func TestTransactionCoordinator_CreateMbsAndProcessCrossShardTransactions(t *testing.T) { - t.Parallel() - - txHash := []byte("txHash") - tdp := initDataPool(txHash) - cacherCfg := storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache} - hdrPool, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - tdp.MiniBlocksCalled = func() storage.Cacher { - return hdrPool - } - - preFactory, _ := shard.NewPreProcessorsContainerFactory( - mock.NewMultiShardsCoordinatorMock(5), - initStore(), - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - tdp, - &mock.AddressConverterMock{}, - &mock.AccountsStub{}, - &mock.RequestHandlerMock{}, - &mock.TxProcessorMock{ - ProcessTransactionCalled: func(transaction *transaction.Transaction, round uint64) error { - return nil - }, - }, - &mock.SCProcessorMock{}, - &mock.SmartContractResultsProcessorMock{}, - &mock.RewardTxProcessorMock{}, - ) - container, _ := preFactory.Create() - - tc, err := NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(5), - &mock.AccountsStub{}, - tdp, - &mock.RequestHandlerMock{}, - container, - &mock.InterimProcessorContainerMock{}, - ) - assert.Nil(t, err) - assert.NotNil(t, tc) - - maxTxRemaining := uint32(15000) - maxMbRemaining := uint32(15000) - haveTime := func() bool { - return true - } - metaHdr := createTestMetablock() - - for i := 0; i < len(metaHdr.ShardInfo); i++ { - for j := 0; j < len(metaHdr.ShardInfo[i].ShardMiniBlockHeaders); j++ { - mbHdr := metaHdr.ShardInfo[i].ShardMiniBlockHeaders[j] - mb := block.MiniBlock{SenderShardID: mbHdr.SenderShardId, ReceiverShardID: mbHdr.ReceiverShardId, Type: block.TxBlock, TxHashes: [][]byte{txHash}} - tdp.MiniBlocks().Put(mbHdr.Hash, &mb) - } - } - - mbs, txs, finalized := tc.CreateMbsAndProcessCrossShardTransactionsDstMe(metaHdr, maxTxRemaining, maxMbRemaining, 10, haveTime) - - assert.Equal(t, 1, len(mbs)) - assert.Equal(t, uint32(1), txs) - assert.True(t, finalized) + t.Parallel() + + txHash := []byte("txHash") + tdp := initDataPool(txHash) + cacherCfg := storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache} + hdrPool, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) + tdp.MiniBlocksCalled = func() storage.Cacher { + return hdrPool + } + + preFactory, _ := 
shard.NewPreProcessorsContainerFactory( + mock.NewMultiShardsCoordinatorMock(5), + initStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + tdp, + &mock.AddressConverterMock{}, + &mock.AccountsStub{}, + &mock.RequestHandlerMock{}, + &mock.TxProcessorMock{ + ProcessTransactionCalled: func(transaction *transaction.Transaction, round uint64) error { + return nil + }, + }, + &mock.SCProcessorMock{}, + &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + ) + container, _ := preFactory.Create() + + tc, err := NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(5), + &mock.AccountsStub{}, + tdp, + &mock.RequestHandlerMock{}, + container, + &mock.InterimProcessorContainerMock{}, + ) + assert.Nil(t, err) + assert.NotNil(t, tc) + + maxTxRemaining := uint32(15000) + maxMbRemaining := uint32(15000) + haveTime := func() bool { + return true + } + metaHdr := createTestMetablock() + + for i := 0; i < len(metaHdr.ShardInfo); i++ { + for j := 0; j < len(metaHdr.ShardInfo[i].ShardMiniBlockHeaders); j++ { + mbHdr := metaHdr.ShardInfo[i].ShardMiniBlockHeaders[j] + mb := block.MiniBlock{SenderShardID: mbHdr.SenderShardId, ReceiverShardID: mbHdr.ReceiverShardId, Type: block.TxBlock, TxHashes: [][]byte{txHash}} + tdp.MiniBlocks().Put(mbHdr.Hash, &mb) + } + } + + mbs, txs, finalized := tc.CreateMbsAndProcessCrossShardTransactionsDstMe(metaHdr, maxTxRemaining, maxMbRemaining, 10, haveTime) + + assert.Equal(t, 1, len(mbs)) + assert.Equal(t, uint32(1), txs) + assert.True(t, finalized) } func TestTransactionCoordinator_CreateMbsAndProcessTransactionsFromMeNothingToProcess(t *testing.T) { - t.Parallel() - - shardedCacheMock := &mock.ShardedDataStub{ - RegisterHandlerCalled: func(i func(key []byte)) {}, - ShardDataStoreCalled: func(id string) (c storage.Cacher) { - return &mock.CacherStub{ - PeekCalled: func(key []byte) (value interface{}, ok bool) { - return nil, false - }, - KeysCalled: func() [][]byte { - return nil - }, - LenCalled: func() int { - return 0 - }, - } - }, - RemoveSetOfDataFromPoolCalled: func(keys [][]byte, id string) {}, - SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { - return nil, false - }, - AddDataCalled: func(key []byte, data interface{}, cacheId string) { - }, - } - - preFactory, _ := shard.NewPreProcessorsContainerFactory( - mock.NewMultiShardsCoordinatorMock(5), - initStore(), - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - &mock.PoolsHolderStub{ - TransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { - return shardedCacheMock - }, - UnsignedTransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { - return shardedCacheMock - }, - RewardTransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { - return shardedCacheMock - }, - }, - &mock.AddressConverterMock{}, - &mock.AccountsStub{}, - &mock.RequestHandlerMock{}, - &mock.TxProcessorMock{ - ProcessTransactionCalled: func(transaction *transaction.Transaction, round uint64) error { - return nil - }, - }, - &mock.SCProcessorMock{}, - &mock.SmartContractResultsProcessorMock{}, - &mock.RewardTxProcessorMock{}, - ) - container, _ := preFactory.Create() - - tc, err := NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(5), - &mock.AccountsStub{}, - mock.NewPoolsHolderMock(), - &mock.RequestHandlerMock{}, - container, - &mock.InterimProcessorContainerMock{}, - ) - assert.Nil(t, err) - assert.NotNil(t, tc) - - maxTxRemaining := uint32(15000) - maxMbRemaining := uint32(15000) - haveTime := func() bool { - return true - } - mbs := 
tc.CreateMbsAndProcessTransactionsFromMe(maxTxRemaining, maxMbRemaining, 10, haveTime) - - assert.Equal(t, 0, len(mbs)) + t.Parallel() + + shardedCacheMock := &mock.ShardedDataStub{ + RegisterHandlerCalled: func(i func(key []byte)) {}, + ShardDataStoreCalled: func(id string) (c storage.Cacher) { + return &mock.CacherStub{ + PeekCalled: func(key []byte) (value interface{}, ok bool) { + return nil, false + }, + KeysCalled: func() [][]byte { + return nil + }, + LenCalled: func() int { + return 0 + }, + } + }, + RemoveSetOfDataFromPoolCalled: func(keys [][]byte, id string) {}, + SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { + return nil, false + }, + AddDataCalled: func(key []byte, data interface{}, cacheId string) { + }, + } + + preFactory, _ := shard.NewPreProcessorsContainerFactory( + mock.NewMultiShardsCoordinatorMock(5), + initStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + &mock.PoolsHolderStub{ + TransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { + return shardedCacheMock + }, + UnsignedTransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { + return shardedCacheMock + }, + RewardTransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { + return shardedCacheMock + }, + }, + &mock.AddressConverterMock{}, + &mock.AccountsStub{}, + &mock.RequestHandlerMock{}, + &mock.TxProcessorMock{ + ProcessTransactionCalled: func(transaction *transaction.Transaction, round uint64) error { + return nil + }, + }, + &mock.SCProcessorMock{}, + &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + ) + container, _ := preFactory.Create() + + tc, err := NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(5), + &mock.AccountsStub{}, + mock.NewPoolsHolderMock(), + &mock.RequestHandlerMock{}, + container, + &mock.InterimProcessorContainerMock{}, + ) + assert.Nil(t, err) + assert.NotNil(t, tc) + + maxTxRemaining := uint32(15000) + maxMbRemaining := uint32(15000) + haveTime := func() bool { + return true + } + mbs := tc.CreateMbsAndProcessTransactionsFromMe(maxTxRemaining, maxMbRemaining, 10, haveTime) + + assert.Equal(t, 0, len(mbs)) } func TestTransactionCoordinator_CreateMbsAndProcessTransactionsFromMeNoTime(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - tc, err := NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(5), - &mock.AccountsStub{}, - tdp, - &mock.RequestHandlerMock{}, - createPreProcessorContainerWithDataPool(tdp), - &mock.InterimProcessorContainerMock{}, - ) - assert.Nil(t, err) - assert.NotNil(t, tc) - - maxTxRemaining := uint32(15000) - maxMbRemaining := uint32(15000) - haveTime := func() bool { - return false - } - mbs := tc.CreateMbsAndProcessTransactionsFromMe(maxTxRemaining, maxMbRemaining, 10, haveTime) - - assert.Equal(t, 0, len(mbs)) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + tc, err := NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(5), + &mock.AccountsStub{}, + tdp, + &mock.RequestHandlerMock{}, + createPreProcessorContainerWithDataPool(tdp), + &mock.InterimProcessorContainerMock{}, + ) + assert.Nil(t, err) + assert.NotNil(t, tc) + + maxTxRemaining := uint32(15000) + maxMbRemaining := uint32(15000) + haveTime := func() bool { + return false + } + mbs := tc.CreateMbsAndProcessTransactionsFromMe(maxTxRemaining, maxMbRemaining, 10, haveTime) + + assert.Equal(t, 0, len(mbs)) } func TestTransactionCoordinator_CreateMbsAndProcessTransactionsFromMeNoSpace(t *testing.T) { - t.Parallel() - tdp := 
initDataPool([]byte("tx_hash1")) - tc, err := NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(5), - &mock.AccountsStub{}, - tdp, - &mock.RequestHandlerMock{}, - createPreProcessorContainerWithDataPool(tdp), - &mock.InterimProcessorContainerMock{}, - ) - assert.Nil(t, err) - assert.NotNil(t, tc) - - maxTxRemaining := uint32(0) - maxMbRemaining := uint32(15000) - haveTime := func() bool { - return true - } - mbs := tc.CreateMbsAndProcessTransactionsFromMe(maxTxRemaining, maxMbRemaining, 10, haveTime) - - assert.Equal(t, 0, len(mbs)) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + tc, err := NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(5), + &mock.AccountsStub{}, + tdp, + &mock.RequestHandlerMock{}, + createPreProcessorContainerWithDataPool(tdp), + &mock.InterimProcessorContainerMock{}, + ) + assert.Nil(t, err) + assert.NotNil(t, tc) + + maxTxRemaining := uint32(0) + maxMbRemaining := uint32(15000) + haveTime := func() bool { + return true + } + mbs := tc.CreateMbsAndProcessTransactionsFromMe(maxTxRemaining, maxMbRemaining, 10, haveTime) + + assert.Equal(t, 0, len(mbs)) } func TestTransactionCoordinator_CreateMbsAndProcessTransactionsFromMe(t *testing.T) { - t.Parallel() - - txPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache}) - tdp := initDataPool([]byte("tx_hash1")) - tdp.TransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { - return txPool - } - nrShards := uint32(5) - - tc, err := NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(nrShards), - &mock.AccountsStub{}, - tdp, - &mock.RequestHandlerMock{}, - createPreProcessorContainerWithDataPool(tdp), - &mock.InterimProcessorContainerMock{}, - ) - assert.Nil(t, err) - assert.NotNil(t, tc) - - maxTxRemaining := uint32(15000) - maxMbRemaining := uint32(15000) - haveTime := func() bool { - return true - } - - marshalizer := &mock.MarshalizerMock{} - hasher := &mock.HasherMock{} - for shId := uint32(0); shId < nrShards; shId++ { - strCache := process.ShardCacherIdentifier(0, shId) - newTx := &transaction.Transaction{GasLimit: uint64(shId)} - - txHash, _ := core.CalculateHash(marshalizer, hasher, newTx) - txPool.AddData(txHash, newTx, strCache) - } - - // we have one tx per shard. - mbs := tc.CreateMbsAndProcessTransactionsFromMe(maxTxRemaining, maxMbRemaining, 10, haveTime) - - assert.Equal(t, int(nrShards), len(mbs)) + t.Parallel() + + txPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache}) + tdp := initDataPool([]byte("tx_hash1")) + tdp.TransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { + return txPool + } + nrShards := uint32(5) + + tc, err := NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(nrShards), + &mock.AccountsStub{}, + tdp, + &mock.RequestHandlerMock{}, + createPreProcessorContainerWithDataPool(tdp), + &mock.InterimProcessorContainerMock{}, + ) + assert.Nil(t, err) + assert.NotNil(t, tc) + + maxTxRemaining := uint32(15000) + maxMbRemaining := uint32(15000) + haveTime := func() bool { + return true + } + + marshalizer := &mock.MarshalizerMock{} + hasher := &mock.HasherMock{} + for shId := uint32(0); shId < nrShards; shId++ { + strCache := process.ShardCacherIdentifier(0, shId) + newTx := &transaction.Transaction{GasLimit: uint64(shId)} + + txHash, _ := core.CalculateHash(marshalizer, hasher, newTx) + txPool.AddData(txHash, newTx, strCache) + } + + // we have one tx per shard. 
+ mbs := tc.CreateMbsAndProcessTransactionsFromMe(maxTxRemaining, maxMbRemaining, 10, haveTime) + + assert.Equal(t, int(nrShards), len(mbs)) } func TestTransactionCoordinator_CreateMbsAndProcessTransactionsFromMeMultipleMiniblocks(t *testing.T) { - t.Parallel() - - nrShards := uint32(5) - txPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: nrShards}) - tdp := initDataPool([]byte("tx_hash1")) - tdp.TransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { - return txPool - } - - tc, err := NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(nrShards), - &mock.AccountsStub{}, - tdp, - &mock.RequestHandlerMock{}, - createPreProcessorContainerWithDataPool(tdp), - &mock.InterimProcessorContainerMock{}, - ) - assert.Nil(t, err) - assert.NotNil(t, tc) - - maxTxRemaining := uint32(15000) - maxMbRemaining := uint32(15000) - haveTime := func() bool { - return true - } - - marshalizer := &mock.MarshalizerMock{} - hasher := &mock.HasherMock{} - - sndShardId := uint32(0) - dstShardId := uint32(1) - strCache := process.ShardCacherIdentifier(sndShardId, dstShardId) - - numTxsToAdd := 5 - gasLimit := process.MaxGasLimitPerMiniBlock / uint64(numTxsToAdd) - - scAddress, _ := hex.DecodeString("000000000000000000005fed9c659422cd8429ce92f8973bba2a9fb51e0eb3a1") - addedTxs := make([]*transaction.Transaction, 0) - - allTxs := 100 - for i := 0; i < allTxs; i++ { - newTx := &transaction.Transaction{GasLimit: gasLimit, GasPrice: uint64(i), RcvAddr: scAddress} - - txHash, _ := core.CalculateHash(marshalizer, hasher, newTx) - txPool.AddData(txHash, newTx, strCache) - - addedTxs = append(addedTxs, newTx) - } - - // we have one tx per shard. - mbs := tc.CreateMbsAndProcessTransactionsFromMe(maxTxRemaining, maxMbRemaining, 10, haveTime) - - assert.Equal(t, allTxs/numTxsToAdd, len(mbs)) + t.Parallel() + + nrShards := uint32(5) + txPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: nrShards}) + tdp := initDataPool([]byte("tx_hash1")) + tdp.TransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { + return txPool + } + + tc, err := NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(nrShards), + &mock.AccountsStub{}, + tdp, + &mock.RequestHandlerMock{}, + createPreProcessorContainerWithDataPool(tdp), + &mock.InterimProcessorContainerMock{}, + ) + assert.Nil(t, err) + assert.NotNil(t, tc) + + maxTxRemaining := uint32(15000) + maxMbRemaining := uint32(15000) + haveTime := func() bool { + return true + } + + marshalizer := &mock.MarshalizerMock{} + hasher := &mock.HasherMock{} + + sndShardId := uint32(0) + dstShardId := uint32(1) + strCache := process.ShardCacherIdentifier(sndShardId, dstShardId) + + numTxsToAdd := 5 + gasLimit := process.MaxGasLimitPerMiniBlock / uint64(numTxsToAdd) + + scAddress, _ := hex.DecodeString("000000000000000000005fed9c659422cd8429ce92f8973bba2a9fb51e0eb3a1") + addedTxs := make([]*transaction.Transaction, 0) + + allTxs := 100 + for i := 0; i < allTxs; i++ { + newTx := &transaction.Transaction{GasLimit: gasLimit, GasPrice: uint64(i), RcvAddr: scAddress} + + txHash, _ := core.CalculateHash(marshalizer, hasher, newTx) + txPool.AddData(txHash, newTx, strCache) + + addedTxs = append(addedTxs, newTx) + } + + // we have one tx per shard. 
+ mbs := tc.CreateMbsAndProcessTransactionsFromMe(maxTxRemaining, maxMbRemaining, 10, haveTime) + + assert.Equal(t, allTxs/numTxsToAdd, len(mbs)) } func TestTransactionCoordinator_GetAllCurrentUsedTxs(t *testing.T) { - t.Parallel() - - txPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache}) - tdp := initDataPool([]byte("tx_hash1")) - tdp.TransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { - return txPool - } - nrShards := uint32(5) - - tc, err := NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(nrShards), - &mock.AccountsStub{}, - tdp, - &mock.RequestHandlerMock{}, - createPreProcessorContainerWithDataPool(tdp), - &mock.InterimProcessorContainerMock{}, - ) - assert.Nil(t, err) - assert.NotNil(t, tc) - - usedTxs := tc.GetAllCurrentUsedTxs(block.TxBlock) - assert.Equal(t, 0, len(usedTxs)) - - // create block to have some txs - maxTxRemaining := uint32(15000) - maxMbRemaining := uint32(15000) - haveTime := func() bool { - return true - } - - marshalizer := &mock.MarshalizerMock{} - hasher := &mock.HasherMock{} - for i := uint32(0); i < nrShards; i++ { - strCache := process.ShardCacherIdentifier(0, i) - newTx := &transaction.Transaction{GasLimit: uint64(i)} - - txHash, _ := core.CalculateHash(marshalizer, hasher, newTx) - txPool.AddData(txHash, newTx, strCache) - } - - mbs := tc.CreateMbsAndProcessTransactionsFromMe(maxTxRemaining, maxMbRemaining, 10, haveTime) - assert.Equal(t, int(nrShards), len(mbs)) - - usedTxs = tc.GetAllCurrentUsedTxs(block.TxBlock) - assert.Equal(t, 5, len(usedTxs)) + t.Parallel() + + txPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache}) + tdp := initDataPool([]byte("tx_hash1")) + tdp.TransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { + return txPool + } + nrShards := uint32(5) + + tc, err := NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(nrShards), + &mock.AccountsStub{}, + tdp, + &mock.RequestHandlerMock{}, + createPreProcessorContainerWithDataPool(tdp), + &mock.InterimProcessorContainerMock{}, + ) + assert.Nil(t, err) + assert.NotNil(t, tc) + + usedTxs := tc.GetAllCurrentUsedTxs(block.TxBlock) + assert.Equal(t, 0, len(usedTxs)) + + // create block to have some txs + maxTxRemaining := uint32(15000) + maxMbRemaining := uint32(15000) + haveTime := func() bool { + return true + } + + marshalizer := &mock.MarshalizerMock{} + hasher := &mock.HasherMock{} + for i := uint32(0); i < nrShards; i++ { + strCache := process.ShardCacherIdentifier(0, i) + newTx := &transaction.Transaction{GasLimit: uint64(i)} + + txHash, _ := core.CalculateHash(marshalizer, hasher, newTx) + txPool.AddData(txHash, newTx, strCache) + } + + mbs := tc.CreateMbsAndProcessTransactionsFromMe(maxTxRemaining, maxMbRemaining, 10, haveTime) + assert.Equal(t, int(nrShards), len(mbs)) + + usedTxs = tc.GetAllCurrentUsedTxs(block.TxBlock) + assert.Equal(t, 5, len(usedTxs)) } func TestTransactionCoordinator_RequestBlockTransactionsNilBody(t *testing.T) { - t.Parallel() - - tdp := initDataPool([]byte("tx_hash1")) - nrShards := uint32(5) - tc, err := NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(nrShards), - &mock.AccountsStub{}, - tdp, - &mock.RequestHandlerMock{}, - createPreProcessorContainerWithDataPool(tdp), - &mock.InterimProcessorContainerMock{}, - ) - assert.Nil(t, err) - assert.NotNil(t, tc) - - tc.RequestBlockTransactions(nil) - - tc.mutRequestedTxs.Lock() - for _, value := range tc.requestedTxs { - assert.Equal(t, 0, 
value) - } - tc.mutRequestedTxs.Unlock() + t.Parallel() + + tdp := initDataPool([]byte("tx_hash1")) + nrShards := uint32(5) + tc, err := NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(nrShards), + &mock.AccountsStub{}, + tdp, + &mock.RequestHandlerMock{}, + createPreProcessorContainerWithDataPool(tdp), + &mock.InterimProcessorContainerMock{}, + ) + assert.Nil(t, err) + assert.NotNil(t, tc) + + tc.RequestBlockTransactions(nil) + + tc.mutRequestedTxs.Lock() + for _, value := range tc.requestedTxs { + assert.Equal(t, 0, value) + } + tc.mutRequestedTxs.Unlock() } func TestTransactionCoordinator_RequestBlockTransactionsRequestOne(t *testing.T) { - t.Parallel() - - txHashInPool := []byte("tx_hash1") - tdp := initDataPool(txHashInPool) - nrShards := uint32(5) - tc, err := NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(nrShards), - &mock.AccountsStub{}, - tdp, - &mock.RequestHandlerMock{}, - createPreProcessorContainerWithDataPool(tdp), - &mock.InterimProcessorContainerMock{}, - ) - assert.Nil(t, err) - assert.NotNil(t, tc) - - body := block.Body{} - txHashToAsk := []byte("tx_hashnotinPool") - miniBlock := &block.MiniBlock{SenderShardID: 0, ReceiverShardID: 0, Type: block.TxBlock, TxHashes: [][]byte{txHashInPool, txHashToAsk}} - body = append(body, miniBlock) - tc.RequestBlockTransactions(body) - - tc.mutRequestedTxs.Lock() - assert.Equal(t, 1, tc.requestedTxs[block.TxBlock]) - tc.mutRequestedTxs.Unlock() - - haveTime := func() time.Duration { - return time.Second - } - err = tc.IsDataPreparedForProcessing(haveTime) - assert.Equal(t, process.ErrTimeIsOut, err) + t.Parallel() + + txHashInPool := []byte("tx_hash1") + tdp := initDataPool(txHashInPool) + nrShards := uint32(5) + tc, err := NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(nrShards), + &mock.AccountsStub{}, + tdp, + &mock.RequestHandlerMock{}, + createPreProcessorContainerWithDataPool(tdp), + &mock.InterimProcessorContainerMock{}, + ) + assert.Nil(t, err) + assert.NotNil(t, tc) + + body := block.Body{} + txHashToAsk := []byte("tx_hashnotinPool") + miniBlock := &block.MiniBlock{SenderShardID: 0, ReceiverShardID: 0, Type: block.TxBlock, TxHashes: [][]byte{txHashInPool, txHashToAsk}} + body = append(body, miniBlock) + tc.RequestBlockTransactions(body) + + tc.mutRequestedTxs.Lock() + assert.Equal(t, 1, tc.requestedTxs[block.TxBlock]) + tc.mutRequestedTxs.Unlock() + + haveTime := func() time.Duration { + return time.Second + } + err = tc.IsDataPreparedForProcessing(haveTime) + assert.Equal(t, process.ErrTimeIsOut, err) } func TestTransactionCoordinator_IsDataPreparedForProcessing(t *testing.T) { - t.Parallel() - - tdp := initDataPool([]byte("tx_hash1")) - nrShards := uint32(5) - tc, err := NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(nrShards), - &mock.AccountsStub{}, - tdp, - &mock.RequestHandlerMock{}, - createPreProcessorContainerWithDataPool(tdp), - &mock.InterimProcessorContainerMock{}, - ) - assert.Nil(t, err) - assert.NotNil(t, tc) - - haveTime := func() time.Duration { - return time.Second - } - err = tc.IsDataPreparedForProcessing(haveTime) - assert.Nil(t, err) + t.Parallel() + + tdp := initDataPool([]byte("tx_hash1")) + nrShards := uint32(5) + tc, err := NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(nrShards), + &mock.AccountsStub{}, + tdp, + &mock.RequestHandlerMock{}, + createPreProcessorContainerWithDataPool(tdp), + &mock.InterimProcessorContainerMock{}, + ) + assert.Nil(t, err) + assert.NotNil(t, tc) + + haveTime := func() time.Duration { + 
return time.Second + } + err = tc.IsDataPreparedForProcessing(haveTime) + assert.Nil(t, err) } func TestTransactionCoordinator_receivedMiniBlockRequestTxs(t *testing.T) { - t.Parallel() - - hasher := mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} - dataPool := mock.NewPoolsHolderMock() - - //we will have a miniblock that will have 3 tx hashes - //1 tx hash will be in cache - //2 will be requested on network - - txHash1 := []byte("tx hash 1 found in cache") - txHash2 := []byte("tx hash 2") - txHash3 := []byte("tx hash 3") - - senderShardId := uint32(1) - receiverShardId := uint32(2) - - miniBlock := block.MiniBlock{ - SenderShardID: senderShardId, - ReceiverShardID: receiverShardId, - TxHashes: [][]byte{txHash1, txHash2, txHash3}, - } - - //put this miniblock inside datapool - miniBlockHash := []byte("miniblock hash") - dataPool.MiniBlocks().Put(miniBlockHash, miniBlock) - - //put the existing tx inside datapool - cacheId := process.ShardCacherIdentifier(senderShardId, receiverShardId) - dataPool.Transactions().AddData(txHash1, &transaction.Transaction{}, cacheId) - - txHash1Requested := int32(0) - txHash2Requested := int32(0) - txHash3Requested := int32(0) - - requestHandler := &mock.RequestHandlerMock{} - requestHandler.RequestTransactionHandlerCalled = func(destShardID uint32, txHashes [][]byte) { - if containsHash(txHashes, txHash1) { - atomic.AddInt32(&txHash1Requested, 1) - } - if containsHash(txHashes, txHash2) { - atomic.AddInt32(&txHash2Requested, 1) - } - if containsHash(txHashes, txHash3) { - atomic.AddInt32(&txHash3Requested, 1) - } - } - accounts := initAccountsMock() - preFactory, _ := shard.NewPreProcessorsContainerFactory( - mock.NewMultiShardsCoordinatorMock(5), - initStore(), - marshalizer, - hasher, - dataPool, - &mock.AddressConverterMock{}, - accounts, - requestHandler, - &mock.TxProcessorMock{}, - &mock.SCProcessorMock{}, - &mock.SmartContractResultsProcessorMock{}, - &mock.RewardTxProcessorMock{}, - ) - container, _ := preFactory.Create() - - tc, err := NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(3), - accounts, - dataPool, - requestHandler, - container, - &mock.InterimProcessorContainerMock{}, - ) - assert.Nil(t, err) - tc.receivedMiniBlock(miniBlockHash) - - //we have to wait to be sure txHash1Requested is not incremented by a late call - time.Sleep(time.Second) - - assert.Equal(t, int32(0), atomic.LoadInt32(&txHash1Requested)) - assert.Equal(t, int32(1), atomic.LoadInt32(&txHash2Requested)) - assert.Equal(t, int32(1), atomic.LoadInt32(&txHash2Requested)) + t.Parallel() + + hasher := mock.HasherMock{} + marshalizer := &mock.MarshalizerMock{} + dataPool := mock.NewPoolsHolderMock() + + //we will have a miniblock that will have 3 tx hashes + //1 tx hash will be in cache + //2 will be requested on network + + txHash1 := []byte("tx hash 1 found in cache") + txHash2 := []byte("tx hash 2") + txHash3 := []byte("tx hash 3") + + senderShardId := uint32(1) + receiverShardId := uint32(2) + + miniBlock := block.MiniBlock{ + SenderShardID: senderShardId, + ReceiverShardID: receiverShardId, + TxHashes: [][]byte{txHash1, txHash2, txHash3}, + } + + //put this miniblock inside datapool + miniBlockHash := []byte("miniblock hash") + dataPool.MiniBlocks().Put(miniBlockHash, miniBlock) + + //put the existing tx inside datapool + cacheId := process.ShardCacherIdentifier(senderShardId, receiverShardId) + dataPool.Transactions().AddData(txHash1, &transaction.Transaction{}, cacheId) + + txHash1Requested := int32(0) + txHash2Requested := int32(0) + 
txHash3Requested := int32(0) + + requestHandler := &mock.RequestHandlerMock{} + requestHandler.RequestTransactionHandlerCalled = func(destShardID uint32, txHashes [][]byte) { + if containsHash(txHashes, txHash1) { + atomic.AddInt32(&txHash1Requested, 1) + } + if containsHash(txHashes, txHash2) { + atomic.AddInt32(&txHash2Requested, 1) + } + if containsHash(txHashes, txHash3) { + atomic.AddInt32(&txHash3Requested, 1) + } + } + accounts := initAccountsMock() + preFactory, _ := shard.NewPreProcessorsContainerFactory( + mock.NewMultiShardsCoordinatorMock(5), + initStore(), + marshalizer, + hasher, + dataPool, + &mock.AddressConverterMock{}, + accounts, + requestHandler, + &mock.TxProcessorMock{}, + &mock.SCProcessorMock{}, + &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + ) + container, _ := preFactory.Create() + + tc, err := NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(3), + accounts, + dataPool, + requestHandler, + container, + &mock.InterimProcessorContainerMock{}, + ) + assert.Nil(t, err) + tc.receivedMiniBlock(miniBlockHash) + + //we have to wait to be sure txHash1Requested is not incremented by a late call + time.Sleep(time.Second) + + assert.Equal(t, int32(0), atomic.LoadInt32(&txHash1Requested)) + assert.Equal(t, int32(1), atomic.LoadInt32(&txHash2Requested)) + assert.Equal(t, int32(1), atomic.LoadInt32(&txHash2Requested)) } func TestTransactionCoordinator_SaveBlockDataToStorage(t *testing.T) { - t.Parallel() - - txHash := []byte("tx_hash1") - tdp := initDataPool(txHash) - tc, err := NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(3), - initAccountsMock(), - tdp, - &mock.RequestHandlerMock{}, - createPreProcessorContainerWithDataPool(tdp), - &mock.InterimProcessorContainerMock{}, - ) - assert.Nil(t, err) - assert.NotNil(t, tc) - - err = tc.SaveBlockDataToStorage(nil) - assert.Nil(t, err) - - body := block.Body{} - miniBlock := &block.MiniBlock{SenderShardID: 0, ReceiverShardID: 0, Type: block.TxBlock, TxHashes: [][]byte{txHash}} - body = append(body, miniBlock) - - tc.RequestBlockTransactions(body) - - err = tc.SaveBlockDataToStorage(body) - assert.Nil(t, err) - - txHashToAsk := []byte("tx_hashnotinPool") - miniBlock = &block.MiniBlock{SenderShardID: 0, ReceiverShardID: 0, Type: block.TxBlock, TxHashes: [][]byte{txHashToAsk}} - body = append(body, miniBlock) - - err = tc.SaveBlockDataToStorage(body) - assert.Equal(t, process.ErrMissingTransaction, err) + t.Parallel() + + txHash := []byte("tx_hash1") + tdp := initDataPool(txHash) + tc, err := NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(3), + initAccountsMock(), + tdp, + &mock.RequestHandlerMock{}, + createPreProcessorContainerWithDataPool(tdp), + &mock.InterimProcessorContainerMock{}, + ) + assert.Nil(t, err) + assert.NotNil(t, tc) + + err = tc.SaveBlockDataToStorage(nil) + assert.Nil(t, err) + + body := block.Body{} + miniBlock := &block.MiniBlock{SenderShardID: 0, ReceiverShardID: 0, Type: block.TxBlock, TxHashes: [][]byte{txHash}} + body = append(body, miniBlock) + + tc.RequestBlockTransactions(body) + + err = tc.SaveBlockDataToStorage(body) + assert.Nil(t, err) + + txHashToAsk := []byte("tx_hashnotinPool") + miniBlock = &block.MiniBlock{SenderShardID: 0, ReceiverShardID: 0, Type: block.TxBlock, TxHashes: [][]byte{txHashToAsk}} + body = append(body, miniBlock) + + err = tc.SaveBlockDataToStorage(body) + assert.Equal(t, process.ErrMissingTransaction, err) } func TestTransactionCoordinator_RestoreBlockDataFromStorage(t *testing.T) { - t.Parallel() - - 
txHash := []byte("tx_hash1") - tdp := initDataPool(txHash) - tc, err := NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(3), - initAccountsMock(), - tdp, - &mock.RequestHandlerMock{}, - createPreProcessorContainerWithDataPool(tdp), - &mock.InterimProcessorContainerMock{}, - ) - assert.Nil(t, err) - assert.NotNil(t, tc) - - nrTxs, mbs, err := tc.RestoreBlockDataFromStorage(nil) - assert.Nil(t, err) - assert.Equal(t, 0, nrTxs) - assert.Equal(t, 0, len(mbs)) - - body := block.Body{} - miniBlock := &block.MiniBlock{SenderShardID: 1, ReceiverShardID: 0, Type: block.TxBlock, TxHashes: [][]byte{txHash}} - body = append(body, miniBlock) - - tc.RequestBlockTransactions(body) - err = tc.SaveBlockDataToStorage(body) - assert.Nil(t, err) - nrTxs, mbs, err = tc.RestoreBlockDataFromStorage(body) - assert.Equal(t, 1, nrTxs) - assert.Equal(t, 1, len(mbs)) - assert.Nil(t, err) - - txHashToAsk := []byte("tx_hashnotinPool") - miniBlock = &block.MiniBlock{SenderShardID: 0, ReceiverShardID: 0, Type: block.TxBlock, TxHashes: [][]byte{txHashToAsk}} - body = append(body, miniBlock) - - err = tc.SaveBlockDataToStorage(body) - assert.Equal(t, process.ErrMissingTransaction, err) - - nrTxs, mbs, err = tc.RestoreBlockDataFromStorage(body) - assert.Equal(t, 1, nrTxs) - assert.Equal(t, 1, len(mbs)) - assert.NotNil(t, err) + t.Parallel() + + txHash := []byte("tx_hash1") + tdp := initDataPool(txHash) + tc, err := NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(3), + initAccountsMock(), + tdp, + &mock.RequestHandlerMock{}, + createPreProcessorContainerWithDataPool(tdp), + &mock.InterimProcessorContainerMock{}, + ) + assert.Nil(t, err) + assert.NotNil(t, tc) + + nrTxs, mbs, err := tc.RestoreBlockDataFromStorage(nil) + assert.Nil(t, err) + assert.Equal(t, 0, nrTxs) + assert.Equal(t, 0, len(mbs)) + + body := block.Body{} + miniBlock := &block.MiniBlock{SenderShardID: 1, ReceiverShardID: 0, Type: block.TxBlock, TxHashes: [][]byte{txHash}} + body = append(body, miniBlock) + + tc.RequestBlockTransactions(body) + err = tc.SaveBlockDataToStorage(body) + assert.Nil(t, err) + nrTxs, mbs, err = tc.RestoreBlockDataFromStorage(body) + assert.Equal(t, 1, nrTxs) + assert.Equal(t, 1, len(mbs)) + assert.Nil(t, err) + + txHashToAsk := []byte("tx_hashnotinPool") + miniBlock = &block.MiniBlock{SenderShardID: 0, ReceiverShardID: 0, Type: block.TxBlock, TxHashes: [][]byte{txHashToAsk}} + body = append(body, miniBlock) + + err = tc.SaveBlockDataToStorage(body) + assert.Equal(t, process.ErrMissingTransaction, err) + + nrTxs, mbs, err = tc.RestoreBlockDataFromStorage(body) + assert.Equal(t, 1, nrTxs) + assert.Equal(t, 1, len(mbs)) + assert.NotNil(t, err) } func TestTransactionCoordinator_RemoveBlockDataFromPool(t *testing.T) { - t.Parallel() - - txHash := []byte("tx_hash1") - dataPool := initDataPool(txHash) - tc, err := NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(3), - initAccountsMock(), - dataPool, - &mock.RequestHandlerMock{}, - createPreProcessorContainerWithDataPool(dataPool), - &mock.InterimProcessorContainerMock{}, - ) - assert.Nil(t, err) - assert.NotNil(t, tc) - - err = tc.RemoveBlockDataFromPool(nil) - assert.Nil(t, err) - - body := block.Body{} - miniBlock := &block.MiniBlock{SenderShardID: 1, ReceiverShardID: 0, Type: block.TxBlock, TxHashes: [][]byte{txHash}} - body = append(body, miniBlock) - - tc.RequestBlockTransactions(body) - err = tc.RemoveBlockDataFromPool(body) - assert.Nil(t, err) + t.Parallel() + + txHash := []byte("tx_hash1") + dataPool := initDataPool(txHash) + tc, err := 
NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(3), + initAccountsMock(), + dataPool, + &mock.RequestHandlerMock{}, + createPreProcessorContainerWithDataPool(dataPool), + &mock.InterimProcessorContainerMock{}, + ) + assert.Nil(t, err) + assert.NotNil(t, tc) + + err = tc.RemoveBlockDataFromPool(nil) + assert.Nil(t, err) + + body := block.Body{} + miniBlock := &block.MiniBlock{SenderShardID: 1, ReceiverShardID: 0, Type: block.TxBlock, TxHashes: [][]byte{txHash}} + body = append(body, miniBlock) + + tc.RequestBlockTransactions(body) + err = tc.RemoveBlockDataFromPool(body) + assert.Nil(t, err) } func TestTransactionCoordinator_ProcessBlockTransactionProcessTxError(t *testing.T) { - t.Parallel() - - txHash := []byte("tx_hash1") - dataPool := initDataPool(txHash) - - accounts := initAccountsMock() - preFactory, _ := shard.NewPreProcessorsContainerFactory( - mock.NewMultiShardsCoordinatorMock(5), - initStore(), - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - dataPool, - &mock.AddressConverterMock{}, - accounts, - &mock.RequestHandlerMock{}, - &mock.TxProcessorMock{ - ProcessTransactionCalled: func(transaction *transaction.Transaction, round uint64) error { - return process.ErrHigherNonceInTransaction - }, - }, - &mock.SCProcessorMock{}, - &mock.SmartContractResultsProcessorMock{}, - &mock.RewardTxProcessorMock{}, - ) - container, _ := preFactory.Create() - - tc, err := NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(3), - initAccountsMock(), - dataPool, - &mock.RequestHandlerMock{}, - container, - &mock.InterimProcessorContainerMock{}, - ) - assert.Nil(t, err) - assert.NotNil(t, tc) - - haveTime := func() time.Duration { - return time.Second - } - err = tc.ProcessBlockTransaction(nil, 10, haveTime) - assert.Nil(t, err) - - body := block.Body{} - miniBlock := &block.MiniBlock{SenderShardID: 1, ReceiverShardID: 0, Type: block.TxBlock, TxHashes: [][]byte{txHash}} - body = append(body, miniBlock) - - tc.RequestBlockTransactions(body) - err = tc.ProcessBlockTransaction(body, 10, haveTime) - assert.Equal(t, process.ErrHigherNonceInTransaction, err) - - noTime := func() time.Duration { - return 0 - } - err = tc.ProcessBlockTransaction(body, 10, noTime) - assert.Equal(t, process.ErrHigherNonceInTransaction, err) - - txHashToAsk := []byte("tx_hashnotinPool") - miniBlock = &block.MiniBlock{SenderShardID: 0, ReceiverShardID: 0, Type: block.TxBlock, TxHashes: [][]byte{txHashToAsk}} - body = append(body, miniBlock) - err = tc.ProcessBlockTransaction(body, 10, haveTime) - assert.Equal(t, process.ErrHigherNonceInTransaction, err) + t.Parallel() + + txHash := []byte("tx_hash1") + dataPool := initDataPool(txHash) + + accounts := initAccountsMock() + preFactory, _ := shard.NewPreProcessorsContainerFactory( + mock.NewMultiShardsCoordinatorMock(5), + initStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + dataPool, + &mock.AddressConverterMock{}, + accounts, + &mock.RequestHandlerMock{}, + &mock.TxProcessorMock{ + ProcessTransactionCalled: func(transaction *transaction.Transaction, round uint64) error { + return process.ErrHigherNonceInTransaction + }, + }, + &mock.SCProcessorMock{}, + &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + ) + container, _ := preFactory.Create() + + tc, err := NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(3), + initAccountsMock(), + dataPool, + &mock.RequestHandlerMock{}, + container, + &mock.InterimProcessorContainerMock{}, + ) + assert.Nil(t, err) + assert.NotNil(t, tc) + + haveTime := func() 
time.Duration { + return time.Second + } + err = tc.ProcessBlockTransaction(nil, 10, haveTime) + assert.Nil(t, err) + + body := block.Body{} + miniBlock := &block.MiniBlock{SenderShardID: 1, ReceiverShardID: 0, Type: block.TxBlock, TxHashes: [][]byte{txHash}} + body = append(body, miniBlock) + + tc.RequestBlockTransactions(body) + err = tc.ProcessBlockTransaction(body, 10, haveTime) + assert.Equal(t, process.ErrHigherNonceInTransaction, err) + + noTime := func() time.Duration { + return 0 + } + err = tc.ProcessBlockTransaction(body, 10, noTime) + assert.Equal(t, process.ErrHigherNonceInTransaction, err) + + txHashToAsk := []byte("tx_hashnotinPool") + miniBlock = &block.MiniBlock{SenderShardID: 0, ReceiverShardID: 0, Type: block.TxBlock, TxHashes: [][]byte{txHashToAsk}} + body = append(body, miniBlock) + err = tc.ProcessBlockTransaction(body, 10, haveTime) + assert.Equal(t, process.ErrHigherNonceInTransaction, err) } func TestTransactionCoordinator_ProcessBlockTransaction(t *testing.T) { - t.Parallel() - - txHash := []byte("tx_hash1") - dataPool := initDataPool(txHash) - tc, err := NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(3), - initAccountsMock(), - dataPool, - &mock.RequestHandlerMock{}, - createPreProcessorContainerWithDataPool(dataPool), - &mock.InterimProcessorContainerMock{}, - ) - assert.Nil(t, err) - assert.NotNil(t, tc) - - haveTime := func() time.Duration { - return time.Second - } - err = tc.ProcessBlockTransaction(nil, 10, haveTime) - assert.Nil(t, err) - - body := block.Body{} - miniBlock := &block.MiniBlock{SenderShardID: 1, ReceiverShardID: 0, Type: block.TxBlock, TxHashes: [][]byte{txHash}} - body = append(body, miniBlock) - - tc.RequestBlockTransactions(body) - err = tc.ProcessBlockTransaction(body, 10, haveTime) - assert.Nil(t, err) - - noTime := func() time.Duration { - return -1 - } - err = tc.ProcessBlockTransaction(body, 10, noTime) - assert.Equal(t, process.ErrTimeIsOut, err) - - txHashToAsk := []byte("tx_hashnotinPool") - miniBlock = &block.MiniBlock{SenderShardID: 0, ReceiverShardID: 0, Type: block.TxBlock, TxHashes: [][]byte{txHashToAsk}} - body = append(body, miniBlock) - err = tc.ProcessBlockTransaction(body, 10, haveTime) - assert.Equal(t, process.ErrMissingTransaction, err) + t.Parallel() + + txHash := []byte("tx_hash1") + dataPool := initDataPool(txHash) + tc, err := NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(3), + initAccountsMock(), + dataPool, + &mock.RequestHandlerMock{}, + createPreProcessorContainerWithDataPool(dataPool), + &mock.InterimProcessorContainerMock{}, + ) + assert.Nil(t, err) + assert.NotNil(t, tc) + + haveTime := func() time.Duration { + return time.Second + } + err = tc.ProcessBlockTransaction(nil, 10, haveTime) + assert.Nil(t, err) + + body := block.Body{} + miniBlock := &block.MiniBlock{SenderShardID: 1, ReceiverShardID: 0, Type: block.TxBlock, TxHashes: [][]byte{txHash}} + body = append(body, miniBlock) + + tc.RequestBlockTransactions(body) + err = tc.ProcessBlockTransaction(body, 10, haveTime) + assert.Nil(t, err) + + noTime := func() time.Duration { + return -1 + } + err = tc.ProcessBlockTransaction(body, 10, noTime) + assert.Equal(t, process.ErrTimeIsOut, err) + + txHashToAsk := []byte("tx_hashnotinPool") + miniBlock = &block.MiniBlock{SenderShardID: 0, ReceiverShardID: 0, Type: block.TxBlock, TxHashes: [][]byte{txHashToAsk}} + body = append(body, miniBlock) + err = tc.ProcessBlockTransaction(body, 10, haveTime) + assert.Equal(t, process.ErrMissingTransaction, err) } func 
TestTransactionCoordinator_RequestMiniblocks(t *testing.T) { - t.Parallel() - - txHash := []byte("tx_hash1") - dataPool := initDataPool(txHash) - shardCoordinator := mock.NewMultiShardsCoordinatorMock(3) - nrCalled := 0 - mutex := sync.Mutex{} - - requestHandler := &mock.RequestHandlerMock{ - RequestMiniBlockHandlerCalled: func(destShardID uint32, miniblockHash []byte) { - mutex.Lock() - nrCalled++ - mutex.Unlock() - }, - } - - accounts := initAccountsMock() - preFactory, _ := shard.NewPreProcessorsContainerFactory( - mock.NewMultiShardsCoordinatorMock(5), - initStore(), - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - dataPool, - &mock.AddressConverterMock{}, - accounts, - requestHandler, - &mock.TxProcessorMock{ - ProcessTransactionCalled: func(transaction *transaction.Transaction, round uint64) error { - return nil - }, - }, - &mock.SCProcessorMock{}, - &mock.SmartContractResultsProcessorMock{}, - &mock.RewardTxProcessorMock{}, - ) - container, _ := preFactory.Create() - - tc, err := NewTransactionCoordinator( - shardCoordinator, - accounts, - dataPool, - requestHandler, - container, - &mock.InterimProcessorContainerMock{}, - ) - assert.Nil(t, err) - assert.NotNil(t, tc) - - tc.RequestMiniBlocks(nil) - time.Sleep(time.Second) - mutex.Lock() - assert.Equal(t, 0, nrCalled) - mutex.Unlock() - - header := createTestMetablock() - tc.RequestMiniBlocks(header) - - crossMbs := header.GetMiniBlockHeadersWithDst(shardCoordinator.SelfId()) - time.Sleep(time.Second) - mutex.Lock() - assert.Equal(t, len(crossMbs), nrCalled) - mutex.Unlock() + t.Parallel() + + txHash := []byte("tx_hash1") + dataPool := initDataPool(txHash) + shardCoordinator := mock.NewMultiShardsCoordinatorMock(3) + nrCalled := 0 + mutex := sync.Mutex{} + + requestHandler := &mock.RequestHandlerMock{ + RequestMiniBlockHandlerCalled: func(destShardID uint32, miniblockHash []byte) { + mutex.Lock() + nrCalled++ + mutex.Unlock() + }, + } + + accounts := initAccountsMock() + preFactory, _ := shard.NewPreProcessorsContainerFactory( + mock.NewMultiShardsCoordinatorMock(5), + initStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + dataPool, + &mock.AddressConverterMock{}, + accounts, + requestHandler, + &mock.TxProcessorMock{ + ProcessTransactionCalled: func(transaction *transaction.Transaction, round uint64) error { + return nil + }, + }, + &mock.SCProcessorMock{}, + &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + ) + container, _ := preFactory.Create() + + tc, err := NewTransactionCoordinator( + shardCoordinator, + accounts, + dataPool, + requestHandler, + container, + &mock.InterimProcessorContainerMock{}, + ) + assert.Nil(t, err) + assert.NotNil(t, tc) + + tc.RequestMiniBlocks(nil) + time.Sleep(time.Second) + mutex.Lock() + assert.Equal(t, 0, nrCalled) + mutex.Unlock() + + header := createTestMetablock() + tc.RequestMiniBlocks(header) + + crossMbs := header.GetMiniBlockHeadersWithDst(shardCoordinator.SelfId()) + time.Sleep(time.Second) + mutex.Lock() + assert.Equal(t, len(crossMbs), nrCalled) + mutex.Unlock() } func TestShardProcessor_ProcessMiniBlockCompleteWithOkTxsShouldExecuteThemAndNotRevertAccntState(t *testing.T) { - t.Parallel() - - hasher := mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} - dataPool := mock.NewPoolsHolderMock() - - //we will have a miniblock that will have 3 tx hashes - //all txs will be in datapool and none of them will return err when processed - //so, tx processor will return nil on processing tx - - txHash1 := []byte("tx hash 1") - txHash2 := []byte("tx 
hash 2") - txHash3 := []byte("tx hash 3") - - senderShardId := uint32(1) - receiverShardId := uint32(2) - - miniBlock := block.MiniBlock{ - SenderShardID: senderShardId, - ReceiverShardID: receiverShardId, - TxHashes: [][]byte{txHash1, txHash2, txHash3}, - } - - tx1Nonce := uint64(45) - tx2Nonce := uint64(46) - tx3Nonce := uint64(47) - - //put the existing tx inside datapool - cacheId := process.ShardCacherIdentifier(senderShardId, receiverShardId) - dataPool.Transactions().AddData(txHash1, &transaction.Transaction{ - Nonce: tx1Nonce, - Data: string(txHash1), - }, cacheId) - dataPool.Transactions().AddData(txHash2, &transaction.Transaction{ - Nonce: tx2Nonce, - Data: string(txHash2), - }, cacheId) - dataPool.Transactions().AddData(txHash3, &transaction.Transaction{ - Nonce: tx3Nonce, - Data: string(txHash3), - }, cacheId) - - tx1ExecutionResult := uint64(0) - tx2ExecutionResult := uint64(0) - tx3ExecutionResult := uint64(0) - - accounts := &mock.AccountsStub{ - RevertToSnapshotCalled: func(snapshot int) error { - assert.Fail(t, "revert should have not been called") - return nil - }, - JournalLenCalled: func() int { - return 0 - }, - } - preFactory, _ := shard.NewPreProcessorsContainerFactory( - mock.NewMultiShardsCoordinatorMock(5), - initStore(), - marshalizer, - hasher, - dataPool, - &mock.AddressConverterMock{}, - accounts, - &mock.RequestHandlerMock{}, - &mock.TxProcessorMock{ - ProcessTransactionCalled: func(transaction *transaction.Transaction, round uint64) error { - //execution, in this context, means moving the tx nonce to itx corresponding execution result variable - if transaction.Data == string(txHash1) { - tx1ExecutionResult = transaction.Nonce - } - if transaction.Data == string(txHash2) { - tx2ExecutionResult = transaction.Nonce - } - if transaction.Data == string(txHash3) { - tx3ExecutionResult = transaction.Nonce - } - - return nil - }, - }, - &mock.SCProcessorMock{}, - &mock.SmartContractResultsProcessorMock{}, - &mock.RewardTxProcessorMock{}, - ) - container, _ := preFactory.Create() - - tc, err := NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(3), - accounts, - dataPool, - &mock.RequestHandlerMock{}, - container, - &mock.InterimProcessorContainerMock{}, - ) - assert.Nil(t, err) - assert.NotNil(t, tc) - - haveTime := func() bool { - return true - } - preproc := tc.getPreProcessor(block.TxBlock) - err = tc.processCompleteMiniBlock(preproc, &miniBlock, 0, haveTime) - - assert.Nil(t, err) - assert.Equal(t, tx1Nonce, tx1ExecutionResult) - assert.Equal(t, tx2Nonce, tx2ExecutionResult) - assert.Equal(t, tx3Nonce, tx3ExecutionResult) + t.Parallel() + + hasher := mock.HasherMock{} + marshalizer := &mock.MarshalizerMock{} + dataPool := mock.NewPoolsHolderMock() + + //we will have a miniblock that will have 3 tx hashes + //all txs will be in datapool and none of them will return err when processed + //so, tx processor will return nil on processing tx + + txHash1 := []byte("tx hash 1") + txHash2 := []byte("tx hash 2") + txHash3 := []byte("tx hash 3") + + senderShardId := uint32(1) + receiverShardId := uint32(2) + + miniBlock := block.MiniBlock{ + SenderShardID: senderShardId, + ReceiverShardID: receiverShardId, + TxHashes: [][]byte{txHash1, txHash2, txHash3}, + } + + tx1Nonce := uint64(45) + tx2Nonce := uint64(46) + tx3Nonce := uint64(47) + + //put the existing tx inside datapool + cacheId := process.ShardCacherIdentifier(senderShardId, receiverShardId) + dataPool.Transactions().AddData(txHash1, &transaction.Transaction{ + Nonce: tx1Nonce, + Data: string(txHash1), 
+ }, cacheId)
+ dataPool.Transactions().AddData(txHash2, &transaction.Transaction{
+ Nonce: tx2Nonce,
+ Data: string(txHash2),
+ }, cacheId)
+ dataPool.Transactions().AddData(txHash3, &transaction.Transaction{
+ Nonce: tx3Nonce,
+ Data: string(txHash3),
+ }, cacheId)
+
+ tx1ExecutionResult := uint64(0)
+ tx2ExecutionResult := uint64(0)
+ tx3ExecutionResult := uint64(0)
+
+ accounts := &mock.AccountsStub{
+ RevertToSnapshotCalled: func(snapshot int) error {
+ assert.Fail(t, "revert should have not been called")
+ return nil
+ },
+ JournalLenCalled: func() int {
+ return 0
+ },
+ }
+ preFactory, _ := shard.NewPreProcessorsContainerFactory(
+ mock.NewMultiShardsCoordinatorMock(5),
+ initStore(),
+ marshalizer,
+ hasher,
+ dataPool,
+ &mock.AddressConverterMock{},
+ accounts,
+ &mock.RequestHandlerMock{},
+ &mock.TxProcessorMock{
+ ProcessTransactionCalled: func(transaction *transaction.Transaction, round uint64) error {
+ //execution, in this context, means moving the tx nonce to its corresponding execution result variable
+ if transaction.Data == string(txHash1) {
+ tx1ExecutionResult = transaction.Nonce
+ }
+ if transaction.Data == string(txHash2) {
+ tx2ExecutionResult = transaction.Nonce
+ }
+ if transaction.Data == string(txHash3) {
+ tx3ExecutionResult = transaction.Nonce
+ }
+
+ return nil
+ },
+ },
+ &mock.SCProcessorMock{},
+ &mock.SmartContractResultsProcessorMock{},
+ &mock.RewardTxProcessorMock{},
+ )
+ container, _ := preFactory.Create()
+
+ tc, err := NewTransactionCoordinator(
+ mock.NewMultiShardsCoordinatorMock(3),
+ accounts,
+ dataPool,
+ &mock.RequestHandlerMock{},
+ container,
+ &mock.InterimProcessorContainerMock{},
+ )
+ assert.Nil(t, err)
+ assert.NotNil(t, tc)
+
+ haveTime := func() bool {
+ return true
+ }
+ preproc := tc.getPreProcessor(block.TxBlock)
+ err = tc.processCompleteMiniBlock(preproc, &miniBlock, 0, haveTime)
+
+ assert.Nil(t, err)
+ assert.Equal(t, tx1Nonce, tx1ExecutionResult)
+ assert.Equal(t, tx2Nonce, tx2ExecutionResult)
+ assert.Equal(t, tx3Nonce, tx3ExecutionResult)
 }

 func TestShardProcessor_ProcessMiniBlockCompleteWithErrorWhileProcessShouldCallRevertAccntState(t *testing.T) {
- t.Parallel()
-
- hasher := mock.HasherMock{}
- marshalizer := &mock.MarshalizerMock{}
- dataPool := mock.NewPoolsHolderMock()
-
- //we will have a miniblock that will have 3 tx hashes
- //all txs will be in datapool and none of them will return err when processed
- //so, tx processor will return nil on processing tx
-
- txHash1 := []byte("tx hash 1")
- txHash2 := []byte("tx hash 2 - this will cause the tx processor to err")
- txHash3 := []byte("tx hash 3")
-
- senderShardId := uint32(1)
- receiverShardId := uint32(2)
-
- miniBlock := block.MiniBlock{
- SenderShardID: senderShardId,
- ReceiverShardID: receiverShardId,
- TxHashes: [][]byte{txHash1, txHash2, txHash3},
- }
-
- tx1Nonce := uint64(45)
- tx2Nonce := uint64(46)
- tx3Nonce := uint64(47)
-
- //put the existing tx inside datapool
- cacheId := process.ShardCacherIdentifier(senderShardId, receiverShardId)
- dataPool.Transactions().AddData(txHash1, &transaction.Transaction{
- Nonce: tx1Nonce,
- Data: string(txHash1),
- }, cacheId)
- dataPool.Transactions().AddData(txHash2, &transaction.Transaction{
- Nonce: tx2Nonce,
- Data: string(txHash2),
- }, cacheId)
- dataPool.Transactions().AddData(txHash3, &transaction.Transaction{
- Nonce: tx3Nonce,
- Data: string(txHash3),
- }, cacheId)
-
- currentJournalLen := 445
- revertAccntStateCalled := false
-
- accounts := &mock.AccountsStub{
- RevertToSnapshotCalled: func(snapshot 
int) error {
- if snapshot == currentJournalLen {
- revertAccntStateCalled = true
- }
-
- return nil
- },
- JournalLenCalled: func() int {
- return currentJournalLen
- },
- }
-
- preFactory, _ := shard.NewPreProcessorsContainerFactory(
- mock.NewMultiShardsCoordinatorMock(5),
- initStore(),
- marshalizer,
- hasher,
- dataPool,
- &mock.AddressConverterMock{},
- accounts,
- &mock.RequestHandlerMock{},
- &mock.TxProcessorMock{
- ProcessTransactionCalled: func(transaction *transaction.Transaction, round uint64) error {
- if transaction.Data == string(txHash2) {
- return process.ErrHigherNonceInTransaction
- }
- return nil
- },
- },
- &mock.SCProcessorMock{},
- &mock.SmartContractResultsProcessorMock{},
- &mock.RewardTxProcessorMock{},
- )
- container, _ := preFactory.Create()
-
- tc, err := NewTransactionCoordinator(
- mock.NewMultiShardsCoordinatorMock(3),
- accounts,
- dataPool,
- &mock.RequestHandlerMock{},
- container,
- &mock.InterimProcessorContainerMock{},
- )
- assert.Nil(t, err)
- assert.NotNil(t, tc)
-
- haveTime := func() bool {
- return true
- }
- preproc := tc.getPreProcessor(block.TxBlock)
- err = tc.processCompleteMiniBlock(preproc, &miniBlock, 0, haveTime)
-
- assert.Equal(t, process.ErrHigherNonceInTransaction, err)
- assert.True(t, revertAccntStateCalled)
+ t.Parallel()
+
+ hasher := mock.HasherMock{}
+ marshalizer := &mock.MarshalizerMock{}
+ dataPool := mock.NewPoolsHolderMock()
+
+ //we will have a miniblock that will have 3 tx hashes
+ //all txs will be in datapool, but processing the second tx will return an error
+ //so, the accounts state should be reverted to the snapshot taken before processing
+
+ txHash1 := []byte("tx hash 1")
+ txHash2 := []byte("tx hash 2 - this will cause the tx processor to err")
+ txHash3 := []byte("tx hash 3")
+
+ senderShardId := uint32(1)
+ receiverShardId := uint32(2)
+
+ miniBlock := block.MiniBlock{
+ SenderShardID: senderShardId,
+ ReceiverShardID: receiverShardId,
+ TxHashes: [][]byte{txHash1, txHash2, txHash3},
+ }
+
+ tx1Nonce := uint64(45)
+ tx2Nonce := uint64(46)
+ tx3Nonce := uint64(47)
+
+ //put the existing tx inside datapool
+ cacheId := process.ShardCacherIdentifier(senderShardId, receiverShardId)
+ dataPool.Transactions().AddData(txHash1, &transaction.Transaction{
+ Nonce: tx1Nonce,
+ Data: string(txHash1),
+ }, cacheId)
+ dataPool.Transactions().AddData(txHash2, &transaction.Transaction{
+ Nonce: tx2Nonce,
+ Data: string(txHash2),
+ }, cacheId)
+ dataPool.Transactions().AddData(txHash3, &transaction.Transaction{
+ Nonce: tx3Nonce,
+ Data: string(txHash3),
+ }, cacheId)
+
+ currentJournalLen := 445
+ revertAccntStateCalled := false
+
+ accounts := &mock.AccountsStub{
+ RevertToSnapshotCalled: func(snapshot int) error {
+ if snapshot == currentJournalLen {
+ revertAccntStateCalled = true
+ }
+
+ return nil
+ },
+ JournalLenCalled: func() int {
+ return currentJournalLen
+ },
+ }
+
+ preFactory, _ := shard.NewPreProcessorsContainerFactory(
+ mock.NewMultiShardsCoordinatorMock(5),
+ initStore(),
+ marshalizer,
+ hasher,
+ dataPool,
+ &mock.AddressConverterMock{},
+ accounts,
+ &mock.RequestHandlerMock{},
+ &mock.TxProcessorMock{
+ ProcessTransactionCalled: func(transaction *transaction.Transaction, round uint64) error {
+ if transaction.Data == string(txHash2) {
+ return process.ErrHigherNonceInTransaction
+ }
+ return nil
+ },
+ },
+ &mock.SCProcessorMock{},
+ &mock.SmartContractResultsProcessorMock{},
+ &mock.RewardTxProcessorMock{},
+ )
+ container, _ := preFactory.Create()
+
+ tc, err := NewTransactionCoordinator(
+ mock.NewMultiShardsCoordinatorMock(3),
+ 
accounts, + dataPool, + &mock.RequestHandlerMock{}, + container, + &mock.InterimProcessorContainerMock{}, + ) + assert.Nil(t, err) + assert.NotNil(t, tc) + + haveTime := func() bool { + return true + } + preproc := tc.getPreProcessor(block.TxBlock) + err = tc.processCompleteMiniBlock(preproc, &miniBlock, 0, haveTime) + + assert.Equal(t, process.ErrHigherNonceInTransaction, err) + assert.True(t, revertAccntStateCalled) } func TestTransactionCoordinator_VerifyCreatedBlockTransactionsNilOrMiss(t *testing.T) { - t.Parallel() - - txHash := []byte("txHash") - tdp := initDataPool(txHash) - shardCoordinator := mock.NewMultiShardsCoordinatorMock(5) - adrConv := &mock.AddressConverterMock{} - preFactory, _ := shard.NewIntermediateProcessorsContainerFactory( - shardCoordinator, - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - adrConv, - &mock.SpecialAddressHandlerMock{}, - &mock.ChainStorerMock{}, - tdp, - ) - container, _ := preFactory.Create() - - tc, err := NewTransactionCoordinator( - shardCoordinator, - &mock.AccountsStub{}, - tdp, - &mock.RequestHandlerMock{}, - &mock.PreProcessorContainerMock{}, - container, - ) - assert.Nil(t, err) - assert.NotNil(t, tc) - - err = tc.VerifyCreatedBlockTransactions(nil) - assert.Nil(t, err) - - body := block.Body{&block.MiniBlock{Type: block.TxBlock}} - err = tc.VerifyCreatedBlockTransactions(body) - assert.Nil(t, err) - - body = block.Body{&block.MiniBlock{Type: block.SmartContractResultBlock, ReceiverShardID: shardCoordinator.SelfId()}} - err = tc.VerifyCreatedBlockTransactions(body) - assert.Nil(t, err) - - body = block.Body{&block.MiniBlock{Type: block.SmartContractResultBlock, ReceiverShardID: shardCoordinator.SelfId() + 1}} - err = tc.VerifyCreatedBlockTransactions(body) - assert.Equal(t, process.ErrNilMiniBlocks, err) + t.Parallel() + + txHash := []byte("txHash") + tdp := initDataPool(txHash) + shardCoordinator := mock.NewMultiShardsCoordinatorMock(5) + adrConv := &mock.AddressConverterMock{} + preFactory, _ := shard.NewIntermediateProcessorsContainerFactory( + shardCoordinator, + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + adrConv, + &mock.SpecialAddressHandlerMock{}, + &mock.ChainStorerMock{}, + tdp, + ) + container, _ := preFactory.Create() + + tc, err := NewTransactionCoordinator( + shardCoordinator, + &mock.AccountsStub{}, + tdp, + &mock.RequestHandlerMock{}, + &mock.PreProcessorContainerMock{}, + container, + ) + assert.Nil(t, err) + assert.NotNil(t, tc) + + err = tc.VerifyCreatedBlockTransactions(nil) + assert.Nil(t, err) + + body := block.Body{&block.MiniBlock{Type: block.TxBlock}} + err = tc.VerifyCreatedBlockTransactions(body) + assert.Nil(t, err) + + body = block.Body{&block.MiniBlock{Type: block.SmartContractResultBlock, ReceiverShardID: shardCoordinator.SelfId()}} + err = tc.VerifyCreatedBlockTransactions(body) + assert.Nil(t, err) + + body = block.Body{&block.MiniBlock{Type: block.SmartContractResultBlock, ReceiverShardID: shardCoordinator.SelfId() + 1}} + err = tc.VerifyCreatedBlockTransactions(body) + assert.Equal(t, process.ErrNilMiniBlocks, err) } func TestTransactionCoordinator_VerifyCreatedBlockTransactionsOk(t *testing.T) { - t.Parallel() - - txHash := []byte("txHash") - tdp := initDataPool(txHash) - shardCoordinator := mock.NewMultiShardsCoordinatorMock(5) - adrConv := &mock.AddressConverterMock{} - preFactory, _ := shard.NewIntermediateProcessorsContainerFactory( - shardCoordinator, - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - adrConv, - &mock.SpecialAddressHandlerMock{}, - &mock.ChainStorerMock{}, - tdp, - ) - 
container, _ := preFactory.Create() - - tc, err := NewTransactionCoordinator( - shardCoordinator, - &mock.AccountsStub{}, - tdp, - &mock.RequestHandlerMock{}, - &mock.PreProcessorContainerMock{}, - container, - ) - assert.Nil(t, err) - assert.NotNil(t, tc) - - sndAddr := []byte("0") - rcvAddr := []byte("1") - scr := &smartContractResult.SmartContractResult{Nonce: 10, SndAddr: sndAddr, RcvAddr: rcvAddr} - scrHash, _ := core.CalculateHash(&mock.MarshalizerMock{}, &mock.HasherMock{}, scr) - - shardCoordinator.ComputeIdCalled = func(address state.AddressContainer) uint32 { - if bytes.Equal(address.Bytes(), sndAddr) { - return shardCoordinator.SelfId() - } - if bytes.Equal(address.Bytes(), rcvAddr) { - return shardCoordinator.SelfId() + 1 - } - return shardCoordinator.SelfId() + 2 - } - - tdp.UnsignedTransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{ - RegisterHandlerCalled: func(i func(key []byte)) {}, - ShardDataStoreCalled: func(id string) (c storage.Cacher) { - return &mock.CacherStub{ - PeekCalled: func(key []byte) (value interface{}, ok bool) { - if reflect.DeepEqual(key, scrHash) { - return scr, true - } - return nil, false - }, - KeysCalled: func() [][]byte { - return [][]byte{[]byte("key1"), []byte("key2")} - }, - LenCalled: func() int { - return 0 - }, - } - }, - RemoveSetOfDataFromPoolCalled: func(keys [][]byte, id string) {}, - SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { - if reflect.DeepEqual(key, scrHash) { - return scr, true - } - return nil, false - }, - AddDataCalled: func(key []byte, data interface{}, cacheId string) { - }, - } - } - - interProc, _ := container.Get(block.SmartContractResultBlock) - tx, _ := tdp.UnsignedTransactions().SearchFirstData(scrHash) - txs := make([]data.TransactionHandler, 0) - txs = append(txs, tx.(data.TransactionHandler)) - err = interProc.AddIntermediateTransactions(txs) - assert.Nil(t, err) - - body := block.Body{&block.MiniBlock{Type: block.SmartContractResultBlock, ReceiverShardID: shardCoordinator.SelfId() + 1, TxHashes: [][]byte{scrHash}}} - err = tc.VerifyCreatedBlockTransactions(body) - assert.Nil(t, err) + t.Parallel() + + txHash := []byte("txHash") + tdp := initDataPool(txHash) + shardCoordinator := mock.NewMultiShardsCoordinatorMock(5) + adrConv := &mock.AddressConverterMock{} + preFactory, _ := shard.NewIntermediateProcessorsContainerFactory( + shardCoordinator, + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + adrConv, + &mock.SpecialAddressHandlerMock{}, + &mock.ChainStorerMock{}, + tdp, + ) + container, _ := preFactory.Create() + + tc, err := NewTransactionCoordinator( + shardCoordinator, + &mock.AccountsStub{}, + tdp, + &mock.RequestHandlerMock{}, + &mock.PreProcessorContainerMock{}, + container, + ) + assert.Nil(t, err) + assert.NotNil(t, tc) + + sndAddr := []byte("0") + rcvAddr := []byte("1") + scr := &smartContractResult.SmartContractResult{Nonce: 10, SndAddr: sndAddr, RcvAddr: rcvAddr} + scrHash, _ := core.CalculateHash(&mock.MarshalizerMock{}, &mock.HasherMock{}, scr) + + shardCoordinator.ComputeIdCalled = func(address state.AddressContainer) uint32 { + if bytes.Equal(address.Bytes(), sndAddr) { + return shardCoordinator.SelfId() + } + if bytes.Equal(address.Bytes(), rcvAddr) { + return shardCoordinator.SelfId() + 1 + } + return shardCoordinator.SelfId() + 2 + } + + tdp.UnsignedTransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{ + RegisterHandlerCalled: func(i func(key []byte)) {}, + 
ShardDataStoreCalled: func(id string) (c storage.Cacher) { + return &mock.CacherStub{ + PeekCalled: func(key []byte) (value interface{}, ok bool) { + if reflect.DeepEqual(key, scrHash) { + return scr, true + } + return nil, false + }, + KeysCalled: func() [][]byte { + return [][]byte{[]byte("key1"), []byte("key2")} + }, + LenCalled: func() int { + return 0 + }, + } + }, + RemoveSetOfDataFromPoolCalled: func(keys [][]byte, id string) {}, + SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { + if reflect.DeepEqual(key, scrHash) { + return scr, true + } + return nil, false + }, + AddDataCalled: func(key []byte, data interface{}, cacheId string) { + }, + } + } + + interProc, _ := container.Get(block.SmartContractResultBlock) + tx, _ := tdp.UnsignedTransactions().SearchFirstData(scrHash) + txs := make([]data.TransactionHandler, 0) + txs = append(txs, tx.(data.TransactionHandler)) + err = interProc.AddIntermediateTransactions(txs) + assert.Nil(t, err) + + body := block.Body{&block.MiniBlock{Type: block.SmartContractResultBlock, ReceiverShardID: shardCoordinator.SelfId() + 1, TxHashes: [][]byte{scrHash}}} + err = tc.VerifyCreatedBlockTransactions(body) + assert.Nil(t, err) } func TestTransactionCoordinator_SaveBlockDataToStorageSaveIntermediateTxsErrors(t *testing.T) { - t.Parallel() - - txHash := []byte("tx_hash1") - tdp := initDataPool(txHash) - retError := errors.New("save error") - tc, err := NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(3), - initAccountsMock(), - tdp, - &mock.RequestHandlerMock{}, - createPreProcessorContainerWithDataPool(tdp), - &mock.InterimProcessorContainerMock{ - KeysCalled: func() []block.Type { - return []block.Type{block.SmartContractResultBlock} - }, - GetCalled: func(key block.Type) (handler process.IntermediateTransactionHandler, e error) { - if key == block.SmartContractResultBlock { - return &mock.IntermediateTransactionHandlerMock{ - SaveCurrentIntermediateTxToStorageCalled: func() error { - return retError - }, - }, nil - } - return nil, errors.New("invalid handler type") - }, - }, - ) - assert.Nil(t, err) - assert.NotNil(t, tc) - - body := block.Body{} - miniBlock := &block.MiniBlock{SenderShardID: 0, ReceiverShardID: 0, Type: block.TxBlock, TxHashes: [][]byte{txHash}} - body = append(body, miniBlock) - - tc.RequestBlockTransactions(body) - - err = tc.SaveBlockDataToStorage(body) - assert.Equal(t, retError, err) + t.Parallel() + + txHash := []byte("tx_hash1") + tdp := initDataPool(txHash) + retError := errors.New("save error") + tc, err := NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(3), + initAccountsMock(), + tdp, + &mock.RequestHandlerMock{}, + createPreProcessorContainerWithDataPool(tdp), + &mock.InterimProcessorContainerMock{ + KeysCalled: func() []block.Type { + return []block.Type{block.SmartContractResultBlock} + }, + GetCalled: func(key block.Type) (handler process.IntermediateTransactionHandler, e error) { + if key == block.SmartContractResultBlock { + return &mock.IntermediateTransactionHandlerMock{ + SaveCurrentIntermediateTxToStorageCalled: func() error { + return retError + }, + }, nil + } + return nil, errors.New("invalid handler type") + }, + }, + ) + assert.Nil(t, err) + assert.NotNil(t, tc) + + body := block.Body{} + miniBlock := &block.MiniBlock{SenderShardID: 0, ReceiverShardID: 0, Type: block.TxBlock, TxHashes: [][]byte{txHash}} + body = append(body, miniBlock) + + tc.RequestBlockTransactions(body) + + err = tc.SaveBlockDataToStorage(body) + assert.Equal(t, retError, err) } func 
TestTransactionCoordinator_SaveBlockDataToStorageCallsSaveIntermediate(t *testing.T) { - t.Parallel() - - txHash := []byte("tx_hash1") - tdp := initDataPool(txHash) - intermediateTxWereSaved := false - tc, err := NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(3), - initAccountsMock(), - tdp, - &mock.RequestHandlerMock{}, - createPreProcessorContainerWithDataPool(tdp), - &mock.InterimProcessorContainerMock{ - KeysCalled: func() []block.Type { - return []block.Type{block.SmartContractResultBlock} - }, - GetCalled: func(key block.Type) (handler process.IntermediateTransactionHandler, e error) { - if key == block.SmartContractResultBlock { - return &mock.IntermediateTransactionHandlerMock{ - SaveCurrentIntermediateTxToStorageCalled: func() error { - intermediateTxWereSaved = true - return nil - }, - }, nil - } - return nil, errors.New("invalid handler type") - }, - }, - ) - assert.Nil(t, err) - assert.NotNil(t, tc) - - body := block.Body{} - miniBlock := &block.MiniBlock{SenderShardID: 0, ReceiverShardID: 0, Type: block.TxBlock, TxHashes: [][]byte{txHash}} - body = append(body, miniBlock) - - tc.RequestBlockTransactions(body) - - err = tc.SaveBlockDataToStorage(body) - assert.Nil(t, err) - - assert.True(t, intermediateTxWereSaved) + t.Parallel() + + txHash := []byte("tx_hash1") + tdp := initDataPool(txHash) + intermediateTxWereSaved := false + tc, err := NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(3), + initAccountsMock(), + tdp, + &mock.RequestHandlerMock{}, + createPreProcessorContainerWithDataPool(tdp), + &mock.InterimProcessorContainerMock{ + KeysCalled: func() []block.Type { + return []block.Type{block.SmartContractResultBlock} + }, + GetCalled: func(key block.Type) (handler process.IntermediateTransactionHandler, e error) { + if key == block.SmartContractResultBlock { + return &mock.IntermediateTransactionHandlerMock{ + SaveCurrentIntermediateTxToStorageCalled: func() error { + intermediateTxWereSaved = true + return nil + }, + }, nil + } + return nil, errors.New("invalid handler type") + }, + }, + ) + assert.Nil(t, err) + assert.NotNil(t, tc) + + body := block.Body{} + miniBlock := &block.MiniBlock{SenderShardID: 0, ReceiverShardID: 0, Type: block.TxBlock, TxHashes: [][]byte{txHash}} + body = append(body, miniBlock) + + tc.RequestBlockTransactions(body) + + err = tc.SaveBlockDataToStorage(body) + assert.Nil(t, err) + + assert.True(t, intermediateTxWereSaved) } diff --git a/process/errors.go b/process/errors.go index 34c6d052b4c..9f32c03795a 100644 --- a/process/errors.go +++ b/process/errors.go @@ -1,7 +1,7 @@ package process import ( - "errors" + "errors" ) // ErrNilMessage signals that a nil message has been received diff --git a/process/factory/shard/interceptorsContainerFactory.go b/process/factory/shard/interceptorsContainerFactory.go index d78da24f9f3..af20dad71e3 100644 --- a/process/factory/shard/interceptorsContainerFactory.go +++ b/process/factory/shard/interceptorsContainerFactory.go @@ -1,494 +1,494 @@ package shard import ( - "github.com/ElrondNetwork/elrond-go/crypto" - "github.com/ElrondNetwork/elrond-go/data/state" - "github.com/ElrondNetwork/elrond-go/dataRetriever" - "github.com/ElrondNetwork/elrond-go/hashing" - "github.com/ElrondNetwork/elrond-go/marshal" - "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/process/block/interceptors" - "github.com/ElrondNetwork/elrond-go/process/dataValidators" - "github.com/ElrondNetwork/elrond-go/process/factory" - 
"github.com/ElrondNetwork/elrond-go/process/factory/containers" - "github.com/ElrondNetwork/elrond-go/process/rewardTransaction" - "github.com/ElrondNetwork/elrond-go/process/transaction" - "github.com/ElrondNetwork/elrond-go/process/unsigned" - "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/crypto" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/block/interceptors" + "github.com/ElrondNetwork/elrond-go/process/dataValidators" + "github.com/ElrondNetwork/elrond-go/process/factory" + "github.com/ElrondNetwork/elrond-go/process/factory/containers" + "github.com/ElrondNetwork/elrond-go/process/rewardTransaction" + "github.com/ElrondNetwork/elrond-go/process/transaction" + "github.com/ElrondNetwork/elrond-go/process/unsigned" + "github.com/ElrondNetwork/elrond-go/sharding" ) type interceptorsContainerFactory struct { - shardCoordinator sharding.Coordinator - messenger process.TopicHandler - store dataRetriever.StorageService - marshalizer marshal.Marshalizer - hasher hashing.Hasher - keyGen crypto.KeyGenerator - singleSigner crypto.SingleSigner - multiSigner crypto.MultiSigner - dataPool dataRetriever.PoolsHolder - addrConverter state.AddressConverter - nodesCoordinator sharding.NodesCoordinator + shardCoordinator sharding.Coordinator + messenger process.TopicHandler + store dataRetriever.StorageService + marshalizer marshal.Marshalizer + hasher hashing.Hasher + keyGen crypto.KeyGenerator + singleSigner crypto.SingleSigner + multiSigner crypto.MultiSigner + dataPool dataRetriever.PoolsHolder + addrConverter state.AddressConverter + nodesCoordinator sharding.NodesCoordinator } // NewInterceptorsContainerFactory is responsible for creating a new interceptors factory object func NewInterceptorsContainerFactory( - shardCoordinator sharding.Coordinator, - nodesCoordinator sharding.NodesCoordinator, - messenger process.TopicHandler, - store dataRetriever.StorageService, - marshalizer marshal.Marshalizer, - hasher hashing.Hasher, - keyGen crypto.KeyGenerator, - singleSigner crypto.SingleSigner, - multiSigner crypto.MultiSigner, - dataPool dataRetriever.PoolsHolder, - addrConverter state.AddressConverter, + shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, + messenger process.TopicHandler, + store dataRetriever.StorageService, + marshalizer marshal.Marshalizer, + hasher hashing.Hasher, + keyGen crypto.KeyGenerator, + singleSigner crypto.SingleSigner, + multiSigner crypto.MultiSigner, + dataPool dataRetriever.PoolsHolder, + addrConverter state.AddressConverter, ) (*interceptorsContainerFactory, error) { - if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { - return nil, process.ErrNilShardCoordinator - } - if messenger == nil { - return nil, process.ErrNilMessenger - } - if store == nil || store.IsInterfaceNil() { - return nil, process.ErrNilBlockChain - } - if marshalizer == nil || marshalizer.IsInterfaceNil() { - return nil, process.ErrNilMarshalizer - } - if hasher == nil || hasher.IsInterfaceNil() { - return nil, process.ErrNilHasher - } - if keyGen == nil || keyGen.IsInterfaceNil() { - return nil, process.ErrNilKeyGen - } - if singleSigner == nil || singleSigner.IsInterfaceNil() { - return nil, process.ErrNilSingleSigner - } - if multiSigner == nil || 
multiSigner.IsInterfaceNil() { - return nil, process.ErrNilMultiSigVerifier - } - if dataPool == nil || dataPool.IsInterfaceNil() { - return nil, process.ErrNilDataPoolHolder - } - if addrConverter == nil || addrConverter.IsInterfaceNil() { - return nil, process.ErrNilAddressConverter - } - if nodesCoordinator == nil || nodesCoordinator.IsInterfaceNil() { - return nil, process.ErrNilNodesCoordinator - } - - return &interceptorsContainerFactory{ - shardCoordinator: shardCoordinator, - nodesCoordinator: nodesCoordinator, - messenger: messenger, - store: store, - marshalizer: marshalizer, - hasher: hasher, - keyGen: keyGen, - singleSigner: singleSigner, - multiSigner: multiSigner, - dataPool: dataPool, - addrConverter: addrConverter, - }, nil + if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { + return nil, process.ErrNilShardCoordinator + } + if messenger == nil { + return nil, process.ErrNilMessenger + } + if store == nil || store.IsInterfaceNil() { + return nil, process.ErrNilBlockChain + } + if marshalizer == nil || marshalizer.IsInterfaceNil() { + return nil, process.ErrNilMarshalizer + } + if hasher == nil || hasher.IsInterfaceNil() { + return nil, process.ErrNilHasher + } + if keyGen == nil || keyGen.IsInterfaceNil() { + return nil, process.ErrNilKeyGen + } + if singleSigner == nil || singleSigner.IsInterfaceNil() { + return nil, process.ErrNilSingleSigner + } + if multiSigner == nil || multiSigner.IsInterfaceNil() { + return nil, process.ErrNilMultiSigVerifier + } + if dataPool == nil || dataPool.IsInterfaceNil() { + return nil, process.ErrNilDataPoolHolder + } + if addrConverter == nil || addrConverter.IsInterfaceNil() { + return nil, process.ErrNilAddressConverter + } + if nodesCoordinator == nil || nodesCoordinator.IsInterfaceNil() { + return nil, process.ErrNilNodesCoordinator + } + + return &interceptorsContainerFactory{ + shardCoordinator: shardCoordinator, + nodesCoordinator: nodesCoordinator, + messenger: messenger, + store: store, + marshalizer: marshalizer, + hasher: hasher, + keyGen: keyGen, + singleSigner: singleSigner, + multiSigner: multiSigner, + dataPool: dataPool, + addrConverter: addrConverter, + }, nil } // Create returns an interceptor container that will hold all interceptors in the system func (icf *interceptorsContainerFactory) Create() (process.InterceptorsContainer, error) { - container := containers.NewInterceptorsContainer() - - keys, interceptorSlice, err := icf.generateTxInterceptors() - if err != nil { - return nil, err - } - - err = container.AddMultiple(keys, interceptorSlice) - if err != nil { - return nil, err - } - - keys, interceptorSlice, err = icf.generateUnsignedTxsInterceptors() - if err != nil { - return nil, err - } - - err = container.AddMultiple(keys, interceptorSlice) - if err != nil { - return nil, err - } - - keys, interceptorSlice, err = icf.generateRewardTxInterceptors() - if err != nil { - return nil, err - } - - err = container.AddMultiple(keys, interceptorSlice) - if err != nil { - return nil, err - } - - keys, interceptorSlice, err = icf.generateHdrInterceptor() - if err != nil { - return nil, err - } - - err = container.AddMultiple(keys, interceptorSlice) - if err != nil { - return nil, err - } - - keys, interceptorSlice, err = icf.generateMiniBlocksInterceptors() - if err != nil { - return nil, err - } - - err = container.AddMultiple(keys, interceptorSlice) - if err != nil { - return nil, err - } - - keys, interceptorSlice, err = icf.generatePeerChBlockBodyInterceptor() - if err != nil { - return nil, err - } - - 
err = container.AddMultiple(keys, interceptorSlice) - if err != nil { - return nil, err - } - - keys, interceptorSlice, err = icf.generateMetachainHeaderInterceptor() - if err != nil { - return nil, err - } - - err = container.AddMultiple(keys, interceptorSlice) - if err != nil { - return nil, err - } - - return container, nil + container := containers.NewInterceptorsContainer() + + keys, interceptorSlice, err := icf.generateTxInterceptors() + if err != nil { + return nil, err + } + + err = container.AddMultiple(keys, interceptorSlice) + if err != nil { + return nil, err + } + + keys, interceptorSlice, err = icf.generateUnsignedTxsInterceptors() + if err != nil { + return nil, err + } + + err = container.AddMultiple(keys, interceptorSlice) + if err != nil { + return nil, err + } + + keys, interceptorSlice, err = icf.generateRewardTxInterceptors() + if err != nil { + return nil, err + } + + err = container.AddMultiple(keys, interceptorSlice) + if err != nil { + return nil, err + } + + keys, interceptorSlice, err = icf.generateHdrInterceptor() + if err != nil { + return nil, err + } + + err = container.AddMultiple(keys, interceptorSlice) + if err != nil { + return nil, err + } + + keys, interceptorSlice, err = icf.generateMiniBlocksInterceptors() + if err != nil { + return nil, err + } + + err = container.AddMultiple(keys, interceptorSlice) + if err != nil { + return nil, err + } + + keys, interceptorSlice, err = icf.generatePeerChBlockBodyInterceptor() + if err != nil { + return nil, err + } + + err = container.AddMultiple(keys, interceptorSlice) + if err != nil { + return nil, err + } + + keys, interceptorSlice, err = icf.generateMetachainHeaderInterceptor() + if err != nil { + return nil, err + } + + err = container.AddMultiple(keys, interceptorSlice) + if err != nil { + return nil, err + } + + return container, nil } func (icf *interceptorsContainerFactory) createTopicAndAssignHandler( - topic string, - interceptor process.Interceptor, - createChannel bool, + topic string, + interceptor process.Interceptor, + createChannel bool, ) (process.Interceptor, error) { - err := icf.messenger.CreateTopic(topic, createChannel) - if err != nil { - return nil, err - } + err := icf.messenger.CreateTopic(topic, createChannel) + if err != nil { + return nil, err + } - return interceptor, icf.messenger.RegisterMessageProcessor(topic, interceptor) + return interceptor, icf.messenger.RegisterMessageProcessor(topic, interceptor) } //------- Tx interceptors func (icf *interceptorsContainerFactory) generateTxInterceptors() ([]string, []process.Interceptor, error) { - shardC := icf.shardCoordinator + shardC := icf.shardCoordinator - noOfShards := shardC.NumberOfShards() + noOfShards := shardC.NumberOfShards() - keys := make([]string, noOfShards) - interceptorSlice := make([]process.Interceptor, noOfShards) + keys := make([]string, noOfShards) + interceptorSlice := make([]process.Interceptor, noOfShards) - for idx := uint32(0); idx < noOfShards; idx++ { - identifierTx := factory.TransactionTopic + shardC.CommunicationIdentifier(idx) + for idx := uint32(0); idx < noOfShards; idx++ { + identifierTx := factory.TransactionTopic + shardC.CommunicationIdentifier(idx) - interceptor, err := icf.createOneTxInterceptor(identifierTx) - if err != nil { - return nil, nil, err - } + interceptor, err := icf.createOneTxInterceptor(identifierTx) + if err != nil { + return nil, nil, err + } - keys[int(idx)] = identifierTx - interceptorSlice[int(idx)] = interceptor - } + keys[int(idx)] = identifierTx + interceptorSlice[int(idx)] 
= interceptor + } - //tx interceptor for metachain topic - identifierTx := factory.TransactionTopic + shardC.CommunicationIdentifier(sharding.MetachainShardId) + //tx interceptor for metachain topic + identifierTx := factory.TransactionTopic + shardC.CommunicationIdentifier(sharding.MetachainShardId) - interceptor, err := icf.createOneTxInterceptor(identifierTx) - if err != nil { - return nil, nil, err - } + interceptor, err := icf.createOneTxInterceptor(identifierTx) + if err != nil { + return nil, nil, err + } - keys = append(keys, identifierTx) - interceptorSlice = append(interceptorSlice, interceptor) - return keys, interceptorSlice, nil + keys = append(keys, identifierTx) + interceptorSlice = append(interceptorSlice, interceptor) + return keys, interceptorSlice, nil } func (icf *interceptorsContainerFactory) createOneTxInterceptor(identifier string) (process.Interceptor, error) { - //TODO implement other TxHandlerProcessValidator that will check the tx nonce against account's nonce - txValidator, err := dataValidators.NewNilTxValidator() - if err != nil { - return nil, err - } - - interceptor, err := transaction.NewTxInterceptor( - icf.marshalizer, - icf.dataPool.Transactions(), - txValidator, - icf.addrConverter, - icf.hasher, - icf.singleSigner, - icf.keyGen, - icf.shardCoordinator) - - if err != nil { - return nil, err - } - - return icf.createTopicAndAssignHandler(identifier, interceptor, true) + //TODO implement other TxHandlerProcessValidator that will check the tx nonce against account's nonce + txValidator, err := dataValidators.NewNilTxValidator() + if err != nil { + return nil, err + } + + interceptor, err := transaction.NewTxInterceptor( + icf.marshalizer, + icf.dataPool.Transactions(), + txValidator, + icf.addrConverter, + icf.hasher, + icf.singleSigner, + icf.keyGen, + icf.shardCoordinator) + + if err != nil { + return nil, err + } + + return icf.createTopicAndAssignHandler(identifier, interceptor, true) } //------- Reward transactions interceptors func (icf *interceptorsContainerFactory) generateRewardTxInterceptors() ([]string, []process.Interceptor, error) { - shardC := icf.shardCoordinator + shardC := icf.shardCoordinator - noOfShards := shardC.NumberOfShards() + noOfShards := shardC.NumberOfShards() - keys := make([]string, noOfShards) - interceptorSlice := make([]process.Interceptor, noOfShards) + keys := make([]string, noOfShards) + interceptorSlice := make([]process.Interceptor, noOfShards) - for idx := uint32(0); idx < noOfShards; idx++ { - identifierScr := factory.RewardsTransactionTopic + shardC.CommunicationIdentifier(idx) + for idx := uint32(0); idx < noOfShards; idx++ { + identifierScr := factory.RewardsTransactionTopic + shardC.CommunicationIdentifier(idx) - interceptor, err := icf.createOneRewardTxInterceptor(identifierScr) - if err != nil { - return nil, nil, err - } + interceptor, err := icf.createOneRewardTxInterceptor(identifierScr) + if err != nil { + return nil, nil, err + } - keys[int(idx)] = identifierScr - interceptorSlice[int(idx)] = interceptor - } + keys[int(idx)] = identifierScr + interceptorSlice[int(idx)] = interceptor + } - identifierTx := factory.RewardsTransactionTopic + shardC.CommunicationIdentifier(sharding.MetachainShardId) + identifierTx := factory.RewardsTransactionTopic + shardC.CommunicationIdentifier(sharding.MetachainShardId) - interceptor, err := icf.createOneRewardTxInterceptor(identifierTx) - if err != nil { - return nil, nil, err - } + interceptor, err := icf.createOneRewardTxInterceptor(identifierTx) + if err != nil { + 
return nil, nil, err + } - keys = append(keys, identifierTx) - interceptorSlice = append(interceptorSlice, interceptor) + keys = append(keys, identifierTx) + interceptorSlice = append(interceptorSlice, interceptor) - return keys, interceptorSlice, nil + return keys, interceptorSlice, nil } func (icf *interceptorsContainerFactory) createOneRewardTxInterceptor(identifier string) (process.Interceptor, error) { - rewardTxStorer := icf.store.GetStorer(dataRetriever.RewardTransactionUnit) - - interceptor, err := rewardTransaction.NewRewardTxInterceptor( - icf.marshalizer, - icf.dataPool.RewardTransactions(), - rewardTxStorer, - icf.addrConverter, - icf.hasher, - icf.shardCoordinator, - ) - - if err != nil { - return nil, err - } - - return icf.createTopicAndAssignHandler(identifier, interceptor, true) + rewardTxStorer := icf.store.GetStorer(dataRetriever.RewardTransactionUnit) + + interceptor, err := rewardTransaction.NewRewardTxInterceptor( + icf.marshalizer, + icf.dataPool.RewardTransactions(), + rewardTxStorer, + icf.addrConverter, + icf.hasher, + icf.shardCoordinator, + ) + + if err != nil { + return nil, err + } + + return icf.createTopicAndAssignHandler(identifier, interceptor, true) } //------- Unsigned transactions interceptors func (icf *interceptorsContainerFactory) generateUnsignedTxsInterceptors() ([]string, []process.Interceptor, error) { - shardC := icf.shardCoordinator + shardC := icf.shardCoordinator - noOfShards := shardC.NumberOfShards() + noOfShards := shardC.NumberOfShards() - keys := make([]string, noOfShards) - interceptorSlice := make([]process.Interceptor, noOfShards) + keys := make([]string, noOfShards) + interceptorSlice := make([]process.Interceptor, noOfShards) - for idx := uint32(0); idx < noOfShards; idx++ { - identifierScr := factory.UnsignedTransactionTopic + shardC.CommunicationIdentifier(idx) + for idx := uint32(0); idx < noOfShards; idx++ { + identifierScr := factory.UnsignedTransactionTopic + shardC.CommunicationIdentifier(idx) - interceptor, err := icf.createOneUnsignedTxInterceptor(identifierScr) - if err != nil { - return nil, nil, err - } + interceptor, err := icf.createOneUnsignedTxInterceptor(identifierScr) + if err != nil { + return nil, nil, err + } - keys[int(idx)] = identifierScr - interceptorSlice[int(idx)] = interceptor - } + keys[int(idx)] = identifierScr + interceptorSlice[int(idx)] = interceptor + } - identifierTx := factory.UnsignedTransactionTopic + shardC.CommunicationIdentifier(sharding.MetachainShardId) + identifierTx := factory.UnsignedTransactionTopic + shardC.CommunicationIdentifier(sharding.MetachainShardId) - interceptor, err := icf.createOneUnsignedTxInterceptor(identifierTx) - if err != nil { - return nil, nil, err - } + interceptor, err := icf.createOneUnsignedTxInterceptor(identifierTx) + if err != nil { + return nil, nil, err + } - keys = append(keys, identifierTx) - interceptorSlice = append(interceptorSlice, interceptor) - return keys, interceptorSlice, nil + keys = append(keys, identifierTx) + interceptorSlice = append(interceptorSlice, interceptor) + return keys, interceptorSlice, nil } func (icf *interceptorsContainerFactory) createOneUnsignedTxInterceptor(identifier string) (process.Interceptor, error) { - uTxStorer := icf.store.GetStorer(dataRetriever.UnsignedTransactionUnit) + uTxStorer := icf.store.GetStorer(dataRetriever.UnsignedTransactionUnit) - interceptor, err := unsigned.NewUnsignedTxInterceptor( - icf.marshalizer, - icf.dataPool.UnsignedTransactions(), - uTxStorer, - icf.addrConverter, - icf.hasher, - 
icf.shardCoordinator) + interceptor, err := unsigned.NewUnsignedTxInterceptor( + icf.marshalizer, + icf.dataPool.UnsignedTransactions(), + uTxStorer, + icf.addrConverter, + icf.hasher, + icf.shardCoordinator) - if err != nil { - return nil, err - } + if err != nil { + return nil, err + } - return icf.createTopicAndAssignHandler(identifier, interceptor, true) + return icf.createTopicAndAssignHandler(identifier, interceptor, true) } //------- Hdr interceptor func (icf *interceptorsContainerFactory) generateHdrInterceptor() ([]string, []process.Interceptor, error) { - shardC := icf.shardCoordinator - //TODO implement other HeaderHandlerProcessValidator that will check the header's nonce - // against blockchain's latest nonce - k finality - hdrValidator, err := dataValidators.NewNilHeaderValidator() - if err != nil { - return nil, nil, err - } - - //only one intrashard header topic - identifierHdr := factory.HeadersTopic + shardC.CommunicationIdentifier(shardC.SelfId()) - interceptor, err := interceptors.NewHeaderInterceptor( - icf.marshalizer, - icf.dataPool.Headers(), - icf.dataPool.HeadersNonces(), - hdrValidator, - icf.multiSigner, - icf.hasher, - icf.shardCoordinator, - icf.nodesCoordinator, - ) - if err != nil { - return nil, nil, err - } - _, err = icf.createTopicAndAssignHandler(identifierHdr, interceptor, true) - if err != nil { - return nil, nil, err - } - - return []string{identifierHdr}, []process.Interceptor{interceptor}, nil + shardC := icf.shardCoordinator + //TODO implement other HeaderHandlerProcessValidator that will check the header's nonce + // against blockchain's latest nonce - k finality + hdrValidator, err := dataValidators.NewNilHeaderValidator() + if err != nil { + return nil, nil, err + } + + //only one intrashard header topic + identifierHdr := factory.HeadersTopic + shardC.CommunicationIdentifier(shardC.SelfId()) + interceptor, err := interceptors.NewHeaderInterceptor( + icf.marshalizer, + icf.dataPool.Headers(), + icf.dataPool.HeadersNonces(), + hdrValidator, + icf.multiSigner, + icf.hasher, + icf.shardCoordinator, + icf.nodesCoordinator, + ) + if err != nil { + return nil, nil, err + } + _, err = icf.createTopicAndAssignHandler(identifierHdr, interceptor, true) + if err != nil { + return nil, nil, err + } + + return []string{identifierHdr}, []process.Interceptor{interceptor}, nil } //------- MiniBlocks interceptors func (icf *interceptorsContainerFactory) generateMiniBlocksInterceptors() ([]string, []process.Interceptor, error) { - shardC := icf.shardCoordinator - noOfShards := shardC.NumberOfShards() - keys := make([]string, noOfShards) - interceptorSlice := make([]process.Interceptor, noOfShards) + shardC := icf.shardCoordinator + noOfShards := shardC.NumberOfShards() + keys := make([]string, noOfShards) + interceptorSlice := make([]process.Interceptor, noOfShards) - for idx := uint32(0); idx < noOfShards; idx++ { - identifierMiniBlocks := factory.MiniBlocksTopic + shardC.CommunicationIdentifier(idx) + for idx := uint32(0); idx < noOfShards; idx++ { + identifierMiniBlocks := factory.MiniBlocksTopic + shardC.CommunicationIdentifier(idx) - interceptor, err := icf.createOneMiniBlocksInterceptor(identifierMiniBlocks) - if err != nil { - return nil, nil, err - } + interceptor, err := icf.createOneMiniBlocksInterceptor(identifierMiniBlocks) + if err != nil { + return nil, nil, err + } - keys[int(idx)] = identifierMiniBlocks - interceptorSlice[int(idx)] = interceptor - } + keys[int(idx)] = identifierMiniBlocks + interceptorSlice[int(idx)] = interceptor + } - return 
keys, interceptorSlice, nil + return keys, interceptorSlice, nil } func (icf *interceptorsContainerFactory) createOneMiniBlocksInterceptor(identifier string) (process.Interceptor, error) { - txBlockBodyStorer := icf.store.GetStorer(dataRetriever.MiniBlockUnit) + txBlockBodyStorer := icf.store.GetStorer(dataRetriever.MiniBlockUnit) - interceptor, err := interceptors.NewTxBlockBodyInterceptor( - icf.marshalizer, - icf.dataPool.MiniBlocks(), - txBlockBodyStorer, - icf.hasher, - icf.shardCoordinator, - ) + interceptor, err := interceptors.NewTxBlockBodyInterceptor( + icf.marshalizer, + icf.dataPool.MiniBlocks(), + txBlockBodyStorer, + icf.hasher, + icf.shardCoordinator, + ) - if err != nil { - return nil, err - } + if err != nil { + return nil, err + } - return icf.createTopicAndAssignHandler(identifier, interceptor, true) + return icf.createTopicAndAssignHandler(identifier, interceptor, true) } //------- PeerChBlocks interceptor func (icf *interceptorsContainerFactory) generatePeerChBlockBodyInterceptor() ([]string, []process.Interceptor, error) { - shardC := icf.shardCoordinator - - //only one intrashard peer change blocks topic - identifierPeerCh := factory.PeerChBodyTopic + shardC.CommunicationIdentifier(shardC.SelfId()) - peerBlockBodyStorer := icf.store.GetStorer(dataRetriever.PeerChangesUnit) - - interceptor, err := interceptors.NewPeerBlockBodyInterceptor( - icf.marshalizer, - icf.dataPool.PeerChangesBlocks(), - peerBlockBodyStorer, - icf.hasher, - shardC, - ) - if err != nil { - return nil, nil, err - } - _, err = icf.createTopicAndAssignHandler(identifierPeerCh, interceptor, true) - if err != nil { - return nil, nil, err - } - - return []string{identifierPeerCh}, []process.Interceptor{interceptor}, nil + shardC := icf.shardCoordinator + + //only one intrashard peer change blocks topic + identifierPeerCh := factory.PeerChBodyTopic + shardC.CommunicationIdentifier(shardC.SelfId()) + peerBlockBodyStorer := icf.store.GetStorer(dataRetriever.PeerChangesUnit) + + interceptor, err := interceptors.NewPeerBlockBodyInterceptor( + icf.marshalizer, + icf.dataPool.PeerChangesBlocks(), + peerBlockBodyStorer, + icf.hasher, + shardC, + ) + if err != nil { + return nil, nil, err + } + _, err = icf.createTopicAndAssignHandler(identifierPeerCh, interceptor, true) + if err != nil { + return nil, nil, err + } + + return []string{identifierPeerCh}, []process.Interceptor{interceptor}, nil } //------- MetachainHeader interceptors func (icf *interceptorsContainerFactory) generateMetachainHeaderInterceptor() ([]string, []process.Interceptor, error) { - identifierHdr := factory.MetachainBlocksTopic - //TODO implement other HeaderHandlerProcessValidator that will check the header's nonce - // against blockchain's latest nonce - k finality - hdrValidator, err := dataValidators.NewNilHeaderValidator() - if err != nil { - return nil, nil, err - } - - interceptor, err := interceptors.NewMetachainHeaderInterceptor( - icf.marshalizer, - icf.dataPool.MetaBlocks(), - icf.dataPool.HeadersNonces(), - hdrValidator, - icf.multiSigner, - icf.hasher, - icf.shardCoordinator, - icf.nodesCoordinator, - ) - if err != nil { - return nil, nil, err - } - _, err = icf.createTopicAndAssignHandler(identifierHdr, interceptor, true) - if err != nil { - return nil, nil, err - } - - return []string{identifierHdr}, []process.Interceptor{interceptor}, nil + identifierHdr := factory.MetachainBlocksTopic + //TODO implement other HeaderHandlerProcessValidator that will check the header's nonce + // against blockchain's latest nonce - k 
finality + hdrValidator, err := dataValidators.NewNilHeaderValidator() + if err != nil { + return nil, nil, err + } + + interceptor, err := interceptors.NewMetachainHeaderInterceptor( + icf.marshalizer, + icf.dataPool.MetaBlocks(), + icf.dataPool.HeadersNonces(), + hdrValidator, + icf.multiSigner, + icf.hasher, + icf.shardCoordinator, + icf.nodesCoordinator, + ) + if err != nil { + return nil, nil, err + } + _, err = icf.createTopicAndAssignHandler(identifierHdr, interceptor, true) + if err != nil { + return nil, nil, err + } + + return []string{identifierHdr}, []process.Interceptor{interceptor}, nil } // IsInterfaceNil returns true if there is no value under the interface func (icf *interceptorsContainerFactory) IsInterfaceNil() bool { - if icf == nil { - return true - } - return false + if icf == nil { + return true + } + return false } diff --git a/process/factory/shard/interceptorsContainerFactory_test.go b/process/factory/shard/interceptorsContainerFactory_test.go index 1032e3509e4..9d762fbdf14 100644 --- a/process/factory/shard/interceptorsContainerFactory_test.go +++ b/process/factory/shard/interceptorsContainerFactory_test.go @@ -1,649 +1,649 @@ package shard_test import ( - "errors" - "strings" - "testing" - - "github.com/ElrondNetwork/elrond-go/dataRetriever" - "github.com/ElrondNetwork/elrond-go/p2p" - "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/process/factory" - "github.com/ElrondNetwork/elrond-go/process/factory/shard" - "github.com/ElrondNetwork/elrond-go/process/mock" - "github.com/ElrondNetwork/elrond-go/storage" - "github.com/stretchr/testify/assert" + "errors" + "strings" + "testing" + + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/p2p" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/factory" + "github.com/ElrondNetwork/elrond-go/process/factory/shard" + "github.com/ElrondNetwork/elrond-go/process/mock" + "github.com/ElrondNetwork/elrond-go/storage" + "github.com/stretchr/testify/assert" ) var errExpected = errors.New("expected error") func createStubTopicHandler(matchStrToErrOnCreate string, matchStrToErrOnRegister string) process.TopicHandler { - return &mock.TopicHandlerStub{ - CreateTopicCalled: func(name string, createChannelForTopic bool) error { - if matchStrToErrOnCreate == "" { - return nil - } - if strings.Contains(name, matchStrToErrOnCreate) { - return errExpected - } - - return nil - }, - RegisterMessageProcessorCalled: func(topic string, handler p2p.MessageProcessor) error { - if matchStrToErrOnRegister == "" { - return nil - } - if strings.Contains(topic, matchStrToErrOnRegister) { - return errExpected - } - - return nil - }, - } + return &mock.TopicHandlerStub{ + CreateTopicCalled: func(name string, createChannelForTopic bool) error { + if matchStrToErrOnCreate == "" { + return nil + } + if strings.Contains(name, matchStrToErrOnCreate) { + return errExpected + } + + return nil + }, + RegisterMessageProcessorCalled: func(topic string, handler p2p.MessageProcessor) error { + if matchStrToErrOnRegister == "" { + return nil + } + if strings.Contains(topic, matchStrToErrOnRegister) { + return errExpected + } + + return nil + }, + } } func createDataPools() dataRetriever.PoolsHolder { - pools := &mock.PoolsHolderStub{} - pools.TransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{} - } - pools.HeadersCalled = func() storage.Cacher { - return &mock.CacherStub{} - } - 
pools.HeadersNoncesCalled = func() dataRetriever.Uint64SyncMapCacher { - return &mock.Uint64SyncMapCacherStub{} - } - pools.MiniBlocksCalled = func() storage.Cacher { - return &mock.CacherStub{} - } - pools.PeerChangesBlocksCalled = func() storage.Cacher { - return &mock.CacherStub{} - } - pools.MetaBlocksCalled = func() storage.Cacher { - return &mock.CacherStub{} - } - pools.UnsignedTransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{} - } - pools.RewardTransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{} - } - return pools + pools := &mock.PoolsHolderStub{} + pools.TransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{} + } + pools.HeadersCalled = func() storage.Cacher { + return &mock.CacherStub{} + } + pools.HeadersNoncesCalled = func() dataRetriever.Uint64SyncMapCacher { + return &mock.Uint64SyncMapCacherStub{} + } + pools.MiniBlocksCalled = func() storage.Cacher { + return &mock.CacherStub{} + } + pools.PeerChangesBlocksCalled = func() storage.Cacher { + return &mock.CacherStub{} + } + pools.MetaBlocksCalled = func() storage.Cacher { + return &mock.CacherStub{} + } + pools.UnsignedTransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{} + } + pools.RewardTransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{} + } + return pools } func createStore() *mock.ChainStorerMock { - return &mock.ChainStorerMock{ - GetStorerCalled: func(unitType dataRetriever.UnitType) storage.Storer { - return &mock.StorerStub{} - }, - } + return &mock.ChainStorerMock{ + GetStorerCalled: func(unitType dataRetriever.UnitType) storage.Storer { + return &mock.StorerStub{} + }, + } } //------- NewInterceptorsContainerFactory func TestNewInterceptorsContainerFactory_NilShardCoordinatorShouldErr(t *testing.T) { - t.Parallel() - - icf, err := shard.NewInterceptorsContainerFactory( - nil, - mock.NewNodesCoordinatorMock(), - &mock.TopicHandlerStub{}, - createStore(), - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - &mock.SingleSignKeyGenMock{}, - &mock.SignerMock{}, - mock.NewMultiSigner(), - createDataPools(), - &mock.AddressConverterMock{}, - ) - - assert.Nil(t, icf) - assert.Equal(t, process.ErrNilShardCoordinator, err) + t.Parallel() + + icf, err := shard.NewInterceptorsContainerFactory( + nil, + mock.NewNodesCoordinatorMock(), + &mock.TopicHandlerStub{}, + createStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + &mock.SingleSignKeyGenMock{}, + &mock.SignerMock{}, + mock.NewMultiSigner(), + createDataPools(), + &mock.AddressConverterMock{}, + ) + + assert.Nil(t, icf) + assert.Equal(t, process.ErrNilShardCoordinator, err) } func TestNewInterceptorsContainerFactory_NilNodesCoordinatorShouldErr(t *testing.T) { - t.Parallel() - - icf, err := shard.NewInterceptorsContainerFactory( - mock.NewOneShardCoordinatorMock(), - nil, - &mock.TopicHandlerStub{}, - createStore(), - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - &mock.SingleSignKeyGenMock{}, - &mock.SignerMock{}, - mock.NewMultiSigner(), - createDataPools(), - &mock.AddressConverterMock{}, - ) - - assert.Nil(t, icf) - assert.Equal(t, process.ErrNilNodesCoordinator, err) + t.Parallel() + + icf, err := shard.NewInterceptorsContainerFactory( + mock.NewOneShardCoordinatorMock(), + nil, + &mock.TopicHandlerStub{}, + createStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + &mock.SingleSignKeyGenMock{}, + 
&mock.SignerMock{}, + mock.NewMultiSigner(), + createDataPools(), + &mock.AddressConverterMock{}, + ) + + assert.Nil(t, icf) + assert.Equal(t, process.ErrNilNodesCoordinator, err) } func TestNewInterceptorsContainerFactory_NilTopicHandlerShouldErr(t *testing.T) { - t.Parallel() - - icf, err := shard.NewInterceptorsContainerFactory( - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - nil, - createStore(), - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - &mock.SingleSignKeyGenMock{}, - &mock.SignerMock{}, - mock.NewMultiSigner(), - createDataPools(), - &mock.AddressConverterMock{}, - ) - - assert.Nil(t, icf) - assert.Equal(t, process.ErrNilMessenger, err) + t.Parallel() + + icf, err := shard.NewInterceptorsContainerFactory( + mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + nil, + createStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + &mock.SingleSignKeyGenMock{}, + &mock.SignerMock{}, + mock.NewMultiSigner(), + createDataPools(), + &mock.AddressConverterMock{}, + ) + + assert.Nil(t, icf) + assert.Equal(t, process.ErrNilMessenger, err) } func TestNewInterceptorsContainerFactory_NilBlockchainShouldErr(t *testing.T) { - t.Parallel() - - icf, err := shard.NewInterceptorsContainerFactory( - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - &mock.TopicHandlerStub{}, - nil, - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - &mock.SingleSignKeyGenMock{}, - &mock.SignerMock{}, - mock.NewMultiSigner(), - createDataPools(), - &mock.AddressConverterMock{}, - ) - - assert.Nil(t, icf) - assert.Equal(t, process.ErrNilBlockChain, err) + t.Parallel() + + icf, err := shard.NewInterceptorsContainerFactory( + mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.TopicHandlerStub{}, + nil, + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + &mock.SingleSignKeyGenMock{}, + &mock.SignerMock{}, + mock.NewMultiSigner(), + createDataPools(), + &mock.AddressConverterMock{}, + ) + + assert.Nil(t, icf) + assert.Equal(t, process.ErrNilBlockChain, err) } func TestNewInterceptorsContainerFactory_NilMarshalizerShouldErr(t *testing.T) { - t.Parallel() - - icf, err := shard.NewInterceptorsContainerFactory( - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - &mock.TopicHandlerStub{}, - createStore(), - nil, - &mock.HasherMock{}, - &mock.SingleSignKeyGenMock{}, - &mock.SignerMock{}, - mock.NewMultiSigner(), - createDataPools(), - &mock.AddressConverterMock{}, - ) - - assert.Nil(t, icf) - assert.Equal(t, process.ErrNilMarshalizer, err) + t.Parallel() + + icf, err := shard.NewInterceptorsContainerFactory( + mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.TopicHandlerStub{}, + createStore(), + nil, + &mock.HasherMock{}, + &mock.SingleSignKeyGenMock{}, + &mock.SignerMock{}, + mock.NewMultiSigner(), + createDataPools(), + &mock.AddressConverterMock{}, + ) + + assert.Nil(t, icf) + assert.Equal(t, process.ErrNilMarshalizer, err) } func TestNewInterceptorsContainerFactory_NilHasherShouldErr(t *testing.T) { - t.Parallel() - - icf, err := shard.NewInterceptorsContainerFactory( - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - &mock.TopicHandlerStub{}, - createStore(), - &mock.MarshalizerMock{}, - nil, - &mock.SingleSignKeyGenMock{}, - &mock.SignerMock{}, - mock.NewMultiSigner(), - createDataPools(), - &mock.AddressConverterMock{}, - ) - - assert.Nil(t, icf) - assert.Equal(t, process.ErrNilHasher, err) + t.Parallel() + + icf, err := 
shard.NewInterceptorsContainerFactory( + mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.TopicHandlerStub{}, + createStore(), + &mock.MarshalizerMock{}, + nil, + &mock.SingleSignKeyGenMock{}, + &mock.SignerMock{}, + mock.NewMultiSigner(), + createDataPools(), + &mock.AddressConverterMock{}, + ) + + assert.Nil(t, icf) + assert.Equal(t, process.ErrNilHasher, err) } func TestNewInterceptorsContainerFactory_NilKeyGenShouldErr(t *testing.T) { - t.Parallel() - - icf, err := shard.NewInterceptorsContainerFactory( - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - &mock.TopicHandlerStub{}, - createStore(), - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - nil, - &mock.SignerMock{}, - mock.NewMultiSigner(), - createDataPools(), - &mock.AddressConverterMock{}, - ) - - assert.Nil(t, icf) - assert.Equal(t, process.ErrNilKeyGen, err) + t.Parallel() + + icf, err := shard.NewInterceptorsContainerFactory( + mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.TopicHandlerStub{}, + createStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + nil, + &mock.SignerMock{}, + mock.NewMultiSigner(), + createDataPools(), + &mock.AddressConverterMock{}, + ) + + assert.Nil(t, icf) + assert.Equal(t, process.ErrNilKeyGen, err) } func TestNewInterceptorsContainerFactory_NilSingleSignerShouldErr(t *testing.T) { - t.Parallel() - - icf, err := shard.NewInterceptorsContainerFactory( - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - &mock.TopicHandlerStub{}, - createStore(), - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - &mock.SingleSignKeyGenMock{}, - nil, - mock.NewMultiSigner(), - createDataPools(), - &mock.AddressConverterMock{}, - ) - - assert.Nil(t, icf) - assert.Equal(t, process.ErrNilSingleSigner, err) + t.Parallel() + + icf, err := shard.NewInterceptorsContainerFactory( + mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.TopicHandlerStub{}, + createStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + &mock.SingleSignKeyGenMock{}, + nil, + mock.NewMultiSigner(), + createDataPools(), + &mock.AddressConverterMock{}, + ) + + assert.Nil(t, icf) + assert.Equal(t, process.ErrNilSingleSigner, err) } func TestNewInterceptorsContainerFactory_NilMultiSignerShouldErr(t *testing.T) { - t.Parallel() - - icf, err := shard.NewInterceptorsContainerFactory( - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - &mock.TopicHandlerStub{}, - createStore(), - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - &mock.SingleSignKeyGenMock{}, - &mock.SignerMock{}, - nil, - createDataPools(), - &mock.AddressConverterMock{}, - ) - - assert.Nil(t, icf) - assert.Equal(t, process.ErrNilMultiSigVerifier, err) + t.Parallel() + + icf, err := shard.NewInterceptorsContainerFactory( + mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.TopicHandlerStub{}, + createStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + &mock.SingleSignKeyGenMock{}, + &mock.SignerMock{}, + nil, + createDataPools(), + &mock.AddressConverterMock{}, + ) + + assert.Nil(t, icf) + assert.Equal(t, process.ErrNilMultiSigVerifier, err) } func TestNewInterceptorsContainerFactory_NilDataPoolShouldErr(t *testing.T) { - t.Parallel() - - icf, err := shard.NewInterceptorsContainerFactory( - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - &mock.TopicHandlerStub{}, - createStore(), - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - &mock.SingleSignKeyGenMock{}, - 
&mock.SignerMock{}, - mock.NewMultiSigner(), - nil, - &mock.AddressConverterMock{}, - ) - - assert.Nil(t, icf) - assert.Equal(t, process.ErrNilDataPoolHolder, err) + t.Parallel() + + icf, err := shard.NewInterceptorsContainerFactory( + mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.TopicHandlerStub{}, + createStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + &mock.SingleSignKeyGenMock{}, + &mock.SignerMock{}, + mock.NewMultiSigner(), + nil, + &mock.AddressConverterMock{}, + ) + + assert.Nil(t, icf) + assert.Equal(t, process.ErrNilDataPoolHolder, err) } func TestNewInterceptorsContainerFactory_NilAddrConverterShouldErr(t *testing.T) { - t.Parallel() - - icf, err := shard.NewInterceptorsContainerFactory( - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - &mock.TopicHandlerStub{}, - createStore(), - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - &mock.SingleSignKeyGenMock{}, - &mock.SignerMock{}, - mock.NewMultiSigner(), - createDataPools(), - nil, - ) - - assert.Nil(t, icf) - assert.Equal(t, process.ErrNilAddressConverter, err) + t.Parallel() + + icf, err := shard.NewInterceptorsContainerFactory( + mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.TopicHandlerStub{}, + createStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + &mock.SingleSignKeyGenMock{}, + &mock.SignerMock{}, + mock.NewMultiSigner(), + createDataPools(), + nil, + ) + + assert.Nil(t, icf) + assert.Equal(t, process.ErrNilAddressConverter, err) } func TestNewInterceptorsContainerFactory_ShouldWork(t *testing.T) { - t.Parallel() - - icf, err := shard.NewInterceptorsContainerFactory( - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - &mock.TopicHandlerStub{}, - createStore(), - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - &mock.SingleSignKeyGenMock{}, - &mock.SignerMock{}, - mock.NewMultiSigner(), - createDataPools(), - &mock.AddressConverterMock{}, - ) - - assert.NotNil(t, icf) - assert.Nil(t, err) + t.Parallel() + + icf, err := shard.NewInterceptorsContainerFactory( + mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.TopicHandlerStub{}, + createStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + &mock.SingleSignKeyGenMock{}, + &mock.SignerMock{}, + mock.NewMultiSigner(), + createDataPools(), + &mock.AddressConverterMock{}, + ) + + assert.NotNil(t, icf) + assert.Nil(t, err) } //------- Create func TestInterceptorsContainerFactory_CreateTopicCreationTxFailsShouldErr(t *testing.T) { - t.Parallel() - - icf, _ := shard.NewInterceptorsContainerFactory( - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - createStubTopicHandler(factory.TransactionTopic, ""), - createStore(), - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - &mock.SingleSignKeyGenMock{}, - &mock.SignerMock{}, - mock.NewMultiSigner(), - createDataPools(), - &mock.AddressConverterMock{}, - ) - - container, err := icf.Create() - - assert.Nil(t, container) - assert.Equal(t, errExpected, err) + t.Parallel() + + icf, _ := shard.NewInterceptorsContainerFactory( + mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + createStubTopicHandler(factory.TransactionTopic, ""), + createStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + &mock.SingleSignKeyGenMock{}, + &mock.SignerMock{}, + mock.NewMultiSigner(), + createDataPools(), + &mock.AddressConverterMock{}, + ) + + container, err := icf.Create() + + assert.Nil(t, container) + assert.Equal(t, errExpected, err) } func 
TestInterceptorsContainerFactory_CreateTopicCreationHdrFailsShouldErr(t *testing.T) { - t.Parallel() - - icf, _ := shard.NewInterceptorsContainerFactory( - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - createStubTopicHandler(factory.HeadersTopic, ""), - createStore(), - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - &mock.SingleSignKeyGenMock{}, - &mock.SignerMock{}, - mock.NewMultiSigner(), - createDataPools(), - &mock.AddressConverterMock{}, - ) - - container, err := icf.Create() - - assert.Nil(t, container) - assert.Equal(t, errExpected, err) + t.Parallel() + + icf, _ := shard.NewInterceptorsContainerFactory( + mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + createStubTopicHandler(factory.HeadersTopic, ""), + createStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + &mock.SingleSignKeyGenMock{}, + &mock.SignerMock{}, + mock.NewMultiSigner(), + createDataPools(), + &mock.AddressConverterMock{}, + ) + + container, err := icf.Create() + + assert.Nil(t, container) + assert.Equal(t, errExpected, err) } func TestInterceptorsContainerFactory_CreateTopicCreationMiniBlocksFailsShouldErr(t *testing.T) { - t.Parallel() - - icf, _ := shard.NewInterceptorsContainerFactory( - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - createStubTopicHandler(factory.MiniBlocksTopic, ""), - createStore(), - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - &mock.SingleSignKeyGenMock{}, - &mock.SignerMock{}, - mock.NewMultiSigner(), - createDataPools(), - &mock.AddressConverterMock{}, - ) - - container, err := icf.Create() - - assert.Nil(t, container) - assert.Equal(t, errExpected, err) + t.Parallel() + + icf, _ := shard.NewInterceptorsContainerFactory( + mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + createStubTopicHandler(factory.MiniBlocksTopic, ""), + createStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + &mock.SingleSignKeyGenMock{}, + &mock.SignerMock{}, + mock.NewMultiSigner(), + createDataPools(), + &mock.AddressConverterMock{}, + ) + + container, err := icf.Create() + + assert.Nil(t, container) + assert.Equal(t, errExpected, err) } func TestInterceptorsContainerFactory_CreateTopicCreationPeerChBlocksFailsShouldErr(t *testing.T) { - t.Parallel() - - icf, _ := shard.NewInterceptorsContainerFactory( - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - createStubTopicHandler(factory.PeerChBodyTopic, ""), - createStore(), - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - &mock.SingleSignKeyGenMock{}, - &mock.SignerMock{}, - mock.NewMultiSigner(), - createDataPools(), - &mock.AddressConverterMock{}, - ) - - container, err := icf.Create() - - assert.Nil(t, container) - assert.Equal(t, errExpected, err) + t.Parallel() + + icf, _ := shard.NewInterceptorsContainerFactory( + mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + createStubTopicHandler(factory.PeerChBodyTopic, ""), + createStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + &mock.SingleSignKeyGenMock{}, + &mock.SignerMock{}, + mock.NewMultiSigner(), + createDataPools(), + &mock.AddressConverterMock{}, + ) + + container, err := icf.Create() + + assert.Nil(t, container) + assert.Equal(t, errExpected, err) } func TestInterceptorsContainerFactory_CreateTopicCreationMetachainHeadersFailsShouldErr(t *testing.T) { - t.Parallel() - - icf, _ := shard.NewInterceptorsContainerFactory( - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - 
createStubTopicHandler(factory.MetachainBlocksTopic, ""), - createStore(), - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - &mock.SingleSignKeyGenMock{}, - &mock.SignerMock{}, - mock.NewMultiSigner(), - createDataPools(), - &mock.AddressConverterMock{}, - ) - - container, err := icf.Create() - - assert.Nil(t, container) - assert.Equal(t, errExpected, err) + t.Parallel() + + icf, _ := shard.NewInterceptorsContainerFactory( + mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + createStubTopicHandler(factory.MetachainBlocksTopic, ""), + createStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + &mock.SingleSignKeyGenMock{}, + &mock.SignerMock{}, + mock.NewMultiSigner(), + createDataPools(), + &mock.AddressConverterMock{}, + ) + + container, err := icf.Create() + + assert.Nil(t, container) + assert.Equal(t, errExpected, err) } func TestInterceptorsContainerFactory_CreateRegisterTxFailsShouldErr(t *testing.T) { - t.Parallel() - - icf, _ := shard.NewInterceptorsContainerFactory( - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - createStubTopicHandler("", factory.TransactionTopic), - createStore(), - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - &mock.SingleSignKeyGenMock{}, - &mock.SignerMock{}, - mock.NewMultiSigner(), - createDataPools(), - &mock.AddressConverterMock{}, - ) - - container, err := icf.Create() - - assert.Nil(t, container) - assert.Equal(t, errExpected, err) + t.Parallel() + + icf, _ := shard.NewInterceptorsContainerFactory( + mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + createStubTopicHandler("", factory.TransactionTopic), + createStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + &mock.SingleSignKeyGenMock{}, + &mock.SignerMock{}, + mock.NewMultiSigner(), + createDataPools(), + &mock.AddressConverterMock{}, + ) + + container, err := icf.Create() + + assert.Nil(t, container) + assert.Equal(t, errExpected, err) } func TestInterceptorsContainerFactory_CreateRegisterHdrFailsShouldErr(t *testing.T) { - t.Parallel() - - icf, _ := shard.NewInterceptorsContainerFactory( - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - createStubTopicHandler("", factory.HeadersTopic), - createStore(), - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - &mock.SingleSignKeyGenMock{}, - &mock.SignerMock{}, - mock.NewMultiSigner(), - createDataPools(), - &mock.AddressConverterMock{}, - ) - - container, err := icf.Create() - - assert.Nil(t, container) - assert.Equal(t, errExpected, err) + t.Parallel() + + icf, _ := shard.NewInterceptorsContainerFactory( + mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + createStubTopicHandler("", factory.HeadersTopic), + createStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + &mock.SingleSignKeyGenMock{}, + &mock.SignerMock{}, + mock.NewMultiSigner(), + createDataPools(), + &mock.AddressConverterMock{}, + ) + + container, err := icf.Create() + + assert.Nil(t, container) + assert.Equal(t, errExpected, err) } func TestInterceptorsContainerFactory_CreateRegisterMiniBlocksFailsShouldErr(t *testing.T) { - t.Parallel() - - icf, _ := shard.NewInterceptorsContainerFactory( - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - createStubTopicHandler("", factory.MiniBlocksTopic), - createStore(), - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - &mock.SingleSignKeyGenMock{}, - &mock.SignerMock{}, - mock.NewMultiSigner(), - createDataPools(), - &mock.AddressConverterMock{}, - ) - - container, err := icf.Create() - - 
assert.Nil(t, container) - assert.Equal(t, errExpected, err) + t.Parallel() + + icf, _ := shard.NewInterceptorsContainerFactory( + mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + createStubTopicHandler("", factory.MiniBlocksTopic), + createStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + &mock.SingleSignKeyGenMock{}, + &mock.SignerMock{}, + mock.NewMultiSigner(), + createDataPools(), + &mock.AddressConverterMock{}, + ) + + container, err := icf.Create() + + assert.Nil(t, container) + assert.Equal(t, errExpected, err) } func TestInterceptorsContainerFactory_CreateRegisterPeerChBlocksFailsShouldErr(t *testing.T) { - t.Parallel() - - icf, _ := shard.NewInterceptorsContainerFactory( - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - createStubTopicHandler("", factory.PeerChBodyTopic), - createStore(), - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - &mock.SingleSignKeyGenMock{}, - &mock.SignerMock{}, - mock.NewMultiSigner(), - createDataPools(), - &mock.AddressConverterMock{}, - ) - - container, err := icf.Create() - - assert.Nil(t, container) - assert.Equal(t, errExpected, err) + t.Parallel() + + icf, _ := shard.NewInterceptorsContainerFactory( + mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + createStubTopicHandler("", factory.PeerChBodyTopic), + createStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + &mock.SingleSignKeyGenMock{}, + &mock.SignerMock{}, + mock.NewMultiSigner(), + createDataPools(), + &mock.AddressConverterMock{}, + ) + + container, err := icf.Create() + + assert.Nil(t, container) + assert.Equal(t, errExpected, err) } func TestInterceptorsContainerFactory_CreateRegisterMetachainHeadersShouldErr(t *testing.T) { - t.Parallel() - - icf, _ := shard.NewInterceptorsContainerFactory( - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - createStubTopicHandler("", factory.MetachainBlocksTopic), - createStore(), - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - &mock.SingleSignKeyGenMock{}, - &mock.SignerMock{}, - mock.NewMultiSigner(), - createDataPools(), - &mock.AddressConverterMock{}, - ) - - container, err := icf.Create() - - assert.Nil(t, container) - assert.Equal(t, errExpected, err) + t.Parallel() + + icf, _ := shard.NewInterceptorsContainerFactory( + mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + createStubTopicHandler("", factory.MetachainBlocksTopic), + createStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + &mock.SingleSignKeyGenMock{}, + &mock.SignerMock{}, + mock.NewMultiSigner(), + createDataPools(), + &mock.AddressConverterMock{}, + ) + + container, err := icf.Create() + + assert.Nil(t, container) + assert.Equal(t, errExpected, err) } func TestInterceptorsContainerFactory_CreateShouldWork(t *testing.T) { - t.Parallel() - - icf, _ := shard.NewInterceptorsContainerFactory( - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - &mock.TopicHandlerStub{ - CreateTopicCalled: func(name string, createChannelForTopic bool) error { - return nil - }, - RegisterMessageProcessorCalled: func(topic string, handler p2p.MessageProcessor) error { - return nil - }, - }, - createStore(), - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - &mock.SingleSignKeyGenMock{}, - &mock.SignerMock{}, - mock.NewMultiSigner(), - createDataPools(), - &mock.AddressConverterMock{}, - ) - - container, err := icf.Create() - - assert.NotNil(t, container) - assert.Nil(t, err) + t.Parallel() + + icf, _ := shard.NewInterceptorsContainerFactory( + 
mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.TopicHandlerStub{ + CreateTopicCalled: func(name string, createChannelForTopic bool) error { + return nil + }, + RegisterMessageProcessorCalled: func(topic string, handler p2p.MessageProcessor) error { + return nil + }, + }, + createStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + &mock.SingleSignKeyGenMock{}, + &mock.SignerMock{}, + mock.NewMultiSigner(), + createDataPools(), + &mock.AddressConverterMock{}, + ) + + container, err := icf.Create() + + assert.NotNil(t, container) + assert.Nil(t, err) } func TestInterceptorsContainerFactory_With4ShardsShouldWork(t *testing.T) { - t.Parallel() - - noOfShards := 4 - - shardCoordinator := mock.NewMultipleShardsCoordinatorMock() - shardCoordinator.SetNoShards(uint32(noOfShards)) - shardCoordinator.CurrentShard = 1 - - nodesCoordinator := &mock.NodesCoordinatorMock{ - ShardId: 1, - ShardConsensusSize: 1, - MetaConsensusSize: 1, - NbShards: uint32(noOfShards), - } - - icf, _ := shard.NewInterceptorsContainerFactory( - shardCoordinator, - nodesCoordinator, - &mock.TopicHandlerStub{ - CreateTopicCalled: func(name string, createChannelForTopic bool) error { - return nil - }, - RegisterMessageProcessorCalled: func(topic string, handler p2p.MessageProcessor) error { - return nil - }, - }, - createStore(), - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - &mock.SingleSignKeyGenMock{}, - &mock.SignerMock{}, - mock.NewMultiSigner(), - createDataPools(), - &mock.AddressConverterMock{}, - ) - - container, _ := icf.Create() - - numInterceptorTxs := noOfShards + 1 - numInterceptorsUnsignedTxs := numInterceptorTxs - numInterceptorsRewardTxs := numInterceptorTxs - numInterceptorHeaders := 1 - numInterceptorMiniBlocks := noOfShards - numInterceptorPeerChanges := 1 - numInterceptorMetachainHeaders := 1 - totalInterceptors := numInterceptorTxs + numInterceptorHeaders + numInterceptorMiniBlocks + - numInterceptorPeerChanges + numInterceptorMetachainHeaders + numInterceptorsUnsignedTxs + - numInterceptorsRewardTxs - - assert.Equal(t, totalInterceptors, container.Len()) + t.Parallel() + + noOfShards := 4 + + shardCoordinator := mock.NewMultipleShardsCoordinatorMock() + shardCoordinator.SetNoShards(uint32(noOfShards)) + shardCoordinator.CurrentShard = 1 + + nodesCoordinator := &mock.NodesCoordinatorMock{ + ShardId: 1, + ShardConsensusSize: 1, + MetaConsensusSize: 1, + NbShards: uint32(noOfShards), + } + + icf, _ := shard.NewInterceptorsContainerFactory( + shardCoordinator, + nodesCoordinator, + &mock.TopicHandlerStub{ + CreateTopicCalled: func(name string, createChannelForTopic bool) error { + return nil + }, + RegisterMessageProcessorCalled: func(topic string, handler p2p.MessageProcessor) error { + return nil + }, + }, + createStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + &mock.SingleSignKeyGenMock{}, + &mock.SignerMock{}, + mock.NewMultiSigner(), + createDataPools(), + &mock.AddressConverterMock{}, + ) + + container, _ := icf.Create() + + numInterceptorTxs := noOfShards + 1 + numInterceptorsUnsignedTxs := numInterceptorTxs + numInterceptorsRewardTxs := numInterceptorTxs + numInterceptorHeaders := 1 + numInterceptorMiniBlocks := noOfShards + numInterceptorPeerChanges := 1 + numInterceptorMetachainHeaders := 1 + totalInterceptors := numInterceptorTxs + numInterceptorHeaders + numInterceptorMiniBlocks + + numInterceptorPeerChanges + numInterceptorMetachainHeaders + numInterceptorsUnsignedTxs + + numInterceptorsRewardTxs + + assert.Equal(t, totalInterceptors, 
container.Len()) } diff --git a/process/factory/shard/intermediateProcessorsContainerFactory.go b/process/factory/shard/intermediateProcessorsContainerFactory.go index a0e7d54ed70..5daadb6c230 100644 --- a/process/factory/shard/intermediateProcessorsContainerFactory.go +++ b/process/factory/shard/intermediateProcessorsContainerFactory.go @@ -1,129 +1,129 @@ package shard import ( - "github.com/ElrondNetwork/elrond-go/data/block" - "github.com/ElrondNetwork/elrond-go/data/state" - "github.com/ElrondNetwork/elrond-go/dataRetriever" - "github.com/ElrondNetwork/elrond-go/hashing" - "github.com/ElrondNetwork/elrond-go/marshal" - "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/process/block/preprocess" - "github.com/ElrondNetwork/elrond-go/process/factory/containers" - "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/block/preprocess" + "github.com/ElrondNetwork/elrond-go/process/factory/containers" + "github.com/ElrondNetwork/elrond-go/sharding" ) type intermediateProcessorsContainerFactory struct { - shardCoordinator sharding.Coordinator - marshalizer marshal.Marshalizer - hasher hashing.Hasher - addrConverter state.AddressConverter - specialAddressHandler process.SpecialAddressHandler - store dataRetriever.StorageService - poolsHolder dataRetriever.PoolsHolder + shardCoordinator sharding.Coordinator + marshalizer marshal.Marshalizer + hasher hashing.Hasher + addrConverter state.AddressConverter + specialAddressHandler process.SpecialAddressHandler + store dataRetriever.StorageService + poolsHolder dataRetriever.PoolsHolder } // NewIntermediateProcessorsContainerFactory is responsible for creating a new intermediate processors factory object func NewIntermediateProcessorsContainerFactory( - shardCoordinator sharding.Coordinator, - marshalizer marshal.Marshalizer, - hasher hashing.Hasher, - addrConverter state.AddressConverter, - specialAddressHandler process.SpecialAddressHandler, - store dataRetriever.StorageService, - poolsHolder dataRetriever.PoolsHolder, + shardCoordinator sharding.Coordinator, + marshalizer marshal.Marshalizer, + hasher hashing.Hasher, + addrConverter state.AddressConverter, + specialAddressHandler process.SpecialAddressHandler, + store dataRetriever.StorageService, + poolsHolder dataRetriever.PoolsHolder, ) (*intermediateProcessorsContainerFactory, error) { - if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { - return nil, process.ErrNilShardCoordinator - } - if marshalizer == nil || marshalizer.IsInterfaceNil() { - return nil, process.ErrNilMarshalizer - } - if hasher == nil || hasher.IsInterfaceNil() { - return nil, process.ErrNilHasher - } - if addrConverter == nil || addrConverter.IsInterfaceNil() { - return nil, process.ErrNilAddressConverter - } - if specialAddressHandler == nil || specialAddressHandler.IsInterfaceNil() { - return nil, process.ErrNilSpecialAddressHandler - } - if store == nil || store.IsInterfaceNil() { - return nil, process.ErrNilStorage - } - if poolsHolder == nil { - return nil, process.ErrNilPoolsHolder - } - - return &intermediateProcessorsContainerFactory{ - shardCoordinator: shardCoordinator, - marshalizer: marshalizer, - hasher: hasher, - addrConverter: 
addrConverter, - specialAddressHandler: specialAddressHandler, - store: store, - poolsHolder: poolsHolder, - }, nil + if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { + return nil, process.ErrNilShardCoordinator + } + if marshalizer == nil || marshalizer.IsInterfaceNil() { + return nil, process.ErrNilMarshalizer + } + if hasher == nil || hasher.IsInterfaceNil() { + return nil, process.ErrNilHasher + } + if addrConverter == nil || addrConverter.IsInterfaceNil() { + return nil, process.ErrNilAddressConverter + } + if specialAddressHandler == nil || specialAddressHandler.IsInterfaceNil() { + return nil, process.ErrNilSpecialAddressHandler + } + if store == nil || store.IsInterfaceNil() { + return nil, process.ErrNilStorage + } + if poolsHolder == nil { + return nil, process.ErrNilPoolsHolder + } + + return &intermediateProcessorsContainerFactory{ + shardCoordinator: shardCoordinator, + marshalizer: marshalizer, + hasher: hasher, + addrConverter: addrConverter, + specialAddressHandler: specialAddressHandler, + store: store, + poolsHolder: poolsHolder, + }, nil } // Create returns a preprocessor container that will hold all preprocessors in the system func (ppcm *intermediateProcessorsContainerFactory) Create() (process.IntermediateProcessorContainer, error) { - container := containers.NewIntermediateTransactionHandlersContainer() + container := containers.NewIntermediateTransactionHandlersContainer() - interproc, err := ppcm.createSmartContractResultsIntermediateProcessor() - if err != nil { - return nil, err - } + interproc, err := ppcm.createSmartContractResultsIntermediateProcessor() + if err != nil { + return nil, err + } - err = container.Add(block.SmartContractResultBlock, interproc) - if err != nil { - return nil, err - } + err = container.Add(block.SmartContractResultBlock, interproc) + if err != nil { + return nil, err + } - interproc, err = ppcm.createRewardsTxIntermediateProcessor() - if err != nil { - return nil, err - } + interproc, err = ppcm.createRewardsTxIntermediateProcessor() + if err != nil { + return nil, err + } - err = container.Add(block.RewardsBlock, interproc) - if err != nil { - return nil, err - } + err = container.Add(block.RewardsBlock, interproc) + if err != nil { + return nil, err + } - return container, nil + return container, nil } func (ppcm *intermediateProcessorsContainerFactory) createSmartContractResultsIntermediateProcessor() (process.IntermediateTransactionHandler, error) { - irp, err := preprocess.NewIntermediateResultsProcessor( - ppcm.hasher, - ppcm.marshalizer, - ppcm.shardCoordinator, - ppcm.addrConverter, - ppcm.store, - block.SmartContractResultBlock, - ) - - return irp, err + irp, err := preprocess.NewIntermediateResultsProcessor( + ppcm.hasher, + ppcm.marshalizer, + ppcm.shardCoordinator, + ppcm.addrConverter, + ppcm.store, + block.SmartContractResultBlock, + ) + + return irp, err } func (ppcm *intermediateProcessorsContainerFactory) createRewardsTxIntermediateProcessor() (process.IntermediateTransactionHandler, error) { - irp, err := preprocess.NewRewardTxHandler( - ppcm.specialAddressHandler, - ppcm.hasher, - ppcm.marshalizer, - ppcm.shardCoordinator, - ppcm.addrConverter, - ppcm.store, - ppcm.poolsHolder.RewardTransactions(), - ) - - return irp, err + irp, err := preprocess.NewRewardTxHandler( + ppcm.specialAddressHandler, + ppcm.hasher, + ppcm.marshalizer, + ppcm.shardCoordinator, + ppcm.addrConverter, + ppcm.store, + ppcm.poolsHolder.RewardTransactions(), + ) + + return irp, err } // IsInterfaceNil returns true if there is 
no value under the interface func (ppcm *intermediateProcessorsContainerFactory) IsInterfaceNil() bool { - if ppcm == nil { - return true - } - return false + if ppcm == nil { + return true + } + return false } diff --git a/process/factory/shard/intermediateProcessorsContainerFactory_test.go b/process/factory/shard/intermediateProcessorsContainerFactory_test.go index f2315293152..3c596728885 100644 --- a/process/factory/shard/intermediateProcessorsContainerFactory_test.go +++ b/process/factory/shard/intermediateProcessorsContainerFactory_test.go @@ -1,139 +1,139 @@ package shard_test import ( - "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/process/factory/shard" - "github.com/ElrondNetwork/elrond-go/process/mock" - "github.com/stretchr/testify/assert" - "testing" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/factory/shard" + "github.com/ElrondNetwork/elrond-go/process/mock" + "github.com/stretchr/testify/assert" + "testing" ) func TestNewIntermediateProcessorsContainerFactory_NilShardCoord(t *testing.T) { - t.Parallel() - - dPool := createDataPools() - ipcf, err := shard.NewIntermediateProcessorsContainerFactory( - nil, - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - &mock.AddressConverterMock{}, - &mock.SpecialAddressHandlerMock{}, - &mock.ChainStorerMock{}, - dPool, - ) - - assert.Nil(t, ipcf) - assert.Equal(t, process.ErrNilShardCoordinator, err) + t.Parallel() + + dPool := createDataPools() + ipcf, err := shard.NewIntermediateProcessorsContainerFactory( + nil, + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + &mock.AddressConverterMock{}, + &mock.SpecialAddressHandlerMock{}, + &mock.ChainStorerMock{}, + dPool, + ) + + assert.Nil(t, ipcf) + assert.Equal(t, process.ErrNilShardCoordinator, err) } func TestNewIntermediateProcessorsContainerFactory_NilMarshalizer(t *testing.T) { - t.Parallel() - - dPool := createDataPools() - ipcf, err := shard.NewIntermediateProcessorsContainerFactory( - mock.NewMultiShardsCoordinatorMock(3), - nil, - &mock.HasherMock{}, - &mock.AddressConverterMock{}, - &mock.SpecialAddressHandlerMock{}, - &mock.ChainStorerMock{}, - dPool, - ) - - assert.Nil(t, ipcf) - assert.Equal(t, process.ErrNilMarshalizer, err) + t.Parallel() + + dPool := createDataPools() + ipcf, err := shard.NewIntermediateProcessorsContainerFactory( + mock.NewMultiShardsCoordinatorMock(3), + nil, + &mock.HasherMock{}, + &mock.AddressConverterMock{}, + &mock.SpecialAddressHandlerMock{}, + &mock.ChainStorerMock{}, + dPool, + ) + + assert.Nil(t, ipcf) + assert.Equal(t, process.ErrNilMarshalizer, err) } func TestNewIntermediateProcessorsContainerFactory_NilHasher(t *testing.T) { - t.Parallel() - - dPool := createDataPools() - ipcf, err := shard.NewIntermediateProcessorsContainerFactory( - mock.NewMultiShardsCoordinatorMock(3), - &mock.MarshalizerMock{}, - nil, - &mock.AddressConverterMock{}, - &mock.SpecialAddressHandlerMock{}, - &mock.ChainStorerMock{}, - dPool, - ) - - assert.Nil(t, ipcf) - assert.Equal(t, process.ErrNilHasher, err) + t.Parallel() + + dPool := createDataPools() + ipcf, err := shard.NewIntermediateProcessorsContainerFactory( + mock.NewMultiShardsCoordinatorMock(3), + &mock.MarshalizerMock{}, + nil, + &mock.AddressConverterMock{}, + &mock.SpecialAddressHandlerMock{}, + &mock.ChainStorerMock{}, + dPool, + ) + + assert.Nil(t, ipcf) + assert.Equal(t, process.ErrNilHasher, err) } func TestNewIntermediateProcessorsContainerFactory_NilAdrConv(t *testing.T) { - t.Parallel() - - dPool := createDataPools() 
- ipcf, err := shard.NewIntermediateProcessorsContainerFactory( - mock.NewMultiShardsCoordinatorMock(3), - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - nil, - &mock.SpecialAddressHandlerMock{}, - &mock.ChainStorerMock{}, - dPool, - ) - - assert.Nil(t, ipcf) - assert.Equal(t, process.ErrNilAddressConverter, err) + t.Parallel() + + dPool := createDataPools() + ipcf, err := shard.NewIntermediateProcessorsContainerFactory( + mock.NewMultiShardsCoordinatorMock(3), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + nil, + &mock.SpecialAddressHandlerMock{}, + &mock.ChainStorerMock{}, + dPool, + ) + + assert.Nil(t, ipcf) + assert.Equal(t, process.ErrNilAddressConverter, err) } func TestNewIntermediateProcessorsContainerFactory_NilStorer(t *testing.T) { - t.Parallel() - - dPool := createDataPools() - ipcf, err := shard.NewIntermediateProcessorsContainerFactory( - mock.NewMultiShardsCoordinatorMock(3), - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - &mock.AddressConverterMock{}, - &mock.SpecialAddressHandlerMock{}, - nil, - dPool, - ) - - assert.Nil(t, ipcf) - assert.Equal(t, process.ErrNilStorage, err) + t.Parallel() + + dPool := createDataPools() + ipcf, err := shard.NewIntermediateProcessorsContainerFactory( + mock.NewMultiShardsCoordinatorMock(3), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + &mock.AddressConverterMock{}, + &mock.SpecialAddressHandlerMock{}, + nil, + dPool, + ) + + assert.Nil(t, ipcf) + assert.Equal(t, process.ErrNilStorage, err) } func TestNewIntermediateProcessorsContainerFactory(t *testing.T) { - t.Parallel() - - dPool := createDataPools() - ipcf, err := shard.NewIntermediateProcessorsContainerFactory( - mock.NewMultiShardsCoordinatorMock(3), - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - &mock.AddressConverterMock{}, - &mock.SpecialAddressHandlerMock{}, - &mock.ChainStorerMock{}, - dPool, - ) - - assert.Nil(t, err) - assert.NotNil(t, ipcf) + t.Parallel() + + dPool := createDataPools() + ipcf, err := shard.NewIntermediateProcessorsContainerFactory( + mock.NewMultiShardsCoordinatorMock(3), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + &mock.AddressConverterMock{}, + &mock.SpecialAddressHandlerMock{}, + &mock.ChainStorerMock{}, + dPool, + ) + + assert.Nil(t, err) + assert.NotNil(t, ipcf) } func TestIntermediateProcessorsContainerFactory_Create(t *testing.T) { - t.Parallel() - - dPool := createDataPools() - ipcf, err := shard.NewIntermediateProcessorsContainerFactory( - mock.NewMultiShardsCoordinatorMock(3), - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - &mock.AddressConverterMock{}, - &mock.SpecialAddressHandlerMock{}, - &mock.ChainStorerMock{}, - dPool, - ) - - assert.Nil(t, err) - assert.NotNil(t, ipcf) - - container, err := ipcf.Create() - assert.Nil(t, err) - assert.Equal(t, 2, container.Len()) + t.Parallel() + + dPool := createDataPools() + ipcf, err := shard.NewIntermediateProcessorsContainerFactory( + mock.NewMultiShardsCoordinatorMock(3), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + &mock.AddressConverterMock{}, + &mock.SpecialAddressHandlerMock{}, + &mock.ChainStorerMock{}, + dPool, + ) + + assert.Nil(t, err) + assert.NotNil(t, ipcf) + + container, err := ipcf.Create() + assert.Nil(t, err) + assert.Equal(t, 2, container.Len()) } diff --git a/process/interface.go b/process/interface.go index a162dbcf33e..53059f4c4d5 100644 --- a/process/interface.go +++ b/process/interface.go @@ -1,382 +1,382 @@ package process import ( - "math/big" - "time" - - "github.com/ElrondNetwork/elrond-go/data" - "github.com/ElrondNetwork/elrond-go/data/block" - 
"github.com/ElrondNetwork/elrond-go/data/rewardTx" - "github.com/ElrondNetwork/elrond-go/data/smartContractResult" - "github.com/ElrondNetwork/elrond-go/data/state" - "github.com/ElrondNetwork/elrond-go/data/transaction" - "github.com/ElrondNetwork/elrond-go/p2p" - "github.com/ElrondNetwork/elrond-go/process/smartContract/hooks" - "github.com/ElrondNetwork/elrond-go/sharding" - "github.com/ElrondNetwork/elrond-go/storage" - vmcommon "github.com/ElrondNetwork/elrond-vm-common" + "math/big" + "time" + + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" + "github.com/ElrondNetwork/elrond-go/data/smartContractResult" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/data/transaction" + "github.com/ElrondNetwork/elrond-go/p2p" + "github.com/ElrondNetwork/elrond-go/process/smartContract/hooks" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/storage" + vmcommon "github.com/ElrondNetwork/elrond-vm-common" ) // TransactionProcessor is the main interface for transaction execution engine type TransactionProcessor interface { - ProcessTransaction(transaction *transaction.Transaction, round uint64) error - IsInterfaceNil() bool + ProcessTransaction(transaction *transaction.Transaction, round uint64) error + IsInterfaceNil() bool } // RewardTransactionProcessor is the interface for reward transaction execution engine type RewardTransactionProcessor interface { - ProcessRewardTransaction(rewardTx *rewardTx.RewardTx) error - IsInterfaceNil() bool + ProcessRewardTransaction(rewardTx *rewardTx.RewardTx) error + IsInterfaceNil() bool } // RewardTransactionPreProcessor prepares the processing of reward transactions type RewardTransactionPreProcessor interface { - AddComputedRewardMiniBlocks(computedRewardMiniblocks block.MiniBlockSlice) - IsInterfaceNil() bool + AddComputedRewardMiniBlocks(computedRewardMiniblocks block.MiniBlockSlice) + IsInterfaceNil() bool } // SmartContractResultProcessor is the main interface for smart contract result execution engine type SmartContractResultProcessor interface { - ProcessSmartContractResult(scr *smartContractResult.SmartContractResult) error - IsInterfaceNil() bool + ProcessSmartContractResult(scr *smartContractResult.SmartContractResult) error + IsInterfaceNil() bool } // TxTypeHandler is an interface to calculate the transaction type type TxTypeHandler interface { - ComputeTransactionType(tx data.TransactionHandler) (TransactionType, error) - IsInterfaceNil() bool + ComputeTransactionType(tx data.TransactionHandler) (TransactionType, error) + IsInterfaceNil() bool } // TxValidator can determine if a provided transaction handler is valid or not from the process point of view type TxValidator interface { - IsTxValidForProcessing(txHandler data.TransactionHandler) bool - IsInterfaceNil() bool + IsTxValidForProcessing(txHandler data.TransactionHandler) bool + IsInterfaceNil() bool } // HeaderValidator can determine if a provided header handler is valid or not from the process point of view type HeaderValidator interface { - IsHeaderValidForProcessing(headerHandler data.HeaderHandler) bool - IsInterfaceNil() bool + IsHeaderValidForProcessing(headerHandler data.HeaderHandler) bool + IsInterfaceNil() bool } // TransactionCoordinator is an interface to coordinate transaction processing using multiple processors type TransactionCoordinator interface { - RequestMiniBlocks(header data.HeaderHandler) - 
RequestBlockTransactions(body block.Body)
- IsDataPreparedForProcessing(haveTime func() time.Duration) error
+ RequestMiniBlocks(header data.HeaderHandler)
+ RequestBlockTransactions(body block.Body)
+ IsDataPreparedForProcessing(haveTime func() time.Duration) error

- SaveBlockDataToStorage(body block.Body) error
- RestoreBlockDataFromStorage(body block.Body) (int, map[int][][]byte, error)
- RemoveBlockDataFromPool(body block.Body) error
+ SaveBlockDataToStorage(body block.Body) error
+ RestoreBlockDataFromStorage(body block.Body) (int, map[int][][]byte, error)
+ RemoveBlockDataFromPool(body block.Body) error

- ProcessBlockTransaction(body block.Body, round uint64, haveTime func() time.Duration) error
+ ProcessBlockTransaction(body block.Body, round uint64, haveTime func() time.Duration) error

- CreateBlockStarted()
- CreateMbsAndProcessCrossShardTransactionsDstMe(header data.HeaderHandler, maxTxSpaceRemained uint32, maxMbSpaceRemained uint32, round uint64, haveTime func() bool) (block.MiniBlockSlice, uint32, bool)
- CreateMbsAndProcessTransactionsFromMe(maxTxSpaceRemained uint32, maxMbSpaceRemained uint32, round uint64, haveTime func() bool) block.MiniBlockSlice
+ CreateBlockStarted()
+ CreateMbsAndProcessCrossShardTransactionsDstMe(header data.HeaderHandler, maxTxSpaceRemained uint32, maxMbSpaceRemained uint32, round uint64, haveTime func() bool) (block.MiniBlockSlice, uint32, bool)
+ CreateMbsAndProcessTransactionsFromMe(maxTxSpaceRemained uint32, maxMbSpaceRemained uint32, round uint64, haveTime func() bool) block.MiniBlockSlice

- CreateMarshalizedData(body block.Body) (map[uint32]block.MiniBlockSlice, map[string][][]byte)
+ CreateMarshalizedData(body block.Body) (map[uint32]block.MiniBlockSlice, map[string][][]byte)

- GetAllCurrentUsedTxs(blockType block.Type) map[string]data.TransactionHandler
+ GetAllCurrentUsedTxs(blockType block.Type) map[string]data.TransactionHandler

- VerifyCreatedBlockTransactions(body block.Body) error
- IsInterfaceNil() bool
+ VerifyCreatedBlockTransactions(body block.Body) error
+ IsInterfaceNil() bool
 }

 // SmartContractProcessor is the main interface for the smart contract caller engine
 type SmartContractProcessor interface {
- ComputeTransactionType(tx *transaction.Transaction) (TransactionType, error)
- ExecuteSmartContractTransaction(tx *transaction.Transaction, acntSrc, acntDst state.AccountHandler, round uint64) error
- DeploySmartContract(tx *transaction.Transaction, acntSrc state.AccountHandler, round uint64) error
- IsInterfaceNil() bool
+ ComputeTransactionType(tx *transaction.Transaction) (TransactionType, error)
+ ExecuteSmartContractTransaction(tx *transaction.Transaction, acntSrc, acntDst state.AccountHandler, round uint64) error
+ DeploySmartContract(tx *transaction.Transaction, acntSrc state.AccountHandler, round uint64) error
+ IsInterfaceNil() bool
 }

 // IntermediateTransactionHandler handles transactions which are not resolved in only one step
 type IntermediateTransactionHandler interface {
- AddIntermediateTransactions(txs []data.TransactionHandler) error
- CreateAllInterMiniBlocks() map[uint32]*block.MiniBlock
- VerifyInterMiniBlocks(body block.Body) error
+ CreateMarshalizedData(txHashes [][]byte) ([][]byte, error)
+ SaveCurrentIntermediateTxToStorage() error
+ GetAllCurrentFinishedTxs() map[string]data.TransactionHandler
+ CreateBlockStarted()
+ IsInterfaceNil() bool
 }

 // TransactionVerifier interface validates if the transaction is good and if it should be processed
 type TransactionVerifier interface {
- IsTransactionValid(tx data.TransactionHandler) error
+ IsTransactionValid(tx data.TransactionHandler) error
 }

 // UnsignedTxHandler creates and verifies unsigned transactions for current round
 type TransactionFeeHandler interface {
- ProcessTransactionFee(cost *big.Int)
- IsInterfaceNil() bool
+ ProcessTransactionFee(cost *big.Int)
+ IsInterfaceNil() bool
 }

 // SpecialAddressHandler responds with needed special addresses
 type SpecialAddressHandler interface {
- SetElrondCommunityAddress(elrond []byte)
- ElrondCommunityAddress() []byte
- SetConsensusData(consensusRewardAddresses []string, round uint64, epoch uint32)
- ConsensusRewardAddresses() []string
- LeaderAddress() []byte
- BurnAddress() []byte
- ShardIdForAddress([]byte) (uint32, error)
- Round() uint64
- Epoch() uint32
- IsInterfaceNil() bool
+ SetElrondCommunityAddress(elrond []byte)
+ ElrondCommunityAddress() []byte
+ SetConsensusData(consensusRewardAddresses []string, round uint64, epoch uint32)
+ ConsensusRewardAddresses() []string
+ LeaderAddress() []byte
+ BurnAddress() []byte
+ ShardIdForAddress([]byte) (uint32, error)
+ Round() uint64
+ Epoch() uint32
+ IsInterfaceNil() bool
 }

 // PreProcessor is an interface used to prepare and process transaction data
 type PreProcessor interface {
- CreateBlockStarted()
- IsDataPrepared(requestedTxs int, haveTime func() time.Duration) error
+ CreateBlockStarted()
+ IsDataPrepared(requestedTxs int, haveTime func() time.Duration) error

- RemoveTxBlockFromPools(body block.Body, miniBlockPool storage.Cacher) error
- RestoreTxBlockIntoPools(body block.Body, miniBlockPool storage.Cacher) (int, map[int][]byte, error)
- SaveTxBlockToStorage(body block.Body) error
+ RemoveTxBlockFromPools(body block.Body, miniBlockPool storage.Cacher) error
+ RestoreTxBlockIntoPools(body block.Body, miniBlockPool storage.Cacher) (int, map[int][]byte, error)
+ SaveTxBlockToStorage(body block.Body) error

- ProcessBlockTransactions(body block.Body, round uint64, haveTime func() time.Duration) error
- RequestBlockTransactions(body block.Body) int
+ ProcessBlockTransactions(body block.Body, round uint64, haveTime func() time.Duration) error
+ RequestBlockTransactions(body block.Body) int

- CreateMarshalizedData(txHashes [][]byte) ([][]byte, error)
+ CreateMarshalizedData(txHashes [][]byte) ([][]byte, error)

- RequestTransactionsForMiniBlock(mb block.MiniBlock) int
- ProcessMiniBlock(miniBlock *block.MiniBlock, haveTime func() bool, round uint64) error
- CreateAndProcessMiniBlock(sndShardId, dstShardId uint32, spaceRemained int, haveTime func() bool, round uint64) (*block.MiniBlock, error)
+ RequestTransactionsForMiniBlock(mb block.MiniBlock) int
+ ProcessMiniBlock(miniBlock *block.MiniBlock, haveTime func() bool, round uint64) error
+ CreateAndProcessMiniBlock(sndShardId, dstShardId uint32, spaceRemained int, haveTime func() bool, round uint64) (*block.MiniBlock, error)

- GetAllCurrentUsedTxs() map[string]data.TransactionHandler
- IsInterfaceNil() bool
+ GetAllCurrentUsedTxs() map[string]data.TransactionHandler
+ IsInterfaceNil() bool
 }

 // BlockProcessor is the main interface for block execution engine
 type BlockProcessor interface {
- ProcessBlock(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler, haveTime func() time.Duration) error
- CommitBlock(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler) error
- RevertAccountState()
- CreateBlockBody(round uint64, haveTime func() bool) (data.BodyHandler, error)
- RestoreBlockIntoPools(header data.HeaderHandler, body data.BodyHandler) error
- CreateBlockHeader(body data.BodyHandler, round uint64, haveTime func() bool) (data.HeaderHandler, error)
- MarshalizedDataToBroadcast(header data.HeaderHandler, body data.BodyHandler) (map[uint32][]byte, map[string][][]byte, error)
- DecodeBlockBody(dta []byte) data.BodyHandler
- DecodeBlockHeader(dta []byte) data.HeaderHandler
- AddLastNotarizedHdr(shardId uint32, processedHdr data.HeaderHandler)
- SetConsensusRewardAddresses(consensusRewardAddresses []string, round uint64)
- IsInterfaceNil() bool
+ ProcessBlock(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler, haveTime func() time.Duration) error
+ CommitBlock(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler) error
+ RevertAccountState()
+ CreateBlockBody(round uint64, haveTime func() bool) (data.BodyHandler, error)
+ RestoreBlockIntoPools(header data.HeaderHandler, body data.BodyHandler) error
+ CreateBlockHeader(body data.BodyHandler, round uint64, haveTime func() bool) (data.HeaderHandler, error)
+ MarshalizedDataToBroadcast(header data.HeaderHandler, body data.BodyHandler) (map[uint32][]byte, map[string][][]byte, error)
+ DecodeBlockBody(dta []byte) data.BodyHandler
+ DecodeBlockHeader(dta []byte) data.HeaderHandler
+ AddLastNotarizedHdr(shardId uint32, processedHdr data.HeaderHandler)
+ SetConsensusRewardAddresses(consensusRewardAddresses []string, round uint64)
+ IsInterfaceNil() bool
 }

 // Checker provides functionality to checks the integrity and validity of a data structure
 type Checker interface {
- // IntegrityAndValidity does both validity and integrity checks on the data structure
- IntegrityAndValidity(coordinator sharding.Coordinator) error
- // Integrity checks only the integrity of the data
- Integrity(coordinator sharding.Coordinator) error
- // IsInterfaceNil returns true if there is no value under the interface
- IsInterfaceNil() bool
+ // IntegrityAndValidity does both validity and integrity checks on the data structure
+ IntegrityAndValidity(coordinator sharding.Coordinator) error
+ // Integrity checks only the integrity of the data
+ Integrity(coordinator sharding.Coordinator) error
+ // IsInterfaceNil returns true if there is no value under the interface
+ IsInterfaceNil() bool
 }

 // SigVerifier provides functionality to verify a signature of a signed data structure that holds also the verifying parameters
 type SigVerifier interface {
- VerifySig() error
+ VerifySig() error
 }

 // SignedDataValidator provides functionality to check the validity and signature of a data structure
 type SignedDataValidator interface {
- SigVerifier
- Checker
+ SigVerifier
+ Checker
 }

 // HashAccesser interface provides functionality over hashable objects
 type HashAccesser interface {
- SetHash([]byte)
- Hash() []byte
+ SetHash([]byte)
+ Hash() []byte
 }

 // InterceptedBlockBody interface provides functionality over intercepted blocks
 type InterceptedBlockBody interface {
- Checker
- HashAccesser
- GetUnderlyingObject() interface{}
+ Checker
+ HashAccesser
+ GetUnderlyingObject() interface{}
 }

 // Bootstrapper is an interface that defines the behaviour of a struct that is able
 // to synchronize the
node type Bootstrapper interface { - AddSyncStateListener(func(isSyncing bool)) - ShouldSync() bool - StopSync() - StartSync() - IsInterfaceNil() bool + AddSyncStateListener(func(isSyncing bool)) + ShouldSync() bool + StopSync() + StartSync() + IsInterfaceNil() bool } // ForkDetector is an interface that defines the behaviour of a struct that is able // to detect forks type ForkDetector interface { - AddHeader(header data.HeaderHandler, headerHash []byte, state BlockHeaderState, finalHeader data.HeaderHandler, finalHeaderHash []byte) error - RemoveHeaders(nonce uint64, hash []byte) - CheckFork() (forkDetected bool, nonce uint64, hash []byte) - GetHighestFinalBlockNonce() uint64 - ProbableHighestNonce() uint64 - ResetProbableHighestNonceIfNeeded() - IsInterfaceNil() bool + AddHeader(header data.HeaderHandler, headerHash []byte, state BlockHeaderState, finalHeader data.HeaderHandler, finalHeaderHash []byte) error + RemoveHeaders(nonce uint64, hash []byte) + CheckFork() (forkDetected bool, nonce uint64, hash []byte) + GetHighestFinalBlockNonce() uint64 + ProbableHighestNonce() uint64 + ResetProbableHighestNonceIfNeeded() + IsInterfaceNil() bool } // InterceptorsContainer defines an interceptors holder data type with basic functionality type InterceptorsContainer interface { - Get(key string) (Interceptor, error) - Add(key string, val Interceptor) error - AddMultiple(keys []string, interceptors []Interceptor) error - Replace(key string, val Interceptor) error - Remove(key string) - Len() int - IsInterfaceNil() bool + Get(key string) (Interceptor, error) + Add(key string, val Interceptor) error + AddMultiple(keys []string, interceptors []Interceptor) error + Replace(key string, val Interceptor) error + Remove(key string) + Len() int + IsInterfaceNil() bool } // InterceptorsContainerFactory defines the functionality to create an interceptors container type InterceptorsContainerFactory interface { - Create() (InterceptorsContainer, error) - IsInterfaceNil() bool + Create() (InterceptorsContainer, error) + IsInterfaceNil() bool } // PreProcessorsContainer defines an PreProcessors holder data type with basic functionality type PreProcessorsContainer interface { - Get(key block.Type) (PreProcessor, error) - Add(key block.Type, val PreProcessor) error - AddMultiple(keys []block.Type, preprocessors []PreProcessor) error - Replace(key block.Type, val PreProcessor) error - Remove(key block.Type) - Len() int - Keys() []block.Type - IsInterfaceNil() bool + Get(key block.Type) (PreProcessor, error) + Add(key block.Type, val PreProcessor) error + AddMultiple(keys []block.Type, preprocessors []PreProcessor) error + Replace(key block.Type, val PreProcessor) error + Remove(key block.Type) + Len() int + Keys() []block.Type + IsInterfaceNil() bool } // PreProcessorsContainerFactory defines the functionality to create an PreProcessors container type PreProcessorsContainerFactory interface { - Create() (PreProcessorsContainer, error) - IsInterfaceNil() bool + Create() (PreProcessorsContainer, error) + IsInterfaceNil() bool } // IntermediateProcessorContainer defines an IntermediateProcessor holder data type with basic functionality type IntermediateProcessorContainer interface { - Get(key block.Type) (IntermediateTransactionHandler, error) - Add(key block.Type, val IntermediateTransactionHandler) error - AddMultiple(keys []block.Type, preprocessors []IntermediateTransactionHandler) error - Replace(key block.Type, val IntermediateTransactionHandler) error - Remove(key block.Type) - Len() int - Keys() []block.Type - 
IsInterfaceNil() bool + Get(key block.Type) (IntermediateTransactionHandler, error) + Add(key block.Type, val IntermediateTransactionHandler) error + AddMultiple(keys []block.Type, preprocessors []IntermediateTransactionHandler) error + Replace(key block.Type, val IntermediateTransactionHandler) error + Remove(key block.Type) + Len() int + Keys() []block.Type + IsInterfaceNil() bool } // IntermediateProcessorsContainerFactory defines the functionality to create an IntermediateProcessors container type IntermediateProcessorsContainerFactory interface { - Create() (IntermediateProcessorContainer, error) - IsInterfaceNil() bool + Create() (IntermediateProcessorContainer, error) + IsInterfaceNil() bool } // VirtualMachinesContainer defines a virtual machine holder data type with basic functionality type VirtualMachinesContainer interface { - Get(key []byte) (vmcommon.VMExecutionHandler, error) - Add(key []byte, val vmcommon.VMExecutionHandler) error - AddMultiple(keys [][]byte, vms []vmcommon.VMExecutionHandler) error - Replace(key []byte, val vmcommon.VMExecutionHandler) error - Remove(key []byte) - Len() int - Keys() [][]byte - IsInterfaceNil() bool + Get(key []byte) (vmcommon.VMExecutionHandler, error) + Add(key []byte, val vmcommon.VMExecutionHandler) error + AddMultiple(keys [][]byte, vms []vmcommon.VMExecutionHandler) error + Replace(key []byte, val vmcommon.VMExecutionHandler) error + Remove(key []byte) + Len() int + Keys() [][]byte + IsInterfaceNil() bool } // VirtualMachinesContainerFactory defines the functionality to create a virtual machine container type VirtualMachinesContainerFactory interface { - Create() (VirtualMachinesContainer, error) - VMAccountsDB() *hooks.VMAccountsDB - IsInterfaceNil() bool + Create() (VirtualMachinesContainer, error) + VMAccountsDB() *hooks.VMAccountsDB + IsInterfaceNil() bool } // Interceptor defines what a data interceptor should do // It should also adhere to the p2p.MessageProcessor interface so it can wire to a p2p.Messenger type Interceptor interface { - ProcessReceivedMessage(message p2p.MessageP2P) error - IsInterfaceNil() bool + ProcessReceivedMessage(message p2p.MessageP2P) error + IsInterfaceNil() bool } // MessageHandler defines the functionality needed by structs to send data to other peers type MessageHandler interface { - ConnectedPeersOnTopic(topic string) []p2p.PeerID - SendToConnectedPeer(topic string, buff []byte, peerID p2p.PeerID) error - IsInterfaceNil() bool + ConnectedPeersOnTopic(topic string) []p2p.PeerID + SendToConnectedPeer(topic string, buff []byte, peerID p2p.PeerID) error + IsInterfaceNil() bool } // TopicHandler defines the functionality needed by structs to manage topics and message processors type TopicHandler interface { - HasTopic(name string) bool - CreateTopic(name string, createChannelForTopic bool) error - RegisterMessageProcessor(topic string, handler p2p.MessageProcessor) error + HasTopic(name string) bool + CreateTopic(name string, createChannelForTopic bool) error + RegisterMessageProcessor(topic string, handler p2p.MessageProcessor) error } // TopicMessageHandler defines the functionality needed by structs to manage topics, message processors and to send data // to other peers type TopicMessageHandler interface { - MessageHandler - TopicHandler + MessageHandler + TopicHandler } // DataPacker can split a large slice of byte slices in smaller packets type DataPacker interface { - PackDataInChunks(data [][]byte, limit int) ([][]byte, error) - IsInterfaceNil() bool + PackDataInChunks(data [][]byte, limit int) 
([][]byte, error) + IsInterfaceNil() bool } // BlocksTracker defines the functionality to track all the notarised blocks type BlocksTracker interface { - UnnotarisedBlocks() []data.HeaderHandler - RemoveNotarisedBlocks(headerHandler data.HeaderHandler) error - AddBlock(headerHandler data.HeaderHandler) - SetBlockBroadcastRound(nonce uint64, round int64) - BlockBroadcastRound(nonce uint64) int64 - IsInterfaceNil() bool + UnnotarisedBlocks() []data.HeaderHandler + RemoveNotarisedBlocks(headerHandler data.HeaderHandler) error + AddBlock(headerHandler data.HeaderHandler) + SetBlockBroadcastRound(nonce uint64, round int64) + BlockBroadcastRound(nonce uint64) int64 + IsInterfaceNil() bool } // RequestHandler defines the methods through which request to data can be made type RequestHandler interface { - RequestHeaderByNonce(shardId uint32, nonce uint64) - RequestTransaction(shardId uint32, txHashes [][]byte) - RequestUnsignedTransactions(destShardID uint32, scrHashes [][]byte) - RequestRewardTransactions(destShardID uint32, txHashes [][]byte) - RequestMiniBlock(shardId uint32, miniblockHash []byte) - RequestHeader(shardId uint32, hash []byte) - IsInterfaceNil() bool + RequestHeaderByNonce(shardId uint32, nonce uint64) + RequestTransaction(shardId uint32, txHashes [][]byte) + RequestUnsignedTransactions(destShardID uint32, scrHashes [][]byte) + RequestRewardTransactions(destShardID uint32, txHashes [][]byte) + RequestMiniBlock(shardId uint32, miniblockHash []byte) + RequestHeader(shardId uint32, hash []byte) + IsInterfaceNil() bool } // ArgumentsParser defines the functionality to parse transaction data into arguments and code for smart contracts type ArgumentsParser interface { - GetArguments() ([]*big.Int, error) - GetCode() ([]byte, error) - GetFunction() (string, error) - ParseData(data string) error + GetArguments() ([]*big.Int, error) + GetCode() ([]byte, error) + GetFunction() (string, error) + ParseData(data string) error - CreateDataFromStorageUpdate(storageUpdates []*vmcommon.StorageUpdate) string - GetStorageUpdates(data string) ([]*vmcommon.StorageUpdate, error) - IsInterfaceNil() bool + CreateDataFromStorageUpdate(storageUpdates []*vmcommon.StorageUpdate) string + GetStorageUpdates(data string) ([]*vmcommon.StorageUpdate, error) + IsInterfaceNil() bool } // TemporaryAccountsHandler defines the functionality to create temporary accounts and pass to VM. 
// This holder will contain usually one account from shard X that calls a SC in shard Y // so when executing the code in shard Y, this impl will hold an ephemeral copy of the sender account from shard X type TemporaryAccountsHandler interface { - AddTempAccount(address []byte, balance *big.Int, nonce uint64) - CleanTempAccounts() - TempAccount(address []byte) state.AccountHandler - IsInterfaceNil() bool + AddTempAccount(address []byte, balance *big.Int, nonce uint64) + CleanTempAccounts() + TempAccount(address []byte) state.AccountHandler + IsInterfaceNil() bool } // BlockSizeThrottler defines the functionality of adapting the node to the network speed/latency when it should send a // block to its peers which should be received in a limited time frame type BlockSizeThrottler interface { - MaxItemsToAdd() uint32 - Add(round uint64, items uint32) - Succeed(round uint64) - ComputeMaxItems() - IsInterfaceNil() bool + MaxItemsToAdd() uint32 + Add(round uint64, items uint32) + Succeed(round uint64) + ComputeMaxItems() + IsInterfaceNil() bool } diff --git a/process/mock/blockProcessorMock.go b/process/mock/blockProcessorMock.go index 19d7073f4dd..3e3687761d0 100644 --- a/process/mock/blockProcessorMock.go +++ b/process/mock/blockProcessorMock.go @@ -1,80 +1,80 @@ package mock import ( - "math/big" - "time" + "math/big" + "time" - "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data" ) type BlockProcessorMock struct { - ProcessBlockCalled func(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler, haveTime func() time.Duration) error - CommitBlockCalled func(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler) error - RevertAccountStateCalled func() - CreateGenesisBlockCalled func(balances map[string]*big.Int) (data.HeaderHandler, error) - CreateBlockCalled func(round uint64, haveTime func() bool) (data.BodyHandler, error) - RestoreBlockIntoPoolsCalled func(header data.HeaderHandler, body data.BodyHandler) error - noShards uint32 - SetOnRequestTransactionCalled func(f func(destShardID uint32, txHash []byte)) - CreateBlockHeaderCalled func(body data.BodyHandler, round uint64, haveTime func() bool) (data.HeaderHandler, error) - MarshalizedDataToBroadcastCalled func(header data.HeaderHandler, body data.BodyHandler) (map[uint32][]byte, map[string][][]byte, error) - DecodeBlockBodyCalled func(dta []byte) data.BodyHandler - DecodeBlockHeaderCalled func(dta []byte) data.HeaderHandler - AddLastNotarizedHdrCalled func(shardId uint32, processedHdr data.HeaderHandler) + ProcessBlockCalled func(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler, haveTime func() time.Duration) error + CommitBlockCalled func(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler) error + RevertAccountStateCalled func() + CreateGenesisBlockCalled func(balances map[string]*big.Int) (data.HeaderHandler, error) + CreateBlockCalled func(round uint64, haveTime func() bool) (data.BodyHandler, error) + RestoreBlockIntoPoolsCalled func(header data.HeaderHandler, body data.BodyHandler) error + noShards uint32 + SetOnRequestTransactionCalled func(f func(destShardID uint32, txHash []byte)) + CreateBlockHeaderCalled func(body data.BodyHandler, round uint64, haveTime func() bool) (data.HeaderHandler, error) + MarshalizedDataToBroadcastCalled func(header data.HeaderHandler, body data.BodyHandler) (map[uint32][]byte, map[string][][]byte, error) + DecodeBlockBodyCalled func(dta []byte) data.BodyHandler + 
DecodeBlockHeaderCalled func(dta []byte) data.HeaderHandler + AddLastNotarizedHdrCalled func(shardId uint32, processedHdr data.HeaderHandler) } func (bpm *BlockProcessorMock) ProcessBlock(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler, haveTime func() time.Duration) error { - return bpm.ProcessBlockCalled(blockChain, header, body, haveTime) + return bpm.ProcessBlockCalled(blockChain, header, body, haveTime) } func (bpm *BlockProcessorMock) CommitBlock(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler) error { - return bpm.CommitBlockCalled(blockChain, header, body) + return bpm.CommitBlockCalled(blockChain, header, body) } func (bpm *BlockProcessorMock) RevertAccountState() { - bpm.RevertAccountStateCalled() + bpm.RevertAccountStateCalled() } func (blProcMock BlockProcessorMock) CreateGenesisBlock(balances map[string]*big.Int) (data.HeaderHandler, error) { - return blProcMock.CreateGenesisBlockCalled(balances) + return blProcMock.CreateGenesisBlockCalled(balances) } func (blProcMock BlockProcessorMock) CreateBlockBody(round uint64, haveTime func() bool) (data.BodyHandler, error) { - return blProcMock.CreateBlockCalled(round, haveTime) + return blProcMock.CreateBlockCalled(round, haveTime) } func (blProcMock BlockProcessorMock) RestoreBlockIntoPools(header data.HeaderHandler, body data.BodyHandler) error { - return blProcMock.RestoreBlockIntoPoolsCalled(header, body) + return blProcMock.RestoreBlockIntoPoolsCalled(header, body) } func (blProcMock BlockProcessorMock) CreateBlockHeader(body data.BodyHandler, round uint64, haveTime func() bool) (data.HeaderHandler, error) { - return blProcMock.CreateBlockHeaderCalled(body, round, haveTime) + return blProcMock.CreateBlockHeaderCalled(body, round, haveTime) } func (blProcMock BlockProcessorMock) MarshalizedDataToBroadcast(header data.HeaderHandler, body data.BodyHandler) (map[uint32][]byte, map[string][][]byte, error) { - return blProcMock.MarshalizedDataToBroadcastCalled(header, body) + return blProcMock.MarshalizedDataToBroadcastCalled(header, body) } func (blProcMock BlockProcessorMock) DecodeBlockBody(dta []byte) data.BodyHandler { - return blProcMock.DecodeBlockBodyCalled(dta) + return blProcMock.DecodeBlockBodyCalled(dta) } func (blProcMock BlockProcessorMock) DecodeBlockHeader(dta []byte) data.HeaderHandler { - return blProcMock.DecodeBlockHeaderCalled(dta) + return blProcMock.DecodeBlockHeaderCalled(dta) } func (blProcMock BlockProcessorMock) AddLastNotarizedHdr(shardId uint32, processedHdr data.HeaderHandler) { - blProcMock.AddLastNotarizedHdrCalled(shardId, processedHdr) + blProcMock.AddLastNotarizedHdrCalled(shardId, processedHdr) } func (blProcMock BlockProcessorMock) SetConsensusRewardAddresses([]string, uint64) { - panic("implement me") + panic("implement me") } // IsInterfaceNil returns true if there is no value under the interface func (blProcMock *BlockProcessorMock) IsInterfaceNil() bool { - if blProcMock == nil { - return true - } - return false + if blProcMock == nil { + return true + } + return false } diff --git a/process/mock/specialAddressHandlerMock.go b/process/mock/specialAddressHandlerMock.go index d70311a2291..6dc47db5627 100644 --- a/process/mock/specialAddressHandlerMock.go +++ b/process/mock/specialAddressHandlerMock.go @@ -1,98 +1,98 @@ package mock import ( - "github.com/ElrondNetwork/elrond-go/data/state" - "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/sharding" ) 
type SpecialAddressHandlerMock struct { - ElrondCommunityAddressCalled func() []byte - LeaderAddressCalled func() []byte - BurnAddressCalled func() []byte - ShardIdForAddressCalled func([]byte) (uint32, error) - AdrConv state.AddressConverter - ShardCoordinator sharding.Coordinator - - addresses []string - epoch uint32 - round uint64 + ElrondCommunityAddressCalled func() []byte + LeaderAddressCalled func() []byte + BurnAddressCalled func() []byte + ShardIdForAddressCalled func([]byte) (uint32, error) + AdrConv state.AddressConverter + ShardCoordinator sharding.Coordinator + + addresses []string + epoch uint32 + round uint64 } func NewSpecialAddressHandlerMock( - addrConv state.AddressConverter, - shardCoordinator sharding.Coordinator, + addrConv state.AddressConverter, + shardCoordinator sharding.Coordinator, ) *SpecialAddressHandlerMock { - return &SpecialAddressHandlerMock{ - ElrondCommunityAddressCalled: nil, - LeaderAddressCalled: nil, - BurnAddressCalled: nil, - ShardIdForAddressCalled: nil, - AdrConv: addrConv, - ShardCoordinator: shardCoordinator, - addresses: nil, - epoch: 0, - round: 0, - } + return &SpecialAddressHandlerMock{ + ElrondCommunityAddressCalled: nil, + LeaderAddressCalled: nil, + BurnAddressCalled: nil, + ShardIdForAddressCalled: nil, + AdrConv: addrConv, + ShardCoordinator: shardCoordinator, + addresses: nil, + epoch: 0, + round: 0, + } } func (sh *SpecialAddressHandlerMock) SetElrondCommunityAddress(elrond []byte) { } func (sh *SpecialAddressHandlerMock) SetConsensusData(consensusRewardAddresses []string, round uint64, epoch uint32) { - sh.addresses = consensusRewardAddresses - sh.epoch = epoch - sh.round = round + sh.addresses = consensusRewardAddresses + sh.epoch = epoch + sh.round = round } func (sh *SpecialAddressHandlerMock) ConsensusRewardAddresses() []string { - return sh.addresses + return sh.addresses } func (sh *SpecialAddressHandlerMock) BurnAddress() []byte { - if sh.BurnAddressCalled == nil { - return []byte("burn") - } + if sh.BurnAddressCalled == nil { + return []byte("burn") + } - return sh.BurnAddressCalled() + return sh.BurnAddressCalled() } func (sh *SpecialAddressHandlerMock) ElrondCommunityAddress() []byte { - if sh.ElrondCommunityAddressCalled == nil { - return []byte("elrond") - } + if sh.ElrondCommunityAddressCalled == nil { + return []byte("elrond") + } - return sh.ElrondCommunityAddressCalled() + return sh.ElrondCommunityAddressCalled() } func (sh *SpecialAddressHandlerMock) Round() uint64 { - return sh.round + return sh.round } func (sh *SpecialAddressHandlerMock) Epoch() uint32 { - return sh.epoch + return sh.epoch } func (sh *SpecialAddressHandlerMock) LeaderAddress() []byte { - if sh.LeaderAddressCalled == nil { - return []byte("leader") - } + if sh.LeaderAddressCalled == nil { + return []byte("leader") + } - return sh.LeaderAddressCalled() + return sh.LeaderAddressCalled() } func (sh *SpecialAddressHandlerMock) ShardIdForAddress(addr []byte) (uint32, error) { - convAdr, err := sh.AdrConv.CreateAddressFromPublicKeyBytes(addr) - if err != nil { - return 0, err - } + convAdr, err := sh.AdrConv.CreateAddressFromPublicKeyBytes(addr) + if err != nil { + return 0, err + } - return sh.ShardCoordinator.ComputeId(convAdr), nil + return sh.ShardCoordinator.ComputeId(convAdr), nil } // IsInterfaceNil returns true if there is no value under the interface func (sh *SpecialAddressHandlerMock) IsInterfaceNil() bool { - if sh == nil { - return true - } - return false + if sh == nil { + return true + } + return false } diff --git 
a/process/rewardTransaction/interceptedRewardTransaction.go b/process/rewardTransaction/interceptedRewardTransaction.go index 774bc2e7b64..8a97f316786 100644 --- a/process/rewardTransaction/interceptedRewardTransaction.go +++ b/process/rewardTransaction/interceptedRewardTransaction.go @@ -1,149 +1,149 @@ package rewardTransaction import ( - "math/big" - - "github.com/ElrondNetwork/elrond-go/data" - "github.com/ElrondNetwork/elrond-go/data/rewardTx" - "github.com/ElrondNetwork/elrond-go/data/state" - "github.com/ElrondNetwork/elrond-go/hashing" - "github.com/ElrondNetwork/elrond-go/marshal" - "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/sharding" + "math/big" + + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/sharding" ) // InterceptedRewardTransaction holds and manages a transaction based struct with extended functionality type InterceptedRewardTransaction struct { - rTx *rewardTx.RewardTx - marshalizer marshal.Marshalizer - hasher hashing.Hasher - addrConv state.AddressConverter - coordinator sharding.Coordinator - hash []byte - rcvShard uint32 - sndShard uint32 - isAddressedToOtherShards bool + rTx *rewardTx.RewardTx + marshalizer marshal.Marshalizer + hasher hashing.Hasher + addrConv state.AddressConverter + coordinator sharding.Coordinator + hash []byte + rcvShard uint32 + sndShard uint32 + isAddressedToOtherShards bool } // NewInterceptedRewardTransaction returns a new instance of InterceptedRewardTransaction func NewInterceptedRewardTransaction( - rewardTxBuff []byte, - marshalizer marshal.Marshalizer, - hasher hashing.Hasher, - addrConv state.AddressConverter, - coordinator sharding.Coordinator, + rewardTxBuff []byte, + marshalizer marshal.Marshalizer, + hasher hashing.Hasher, + addrConv state.AddressConverter, + coordinator sharding.Coordinator, ) (*InterceptedRewardTransaction, error) { - if rewardTxBuff == nil { - return nil, process.ErrNilBuffer - } - if marshalizer == nil { - return nil, process.ErrNilMarshalizer - } - if hasher == nil { - return nil, process.ErrNilHasher - } - if addrConv == nil { - return nil, process.ErrNilAddressConverter - } - if coordinator == nil { - return nil, process.ErrNilShardCoordinator - } - - rTx := &rewardTx.RewardTx{} - err := marshalizer.Unmarshal(rTx, rewardTxBuff) - if err != nil { - return nil, err - } - - inRewardTx := &InterceptedRewardTransaction{ - rTx: rTx, - marshalizer: marshalizer, - hasher: hasher, - addrConv: addrConv, - coordinator: coordinator, - } - - err = inRewardTx.processFields(rewardTxBuff) - if err != nil { - return nil, err - } - - err = inRewardTx.integrity() - if err != nil { - return nil, err - } - - err = inRewardTx.verifyIfNotarized(inRewardTx.hash) - if err != nil { - return nil, err - } - - return inRewardTx, nil + if rewardTxBuff == nil { + return nil, process.ErrNilBuffer + } + if marshalizer == nil { + return nil, process.ErrNilMarshalizer + } + if hasher == nil { + return nil, process.ErrNilHasher + } + if addrConv == nil { + return nil, process.ErrNilAddressConverter + } + if coordinator == nil { + return nil, process.ErrNilShardCoordinator + } + + rTx := &rewardTx.RewardTx{} + err := marshalizer.Unmarshal(rTx, rewardTxBuff) + if err != nil { + return nil, err + } + + inRewardTx := 
&InterceptedRewardTransaction{ + rTx: rTx, + marshalizer: marshalizer, + hasher: hasher, + addrConv: addrConv, + coordinator: coordinator, + } + + err = inRewardTx.processFields(rewardTxBuff) + if err != nil { + return nil, err + } + + err = inRewardTx.integrity() + if err != nil { + return nil, err + } + + err = inRewardTx.verifyIfNotarized(inRewardTx.hash) + if err != nil { + return nil, err + } + + return inRewardTx, nil } func (inRTx *InterceptedRewardTransaction) processFields(rewardTxBuff []byte) error { - inRTx.hash = inRTx.hasher.Compute(string(rewardTxBuff)) + inRTx.hash = inRTx.hasher.Compute(string(rewardTxBuff)) - rcvAddr, err := inRTx.addrConv.CreateAddressFromPublicKeyBytes(inRTx.rTx.RcvAddr) - if err != nil { - return process.ErrInvalidRcvAddr - } + rcvAddr, err := inRTx.addrConv.CreateAddressFromPublicKeyBytes(inRTx.rTx.RcvAddr) + if err != nil { + return process.ErrInvalidRcvAddr + } - inRTx.rcvShard = inRTx.coordinator.ComputeId(rcvAddr) - inRTx.sndShard = inRTx.rTx.ShardId + inRTx.rcvShard = inRTx.coordinator.ComputeId(rcvAddr) + inRTx.sndShard = inRTx.rTx.ShardId - inRTx.isAddressedToOtherShards = inRTx.rcvShard != inRTx.coordinator.SelfId() && - inRTx.sndShard != inRTx.coordinator.SelfId() + inRTx.isAddressedToOtherShards = inRTx.rcvShard != inRTx.coordinator.SelfId() && + inRTx.sndShard != inRTx.coordinator.SelfId() - return nil + return nil } // integrity checks for not nil fields and negative value func (inRTx *InterceptedRewardTransaction) integrity() error { - if len(inRTx.rTx.RcvAddr) == 0 { - return process.ErrNilRcvAddr - } + if len(inRTx.rTx.RcvAddr) == 0 { + return process.ErrNilRcvAddr + } - if inRTx.rTx.Value == nil { - return process.ErrNilValue - } + if inRTx.rTx.Value == nil { + return process.ErrNilValue + } - if inRTx.rTx.Value.Cmp(big.NewInt(0)) < 0 { - return process.ErrNegativeValue - } + if inRTx.rTx.Value.Cmp(big.NewInt(0)) < 0 { + return process.ErrNegativeValue + } - return nil + return nil } // verifyIfNotarized checks if the rewardTx was already notarized func (inRTx *InterceptedRewardTransaction) verifyIfNotarized(rTxBuff []byte) error { - // TODO: implement this for flood protection purposes - // could verify if the epoch/round is behind last committed metachain block - return nil + // TODO: implement this for flood protection purposes + // could verify if the epoch/round is behind last committed metachain block + return nil } // RcvShard returns the receiver shard func (inRTx *InterceptedRewardTransaction) RcvShard() uint32 { - return inRTx.rcvShard + return inRTx.rcvShard } // SndShard returns the sender shard func (inRTx *InterceptedRewardTransaction) SndShard() uint32 { - return inRTx.sndShard + return inRTx.sndShard } // IsAddressedToOtherShards returns true if this transaction is not meant to be processed by the node from this shard func (inRTx *InterceptedRewardTransaction) IsAddressedToOtherShards() bool { - return inRTx.isAddressedToOtherShards + return inRTx.isAddressedToOtherShards } // RewardTransaction returns the reward transaction pointer that actually holds the data func (inRTx *InterceptedRewardTransaction) RewardTransaction() data.TransactionHandler { - return inRTx.rTx + return inRTx.rTx } // Hash gets the hash of this transaction func (inRTx *InterceptedRewardTransaction) Hash() []byte { - return inRTx.hash + return inRTx.hash } diff --git a/process/rewardTransaction/interceptor.go b/process/rewardTransaction/interceptor.go index a10d3287aa4..a0b855818fb 100644 --- a/process/rewardTransaction/interceptor.go +++ 
b/process/rewardTransaction/interceptor.go @@ -1,151 +1,151 @@ package rewardTransaction import ( - "github.com/ElrondNetwork/elrond-go/core/logger" - "github.com/ElrondNetwork/elrond-go/data/state" - "github.com/ElrondNetwork/elrond-go/dataRetriever" - "github.com/ElrondNetwork/elrond-go/hashing" - "github.com/ElrondNetwork/elrond-go/marshal" - "github.com/ElrondNetwork/elrond-go/p2p" - "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/sharding" - "github.com/ElrondNetwork/elrond-go/storage" + "github.com/ElrondNetwork/elrond-go/core/logger" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/p2p" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/storage" ) var log = logger.DefaultLogger() // RewardTxInterceptor is used for intercepting reward transactions and storing them into a datapool type RewardTxInterceptor struct { - marshalizer marshal.Marshalizer - rewardTxPool dataRetriever.ShardedDataCacherNotifier - rewardTxStorer storage.Storer - addrConverter state.AddressConverter - hasher hashing.Hasher - shardCoordinator sharding.Coordinator - broadcastCallbackHandler func(buffToSend []byte) + marshalizer marshal.Marshalizer + rewardTxPool dataRetriever.ShardedDataCacherNotifier + rewardTxStorer storage.Storer + addrConverter state.AddressConverter + hasher hashing.Hasher + shardCoordinator sharding.Coordinator + broadcastCallbackHandler func(buffToSend []byte) } // NewRewardTxInterceptor hooks a new interceptor for reward transactions func NewRewardTxInterceptor( - marshalizer marshal.Marshalizer, - rewardTxPool dataRetriever.ShardedDataCacherNotifier, - rewardTxStorer storage.Storer, - addrConverter state.AddressConverter, - hasher hashing.Hasher, - shardCoordinator sharding.Coordinator, + marshalizer marshal.Marshalizer, + rewardTxPool dataRetriever.ShardedDataCacherNotifier, + rewardTxStorer storage.Storer, + addrConverter state.AddressConverter, + hasher hashing.Hasher, + shardCoordinator sharding.Coordinator, ) (*RewardTxInterceptor, error) { - if marshalizer == nil { - return nil, process.ErrNilMarshalizer - } - if rewardTxPool == nil { - return nil, process.ErrNilRewardTxDataPool - } - if rewardTxStorer == nil { - return nil, process.ErrNilRewardsTxStorage - } - if addrConverter == nil { - return nil, process.ErrNilAddressConverter - } - if hasher == nil { - return nil, process.ErrNilHasher - } - if shardCoordinator == nil { - return nil, process.ErrNilShardCoordinator - } - - rewardTxIntercept := &RewardTxInterceptor{ - marshalizer: marshalizer, - rewardTxPool: rewardTxPool, - rewardTxStorer: rewardTxStorer, - hasher: hasher, - addrConverter: addrConverter, - shardCoordinator: shardCoordinator, - } - - return rewardTxIntercept, nil + if marshalizer == nil { + return nil, process.ErrNilMarshalizer + } + if rewardTxPool == nil { + return nil, process.ErrNilRewardTxDataPool + } + if rewardTxStorer == nil { + return nil, process.ErrNilRewardsTxStorage + } + if addrConverter == nil { + return nil, process.ErrNilAddressConverter + } + if hasher == nil { + return nil, process.ErrNilHasher + } + if shardCoordinator == nil { + return nil, process.ErrNilShardCoordinator + } + + rewardTxIntercept := &RewardTxInterceptor{ + marshalizer: marshalizer, + rewardTxPool: rewardTxPool, + rewardTxStorer: 
rewardTxStorer, + hasher: hasher, + addrConverter: addrConverter, + shardCoordinator: shardCoordinator, + } + + return rewardTxIntercept, nil } // ProcessReceivedMessage will be the callback func from the p2p.Messenger and will be called each time a new message was received // (for the topic this validator was registered to) func (rti *RewardTxInterceptor) ProcessReceivedMessage(message p2p.MessageP2P) error { - if message == nil { - return process.ErrNilMessage - } - - if message.Data() == nil { - return process.ErrNilDataToProcess - } - - rewardTxsBuff := make([][]byte, 0) - err := rti.marshalizer.Unmarshal(&rewardTxsBuff, message.Data()) - if err != nil { - return err - } - if len(rewardTxsBuff) == 0 { - return process.ErrNoRewardTransactionInMessage - } - - filteredRTxBuffs := make([][]byte, 0) - lastErrEncountered := error(nil) - for _, rewardTxBuff := range rewardTxsBuff { - rewardTxIntercepted, err := NewInterceptedRewardTransaction( - rewardTxBuff, - rti.marshalizer, - rti.hasher, - rti.addrConverter, - rti.shardCoordinator) - - if err != nil { - lastErrEncountered = err - continue - } - - //reward tx is validated, add it to filtered out reward txs - filteredRTxBuffs = append(filteredRTxBuffs, rewardTxBuff) - if rewardTxIntercepted.IsAddressedToOtherShards() { - log.Debug("intercepted reward transaction is for other shards") - - continue - } - - go rti.processRewardTransaction(rewardTxIntercepted) - } - - var buffToSend []byte - filteredOutRTxsNeedToBeSend := len(filteredRTxBuffs) > 0 && lastErrEncountered != nil - if filteredOutRTxsNeedToBeSend { - buffToSend, err = rti.marshalizer.Marshal(filteredRTxBuffs) - if err != nil { - return err - } - } - - if rti.broadcastCallbackHandler != nil { - rti.broadcastCallbackHandler(buffToSend) - } - - return lastErrEncountered + if message == nil { + return process.ErrNilMessage + } + + if message.Data() == nil { + return process.ErrNilDataToProcess + } + + rewardTxsBuff := make([][]byte, 0) + err := rti.marshalizer.Unmarshal(&rewardTxsBuff, message.Data()) + if err != nil { + return err + } + if len(rewardTxsBuff) == 0 { + return process.ErrNoRewardTransactionInMessage + } + + filteredRTxBuffs := make([][]byte, 0) + lastErrEncountered := error(nil) + for _, rewardTxBuff := range rewardTxsBuff { + rewardTxIntercepted, err := NewInterceptedRewardTransaction( + rewardTxBuff, + rti.marshalizer, + rti.hasher, + rti.addrConverter, + rti.shardCoordinator) + + if err != nil { + lastErrEncountered = err + continue + } + + //reward tx is validated, add it to filtered out reward txs + filteredRTxBuffs = append(filteredRTxBuffs, rewardTxBuff) + if rewardTxIntercepted.IsAddressedToOtherShards() { + log.Debug("intercepted reward transaction is for other shards") + + continue + } + + go rti.processRewardTransaction(rewardTxIntercepted) + } + + var buffToSend []byte + filteredOutRTxsNeedToBeSend := len(filteredRTxBuffs) > 0 && lastErrEncountered != nil + if filteredOutRTxsNeedToBeSend { + buffToSend, err = rti.marshalizer.Marshal(filteredRTxBuffs) + if err != nil { + return err + } + } + + if rti.broadcastCallbackHandler != nil { + rti.broadcastCallbackHandler(buffToSend) + } + + return lastErrEncountered } // SetBroadcastCallback sets the callback method to send filtered out message func (rti *RewardTxInterceptor) SetBroadcastCallback(callback func(buffToSend []byte)) { - rti.broadcastCallbackHandler = callback + rti.broadcastCallbackHandler = callback } func (rti *RewardTxInterceptor) processRewardTransaction(rTx *InterceptedRewardTransaction) { - 
cacherIdentifier := process.ShardCacherIdentifier(rTx.SndShard(), rTx.RcvShard()) - rti.rewardTxPool.AddData( - rTx.Hash(), - rTx.RewardTransaction(), - cacherIdentifier, - ) + cacherIdentifier := process.ShardCacherIdentifier(rTx.SndShard(), rTx.RcvShard()) + rti.rewardTxPool.AddData( + rTx.Hash(), + rTx.RewardTransaction(), + cacherIdentifier, + ) } // IsInterfaceNil returns true if there is no value under the interface func (rti *RewardTxInterceptor) IsInterfaceNil() bool { - if rti == nil { - return true - } - return false + if rti == nil { + return true + } + return false } diff --git a/process/rewardTransaction/process.go b/process/rewardTransaction/process.go index 75a9209be24..950b7fc27f6 100644 --- a/process/rewardTransaction/process.go +++ b/process/rewardTransaction/process.go @@ -1,113 +1,113 @@ package rewardTransaction import ( - "math/big" - "sync" - - "github.com/ElrondNetwork/elrond-go/data" - "github.com/ElrondNetwork/elrond-go/data/rewardTx" - "github.com/ElrondNetwork/elrond-go/data/state" - "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/sharding" + "math/big" + "sync" + + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/sharding" ) type rewardTxProcessor struct { - accounts state.AccountsAdapter - adrConv state.AddressConverter - shardCoordinator sharding.Coordinator + accounts state.AccountsAdapter + adrConv state.AddressConverter + shardCoordinator sharding.Coordinator - mutRewardsForwarder sync.Mutex - rewardTxForwarder process.IntermediateTransactionHandler + mutRewardsForwarder sync.Mutex + rewardTxForwarder process.IntermediateTransactionHandler } // NewRewardTxProcessor creates a rewardTxProcessor instance // TODO: add unit tests func NewRewardTxProcessor( - accountsDB state.AccountsAdapter, - adrConv state.AddressConverter, - coordinator sharding.Coordinator, - rewardTxForwarder process.IntermediateTransactionHandler, + accountsDB state.AccountsAdapter, + adrConv state.AddressConverter, + coordinator sharding.Coordinator, + rewardTxForwarder process.IntermediateTransactionHandler, ) (*rewardTxProcessor, error) { - if accountsDB == nil { - return nil, process.ErrNilAccountsAdapter - } - if adrConv == nil { - return nil, process.ErrNilAddressConverter - } - if coordinator == nil { - return nil, process.ErrNilShardCoordinator - } - - return &rewardTxProcessor{ - accounts: accountsDB, - adrConv: adrConv, - shardCoordinator: coordinator, - rewardTxForwarder: rewardTxForwarder, - }, nil + if accountsDB == nil { + return nil, process.ErrNilAccountsAdapter + } + if adrConv == nil { + return nil, process.ErrNilAddressConverter + } + if coordinator == nil { + return nil, process.ErrNilShardCoordinator + } + + return &rewardTxProcessor{ + accounts: accountsDB, + adrConv: adrConv, + shardCoordinator: coordinator, + rewardTxForwarder: rewardTxForwarder, + }, nil } func (rtp *rewardTxProcessor) getAccountFromAddress(address []byte) (state.AccountHandler, error) { - addr, err := rtp.adrConv.CreateAddressFromPublicKeyBytes(address) - if err != nil { - return nil, err - } - - shardForCurrentNode := rtp.shardCoordinator.SelfId() - shardForAddr := rtp.shardCoordinator.ComputeId(addr) - if shardForCurrentNode != shardForAddr { - return nil, nil - } - - acnt, err := rtp.accounts.GetAccountWithJournal(addr) - if err != nil { - return nil, err - } - - return acnt, nil + 
addr, err := rtp.adrConv.CreateAddressFromPublicKeyBytes(address) + if err != nil { + return nil, err + } + + shardForCurrentNode := rtp.shardCoordinator.SelfId() + shardForAddr := rtp.shardCoordinator.ComputeId(addr) + if shardForCurrentNode != shardForAddr { + return nil, nil + } + + acnt, err := rtp.accounts.GetAccountWithJournal(addr) + if err != nil { + return nil, err + } + + return acnt, nil } // ProcessRewardTransaction updates the account state from the reward transaction func (rtp *rewardTxProcessor) ProcessRewardTransaction(rTx *rewardTx.RewardTx) error { - if rTx == nil { - return process.ErrNilRewardTransaction - } - if rTx.Value == nil { - return process.ErrNilValueFromRewardTransaction - } - - rtp.mutRewardsForwarder.Lock() - err := rtp.rewardTxForwarder.AddIntermediateTransactions([]data.TransactionHandler{rTx}) - rtp.mutRewardsForwarder.Unlock() - if err != nil { - return err - } - - accHandler, err := rtp.getAccountFromAddress(rTx.RcvAddr) - if err != nil { - return err - } - - if accHandler == nil || accHandler.IsInterfaceNil() { - // address from different shard - return nil - } - - rewardAcc, ok := accHandler.(*state.Account) - if !ok { - return process.ErrWrongTypeAssertion - } - - operation := big.NewInt(0) - operation = operation.Add(rTx.Value, rewardAcc.Balance) - err = rewardAcc.SetBalanceWithJournal(operation) - - return err + if rTx == nil { + return process.ErrNilRewardTransaction + } + if rTx.Value == nil { + return process.ErrNilValueFromRewardTransaction + } + + rtp.mutRewardsForwarder.Lock() + err := rtp.rewardTxForwarder.AddIntermediateTransactions([]data.TransactionHandler{rTx}) + rtp.mutRewardsForwarder.Unlock() + if err != nil { + return err + } + + accHandler, err := rtp.getAccountFromAddress(rTx.RcvAddr) + if err != nil { + return err + } + + if accHandler == nil || accHandler.IsInterfaceNil() { + // address from different shard + return nil + } + + rewardAcc, ok := accHandler.(*state.Account) + if !ok { + return process.ErrWrongTypeAssertion + } + + operation := big.NewInt(0) + operation = operation.Add(rTx.Value, rewardAcc.Balance) + err = rewardAcc.SetBalanceWithJournal(operation) + + return err } // IsInterfaceNil returns true if there is no value under the interface func (rtp *rewardTxProcessor) IsInterfaceNil() bool { - if rtp == nil { - return true - } - return false + if rtp == nil { + return true + } + return false } diff --git a/process/smartContract/process.go b/process/smartContract/process.go index 0d3d1d8e3d5..cfd5b36965d 100644 --- a/process/smartContract/process.go +++ b/process/smartContract/process.go @@ -1,808 +1,808 @@ package smartContract import ( - "bytes" - "encoding/hex" - "fmt" - "math/big" - "sync" - - "github.com/ElrondNetwork/elrond-go/core/logger" - "github.com/ElrondNetwork/elrond-go/data" - "github.com/ElrondNetwork/elrond-go/data/smartContractResult" - "github.com/ElrondNetwork/elrond-go/data/state" - "github.com/ElrondNetwork/elrond-go/data/transaction" - "github.com/ElrondNetwork/elrond-go/hashing" - "github.com/ElrondNetwork/elrond-go/marshal" - "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/process/factory" - "github.com/ElrondNetwork/elrond-go/sharding" - vmcommon "github.com/ElrondNetwork/elrond-vm-common" + "bytes" + "encoding/hex" + "fmt" + "math/big" + "sync" + + "github.com/ElrondNetwork/elrond-go/core/logger" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/smartContractResult" + "github.com/ElrondNetwork/elrond-go/data/state" + 
"github.com/ElrondNetwork/elrond-go/data/transaction" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/factory" + "github.com/ElrondNetwork/elrond-go/sharding" + vmcommon "github.com/ElrondNetwork/elrond-vm-common" ) type scExecutionState struct { - allLogs map[string][]*vmcommon.LogEntry - allReturnData map[string][]*big.Int - returnCodes map[string]vmcommon.ReturnCode - rootHash []byte + allLogs map[string][]*vmcommon.LogEntry + allReturnData map[string][]*big.Int + returnCodes map[string]vmcommon.ReturnCode + rootHash []byte } type scProcessor struct { - accounts state.AccountsAdapter - tempAccounts process.TemporaryAccountsHandler - adrConv state.AddressConverter - hasher hashing.Hasher - marshalizer marshal.Marshalizer - shardCoordinator sharding.Coordinator - vmContainer process.VirtualMachinesContainer - argsParser process.ArgumentsParser - - mutSCState sync.Mutex - mapExecState map[uint64]scExecutionState - - scrForwarder process.IntermediateTransactionHandler - txFeeHandler process.TransactionFeeHandler + accounts state.AccountsAdapter + tempAccounts process.TemporaryAccountsHandler + adrConv state.AddressConverter + hasher hashing.Hasher + marshalizer marshal.Marshalizer + shardCoordinator sharding.Coordinator + vmContainer process.VirtualMachinesContainer + argsParser process.ArgumentsParser + + mutSCState sync.Mutex + mapExecState map[uint64]scExecutionState + + scrForwarder process.IntermediateTransactionHandler + txFeeHandler process.TransactionFeeHandler } var log = logger.DefaultLogger() // NewSmartContractProcessor create a smart contract processor creates and interprets VM data func NewSmartContractProcessor( - vmContainer process.VirtualMachinesContainer, - argsParser process.ArgumentsParser, - hasher hashing.Hasher, - marshalizer marshal.Marshalizer, - accountsDB state.AccountsAdapter, - tempAccounts process.TemporaryAccountsHandler, - adrConv state.AddressConverter, - coordinator sharding.Coordinator, - scrForwarder process.IntermediateTransactionHandler, - txFeeHandler process.TransactionFeeHandler, + vmContainer process.VirtualMachinesContainer, + argsParser process.ArgumentsParser, + hasher hashing.Hasher, + marshalizer marshal.Marshalizer, + accountsDB state.AccountsAdapter, + tempAccounts process.TemporaryAccountsHandler, + adrConv state.AddressConverter, + coordinator sharding.Coordinator, + scrForwarder process.IntermediateTransactionHandler, + txFeeHandler process.TransactionFeeHandler, ) (*scProcessor, error) { - if vmContainer == nil || vmContainer.IsInterfaceNil() { - return nil, process.ErrNoVM - } - if argsParser == nil || argsParser.IsInterfaceNil() { - return nil, process.ErrNilArgumentParser - } - if hasher == nil || hasher.IsInterfaceNil() { - return nil, process.ErrNilHasher - } - if marshalizer == nil || marshalizer.IsInterfaceNil() { - return nil, process.ErrNilMarshalizer - } - if accountsDB == nil || accountsDB.IsInterfaceNil() { - return nil, process.ErrNilAccountsAdapter - } - if tempAccounts == nil || tempAccounts.IsInterfaceNil() { - return nil, process.ErrNilTemporaryAccountsHandler - } - if adrConv == nil || adrConv.IsInterfaceNil() { - return nil, process.ErrNilAddressConverter - } - if coordinator == nil || coordinator.IsInterfaceNil() { - return nil, process.ErrNilShardCoordinator - } - if scrForwarder == nil || scrForwarder.IsInterfaceNil() { - return nil, process.ErrNilIntermediateTransactionHandler - 
} - if txFeeHandler == nil { - return nil, process.ErrNilUnsignedTxHandler - } - - return &scProcessor{ - vmContainer: vmContainer, - argsParser: argsParser, - hasher: hasher, - marshalizer: marshalizer, - accounts: accountsDB, - tempAccounts: tempAccounts, - adrConv: adrConv, - shardCoordinator: coordinator, - scrForwarder: scrForwarder, - txFeeHandler: txFeeHandler, - mapExecState: make(map[uint64]scExecutionState)}, nil + if vmContainer == nil || vmContainer.IsInterfaceNil() { + return nil, process.ErrNoVM + } + if argsParser == nil || argsParser.IsInterfaceNil() { + return nil, process.ErrNilArgumentParser + } + if hasher == nil || hasher.IsInterfaceNil() { + return nil, process.ErrNilHasher + } + if marshalizer == nil || marshalizer.IsInterfaceNil() { + return nil, process.ErrNilMarshalizer + } + if accountsDB == nil || accountsDB.IsInterfaceNil() { + return nil, process.ErrNilAccountsAdapter + } + if tempAccounts == nil || tempAccounts.IsInterfaceNil() { + return nil, process.ErrNilTemporaryAccountsHandler + } + if adrConv == nil || adrConv.IsInterfaceNil() { + return nil, process.ErrNilAddressConverter + } + if coordinator == nil || coordinator.IsInterfaceNil() { + return nil, process.ErrNilShardCoordinator + } + if scrForwarder == nil || scrForwarder.IsInterfaceNil() { + return nil, process.ErrNilIntermediateTransactionHandler + } + if txFeeHandler == nil { + return nil, process.ErrNilUnsignedTxHandler + } + + return &scProcessor{ + vmContainer: vmContainer, + argsParser: argsParser, + hasher: hasher, + marshalizer: marshalizer, + accounts: accountsDB, + tempAccounts: tempAccounts, + adrConv: adrConv, + shardCoordinator: coordinator, + scrForwarder: scrForwarder, + txFeeHandler: txFeeHandler, + mapExecState: make(map[uint64]scExecutionState)}, nil } // ComputeTransactionType calculates the type of the transaction func (sc *scProcessor) ComputeTransactionType(tx *transaction.Transaction) (process.TransactionType, error) { - err := sc.checkTxValidity(tx) - if err != nil { - return 0, err - } - - isEmptyAddress := sc.isDestAddressEmpty(tx) - if isEmptyAddress { - if len(tx.Data) > 0 { - return process.SCDeployment, nil - } - return 0, process.ErrWrongTransaction - } - - acntDst, err := sc.getAccountFromAddress(tx.RcvAddr) - if err != nil { - return 0, err - } - - if acntDst == nil || acntDst.IsInterfaceNil() { - return process.MoveBalance, nil - } - - if !acntDst.IsInterfaceNil() && len(acntDst.GetCode()) > 0 { - return process.SCInvoking, nil - } - - return process.MoveBalance, nil + err := sc.checkTxValidity(tx) + if err != nil { + return 0, err + } + + isEmptyAddress := sc.isDestAddressEmpty(tx) + if isEmptyAddress { + if len(tx.Data) > 0 { + return process.SCDeployment, nil + } + return 0, process.ErrWrongTransaction + } + + acntDst, err := sc.getAccountFromAddress(tx.RcvAddr) + if err != nil { + return 0, err + } + + if acntDst == nil || acntDst.IsInterfaceNil() { + return process.MoveBalance, nil + } + + if !acntDst.IsInterfaceNil() && len(acntDst.GetCode()) > 0 { + return process.SCInvoking, nil + } + + return process.MoveBalance, nil } func (sc *scProcessor) checkTxValidity(tx *transaction.Transaction) error { - if tx == nil || tx.IsInterfaceNil() { - return process.ErrNilTransaction - } + if tx == nil || tx.IsInterfaceNil() { + return process.ErrNilTransaction + } - recvAddressIsInvalid := sc.adrConv.AddressLen() != len(tx.RcvAddr) - if recvAddressIsInvalid { - return process.ErrWrongTransaction - } + recvAddressIsInvalid := sc.adrConv.AddressLen() != len(tx.RcvAddr) + if 
recvAddressIsInvalid { + return process.ErrWrongTransaction + } - return nil + return nil } func (sc *scProcessor) isDestAddressEmpty(tx *transaction.Transaction) bool { - isEmptyAddress := bytes.Equal(tx.RcvAddr, make([]byte, sc.adrConv.AddressLen())) - return isEmptyAddress + isEmptyAddress := bytes.Equal(tx.RcvAddr, make([]byte, sc.adrConv.AddressLen())) + return isEmptyAddress } // ExecuteSmartContractTransaction processes the transaction, call the VM and processes the SC call output func (sc *scProcessor) ExecuteSmartContractTransaction( - tx *transaction.Transaction, - acntSnd, acntDst state.AccountHandler, - round uint64, + tx *transaction.Transaction, + acntSnd, acntDst state.AccountHandler, + round uint64, ) error { - defer sc.tempAccounts.CleanTempAccounts() - - if tx == nil || tx.IsInterfaceNil() { - return process.ErrNilTransaction - } - if acntDst == nil || acntDst.IsInterfaceNil() { - return process.ErrNilSCDestAccount - } - if acntDst.IsInterfaceNil() || acntDst.GetCode() == nil { - return process.ErrNilSCDestAccount - } - - err := sc.prepareSmartContractCall(tx, acntSnd) - if err != nil { - return err - } - - vmInput, err := sc.createVMCallInput(tx) - if err != nil { - return err - } - - vm, err := sc.getVMFromTransaction(tx) - if err != nil { - return err - } - - vmOutput, err := vm.RunSmartContractCall(vmInput) - if err != nil { - return err - } - - // VM is formally verified and the output is correct - crossTxs, consumedFee, err := sc.processVMOutput(vmOutput, tx, acntSnd, round) - if err != nil { - return err - } - - err = sc.scrForwarder.AddIntermediateTransactions(crossTxs) - if err != nil { - return err - } - - sc.txFeeHandler.ProcessTransactionFee(consumedFee) - - return nil + defer sc.tempAccounts.CleanTempAccounts() + + if tx == nil || tx.IsInterfaceNil() { + return process.ErrNilTransaction + } + if acntDst == nil || acntDst.IsInterfaceNil() { + return process.ErrNilSCDestAccount + } + if acntDst.IsInterfaceNil() || acntDst.GetCode() == nil { + return process.ErrNilSCDestAccount + } + + err := sc.prepareSmartContractCall(tx, acntSnd) + if err != nil { + return err + } + + vmInput, err := sc.createVMCallInput(tx) + if err != nil { + return err + } + + vm, err := sc.getVMFromTransaction(tx) + if err != nil { + return err + } + + vmOutput, err := vm.RunSmartContractCall(vmInput) + if err != nil { + return err + } + + // VM is formally verified and the output is correct + crossTxs, consumedFee, err := sc.processVMOutput(vmOutput, tx, acntSnd, round) + if err != nil { + return err + } + + err = sc.scrForwarder.AddIntermediateTransactions(crossTxs) + if err != nil { + return err + } + + sc.txFeeHandler.ProcessTransactionFee(consumedFee) + + return nil } func (sc *scProcessor) prepareSmartContractCall(tx *transaction.Transaction, acntSnd state.AccountHandler) error { - err := sc.argsParser.ParseData(tx.Data) - if err != nil { - return err - } - - err = sc.processSCPayment(tx, acntSnd) - if err != nil { - return err - } - - nonce := tx.Nonce - if acntSnd != nil && !acntSnd.IsInterfaceNil() { - nonce = acntSnd.GetNonce() - } - txValue := big.NewInt(0).Set(tx.Value) - sc.tempAccounts.AddTempAccount(tx.SndAddr, txValue, nonce) - - return nil + err := sc.argsParser.ParseData(tx.Data) + if err != nil { + return err + } + + err = sc.processSCPayment(tx, acntSnd) + if err != nil { + return err + } + + nonce := tx.Nonce + if acntSnd != nil && !acntSnd.IsInterfaceNil() { + nonce = acntSnd.GetNonce() + } + txValue := big.NewInt(0).Set(tx.Value) + 
sc.tempAccounts.AddTempAccount(tx.SndAddr, txValue, nonce) + + return nil } func (sc *scProcessor) getVMFromTransaction(tx *transaction.Transaction) (vmcommon.VMExecutionHandler, error) { - //TODO add processing here - like calculating what kind of VM does this contract call needs - vm, err := sc.vmContainer.Get([]byte(factory.IELEVirtualMachine)) - if err != nil { - return nil, err - } - return vm, nil + //TODO add processing here - like calculating what kind of VM does this contract call needs + vm, err := sc.vmContainer.Get([]byte(factory.IELEVirtualMachine)) + if err != nil { + return nil, err + } + return vm, nil } // DeploySmartContract processes the transaction, than deploy the smart contract into VM, final code is saved in account func (sc *scProcessor) DeploySmartContract( - tx *transaction.Transaction, - acntSnd state.AccountHandler, - round uint64, + tx *transaction.Transaction, + acntSnd state.AccountHandler, + round uint64, ) error { - defer sc.tempAccounts.CleanTempAccounts() - - err := sc.checkTxValidity(tx) - if err != nil { - return err - } - - isEmptyAddress := sc.isDestAddressEmpty(tx) - if !isEmptyAddress { - return process.ErrWrongTransaction - } - - err = sc.prepareSmartContractCall(tx, acntSnd) - if err != nil { - return err - } - - vmInput, err := sc.createVMDeployInput(tx) - if err != nil { - return err - } - - vm, err := sc.getVMFromTransaction(tx) - if err != nil { - return err - } - - // TODO: Smart contract address calculation - vmOutput, err := vm.RunSmartContractCreate(vmInput) - if err != nil { - return err - } - - // VM is formally verified, the output is correct - crossTxs, consumedFee, err := sc.processVMOutput(vmOutput, tx, acntSnd, round) - if err != nil { - return err - } - - err = sc.scrForwarder.AddIntermediateTransactions(crossTxs) - if err != nil { - return err - } - - sc.txFeeHandler.ProcessTransactionFee(consumedFee) - - return nil + defer sc.tempAccounts.CleanTempAccounts() + + err := sc.checkTxValidity(tx) + if err != nil { + return err + } + + isEmptyAddress := sc.isDestAddressEmpty(tx) + if !isEmptyAddress { + return process.ErrWrongTransaction + } + + err = sc.prepareSmartContractCall(tx, acntSnd) + if err != nil { + return err + } + + vmInput, err := sc.createVMDeployInput(tx) + if err != nil { + return err + } + + vm, err := sc.getVMFromTransaction(tx) + if err != nil { + return err + } + + // TODO: Smart contract address calculation + vmOutput, err := vm.RunSmartContractCreate(vmInput) + if err != nil { + return err + } + + // VM is formally verified, the output is correct + crossTxs, consumedFee, err := sc.processVMOutput(vmOutput, tx, acntSnd, round) + if err != nil { + return err + } + + err = sc.scrForwarder.AddIntermediateTransactions(crossTxs) + if err != nil { + return err + } + + sc.txFeeHandler.ProcessTransactionFee(consumedFee) + + return nil } func (sc *scProcessor) createVMCallInput(tx *transaction.Transaction) (*vmcommon.ContractCallInput, error) { - vmInput, err := sc.createVMInput(tx) - if err != nil { - return nil, err - } + vmInput, err := sc.createVMInput(tx) + if err != nil { + return nil, err + } - vmCallInput := &vmcommon.ContractCallInput{} - vmCallInput.VMInput = *vmInput - vmCallInput.Function, err = sc.argsParser.GetFunction() - if err != nil { - return nil, err - } + vmCallInput := &vmcommon.ContractCallInput{} + vmCallInput.VMInput = *vmInput + vmCallInput.Function, err = sc.argsParser.GetFunction() + if err != nil { + return nil, err + } - vmCallInput.RecipientAddr = tx.RcvAddr + vmCallInput.RecipientAddr = 
tx.RcvAddr - return vmCallInput, nil + return vmCallInput, nil } func (sc *scProcessor) createVMDeployInput(tx *transaction.Transaction) (*vmcommon.ContractCreateInput, error) { - vmInput, err := sc.createVMInput(tx) - if err != nil { - return nil, err - } + vmInput, err := sc.createVMInput(tx) + if err != nil { + return nil, err + } - vmCreateInput := &vmcommon.ContractCreateInput{} - hexCode, err := sc.argsParser.GetCode() - if err != nil { - return nil, err - } + vmCreateInput := &vmcommon.ContractCreateInput{} + hexCode, err := sc.argsParser.GetCode() + if err != nil { + return nil, err + } - vmCreateInput.ContractCode, err = hex.DecodeString(string(hexCode)) - if err != nil { - return nil, err - } + vmCreateInput.ContractCode, err = hex.DecodeString(string(hexCode)) + if err != nil { + return nil, err + } - vmCreateInput.VMInput = *vmInput + vmCreateInput.VMInput = *vmInput - return vmCreateInput, nil + return vmCreateInput, nil } func (sc *scProcessor) createVMInput(tx *transaction.Transaction) (*vmcommon.VMInput, error) { - var err error - vmInput := &vmcommon.VMInput{} - - vmInput.CallerAddr = tx.SndAddr - vmInput.Arguments, err = sc.argsParser.GetArguments() - if err != nil { - return nil, err - } - vmInput.CallValue = tx.Value - vmInput.GasPrice = big.NewInt(int64(tx.GasPrice)) - vmInput.GasProvided = big.NewInt(int64(tx.GasLimit)) - - //TODO: change this when we know for what they are used. - scCallHeader := &vmcommon.SCCallHeader{} - scCallHeader.GasLimit = big.NewInt(0) - scCallHeader.Number = big.NewInt(0) - scCallHeader.Timestamp = big.NewInt(0) - scCallHeader.Beneficiary = big.NewInt(0) - - vmInput.Header = scCallHeader - - return vmInput, nil + var err error + vmInput := &vmcommon.VMInput{} + + vmInput.CallerAddr = tx.SndAddr + vmInput.Arguments, err = sc.argsParser.GetArguments() + if err != nil { + return nil, err + } + vmInput.CallValue = tx.Value + vmInput.GasPrice = big.NewInt(int64(tx.GasPrice)) + vmInput.GasProvided = big.NewInt(int64(tx.GasLimit)) + + //TODO: change this when we know for what they are used. 
+ scCallHeader := &vmcommon.SCCallHeader{} + scCallHeader.GasLimit = big.NewInt(0) + scCallHeader.Number = big.NewInt(0) + scCallHeader.Timestamp = big.NewInt(0) + scCallHeader.Beneficiary = big.NewInt(0) + + vmInput.Header = scCallHeader + + return vmInput, nil } // taking money from sender, as VM might not have access to him because of state sharding func (sc *scProcessor) processSCPayment(tx *transaction.Transaction, acntSnd state.AccountHandler) error { - if acntSnd == nil || acntSnd.IsInterfaceNil() { - // transaction was already done at sender shard - return nil - } - - err := acntSnd.SetNonceWithJournal(acntSnd.GetNonce() + 1) - if err != nil { - return err - } - - cost := big.NewInt(0) - cost = cost.Mul(big.NewInt(0).SetUint64(tx.GasPrice), big.NewInt(0).SetUint64(tx.GasLimit)) - cost = cost.Add(cost, tx.Value) - - if cost.Cmp(big.NewInt(0)) == 0 { - return nil - } - - stAcc, ok := acntSnd.(*state.Account) - if !ok { - return process.ErrWrongTypeAssertion - } - - if stAcc.Balance.Cmp(cost) < 0 { - return process.ErrInsufficientFunds - } - - totalCost := big.NewInt(0) - err = stAcc.SetBalanceWithJournal(totalCost.Sub(stAcc.Balance, cost)) - if err != nil { - return err - } - - return nil + if acntSnd == nil || acntSnd.IsInterfaceNil() { + // transaction was already done at sender shard + return nil + } + + err := acntSnd.SetNonceWithJournal(acntSnd.GetNonce() + 1) + if err != nil { + return err + } + + cost := big.NewInt(0) + cost = cost.Mul(big.NewInt(0).SetUint64(tx.GasPrice), big.NewInt(0).SetUint64(tx.GasLimit)) + cost = cost.Add(cost, tx.Value) + + if cost.Cmp(big.NewInt(0)) == 0 { + return nil + } + + stAcc, ok := acntSnd.(*state.Account) + if !ok { + return process.ErrWrongTypeAssertion + } + + if stAcc.Balance.Cmp(cost) < 0 { + return process.ErrInsufficientFunds + } + + totalCost := big.NewInt(0) + err = stAcc.SetBalanceWithJournal(totalCost.Sub(stAcc.Balance, cost)) + if err != nil { + return err + } + + return nil } func (sc *scProcessor) processVMOutput( - vmOutput *vmcommon.VMOutput, - tx *transaction.Transaction, - acntSnd state.AccountHandler, - round uint64, + vmOutput *vmcommon.VMOutput, + tx *transaction.Transaction, + acntSnd state.AccountHandler, + round uint64, ) ([]data.TransactionHandler, *big.Int, error) { - if vmOutput == nil { - return nil, nil, process.ErrNilVMOutput - } - if tx == nil { - return nil, nil, process.ErrNilTransaction - } - - txBytes, err := sc.marshalizer.Marshal(tx) - if err != nil { - return nil, nil, err - } - txHash := sc.hasher.Compute(string(txBytes)) - - if vmOutput.ReturnCode != vmcommon.Ok { - log.Info(fmt.Sprintf( - "error processing tx %s in VM: return code: %s", - hex.EncodeToString(txHash), - vmOutput.ReturnCode), - ) - } - - err = sc.saveSCOutputToCurrentState(vmOutput, round, txHash) - if err != nil { - return nil, nil, err - } - - crossOutAccs, err := sc.processSCOutputAccounts(vmOutput.OutputAccounts) - if err != nil { - return nil, nil, err - } - - crossTxs, err := sc.createCrossShardTransactions(crossOutAccs, tx, txHash) - if err != nil { - return nil, nil, err - } - - acntSnd, err = sc.reloadLocalSndAccount(acntSnd) - if err != nil { - return nil, nil, err - } - - totalGasRefund := big.NewInt(0) - totalGasRefund = totalGasRefund.Add(vmOutput.GasRefund, vmOutput.GasRemaining) - scrIfCrossShard, consumedFee, err := sc.refundGasToSender(totalGasRefund, tx, txHash, acntSnd) - if err != nil { - return nil, nil, err - } - - if scrIfCrossShard != nil { - crossTxs = append(crossTxs, scrIfCrossShard) - } - - err = 
sc.deleteAccounts(vmOutput.DeletedAccounts) - if err != nil { - return nil, nil, err - } - - err = sc.processTouchedAccounts(vmOutput.TouchedAccounts) - if err != nil { - return nil, nil, err - } - - return crossTxs, consumedFee, nil + if vmOutput == nil { + return nil, nil, process.ErrNilVMOutput + } + if tx == nil { + return nil, nil, process.ErrNilTransaction + } + + txBytes, err := sc.marshalizer.Marshal(tx) + if err != nil { + return nil, nil, err + } + txHash := sc.hasher.Compute(string(txBytes)) + + if vmOutput.ReturnCode != vmcommon.Ok { + log.Info(fmt.Sprintf( + "error processing tx %s in VM: return code: %s", + hex.EncodeToString(txHash), + vmOutput.ReturnCode), + ) + } + + err = sc.saveSCOutputToCurrentState(vmOutput, round, txHash) + if err != nil { + return nil, nil, err + } + + crossOutAccs, err := sc.processSCOutputAccounts(vmOutput.OutputAccounts) + if err != nil { + return nil, nil, err + } + + crossTxs, err := sc.createCrossShardTransactions(crossOutAccs, tx, txHash) + if err != nil { + return nil, nil, err + } + + acntSnd, err = sc.reloadLocalSndAccount(acntSnd) + if err != nil { + return nil, nil, err + } + + totalGasRefund := big.NewInt(0) + totalGasRefund = totalGasRefund.Add(vmOutput.GasRefund, vmOutput.GasRemaining) + scrIfCrossShard, consumedFee, err := sc.refundGasToSender(totalGasRefund, tx, txHash, acntSnd) + if err != nil { + return nil, nil, err + } + + if scrIfCrossShard != nil { + crossTxs = append(crossTxs, scrIfCrossShard) + } + + err = sc.deleteAccounts(vmOutput.DeletedAccounts) + if err != nil { + return nil, nil, err + } + + err = sc.processTouchedAccounts(vmOutput.TouchedAccounts) + if err != nil { + return nil, nil, err + } + + return crossTxs, consumedFee, nil } // reloadLocalSndAccount will reload from current account state the sender account // this requirement is needed because in the case of refunding the exact account that was previously // modified in saveSCOutputToCurrentState, the modifications done there should be visible here func (sc *scProcessor) reloadLocalSndAccount(acntSnd state.AccountHandler) (state.AccountHandler, error) { - if acntSnd == nil || acntSnd.IsInterfaceNil() { - return acntSnd, nil - } + if acntSnd == nil || acntSnd.IsInterfaceNil() { + return acntSnd, nil + } - isAccountFromCurrentShard := acntSnd.AddressContainer() != nil - if !isAccountFromCurrentShard { - return acntSnd, nil - } + isAccountFromCurrentShard := acntSnd.AddressContainer() != nil + if !isAccountFromCurrentShard { + return acntSnd, nil + } - return sc.getAccountFromAddress(acntSnd.AddressContainer().Bytes()) + return sc.getAccountFromAddress(acntSnd.AddressContainer().Bytes()) } func (sc *scProcessor) createSmartContractResult( - outAcc *vmcommon.OutputAccount, - scAddress []byte, - txHash []byte, + outAcc *vmcommon.OutputAccount, + scAddress []byte, + txHash []byte, ) *smartContractResult.SmartContractResult { - crossSc := &smartContractResult.SmartContractResult{} + crossSc := &smartContractResult.SmartContractResult{} - crossSc.Value = outAcc.Balance - crossSc.Nonce = outAcc.Nonce.Uint64() - crossSc.RcvAddr = outAcc.Address - crossSc.SndAddr = scAddress - crossSc.Code = outAcc.Code - crossSc.Data = sc.argsParser.CreateDataFromStorageUpdate(outAcc.StorageUpdates) - crossSc.TxHash = txHash + crossSc.Value = outAcc.Balance + crossSc.Nonce = outAcc.Nonce.Uint64() + crossSc.RcvAddr = outAcc.Address + crossSc.SndAddr = scAddress + crossSc.Code = outAcc.Code + crossSc.Data = sc.argsParser.CreateDataFromStorageUpdate(outAcc.StorageUpdates) + crossSc.TxHash = 
txHash - return crossSc + return crossSc } func (sc *scProcessor) createCrossShardTransactions( - crossOutAccs []*vmcommon.OutputAccount, - tx *transaction.Transaction, - txHash []byte, + crossOutAccs []*vmcommon.OutputAccount, + tx *transaction.Transaction, + txHash []byte, ) ([]data.TransactionHandler, error) { - crossSCTxs := make([]data.TransactionHandler, 0) + crossSCTxs := make([]data.TransactionHandler, 0) - for i := 0; i < len(crossOutAccs); i++ { - scTx := sc.createSmartContractResult(crossOutAccs[i], tx.RcvAddr, txHash) - crossSCTxs = append(crossSCTxs, scTx) - } + for i := 0; i < len(crossOutAccs); i++ { + scTx := sc.createSmartContractResult(crossOutAccs[i], tx.RcvAddr, txHash) + crossSCTxs = append(crossSCTxs, scTx) + } - return crossSCTxs, nil + return crossSCTxs, nil } // give back the user the unused gas money func (sc *scProcessor) refundGasToSender( - gasRefund *big.Int, - tx *transaction.Transaction, - txHash []byte, - acntSnd state.AccountHandler, + gasRefund *big.Int, + tx *transaction.Transaction, + txHash []byte, + acntSnd state.AccountHandler, ) (*smartContractResult.SmartContractResult, *big.Int, error) { - consumedFee := big.NewInt(0) - consumedFee = consumedFee.Mul(big.NewInt(0).SetUint64(tx.GasPrice), big.NewInt(0).SetUint64(tx.GasLimit)) - if gasRefund == nil || gasRefund.Cmp(big.NewInt(0)) <= 0 { - return nil, consumedFee, nil - } - - refundErd := big.NewInt(0) - refundErd = refundErd.Mul(gasRefund, big.NewInt(int64(tx.GasPrice))) - consumedFee = consumedFee.Sub(consumedFee, refundErd) - - scTx := &smartContractResult.SmartContractResult{} - scTx.Value = refundErd - scTx.RcvAddr = tx.SndAddr - scTx.SndAddr = tx.RcvAddr - scTx.Nonce = tx.Nonce + 1 - scTx.TxHash = txHash - - if acntSnd == nil || acntSnd.IsInterfaceNil() { - return scTx, consumedFee, nil - } - - stAcc, ok := acntSnd.(*state.Account) - if !ok { - return nil, nil, process.ErrWrongTypeAssertion - } - - newBalance := big.NewInt(0).Add(stAcc.Balance, refundErd) - err := stAcc.SetBalanceWithJournal(newBalance) - if err != nil { - return nil, nil, err - } - - return scTx, consumedFee, nil + consumedFee := big.NewInt(0) + consumedFee = consumedFee.Mul(big.NewInt(0).SetUint64(tx.GasPrice), big.NewInt(0).SetUint64(tx.GasLimit)) + if gasRefund == nil || gasRefund.Cmp(big.NewInt(0)) <= 0 { + return nil, consumedFee, nil + } + + refundErd := big.NewInt(0) + refundErd = refundErd.Mul(gasRefund, big.NewInt(int64(tx.GasPrice))) + consumedFee = consumedFee.Sub(consumedFee, refundErd) + + scTx := &smartContractResult.SmartContractResult{} + scTx.Value = refundErd + scTx.RcvAddr = tx.SndAddr + scTx.SndAddr = tx.RcvAddr + scTx.Nonce = tx.Nonce + 1 + scTx.TxHash = txHash + + if acntSnd == nil || acntSnd.IsInterfaceNil() { + return scTx, consumedFee, nil + } + + stAcc, ok := acntSnd.(*state.Account) + if !ok { + return nil, nil, process.ErrWrongTypeAssertion + } + + newBalance := big.NewInt(0).Add(stAcc.Balance, refundErd) + err := stAcc.SetBalanceWithJournal(newBalance) + if err != nil { + return nil, nil, err + } + + return scTx, consumedFee, nil } // save account changes in state from vmOutput - protected by VM - every output can be treated as is. 
func (sc *scProcessor) processSCOutputAccounts(outputAccounts []*vmcommon.OutputAccount) ([]*vmcommon.OutputAccount, error) { - crossOutAccs := make([]*vmcommon.OutputAccount, 0) - for i := 0; i < len(outputAccounts); i++ { - outAcc := outputAccounts[i] - acc, err := sc.getAccountFromAddress(outAcc.Address) - if err != nil { - return nil, err - } - - fakeAcc := sc.tempAccounts.TempAccount(outAcc.Address) - - if acc == nil || acc.IsInterfaceNil() { - crossOutAccs = append(crossOutAccs, outAcc) - continue - } - - for j := 0; j < len(outAcc.StorageUpdates); j++ { - storeUpdate := outAcc.StorageUpdates[j] - acc.DataTrieTracker().SaveKeyValue(storeUpdate.Offset, storeUpdate.Data) - } - - if len(outAcc.StorageUpdates) > 0 { - //SC with data variables - err := sc.accounts.SaveDataTrie(acc) - if err != nil { - return nil, err - } - } - - if len(outAcc.Code) > 0 { - err = sc.accounts.PutCode(acc, outAcc.Code) - if err != nil { - return nil, err - } - - //TODO remove this when receipts are implemented - log.Info(fmt.Sprintf("*** Generated/called SC account: %s ***", hex.EncodeToString(outAcc.Address))) - } - - if outAcc.Nonce == nil || outAcc.Nonce.Cmp(big.NewInt(int64(acc.GetNonce()))) < 0 { - return nil, process.ErrWrongNonceInVMOutput - } - - err = acc.SetNonceWithJournal(outAcc.Nonce.Uint64()) - if err != nil { - return nil, err - } - - if outAcc.Balance == nil { - return nil, process.ErrNilBalanceFromSC - } - - stAcc, ok := acc.(*state.Account) - if !ok { - return nil, process.ErrWrongTypeAssertion - } - - // if fake account, than VM only has transaction value as balance, so anything remaining is a plus - if fakeAcc != nil && !fakeAcc.IsInterfaceNil() { - outAcc.Balance = outAcc.Balance.Add(outAcc.Balance, stAcc.Balance) - } - - realBalanceChange := big.NewInt(0).Sub(outAcc.Balance, stAcc.Balance) - - // update the values according to SC output - err = stAcc.SetBalanceWithJournal(outAcc.Balance) - if err != nil { - return nil, err - } - - zero := big.NewInt(0) - if realBalanceChange.Cmp(zero) != 0 { - outAcc.Balance = realBalanceChange - crossOutAccs = append(crossOutAccs, outAcc) - } - } - - return crossOutAccs, nil + crossOutAccs := make([]*vmcommon.OutputAccount, 0) + for i := 0; i < len(outputAccounts); i++ { + outAcc := outputAccounts[i] + acc, err := sc.getAccountFromAddress(outAcc.Address) + if err != nil { + return nil, err + } + + fakeAcc := sc.tempAccounts.TempAccount(outAcc.Address) + + if acc == nil || acc.IsInterfaceNil() { + crossOutAccs = append(crossOutAccs, outAcc) + continue + } + + for j := 0; j < len(outAcc.StorageUpdates); j++ { + storeUpdate := outAcc.StorageUpdates[j] + acc.DataTrieTracker().SaveKeyValue(storeUpdate.Offset, storeUpdate.Data) + } + + if len(outAcc.StorageUpdates) > 0 { + //SC with data variables + err := sc.accounts.SaveDataTrie(acc) + if err != nil { + return nil, err + } + } + + if len(outAcc.Code) > 0 { + err = sc.accounts.PutCode(acc, outAcc.Code) + if err != nil { + return nil, err + } + + //TODO remove this when receipts are implemented + log.Info(fmt.Sprintf("*** Generated/called SC account: %s ***", hex.EncodeToString(outAcc.Address))) + } + + if outAcc.Nonce == nil || outAcc.Nonce.Cmp(big.NewInt(int64(acc.GetNonce()))) < 0 { + return nil, process.ErrWrongNonceInVMOutput + } + + err = acc.SetNonceWithJournal(outAcc.Nonce.Uint64()) + if err != nil { + return nil, err + } + + if outAcc.Balance == nil { + return nil, process.ErrNilBalanceFromSC + } + + stAcc, ok := acc.(*state.Account) + if !ok { + return nil, process.ErrWrongTypeAssertion + } + + // 
if fake account, than VM only has transaction value as balance, so anything remaining is a plus + if fakeAcc != nil && !fakeAcc.IsInterfaceNil() { + outAcc.Balance = outAcc.Balance.Add(outAcc.Balance, stAcc.Balance) + } + + realBalanceChange := big.NewInt(0).Sub(outAcc.Balance, stAcc.Balance) + + // update the values according to SC output + err = stAcc.SetBalanceWithJournal(outAcc.Balance) + if err != nil { + return nil, err + } + + zero := big.NewInt(0) + if realBalanceChange.Cmp(zero) != 0 { + outAcc.Balance = realBalanceChange + crossOutAccs = append(crossOutAccs, outAcc) + } + } + + return crossOutAccs, nil } // delete accounts - only suicide by current SC or another SC called by current SC - protected by VM func (sc *scProcessor) deleteAccounts(deletedAccounts [][]byte) error { - for _, value := range deletedAccounts { - acc, err := sc.getAccountFromAddress(value) - if err != nil { - return err - } - - if acc == nil || acc.IsInterfaceNil() { - //TODO: sharded Smart Contract processing - continue - } - - err = sc.accounts.RemoveAccount(acc.AddressContainer()) - if err != nil { - return err - } - } - return nil + for _, value := range deletedAccounts { + acc, err := sc.getAccountFromAddress(value) + if err != nil { + return err + } + + if acc == nil || acc.IsInterfaceNil() { + //TODO: sharded Smart Contract processing + continue + } + + err = sc.accounts.RemoveAccount(acc.AddressContainer()) + if err != nil { + return err + } + } + return nil } func (sc *scProcessor) processTouchedAccounts(touchedAccounts [][]byte) error { - //TODO: implement - return nil + //TODO: implement + return nil } func (sc *scProcessor) getAccountFromAddress(address []byte) (state.AccountHandler, error) { - adrSrc, err := sc.adrConv.CreateAddressFromPublicKeyBytes(address) - if err != nil { - return nil, err - } - - shardForCurrentNode := sc.shardCoordinator.SelfId() - shardForSrc := sc.shardCoordinator.ComputeId(adrSrc) - if shardForCurrentNode != shardForSrc { - return nil, nil - } - - acnt, err := sc.accounts.GetAccountWithJournal(adrSrc) - if err != nil { - return nil, err - } - - return acnt, nil + adrSrc, err := sc.adrConv.CreateAddressFromPublicKeyBytes(address) + if err != nil { + return nil, err + } + + shardForCurrentNode := sc.shardCoordinator.SelfId() + shardForSrc := sc.shardCoordinator.ComputeId(adrSrc) + if shardForCurrentNode != shardForSrc { + return nil, nil + } + + acnt, err := sc.accounts.GetAccountWithJournal(adrSrc) + if err != nil { + return nil, err + } + + return acnt, nil } // GetAllSmartContractCallRootHash returns the roothash of the state of the SC executions for defined round func (sc *scProcessor) GetAllSmartContractCallRootHash(round uint64) []byte { - return []byte("roothash") + return []byte("roothash") } // saves VM output into state func (sc *scProcessor) saveSCOutputToCurrentState(output *vmcommon.VMOutput, round uint64, txHash []byte) error { - var err error - - sc.mutSCState.Lock() - defer sc.mutSCState.Unlock() - - if _, ok := sc.mapExecState[round]; !ok { - sc.mapExecState[round] = scExecutionState{ - allLogs: make(map[string][]*vmcommon.LogEntry), - allReturnData: make(map[string][]*big.Int), - returnCodes: make(map[string]vmcommon.ReturnCode)} - } - - tmpCurrScState := sc.mapExecState[round] - defer func() { - if err != nil { - sc.mapExecState[round] = tmpCurrScState - } - }() - - err = sc.saveReturnData(output.ReturnData, round, txHash) - if err != nil { - return err - } - - err = sc.saveReturnCode(output.ReturnCode, round, txHash) - if err != nil { - return err - } 
- - err = sc.saveLogsIntoState(output.Logs, round, txHash) - if err != nil { - return err - } - - return nil + var err error + + sc.mutSCState.Lock() + defer sc.mutSCState.Unlock() + + if _, ok := sc.mapExecState[round]; !ok { + sc.mapExecState[round] = scExecutionState{ + allLogs: make(map[string][]*vmcommon.LogEntry), + allReturnData: make(map[string][]*big.Int), + returnCodes: make(map[string]vmcommon.ReturnCode)} + } + + tmpCurrScState := sc.mapExecState[round] + defer func() { + if err != nil { + sc.mapExecState[round] = tmpCurrScState + } + }() + + err = sc.saveReturnData(output.ReturnData, round, txHash) + if err != nil { + return err + } + + err = sc.saveReturnCode(output.ReturnCode, round, txHash) + if err != nil { + return err + } + + err = sc.saveLogsIntoState(output.Logs, round, txHash) + if err != nil { + return err + } + + return nil } // saves return data into account state func (sc *scProcessor) saveReturnData(returnData []*big.Int, round uint64, txHash []byte) error { - sc.mapExecState[round].allReturnData[string(txHash)] = returnData - return nil + sc.mapExecState[round].allReturnData[string(txHash)] = returnData + return nil } // saves smart contract return code into account state func (sc *scProcessor) saveReturnCode(returnCode vmcommon.ReturnCode, round uint64, txHash []byte) error { - sc.mapExecState[round].returnCodes[string(txHash)] = returnCode - return nil + sc.mapExecState[round].returnCodes[string(txHash)] = returnCode + return nil } // save vm output logs into accounts func (sc *scProcessor) saveLogsIntoState(logs []*vmcommon.LogEntry, round uint64, txHash []byte) error { - sc.mapExecState[round].allLogs[string(txHash)] = logs - return nil + sc.mapExecState[round].allLogs[string(txHash)] = logs + return nil } // ProcessSmartContractResult updates the account state from the smart contract result func (sc *scProcessor) ProcessSmartContractResult(scr *smartContractResult.SmartContractResult) error { - if scr == nil { - return process.ErrNilSmartContractResult - } - - accHandler, err := sc.getAccountFromAddress(scr.RcvAddr) - if err != nil { - return err - } - if accHandler == nil || accHandler.IsInterfaceNil() { - return process.ErrNilSCDestAccount - } - - stAcc, ok := accHandler.(*state.Account) - if !ok { - return process.ErrWrongTypeAssertion - } - - storageUpdates, err := sc.argsParser.GetStorageUpdates(scr.Data) - for i := 0; i < len(storageUpdates); i++ { - stAcc.DataTrieTracker().SaveKeyValue(storageUpdates[i].Offset, storageUpdates[i].Data) - } - - if len(scr.Data) > 0 { - //SC with data variables - err := sc.accounts.SaveDataTrie(stAcc) - if err != nil { - return err - } - } - - if len(scr.Code) > 0 { - err = sc.accounts.PutCode(stAcc, scr.Code) - if err != nil { - return err - } - } - - if scr.Value == nil { - return process.ErrNilBalanceFromSC - } - - operation := big.NewInt(0) - operation = operation.Add(scr.Value, stAcc.Balance) - err = stAcc.SetBalanceWithJournal(operation) - if err != nil { - return err - } - - return nil + if scr == nil { + return process.ErrNilSmartContractResult + } + + accHandler, err := sc.getAccountFromAddress(scr.RcvAddr) + if err != nil { + return err + } + if accHandler == nil || accHandler.IsInterfaceNil() { + return process.ErrNilSCDestAccount + } + + stAcc, ok := accHandler.(*state.Account) + if !ok { + return process.ErrWrongTypeAssertion + } + + storageUpdates, err := sc.argsParser.GetStorageUpdates(scr.Data) + for i := 0; i < len(storageUpdates); i++ { + stAcc.DataTrieTracker().SaveKeyValue(storageUpdates[i].Offset, 
storageUpdates[i].Data) + } + + if len(scr.Data) > 0 { + //SC with data variables + err := sc.accounts.SaveDataTrie(stAcc) + if err != nil { + return err + } + } + + if len(scr.Code) > 0 { + err = sc.accounts.PutCode(stAcc, scr.Code) + if err != nil { + return err + } + } + + if scr.Value == nil { + return process.ErrNilBalanceFromSC + } + + operation := big.NewInt(0) + operation = operation.Add(scr.Value, stAcc.Balance) + err = stAcc.SetBalanceWithJournal(operation) + if err != nil { + return err + } + + return nil } // IsInterfaceNil returns true if there is no value under the interface func (sc *scProcessor) IsInterfaceNil() bool { - if sc == nil { - return true - } - return false + if sc == nil { + return true + } + return false } diff --git a/process/transaction/process.go b/process/transaction/process.go index 2903848424f..9a6ca6971ec 100644 --- a/process/transaction/process.go +++ b/process/transaction/process.go @@ -1,17 +1,17 @@ package transaction import ( - "bytes" - "math/big" - "sync" - - "github.com/ElrondNetwork/elrond-go/core/logger" - "github.com/ElrondNetwork/elrond-go/data/state" - "github.com/ElrondNetwork/elrond-go/data/transaction" - "github.com/ElrondNetwork/elrond-go/hashing" - "github.com/ElrondNetwork/elrond-go/marshal" - "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/sharding" + "bytes" + "math/big" + "sync" + + "github.com/ElrondNetwork/elrond-go/core/logger" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/data/transaction" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/sharding" ) var log = logger.DefaultLogger() @@ -26,365 +26,365 @@ var mutTxFee sync.RWMutex // txProcessor implements TransactionProcessor interface and can modify account states according to a transaction type txProcessor struct { - accounts state.AccountsAdapter - adrConv state.AddressConverter - hasher hashing.Hasher - scProcessor process.SmartContractProcessor - marshalizer marshal.Marshalizer - txFeeHandler process.TransactionFeeHandler - shardCoordinator sharding.Coordinator - txTypeHandler process.TxTypeHandler + accounts state.AccountsAdapter + adrConv state.AddressConverter + hasher hashing.Hasher + scProcessor process.SmartContractProcessor + marshalizer marshal.Marshalizer + txFeeHandler process.TransactionFeeHandler + shardCoordinator sharding.Coordinator + txTypeHandler process.TxTypeHandler } // NewTxProcessor creates a new txProcessor engine func NewTxProcessor( - accounts state.AccountsAdapter, - hasher hashing.Hasher, - addressConv state.AddressConverter, - marshalizer marshal.Marshalizer, - shardCoordinator sharding.Coordinator, - scProcessor process.SmartContractProcessor, - txFeeHandler process.TransactionFeeHandler, - txTypeHandler process.TxTypeHandler, + accounts state.AccountsAdapter, + hasher hashing.Hasher, + addressConv state.AddressConverter, + marshalizer marshal.Marshalizer, + shardCoordinator sharding.Coordinator, + scProcessor process.SmartContractProcessor, + txFeeHandler process.TransactionFeeHandler, + txTypeHandler process.TxTypeHandler, ) (*txProcessor, error) { - if accounts == nil || accounts.IsInterfaceNil() { - return nil, process.ErrNilAccountsAdapter - } - if hasher == nil || hasher.IsInterfaceNil() { - return nil, process.ErrNilHasher - } - if addressConv == nil || addressConv.IsInterfaceNil() { - return nil, process.ErrNilAddressConverter - } - if 
marshalizer == nil || marshalizer.IsInterfaceNil() { - return nil, process.ErrNilMarshalizer - } - if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { - return nil, process.ErrNilShardCoordinator - } - if scProcessor == nil || scProcessor.IsInterfaceNil() { - return nil, process.ErrNilSmartContractProcessor - } - if txFeeHandler == nil || txFeeHandler.IsInterfaceNil() { - return nil, process.ErrNilUnsignedTxHandler - } - if txTypeHandler == nil || txTypeHandler.IsInterfaceNil() { - return nil, process.ErrNilTxTypeHandler - } - - return &txProcessor{ - accounts: accounts, - hasher: hasher, - adrConv: addressConv, - marshalizer: marshalizer, - shardCoordinator: shardCoordinator, - scProcessor: scProcessor, - txFeeHandler: txFeeHandler, - txTypeHandler: txTypeHandler, - }, nil + if accounts == nil || accounts.IsInterfaceNil() { + return nil, process.ErrNilAccountsAdapter + } + if hasher == nil || hasher.IsInterfaceNil() { + return nil, process.ErrNilHasher + } + if addressConv == nil || addressConv.IsInterfaceNil() { + return nil, process.ErrNilAddressConverter + } + if marshalizer == nil || marshalizer.IsInterfaceNil() { + return nil, process.ErrNilMarshalizer + } + if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { + return nil, process.ErrNilShardCoordinator + } + if scProcessor == nil || scProcessor.IsInterfaceNil() { + return nil, process.ErrNilSmartContractProcessor + } + if txFeeHandler == nil || txFeeHandler.IsInterfaceNil() { + return nil, process.ErrNilUnsignedTxHandler + } + if txTypeHandler == nil || txTypeHandler.IsInterfaceNil() { + return nil, process.ErrNilTxTypeHandler + } + + return &txProcessor{ + accounts: accounts, + hasher: hasher, + adrConv: addressConv, + marshalizer: marshalizer, + shardCoordinator: shardCoordinator, + scProcessor: scProcessor, + txFeeHandler: txFeeHandler, + txTypeHandler: txTypeHandler, + }, nil } // ProcessTransaction modifies the account states in respect with the transaction data func (txProc *txProcessor) ProcessTransaction(tx *transaction.Transaction, roundIndex uint64) error { - if tx == nil || tx.IsInterfaceNil() { - return process.ErrNilTransaction - } - - adrSrc, adrDst, err := txProc.getAddresses(tx) - if err != nil { - return err - } - - acntSnd, err := txProc.getAccountFromAddress(adrSrc) - if err != nil { - return err - } - - err = txProc.checkTxValues(tx, acntSnd) - if err != nil { - return err - } - - txType, err := txProc.txTypeHandler.ComputeTransactionType(tx) - if err != nil { - return err - } - - switch txType { - case process.MoveBalance: - return txProc.processMoveBalance(tx, adrSrc, adrDst) - case process.SCDeployment: - return txProc.processSCDeployment(tx, adrSrc, roundIndex) - case process.SCInvoking: - return txProc.processSCInvoking(tx, adrSrc, adrDst, roundIndex) - } - - return process.ErrWrongTransaction + if tx == nil || tx.IsInterfaceNil() { + return process.ErrNilTransaction + } + + adrSrc, adrDst, err := txProc.getAddresses(tx) + if err != nil { + return err + } + + acntSnd, err := txProc.getAccountFromAddress(adrSrc) + if err != nil { + return err + } + + err = txProc.checkTxValues(tx, acntSnd) + if err != nil { + return err + } + + txType, err := txProc.txTypeHandler.ComputeTransactionType(tx) + if err != nil { + return err + } + + switch txType { + case process.MoveBalance: + return txProc.processMoveBalance(tx, adrSrc, adrDst) + case process.SCDeployment: + return txProc.processSCDeployment(tx, adrSrc, roundIndex) + case process.SCInvoking: + return txProc.processSCInvoking(tx, 
adrSrc, adrDst, roundIndex) + } + + return process.ErrWrongTransaction } func (txProc *txProcessor) processTxFee(tx *transaction.Transaction, acntSnd *state.Account) (*big.Int, error) { - if acntSnd == nil { - return nil, nil - } - - cost := big.NewInt(0) - cost = cost.Mul(big.NewInt(0).SetUint64(tx.GasPrice), big.NewInt(0).SetUint64(tx.GasLimit)) - - txDataLen := int64(len(tx.Data)) - mutTxFee.RLock() - minFee := big.NewInt(0) - minFee = minFee.Mul(big.NewInt(txDataLen), big.NewInt(0).SetUint64(minGasPrice)) - minFee = minFee.Add(minFee, big.NewInt(0).SetUint64(minTxFee)) - mutTxFee.RUnlock() - - if minFee.Cmp(cost) > 0 { - return nil, process.ErrNotEnoughFeeInTransactions - } - - if acntSnd.Balance.Cmp(cost) < 0 { - return nil, process.ErrInsufficientFunds - } - - operation := big.NewInt(0) - err := acntSnd.SetBalanceWithJournal(operation.Sub(acntSnd.Balance, cost)) - if err != nil { - return nil, err - } - - return cost, nil + if acntSnd == nil { + return nil, nil + } + + cost := big.NewInt(0) + cost = cost.Mul(big.NewInt(0).SetUint64(tx.GasPrice), big.NewInt(0).SetUint64(tx.GasLimit)) + + txDataLen := int64(len(tx.Data)) + mutTxFee.RLock() + minFee := big.NewInt(0) + minFee = minFee.Mul(big.NewInt(txDataLen), big.NewInt(0).SetUint64(minGasPrice)) + minFee = minFee.Add(minFee, big.NewInt(0).SetUint64(minTxFee)) + mutTxFee.RUnlock() + + if minFee.Cmp(cost) > 0 { + return nil, process.ErrNotEnoughFeeInTransactions + } + + if acntSnd.Balance.Cmp(cost) < 0 { + return nil, process.ErrInsufficientFunds + } + + operation := big.NewInt(0) + err := acntSnd.SetBalanceWithJournal(operation.Sub(acntSnd.Balance, cost)) + if err != nil { + return nil, err + } + + return cost, nil } func (txProc *txProcessor) processMoveBalance( - tx *transaction.Transaction, - adrSrc, adrDst state.AddressContainer, + tx *transaction.Transaction, + adrSrc, adrDst state.AddressContainer, ) error { - // getAccounts returns acntSrc not nil if the adrSrc is in the node shard, the same, acntDst will be not nil - // if adrDst is in the node shard. If an error occurs it will be signaled in err variable. - acntSrc, acntDst, err := txProc.getAccounts(adrSrc, adrDst) - if err != nil { - return err - } + // getAccounts returns acntSrc not nil if the adrSrc is in the node shard, the same, acntDst will be not nil + // if adrDst is in the node shard. If an error occurs it will be signaled in err variable. 
+ acntSrc, acntDst, err := txProc.getAccounts(adrSrc, adrDst) + if err != nil { + return err + } - txFee, err := txProc.processTxFee(tx, acntSrc) - if err != nil { - return err - } + txFee, err := txProc.processTxFee(tx, acntSrc) + if err != nil { + return err + } - value := tx.Value + value := tx.Value - err = txProc.moveBalances(acntSrc, acntDst, value) - if err != nil { - return err - } + err = txProc.moveBalances(acntSrc, acntDst, value) + if err != nil { + return err + } - // is sender address in node shard - if acntSrc != nil { - err = txProc.increaseNonce(acntSrc) - if err != nil { - return err - } - } + // is sender address in node shard + if acntSrc != nil { + err = txProc.increaseNonce(acntSrc) + if err != nil { + return err + } + } - txProc.txFeeHandler.ProcessTransactionFee(txFee) + txProc.txFeeHandler.ProcessTransactionFee(txFee) - return nil + return nil } func (txProc *txProcessor) processSCDeployment( - tx *transaction.Transaction, - adrSrc state.AddressContainer, - roundIndex uint64, + tx *transaction.Transaction, + adrSrc state.AddressContainer, + roundIndex uint64, ) error { - // getAccounts returns acntSrc not nil if the adrSrc is in the node shard, the same, acntDst will be not nil - // if adrDst is in the node shard. If an error occurs it will be signaled in err variable. - acntSrc, err := txProc.getAccountFromAddress(adrSrc) - if err != nil { - return err - } - - err = txProc.scProcessor.DeploySmartContract(tx, acntSrc, roundIndex) - return err + // getAccounts returns acntSrc not nil if the adrSrc is in the node shard, the same, acntDst will be not nil + // if adrDst is in the node shard. If an error occurs it will be signaled in err variable. + acntSrc, err := txProc.getAccountFromAddress(adrSrc) + if err != nil { + return err + } + + err = txProc.scProcessor.DeploySmartContract(tx, acntSrc, roundIndex) + return err } func (txProc *txProcessor) processSCInvoking( - tx *transaction.Transaction, - adrSrc, adrDst state.AddressContainer, - roundIndex uint64, + tx *transaction.Transaction, + adrSrc, adrDst state.AddressContainer, + roundIndex uint64, ) error { - // getAccounts returns acntSrc not nil if the adrSrc is in the node shard, the same, acntDst will be not nil - // if adrDst is in the node shard. If an error occurs it will be signaled in err variable. - acntSrc, acntDst, err := txProc.getAccounts(adrSrc, adrDst) - if err != nil { - return err - } - - err = txProc.scProcessor.ExecuteSmartContractTransaction(tx, acntSrc, acntDst, roundIndex) - return err + // getAccounts returns acntSrc not nil if the adrSrc is in the node shard, the same, acntDst will be not nil + // if adrDst is in the node shard. If an error occurs it will be signaled in err variable. 
+ acntSrc, acntDst, err := txProc.getAccounts(adrSrc, adrDst) + if err != nil { + return err + } + + err = txProc.scProcessor.ExecuteSmartContractTransaction(tx, acntSrc, acntDst, roundIndex) + return err } func (txProc *txProcessor) getAddresses( - tx *transaction.Transaction, + tx *transaction.Transaction, ) (state.AddressContainer, state.AddressContainer, error) { - //for now we assume that the address = public key - adrSrc, err := txProc.adrConv.CreateAddressFromPublicKeyBytes(tx.SndAddr) - if err != nil { - return nil, nil, err - } - - adrDst, err := txProc.adrConv.CreateAddressFromPublicKeyBytes(tx.RcvAddr) - if err != nil { - return nil, nil, err - } - - return adrSrc, adrDst, nil + //for now we assume that the address = public key + adrSrc, err := txProc.adrConv.CreateAddressFromPublicKeyBytes(tx.SndAddr) + if err != nil { + return nil, nil, err + } + + adrDst, err := txProc.adrConv.CreateAddressFromPublicKeyBytes(tx.RcvAddr) + if err != nil { + return nil, nil, err + } + + return adrSrc, adrDst, nil } func (txProc *txProcessor) getAccounts( - adrSrc, adrDst state.AddressContainer, + adrSrc, adrDst state.AddressContainer, ) (*state.Account, *state.Account, error) { - var acntSrc, acntDst *state.Account + var acntSrc, acntDst *state.Account - shardForCurrentNode := txProc.shardCoordinator.SelfId() - shardForSrc := txProc.shardCoordinator.ComputeId(adrSrc) - shardForDst := txProc.shardCoordinator.ComputeId(adrDst) + shardForCurrentNode := txProc.shardCoordinator.SelfId() + shardForSrc := txProc.shardCoordinator.ComputeId(adrSrc) + shardForDst := txProc.shardCoordinator.ComputeId(adrDst) - srcInShard := shardForSrc == shardForCurrentNode - dstInShard := shardForDst == shardForCurrentNode + srcInShard := shardForSrc == shardForCurrentNode + dstInShard := shardForDst == shardForCurrentNode - if srcInShard && adrSrc == nil || - dstInShard && adrDst == nil { - return nil, nil, process.ErrNilAddressContainer - } + if srcInShard && adrSrc == nil || + dstInShard && adrDst == nil { + return nil, nil, process.ErrNilAddressContainer + } - if bytes.Equal(adrSrc.Bytes(), adrDst.Bytes()) { - acntWrp, err := txProc.accounts.GetAccountWithJournal(adrSrc) - if err != nil { - return nil, nil, err - } + if bytes.Equal(adrSrc.Bytes(), adrDst.Bytes()) { + acntWrp, err := txProc.accounts.GetAccountWithJournal(adrSrc) + if err != nil { + return nil, nil, err + } - account, ok := acntWrp.(*state.Account) - if !ok { - return nil, nil, process.ErrWrongTypeAssertion - } + account, ok := acntWrp.(*state.Account) + if !ok { + return nil, nil, process.ErrWrongTypeAssertion + } - return account, account, nil - } + return account, account, nil + } - if srcInShard { - acntSrcWrp, err := txProc.accounts.GetAccountWithJournal(adrSrc) - if err != nil { - return nil, nil, err - } + if srcInShard { + acntSrcWrp, err := txProc.accounts.GetAccountWithJournal(adrSrc) + if err != nil { + return nil, nil, err + } - account, ok := acntSrcWrp.(*state.Account) - if !ok { - return nil, nil, process.ErrWrongTypeAssertion - } + account, ok := acntSrcWrp.(*state.Account) + if !ok { + return nil, nil, process.ErrWrongTypeAssertion + } - acntSrc = account - } + acntSrc = account + } - if dstInShard { - acntDstWrp, err := txProc.accounts.GetAccountWithJournal(adrDst) - if err != nil { - return nil, nil, err - } + if dstInShard { + acntDstWrp, err := txProc.accounts.GetAccountWithJournal(adrDst) + if err != nil { + return nil, nil, err + } - account, ok := acntDstWrp.(*state.Account) - if !ok { - return nil, nil, 
process.ErrWrongTypeAssertion - } + account, ok := acntDstWrp.(*state.Account) + if !ok { + return nil, nil, process.ErrWrongTypeAssertion + } - acntDst = account - } + acntDst = account + } - return acntSrc, acntDst, nil + return acntSrc, acntDst, nil } func (txProc *txProcessor) getAccountFromAddress(adrSrc state.AddressContainer) (state.AccountHandler, error) { - shardForCurrentNode := txProc.shardCoordinator.SelfId() - shardForSrc := txProc.shardCoordinator.ComputeId(adrSrc) - if shardForCurrentNode != shardForSrc { - return nil, nil - } - - acnt, err := txProc.accounts.GetAccountWithJournal(adrSrc) - if err != nil { - return nil, err - } - - return acnt, nil + shardForCurrentNode := txProc.shardCoordinator.SelfId() + shardForSrc := txProc.shardCoordinator.ComputeId(adrSrc) + if shardForCurrentNode != shardForSrc { + return nil, nil + } + + acnt, err := txProc.accounts.GetAccountWithJournal(adrSrc) + if err != nil { + return nil, err + } + + return acnt, nil } func (txProc *txProcessor) checkTxValues(tx *transaction.Transaction, acntSnd state.AccountHandler) error { - if acntSnd == nil || acntSnd.IsInterfaceNil() { - // transaction was already done at sender shard - return nil - } - - if acntSnd.GetNonce() < tx.Nonce { - return process.ErrHigherNonceInTransaction - } - if acntSnd.GetNonce() > tx.Nonce { - return process.ErrLowerNonceInTransaction - } - - cost := big.NewInt(0) - cost = cost.Mul(big.NewInt(0).SetUint64(tx.GasPrice), big.NewInt(0).SetUint64(tx.GasLimit)) - cost = cost.Add(cost, tx.Value) - - if cost.Cmp(big.NewInt(0)) == 0 { - return nil - } - - stAcc, ok := acntSnd.(*state.Account) - if !ok { - return process.ErrWrongTypeAssertion - } - - if stAcc.Balance.Cmp(cost) < 0 { - return process.ErrInsufficientFunds - } - - return nil + if acntSnd == nil || acntSnd.IsInterfaceNil() { + // transaction was already done at sender shard + return nil + } + + if acntSnd.GetNonce() < tx.Nonce { + return process.ErrHigherNonceInTransaction + } + if acntSnd.GetNonce() > tx.Nonce { + return process.ErrLowerNonceInTransaction + } + + cost := big.NewInt(0) + cost = cost.Mul(big.NewInt(0).SetUint64(tx.GasPrice), big.NewInt(0).SetUint64(tx.GasLimit)) + cost = cost.Add(cost, tx.Value) + + if cost.Cmp(big.NewInt(0)) == 0 { + return nil + } + + stAcc, ok := acntSnd.(*state.Account) + if !ok { + return process.ErrWrongTypeAssertion + } + + if stAcc.Balance.Cmp(cost) < 0 { + return process.ErrInsufficientFunds + } + + return nil } func (txProc *txProcessor) moveBalances(acntSrc, acntDst *state.Account, - value *big.Int, + value *big.Int, ) error { - operation1 := big.NewInt(0) - operation2 := big.NewInt(0) - - // is sender address in node shard - if acntSrc != nil { - err := acntSrc.SetBalanceWithJournal(operation1.Sub(acntSrc.Balance, value)) - if err != nil { - return err - } - } - - // is receiver address in node shard - if acntDst != nil { - err := acntDst.SetBalanceWithJournal(operation2.Add(acntDst.Balance, value)) - if err != nil { - return err - } - } - - return nil + operation1 := big.NewInt(0) + operation2 := big.NewInt(0) + + // is sender address in node shard + if acntSrc != nil { + err := acntSrc.SetBalanceWithJournal(operation1.Sub(acntSrc.Balance, value)) + if err != nil { + return err + } + } + + // is receiver address in node shard + if acntDst != nil { + err := acntDst.SetBalanceWithJournal(operation2.Add(acntDst.Balance, value)) + if err != nil { + return err + } + } + + return nil } func (txProc *txProcessor) increaseNonce(acntSrc *state.Account) error { - return 
acntSrc.SetNonceWithJournal(acntSrc.Nonce + 1) + return acntSrc.SetNonceWithJournal(acntSrc.Nonce + 1) } // IsInterfaceNil returns true if there is no value under the interface func (txProc *txProcessor) IsInterfaceNil() bool { - if txProc == nil { - return true - } - return false + if txProc == nil { + return true + } + return false } From fa8059dfd0c1bae18fe8c6634bfe17f974625e12 Mon Sep 17 00:00:00 2001 From: Radu Chis Date: Tue, 10 Sep 2019 18:36:32 +0300 Subject: [PATCH 093/234] merge development to economics --- go.sum | 1 + integrationTests/testProcessorNode.go | 4 ++++ integrationTests/testSyncNode.go | 19 ++++++++++++++++++- process/block/shardblock_test.go | 12 +++++++----- 4 files changed, 30 insertions(+), 6 deletions(-) diff --git a/go.sum b/go.sum index 442aca1454b..79ee6632f67 100644 --- a/go.sum +++ b/go.sum @@ -475,6 +475,7 @@ go.dedis.ch/protobuf v1.0.5 h1:EbF1czEKICxf5KY8Tm7wMF28hcOQbB6yk4IybIFWTYE= go.dedis.ch/protobuf v1.0.5/go.mod h1:eIV4wicvi6JK0q/QnfIEGeSFNG0ZeB24kzut5+HaRLo= go.opencensus.io v0.21.0 h1:mU6zScU4U1YAFPHEHYk+3JC4SY7JxgkqS10ZOSyksNg= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.1 h1:8dP3SGL7MPB94crU3bEPplMPe83FI4EouesJUeFHv50= go.opencensus.io v0.22.1/go.mod h1:Ap50jQcDJrx6rB6VgeeFPtuPIf3wMRvRfrfYDO6+BmA= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index 9150b0269bb..9a910db563b 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -830,3 +830,7 @@ func (tpn *TestProcessorNode) MiniBlocksPresent(hashes [][]byte) bool { return true } + +func (tpn *TestProcessorNode) initRounder() { + tpn.Rounder = &mock.RounderMock{} +} diff --git a/integrationTests/testSyncNode.go b/integrationTests/testSyncNode.go index abc663639dd..24486b2e848 100644 --- a/integrationTests/testSyncNode.go +++ b/integrationTests/testSyncNode.go @@ -24,13 +24,24 @@ func NewTestSyncNode( ) *TestProcessorNode { shardCoordinator, _ := sharding.NewMultiShardCoordinator(maxShards, nodeShardId) + nodesCoordinator := &mock.NodesCoordinatorMock{} messenger := CreateMessengerWithKadDht(context.Background(), initialNodeAddr) + tpn := &TestProcessorNode{ ShardCoordinator: shardCoordinator, Messenger: messenger, + NodesCoordinator: nodesCoordinator, + } + + kg := &mock.KeyGenMock{} + sk, pk := kg.GeneratePair() + tpn.NodeKeys = &TestKeyPair{ + Sk: sk, + Pk: pk, } + tpn.MultiSigner = TestMultiSig tpn.OwnAccount = CreateTestWalletAccount(shardCoordinator, txSignPrivKeyShardId) tpn.initDataPools() tpn.initTestNodeWithSync() @@ -41,9 +52,10 @@ func NewTestSyncNode( func (tpn *TestProcessorNode) initTestNodeWithSync() { tpn.initRounder() tpn.initStorage() - tpn.AccntState, _, _ = CreateAccountsDB(tpn.ShardCoordinator) + tpn.AccntState, _, _ = CreateAccountsDB(0) tpn.initChainHandler() tpn.GenesisBlocks = CreateGenesisBlocks(tpn.ShardCoordinator) + tpn.SpecialAddressHandler = &mock.SpecialAddressHandlerMock{} tpn.initInterceptors() tpn.initResolvers() tpn.initInnerProcessors() @@ -85,6 +97,8 @@ func (tpn *TestProcessorNode) initBlockProcessorWithSync() { tpn.MetaDataPool, tpn.ForkDetector, tpn.ShardCoordinator, + tpn.NodesCoordinator, + tpn.SpecialAddressHandler, TestHasher, TestMarshalizer, tpn.Storage, @@ -92,6 +106,7 @@ func (tpn 
*TestProcessorNode) initBlockProcessorWithSync() { tpn.RequestHandler, TestUint64Converter, ) + } else { tpn.BlockProcessor, err = block.NewShardProcessor( nil, @@ -101,6 +116,8 @@ func (tpn *TestProcessorNode) initBlockProcessorWithSync() { TestMarshalizer, tpn.AccntState, tpn.ShardCoordinator, + tpn.NodesCoordinator, + tpn.SpecialAddressHandler, tpn.ForkDetector, tpn.BlockTracker, tpn.GenesisBlocks, diff --git a/process/block/shardblock_test.go b/process/block/shardblock_test.go index 3cec4192bb7..f3181f31331 100644 --- a/process/block/shardblock_test.go +++ b/process/block/shardblock_test.go @@ -4888,19 +4888,21 @@ func TestShardProcessor_RestoreMetaBlockIntoPoolVerifyMiniblocks(t *testing.T) { t.Parallel() marshalizer := &mock.MarshalizerMock{} - poolFake := mock.NewPoolsHolderFake() + poolMock := mock.NewPoolsHolderMock() storer := &mock.ChainStorerMock{} shardC := mock.NewMultiShardsCoordinatorMock(3) sp, _ := blproc.NewShardProcessor( &mock.ServiceContainerMock{}, - poolFake, + poolMock, storer, &mock.HasherStub{}, &mock.MarshalizerMock{}, &mock.AccountsStub{}, - shardC, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, createGenesisBlocks(shardC), @@ -4940,7 +4942,7 @@ func TestShardProcessor_RestoreMetaBlockIntoPoolVerifyMiniblocks(t *testing.T) { metablockHashes := make([][]byte, 0) metablockHashes = append(metablockHashes, metaHash) - metaBlockRestored, ok := poolFake.MetaBlocks().Get(metaHash) + metaBlockRestored, ok := poolMock.MetaBlocks().Get(metaHash) assert.Equal(t, nil, metaBlockRestored) assert.False(t, ok) @@ -4958,7 +4960,7 @@ func TestShardProcessor_RestoreMetaBlockIntoPoolVerifyMiniblocks(t *testing.T) { err := sp.RestoreMetaBlockIntoPool(miniblockHashes, metablockHashes) - metaBlockRestored, _ = poolFake.MetaBlocks().Get(metaHash) + metaBlockRestored, _ = poolMock.MetaBlocks().Get(metaHash) assert.Equal(t, meta, metaBlockRestored) assert.Nil(t, err) From 8fdf8883b8d5b174993e16793310729b655b74f6 Mon Sep 17 00:00:00 2001 From: Radu Chis Date: Tue, 10 Sep 2019 18:38:15 +0300 Subject: [PATCH 094/234] merge development to economics --- cmd/node/factory/structs.go | 6 +- consensus/mock/blockProcessorMock.go | 8 +- consensus/spos/consensusCoreValidator_test.go | 4 +- data/address/specialAddresses.go | 2 +- dataRetriever/errors.go | 2 +- dataRetriever/mock/poolsHolderStub.go | 8 +- .../requestHandlers/requestHandler.go | 2 +- integrationTests/mock/blockProcessorMock.go | 8 +- integrationTests/mock/keyMock.go | 1 + integrationTests/mock/nodesCoordinatorMock.go | 4 +- .../mock/specialAddressHandlerMock.go | 2 +- .../mock/unsignedTxHandlerMock.go | 2 +- .../interceptedHeadersSigVerification_test.go | 2 +- node/mock/blockProcessorStub.go | 8 +- node/mock/poolsHolderStub.go | 8 +- .../block/preprocess/rewardTxPreProcessor.go | 710 +- process/block/preprocess/rewardsHandler.go | 412 +- process/block/shardblock_test.go | 9410 ++++++++--------- process/coordinator/process.go | 1230 +-- .../metachain/interceptorsContainerFactory.go | 2 +- .../intermediateProcessorsContainerFactory.go | 10 +- .../shard/preProcessorsContainerFactory.go | 14 +- process/interface.go | 398 +- process/mock/blockProcessorMock.go | 8 +- process/mock/nodesCoordinatorMock.go | 250 +- process/mock/poolsHolderStub.go | 8 +- process/mock/rewardTxProcessorMock.go | 20 +- process/mock/specialAddressHandlerMock.go | 2 +- process/mock/txTypeHandlerMock.go | 2 +- process/rewardTransaction/process.go | 
152 +- process/transaction/process.go | 654 +- sharding/mock/hasherMock.go | 2 +- sharding/mock/hasherStub.go | 2 +- 33 files changed, 6677 insertions(+), 6676 deletions(-) diff --git a/cmd/node/factory/structs.go b/cmd/node/factory/structs.go index 58bc38bb11e..d66ab9bf6e7 100644 --- a/cmd/node/factory/structs.go +++ b/cmd/node/factory/structs.go @@ -1351,9 +1351,9 @@ func generateGenesisHeadersAndApplyInitialBalances( shardsGenesisBlocks[shardCoordinator.SelfId()] = genesisBlockForCurrentShard genesisBlock, err := genesis.CreateMetaGenesisBlock( - uint64(nodesSetup.StartTime), - nodesSetup.InitialNodesPubKeys(), - ) + uint64(nodesSetup.StartTime), + nodesSetup.InitialNodesPubKeys(), + ) if err != nil { return nil, err diff --git a/consensus/mock/blockProcessorMock.go b/consensus/mock/blockProcessorMock.go index 809d3f966b8..a027cab4aa1 100644 --- a/consensus/mock/blockProcessorMock.go +++ b/consensus/mock/blockProcessorMock.go @@ -72,8 +72,8 @@ func (blProcMock BlockProcessorMock) SetConsensusRewardAddresses(addresses []str // IsInterfaceNil returns true if there is no value under the interface func (blProcMock *BlockProcessorMock) IsInterfaceNil() bool { - if blProcMock == nil { - return true - } - return false + if blProcMock == nil { + return true + } + return false } diff --git a/consensus/spos/consensusCoreValidator_test.go b/consensus/spos/consensusCoreValidator_test.go index bc13a911c76..d55fac01683 100644 --- a/consensus/spos/consensusCoreValidator_test.go +++ b/consensus/spos/consensusCoreValidator_test.go @@ -25,8 +25,8 @@ func initConsensusDataContainer() *ConsensusCore { validatorGroupSelector := &mock.NodesCoordinatorMock{} return &ConsensusCore{ - blockChain: blockChain, - blockProcessor: blockProcessorMock, + blockChain: blockChain, + blockProcessor: blockProcessorMock, blocksTracker: blocksTrackerMock, bootstrapper: bootstrapperMock, broadcastMessenger: broadcastMessengerMock, diff --git a/data/address/specialAddresses.go b/data/address/specialAddresses.go index 5a4fd8b7284..c16158fde18 100644 --- a/data/address/specialAddresses.go +++ b/data/address/specialAddresses.go @@ -94,4 +94,4 @@ func (sp *specialAddresses) IsInterfaceNil() bool { return true } return false -} \ No newline at end of file +} diff --git a/dataRetriever/errors.go b/dataRetriever/errors.go index de32e92ed53..c74a6a6d34c 100644 --- a/dataRetriever/errors.go +++ b/dataRetriever/errors.go @@ -1,7 +1,7 @@ package dataRetriever import ( - "errors" + "errors" ) // ErrNilMessage signals that a nil message has been received diff --git a/dataRetriever/mock/poolsHolderStub.go b/dataRetriever/mock/poolsHolderStub.go index df7a40999d3..d189b57d055 100644 --- a/dataRetriever/mock/poolsHolderStub.go +++ b/dataRetriever/mock/poolsHolderStub.go @@ -50,8 +50,8 @@ func (phs *PoolsHolderStub) RewardTransactions() dataRetriever.ShardedDataCacher // IsInterfaceNil returns true if there is no value under the interface func (phs *PoolsHolderStub) IsInterfaceNil() bool { - if phs == nil { - return true - } - return false + if phs == nil { + return true + } + return false } diff --git a/dataRetriever/requestHandlers/requestHandler.go b/dataRetriever/requestHandlers/requestHandler.go index c18a2081f3a..795af58297f 100644 --- a/dataRetriever/requestHandlers/requestHandler.go +++ b/dataRetriever/requestHandlers/requestHandler.go @@ -132,7 +132,7 @@ func (rrh *resolverRequestHandler) RequestUnsignedTransactions(destShardID uint3 } // RequestRewardTransactions requests for reward transactions from the connected peers -func (rrh 
*resolverRequestHandler) RequestRewardTransactions(destShardId uint32, rewardTxHashes [][]byte){ +func (rrh *resolverRequestHandler) RequestRewardTransactions(destShardId uint32, rewardTxHashes [][]byte) { rrh.requestByHashes(destShardId, rewardTxHashes, rrh.rewardTxRequestTopic) } diff --git a/integrationTests/mock/blockProcessorMock.go b/integrationTests/mock/blockProcessorMock.go index 7a21af22b04..0a15b3aab12 100644 --- a/integrationTests/mock/blockProcessorMock.go +++ b/integrationTests/mock/blockProcessorMock.go @@ -101,8 +101,8 @@ func (blProcMock BlockProcessorMock) SetConsensusRewardAddresses(address []strin // IsInterfaceNil returns true if there is no value under the interface func (blProcMock *BlockProcessorMock) IsInterfaceNil() bool { - if blProcMock == nil { - return true - } - return false + if blProcMock == nil { + return true + } + return false } diff --git a/integrationTests/mock/keyMock.go b/integrationTests/mock/keyMock.go index 6e22e985a10..1b94601ef15 100644 --- a/integrationTests/mock/keyMock.go +++ b/integrationTests/mock/keyMock.go @@ -60,6 +60,7 @@ func (sk *PrivateKeyMock) IsInterfaceNil() bool { } return false } + //------KeyGenMock func (keyGen *KeyGenMock) GeneratePair() (crypto.PrivateKey, crypto.PublicKey) { diff --git a/integrationTests/mock/nodesCoordinatorMock.go b/integrationTests/mock/nodesCoordinatorMock.go index d3ba7015800..560288d4016 100644 --- a/integrationTests/mock/nodesCoordinatorMock.go +++ b/integrationTests/mock/nodesCoordinatorMock.go @@ -5,8 +5,8 @@ import ( ) type NodesCoordinatorMock struct { - ComputeValidatorsGroupCalled func(randomness []byte, round uint64, shardId uint32) ([]sharding.Validator, error) - GetValidatorsPublicKeysCalled func(randomness []byte, round uint64, shardId uint32) ([]string, error) + ComputeValidatorsGroupCalled func(randomness []byte, round uint64, shardId uint32) ([]sharding.Validator, error) + GetValidatorsPublicKeysCalled func(randomness []byte, round uint64, shardId uint32) ([]string, error) GetValidatorsRewardsAddressesCalled func(randomness []byte, round uint64, shardId uint32) ([]string, error) } diff --git a/integrationTests/mock/specialAddressHandlerMock.go b/integrationTests/mock/specialAddressHandlerMock.go index 1b59991984d..cf38adf5835 100644 --- a/integrationTests/mock/specialAddressHandlerMock.go +++ b/integrationTests/mock/specialAddressHandlerMock.go @@ -58,4 +58,4 @@ func (sh *SpecialAddressHandlerMock) IsInterfaceNil() bool { return true } return false -} \ No newline at end of file +} diff --git a/integrationTests/mock/unsignedTxHandlerMock.go b/integrationTests/mock/unsignedTxHandlerMock.go index bd588555a10..7e7175bdbff 100644 --- a/integrationTests/mock/unsignedTxHandlerMock.go +++ b/integrationTests/mock/unsignedTxHandlerMock.go @@ -58,4 +58,4 @@ func (ut *UnsignedTxHandlerMock) IsInterfaceNil() bool { return true } return false -} \ No newline at end of file +} diff --git a/integrationTests/multiShard/block/interceptedHeadersSigVerification_test.go b/integrationTests/multiShard/block/interceptedHeadersSigVerification_test.go index fb718f586cc..8f5dcbacda3 100644 --- a/integrationTests/multiShard/block/interceptedHeadersSigVerification_test.go +++ b/integrationTests/multiShard/block/interceptedHeadersSigVerification_test.go @@ -11,7 +11,7 @@ import ( "github.com/stretchr/testify/assert" ) -const broadcastDelay = 2* time.Second +const broadcastDelay = 2 * time.Second func TestInterceptedShardBlockHeaderVerifiedWithCorrectConsensusGroup(t *testing.T) { if testing.Short() { diff --git 
a/node/mock/blockProcessorStub.go b/node/mock/blockProcessorStub.go index 5bed3132f3a..546057a7d05 100644 --- a/node/mock/blockProcessorStub.go +++ b/node/mock/blockProcessorStub.go @@ -78,8 +78,8 @@ func (blProcMock BlockProcessorStub) SetConsensusRewardAddresses([]string) { // IsInterfaceNil returns true if there is no value under the interface func (blProcMock *BlockProcessorStub) IsInterfaceNil() bool { - if blProcMock == nil { - return true - } - return false + if blProcMock == nil { + return true + } + return false } diff --git a/node/mock/poolsHolderStub.go b/node/mock/poolsHolderStub.go index a9e4b93dacb..1ea9c0a934e 100644 --- a/node/mock/poolsHolderStub.go +++ b/node/mock/poolsHolderStub.go @@ -55,8 +55,8 @@ func (phs *PoolsHolderStub) RewardTransactions() dataRetriever.ShardedDataCacher // IsInterfaceNil returns true if there is no value under the interface func (phs *PoolsHolderStub) IsInterfaceNil() bool { - if phs == nil { - return true - } - return false + if phs == nil { + return true + } + return false } diff --git a/process/block/preprocess/rewardTxPreProcessor.go b/process/block/preprocess/rewardTxPreProcessor.go index 0fd14766675..8166a91009e 100644 --- a/process/block/preprocess/rewardTxPreProcessor.go +++ b/process/block/preprocess/rewardTxPreProcessor.go @@ -1,460 +1,460 @@ package preprocess import ( - "fmt" - "time" - - "github.com/ElrondNetwork/elrond-go/data" - "github.com/ElrondNetwork/elrond-go/data/block" - "github.com/ElrondNetwork/elrond-go/data/rewardTx" - "github.com/ElrondNetwork/elrond-go/data/state" - "github.com/ElrondNetwork/elrond-go/dataRetriever" - "github.com/ElrondNetwork/elrond-go/hashing" - "github.com/ElrondNetwork/elrond-go/marshal" - "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/sharding" - "github.com/ElrondNetwork/elrond-go/storage" + "fmt" + "time" + + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/storage" ) type rewardTxPreprocessor struct { - *basePreProcess - chReceivedAllRewardTxs chan bool - onRequestRewardTx func(shardID uint32, txHashes [][]byte) - rewardTxsForBlock txsForBlock - rewardTxPool dataRetriever.ShardedDataCacherNotifier - storage dataRetriever.StorageService - rewardsProcessor process.RewardTransactionProcessor - accounts state.AccountsAdapter + *basePreProcess + chReceivedAllRewardTxs chan bool + onRequestRewardTx func(shardID uint32, txHashes [][]byte) + rewardTxsForBlock txsForBlock + rewardTxPool dataRetriever.ShardedDataCacherNotifier + storage dataRetriever.StorageService + rewardsProcessor process.RewardTransactionProcessor + accounts state.AccountsAdapter } // NewRewardTxPreprocessor creates a new reward transaction preprocessor object func NewRewardTxPreprocessor( - rewardTxDataPool dataRetriever.ShardedDataCacherNotifier, - store dataRetriever.StorageService, - hasher hashing.Hasher, - marshalizer marshal.Marshalizer, - rewardProcessor process.RewardTransactionProcessor, - shardCoordinator sharding.Coordinator, - accounts state.AccountsAdapter, - onRequestRewardTransaction func(shardID uint32, txHashes [][]byte), + rewardTxDataPool 
dataRetriever.ShardedDataCacherNotifier, + store dataRetriever.StorageService, + hasher hashing.Hasher, + marshalizer marshal.Marshalizer, + rewardProcessor process.RewardTransactionProcessor, + shardCoordinator sharding.Coordinator, + accounts state.AccountsAdapter, + onRequestRewardTransaction func(shardID uint32, txHashes [][]byte), ) (*rewardTxPreprocessor, error) { - if hasher == nil || hasher.IsInterfaceNil() { - return nil, process.ErrNilHasher - } - if marshalizer == nil || marshalizer.IsInterfaceNil() { - return nil, process.ErrNilMarshalizer - } - if rewardTxDataPool == nil || rewardTxDataPool.IsInterfaceNil() { - return nil, process.ErrNilRewardTxDataPool - } - if store == nil || store.IsInterfaceNil() { - return nil, process.ErrNilStorage - } - if rewardProcessor == nil || rewardProcessor.IsInterfaceNil() { - return nil, process.ErrNilTxProcessor - } - if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { - return nil, process.ErrNilShardCoordinator - } - if accounts == nil || accounts.IsInterfaceNil() { - return nil, process.ErrNilAccountsAdapter - } - if onRequestRewardTransaction == nil { - return nil, process.ErrNilRequestHandler - } - - bpp := &basePreProcess{ - hasher: hasher, - marshalizer: marshalizer, - shardCoordinator: shardCoordinator, - } - - rtp := &rewardTxPreprocessor{ - basePreProcess: bpp, - storage: store, - rewardTxPool: rewardTxDataPool, - onRequestRewardTx: onRequestRewardTransaction, - rewardsProcessor: rewardProcessor, - accounts: accounts, - } - - rtp.chReceivedAllRewardTxs = make(chan bool) - rtp.rewardTxPool.RegisterHandler(rtp.receivedRewardTransaction) - rtp.rewardTxsForBlock.txHashAndInfo = make(map[string]*txInfo) - - return rtp, nil + if hasher == nil || hasher.IsInterfaceNil() { + return nil, process.ErrNilHasher + } + if marshalizer == nil || marshalizer.IsInterfaceNil() { + return nil, process.ErrNilMarshalizer + } + if rewardTxDataPool == nil || rewardTxDataPool.IsInterfaceNil() { + return nil, process.ErrNilRewardTxDataPool + } + if store == nil || store.IsInterfaceNil() { + return nil, process.ErrNilStorage + } + if rewardProcessor == nil || rewardProcessor.IsInterfaceNil() { + return nil, process.ErrNilTxProcessor + } + if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { + return nil, process.ErrNilShardCoordinator + } + if accounts == nil || accounts.IsInterfaceNil() { + return nil, process.ErrNilAccountsAdapter + } + if onRequestRewardTransaction == nil { + return nil, process.ErrNilRequestHandler + } + + bpp := &basePreProcess{ + hasher: hasher, + marshalizer: marshalizer, + shardCoordinator: shardCoordinator, + } + + rtp := &rewardTxPreprocessor{ + basePreProcess: bpp, + storage: store, + rewardTxPool: rewardTxDataPool, + onRequestRewardTx: onRequestRewardTransaction, + rewardsProcessor: rewardProcessor, + accounts: accounts, + } + + rtp.chReceivedAllRewardTxs = make(chan bool) + rtp.rewardTxPool.RegisterHandler(rtp.receivedRewardTransaction) + rtp.rewardTxsForBlock.txHashAndInfo = make(map[string]*txInfo) + + return rtp, nil } // waitForRewardTxHashes waits for a call whether all the requested smartContractResults appeared func (rtp *rewardTxPreprocessor) waitForRewardTxHashes(waitTime time.Duration) error { - select { - case <-rtp.chReceivedAllRewardTxs: - return nil - case <-time.After(waitTime): - return process.ErrTimeIsOut - } + select { + case <-rtp.chReceivedAllRewardTxs: + return nil + case <-time.After(waitTime): + return process.ErrTimeIsOut + } } // IsDataPrepared returns non error if all the 
requested reward transactions arrived and were saved into the pool func (rtp *rewardTxPreprocessor) IsDataPrepared(requestedRewardTxs int, haveTime func() time.Duration) error { - if requestedRewardTxs > 0 { - log.Info(fmt.Sprintf("requested %d missing reward Txs\n", requestedRewardTxs)) - err := rtp.waitForRewardTxHashes(haveTime()) - rtp.rewardTxsForBlock.mutTxsForBlock.RLock() - missingRewardTxs := rtp.rewardTxsForBlock.missingTxs - rtp.rewardTxsForBlock.mutTxsForBlock.RUnlock() - log.Info(fmt.Sprintf("received %d missing reward Txs\n", requestedRewardTxs-missingRewardTxs)) - if err != nil { - return err - } - } - return nil + if requestedRewardTxs > 0 { + log.Info(fmt.Sprintf("requested %d missing reward Txs\n", requestedRewardTxs)) + err := rtp.waitForRewardTxHashes(haveTime()) + rtp.rewardTxsForBlock.mutTxsForBlock.RLock() + missingRewardTxs := rtp.rewardTxsForBlock.missingTxs + rtp.rewardTxsForBlock.mutTxsForBlock.RUnlock() + log.Info(fmt.Sprintf("received %d missing reward Txs\n", requestedRewardTxs-missingRewardTxs)) + if err != nil { + return err + } + } + return nil } // RemoveTxBlockFromPools removes reward transactions and miniblocks from associated pools func (rtp *rewardTxPreprocessor) RemoveTxBlockFromPools(body block.Body, miniBlockPool storage.Cacher) error { - if body == nil { - return process.ErrNilTxBlockBody - } + if body == nil { + return process.ErrNilTxBlockBody + } - return rtp.removeDataFromPools(body, miniBlockPool, rtp.rewardTxPool, block.RewardsBlock) + return rtp.removeDataFromPools(body, miniBlockPool, rtp.rewardTxPool, block.RewardsBlock) } // RestoreTxBlockIntoPools restores the reward transactions and miniblocks to associated pools func (rtp *rewardTxPreprocessor) RestoreTxBlockIntoPools( - body block.Body, - miniBlockPool storage.Cacher, + body block.Body, + miniBlockPool storage.Cacher, ) (int, map[int][]byte, error) { - if miniBlockPool == nil { - return 0, nil, process.ErrNilMiniBlockPool - } - - miniBlockHashes := make(map[int][]byte) - - rewardTxsRestored := 0 - for i := 0; i < len(body); i++ { - miniBlock := body[i] - if miniBlock.Type != block.RewardsBlock { - continue - } - - strCache := process.ShardCacherIdentifier(miniBlock.SenderShardID, miniBlock.ReceiverShardID) - rewardTxBuff, err := rtp.storage.GetAll(dataRetriever.RewardTransactionUnit, miniBlock.TxHashes) - if err != nil { - return rewardTxsRestored, miniBlockHashes, err - } - - for txHash, txBuff := range rewardTxBuff { - tx := rewardTx.RewardTx{} - err = rtp.marshalizer.Unmarshal(&tx, txBuff) - if err != nil { - return rewardTxsRestored, miniBlockHashes, err - } - - rtp.rewardTxPool.AddData([]byte(txHash), &tx, strCache) - } - - restoredHash, err := rtp.restoreMiniBlock(miniBlock, miniBlockPool) - if err != nil { - return rewardTxsRestored, miniBlockHashes, err - } - - miniBlockHashes[i] = restoredHash - rewardTxsRestored += len(miniBlock.TxHashes) - } - - return rewardTxsRestored, miniBlockHashes, nil + if miniBlockPool == nil { + return 0, nil, process.ErrNilMiniBlockPool + } + + miniBlockHashes := make(map[int][]byte) + + rewardTxsRestored := 0 + for i := 0; i < len(body); i++ { + miniBlock := body[i] + if miniBlock.Type != block.RewardsBlock { + continue + } + + strCache := process.ShardCacherIdentifier(miniBlock.SenderShardID, miniBlock.ReceiverShardID) + rewardTxBuff, err := rtp.storage.GetAll(dataRetriever.RewardTransactionUnit, miniBlock.TxHashes) + if err != nil { + return rewardTxsRestored, miniBlockHashes, err + } + + for txHash, txBuff := range rewardTxBuff { + tx := 
rewardTx.RewardTx{} + err = rtp.marshalizer.Unmarshal(&tx, txBuff) + if err != nil { + return rewardTxsRestored, miniBlockHashes, err + } + + rtp.rewardTxPool.AddData([]byte(txHash), &tx, strCache) + } + + restoredHash, err := rtp.restoreMiniBlock(miniBlock, miniBlockPool) + if err != nil { + return rewardTxsRestored, miniBlockHashes, err + } + + miniBlockHashes[i] = restoredHash + rewardTxsRestored += len(miniBlock.TxHashes) + } + + return rewardTxsRestored, miniBlockHashes, nil } // ProcessBlockTransactions processes all the reward transactions from the block.Body, updates the state func (rtp *rewardTxPreprocessor) ProcessBlockTransactions(body block.Body, round uint64, haveTime func() time.Duration) error { - // basic validation already done in interceptors - for i := 0; i < len(body); i++ { - miniBlock := body[i] - if miniBlock.Type != block.RewardsBlock { - continue - } - if miniBlock.ReceiverShardID != rtp.shardCoordinator.SelfId() { - continue - } - - for j := 0; j < len(miniBlock.TxHashes); j++ { - if haveTime() < 0 { - return process.ErrTimeIsOut - } - - txHash := miniBlock.TxHashes[j] - rtp.rewardTxsForBlock.mutTxsForBlock.RLock() - txInfo := rtp.rewardTxsForBlock.txHashAndInfo[string(txHash)] - rtp.rewardTxsForBlock.mutTxsForBlock.RUnlock() - if txInfo == nil || txInfo.tx == nil { - return process.ErrMissingTransaction - } - - rTx, ok := txInfo.tx.(*rewardTx.RewardTx) - if !ok { - return process.ErrWrongTypeAssertion - } - - err := rtp.processRewardTransaction( - txHash, - rTx, - round, - miniBlock.SenderShardID, - miniBlock.ReceiverShardID, - ) - if err != nil { - return err - } - } - } - return nil + // basic validation already done in interceptors + for i := 0; i < len(body); i++ { + miniBlock := body[i] + if miniBlock.Type != block.RewardsBlock { + continue + } + if miniBlock.ReceiverShardID != rtp.shardCoordinator.SelfId() { + continue + } + + for j := 0; j < len(miniBlock.TxHashes); j++ { + if haveTime() < 0 { + return process.ErrTimeIsOut + } + + txHash := miniBlock.TxHashes[j] + rtp.rewardTxsForBlock.mutTxsForBlock.RLock() + txInfo := rtp.rewardTxsForBlock.txHashAndInfo[string(txHash)] + rtp.rewardTxsForBlock.mutTxsForBlock.RUnlock() + if txInfo == nil || txInfo.tx == nil { + return process.ErrMissingTransaction + } + + rTx, ok := txInfo.tx.(*rewardTx.RewardTx) + if !ok { + return process.ErrWrongTypeAssertion + } + + err := rtp.processRewardTransaction( + txHash, + rTx, + round, + miniBlock.SenderShardID, + miniBlock.ReceiverShardID, + ) + if err != nil { + return err + } + } + } + return nil } // SaveTxBlockToStorage saves the reward transactions from body into storage func (rtp *rewardTxPreprocessor) SaveTxBlockToStorage(body block.Body) error { - for i := 0; i < len(body); i++ { - miniBlock := (body)[i] - if miniBlock.Type != block.RewardsBlock || miniBlock.ReceiverShardID != rtp.shardCoordinator.SelfId() { - continue - } - - err := rtp.saveTxsToStorage( - miniBlock.TxHashes, - &rtp.rewardTxsForBlock, - rtp.storage, - dataRetriever.RewardTransactionUnit, - ) - if err != nil { - return err - } - } - - return nil + for i := 0; i < len(body); i++ { + miniBlock := (body)[i] + if miniBlock.Type != block.RewardsBlock || miniBlock.ReceiverShardID != rtp.shardCoordinator.SelfId() { + continue + } + + err := rtp.saveTxsToStorage( + miniBlock.TxHashes, + &rtp.rewardTxsForBlock, + rtp.storage, + dataRetriever.RewardTransactionUnit, + ) + if err != nil { + return err + } + } + + return nil } // receivedRewardTransaction is a callback function called when a new reward transaction 
// is added in the reward transactions pool func (rtp *rewardTxPreprocessor) receivedRewardTransaction(txHash []byte) { - receivedAllMissing := rtp.baseReceivedTransaction(txHash, &rtp.rewardTxsForBlock, rtp.rewardTxPool) + receivedAllMissing := rtp.baseReceivedTransaction(txHash, &rtp.rewardTxsForBlock, rtp.rewardTxPool) - if receivedAllMissing { - rtp.chReceivedAllRewardTxs <- true - } + if receivedAllMissing { + rtp.chReceivedAllRewardTxs <- true + } } // CreateBlockStarted cleans the local cache map for processed/created reward transactions at this round func (rtp *rewardTxPreprocessor) CreateBlockStarted() { - rtp.rewardTxsForBlock.mutTxsForBlock.Lock() - rtp.rewardTxsForBlock.txHashAndInfo = make(map[string]*txInfo) - rtp.rewardTxsForBlock.mutTxsForBlock.Unlock() + rtp.rewardTxsForBlock.mutTxsForBlock.Lock() + rtp.rewardTxsForBlock.txHashAndInfo = make(map[string]*txInfo) + rtp.rewardTxsForBlock.mutTxsForBlock.Unlock() } // RequestBlockTransactions request for reward transactions if missing from a block.Body func (rtp *rewardTxPreprocessor) RequestBlockTransactions(body block.Body) int { - requestedRewardTxs := 0 - missingRewardTxsForShards := rtp.computeMissingAndExistingRewardTxsForShards(body) - - rtp.rewardTxsForBlock.mutTxsForBlock.Lock() - for senderShardID, rewardTxHashesInfo := range missingRewardTxsForShards { - txShardInfo := &txShardInfo{senderShardID: senderShardID, receiverShardID: rewardTxHashesInfo.receiverShardID} - for _, txHash := range rewardTxHashesInfo.txHashes { - rtp.rewardTxsForBlock.txHashAndInfo[string(txHash)] = &txInfo{tx: nil, txShardInfo: txShardInfo} - } - } - rtp.rewardTxsForBlock.mutTxsForBlock.Unlock() - - for senderShardID, scrHashesInfo := range missingRewardTxsForShards { - requestedRewardTxs += len(scrHashesInfo.txHashes) - rtp.onRequestRewardTx(senderShardID, scrHashesInfo.txHashes) - } - - return requestedRewardTxs + requestedRewardTxs := 0 + missingRewardTxsForShards := rtp.computeMissingAndExistingRewardTxsForShards(body) + + rtp.rewardTxsForBlock.mutTxsForBlock.Lock() + for senderShardID, rewardTxHashesInfo := range missingRewardTxsForShards { + txShardInfo := &txShardInfo{senderShardID: senderShardID, receiverShardID: rewardTxHashesInfo.receiverShardID} + for _, txHash := range rewardTxHashesInfo.txHashes { + rtp.rewardTxsForBlock.txHashAndInfo[string(txHash)] = &txInfo{tx: nil, txShardInfo: txShardInfo} + } + } + rtp.rewardTxsForBlock.mutTxsForBlock.Unlock() + + for senderShardID, scrHashesInfo := range missingRewardTxsForShards { + requestedRewardTxs += len(scrHashesInfo.txHashes) + rtp.onRequestRewardTx(senderShardID, scrHashesInfo.txHashes) + } + + return requestedRewardTxs } // computeMissingAndExistingRewardTxsForShards calculates what reward transactions are available and what are missing // from block.Body func (rtp *rewardTxPreprocessor) computeMissingAndExistingRewardTxsForShards(body block.Body) map[uint32]*txsHashesInfo { - rewardTxsFromOthersBody := block.Body{} - for _, mb := range body { - if mb.Type != block.RewardsBlock { - continue - } - if mb.SenderShardID == rtp.shardCoordinator.SelfId() { - continue - } - - rewardTxsFromOthersBody = append(rewardTxsFromOthersBody, mb) - } - - missingTxsForShard := rtp.computeExistingAndMissing( - rewardTxsFromOthersBody, - &rtp.rewardTxsForBlock, - rtp.chReceivedAllRewardTxs, - block.RewardsBlock, - rtp.rewardTxPool, - ) - - return missingTxsForShard + rewardTxsFromOthersBody := block.Body{} + for _, mb := range body { + if mb.Type != block.RewardsBlock { + continue + } + if 
mb.SenderShardID == rtp.shardCoordinator.SelfId() { + continue + } + + rewardTxsFromOthersBody = append(rewardTxsFromOthersBody, mb) + } + + missingTxsForShard := rtp.computeExistingAndMissing( + rewardTxsFromOthersBody, + &rtp.rewardTxsForBlock, + rtp.chReceivedAllRewardTxs, + block.RewardsBlock, + rtp.rewardTxPool, + ) + + return missingTxsForShard } // processRewardTransaction processes a reward transaction, if the transactions has an error it removes it from pool func (rtp *rewardTxPreprocessor) processRewardTransaction( - rewardTxHash []byte, - rewardTx *rewardTx.RewardTx, - round uint64, - sndShardId uint32, - dstShardId uint32, + rewardTxHash []byte, + rewardTx *rewardTx.RewardTx, + round uint64, + sndShardId uint32, + dstShardId uint32, ) error { - err := rtp.rewardsProcessor.ProcessRewardTransaction(rewardTx) - if err != nil { - return err - } + err := rtp.rewardsProcessor.ProcessRewardTransaction(rewardTx) + if err != nil { + return err + } - txShardInfo := &txShardInfo{senderShardID: sndShardId, receiverShardID: dstShardId} - rtp.rewardTxsForBlock.mutTxsForBlock.Lock() - rtp.rewardTxsForBlock.txHashAndInfo[string(rewardTxHash)] = &txInfo{tx: rewardTx, txShardInfo: txShardInfo} - rtp.rewardTxsForBlock.mutTxsForBlock.Unlock() + txShardInfo := &txShardInfo{senderShardID: sndShardId, receiverShardID: dstShardId} + rtp.rewardTxsForBlock.mutTxsForBlock.Lock() + rtp.rewardTxsForBlock.txHashAndInfo[string(rewardTxHash)] = &txInfo{tx: rewardTx, txShardInfo: txShardInfo} + rtp.rewardTxsForBlock.mutTxsForBlock.Unlock() - return nil + return nil } // RequestTransactionsForMiniBlock requests missing reward transactions for a certain miniblock func (rtp *rewardTxPreprocessor) RequestTransactionsForMiniBlock(mb block.MiniBlock) int { - missingRewardTxsForMiniBlock := rtp.computeMissingRewardTxsForMiniBlock(mb) - rtp.onRequestRewardTx(mb.SenderShardID, missingRewardTxsForMiniBlock) + missingRewardTxsForMiniBlock := rtp.computeMissingRewardTxsForMiniBlock(mb) + rtp.onRequestRewardTx(mb.SenderShardID, missingRewardTxsForMiniBlock) - return len(missingRewardTxsForMiniBlock) + return len(missingRewardTxsForMiniBlock) } // computeMissingRewardTxsForMiniBlock computes missing reward transactions for a certain miniblock func (rtp *rewardTxPreprocessor) computeMissingRewardTxsForMiniBlock(mb block.MiniBlock) [][]byte { - missingRewardTxs := make([][]byte, 0) - if mb.Type != block.RewardsBlock { - return missingRewardTxs - } - - for _, txHash := range mb.TxHashes { - tx, _ := process.GetTransactionHandlerFromPool( - mb.SenderShardID, - mb.ReceiverShardID, - txHash, - rtp.rewardTxPool, - ) - - if tx == nil { - missingRewardTxs = append(missingRewardTxs, txHash) - } - } - - return missingRewardTxs + missingRewardTxs := make([][]byte, 0) + if mb.Type != block.RewardsBlock { + return missingRewardTxs + } + + for _, txHash := range mb.TxHashes { + tx, _ := process.GetTransactionHandlerFromPool( + mb.SenderShardID, + mb.ReceiverShardID, + txHash, + rtp.rewardTxPool, + ) + + if tx == nil { + missingRewardTxs = append(missingRewardTxs, txHash) + } + } + + return missingRewardTxs } // getAllRewardTxsFromMiniBlock gets all the reward transactions from a miniblock into a new structure func (rtp *rewardTxPreprocessor) getAllRewardTxsFromMiniBlock( - mb *block.MiniBlock, - haveTime func() bool, + mb *block.MiniBlock, + haveTime func() bool, ) ([]*rewardTx.RewardTx, [][]byte, error) { - strCache := process.ShardCacherIdentifier(mb.SenderShardID, mb.ReceiverShardID) - txCache := 
rtp.rewardTxPool.ShardDataStore(strCache) - if txCache == nil { - return nil, nil, process.ErrNilRewardTxDataPool - } - - // verify if all reward transactions exists - rewardTxs := make([]*rewardTx.RewardTx, 0) - txHashes := make([][]byte, 0) - for _, txHash := range mb.TxHashes { - if !haveTime() { - return nil, nil, process.ErrTimeIsOut - } - - tmp, ok := txCache.Peek(txHash) - if !ok { - return nil, nil, process.ErrNilRewardTransaction - } - - tx, ok := tmp.(*rewardTx.RewardTx) - if !ok { - return nil, nil, process.ErrWrongTypeAssertion - } - - txHashes = append(txHashes, txHash) - rewardTxs = append(rewardTxs, tx) - } - - return rewardTxs, txHashes, nil + strCache := process.ShardCacherIdentifier(mb.SenderShardID, mb.ReceiverShardID) + txCache := rtp.rewardTxPool.ShardDataStore(strCache) + if txCache == nil { + return nil, nil, process.ErrNilRewardTxDataPool + } + + // verify if all reward transactions exists + rewardTxs := make([]*rewardTx.RewardTx, 0) + txHashes := make([][]byte, 0) + for _, txHash := range mb.TxHashes { + if !haveTime() { + return nil, nil, process.ErrTimeIsOut + } + + tmp, ok := txCache.Peek(txHash) + if !ok { + return nil, nil, process.ErrNilRewardTransaction + } + + tx, ok := tmp.(*rewardTx.RewardTx) + if !ok { + return nil, nil, process.ErrWrongTypeAssertion + } + + txHashes = append(txHashes, txHash) + rewardTxs = append(rewardTxs, tx) + } + + return rewardTxs, txHashes, nil } // CreateAndProcessMiniBlock creates the miniblock from storage and processes the smartContractResults added into the miniblock func (rtp *rewardTxPreprocessor) CreateAndProcessMiniBlock(sndShardId, dstShardId uint32, spaceRemained int, haveTime func() bool, round uint64) (*block.MiniBlock, error) { - return nil, nil + return nil, nil } // ProcessMiniBlock processes all the reward transactions from a miniblock and saves the processed reward transactions // in local cache func (rtp *rewardTxPreprocessor) ProcessMiniBlock(miniBlock *block.MiniBlock, haveTime func() bool, round uint64) error { - if miniBlock.Type != block.RewardsBlock { - return process.ErrWrongTypeInMiniBlock - } - - miniBlockRewardTxs, miniBlockTxHashes, err := rtp.getAllRewardTxsFromMiniBlock(miniBlock, haveTime) - if err != nil { - return err - } - - for index := range miniBlockRewardTxs { - if !haveTime() { - return process.ErrTimeIsOut - } - - err = rtp.rewardsProcessor.ProcessRewardTransaction(miniBlockRewardTxs[index]) - if err != nil { - return err - } - } - - txShardInfo := &txShardInfo{senderShardID: miniBlock.SenderShardID, receiverShardID: miniBlock.ReceiverShardID} - - rtp.rewardTxsForBlock.mutTxsForBlock.Lock() - for index, txHash := range miniBlockTxHashes { - rtp.rewardTxsForBlock.txHashAndInfo[string(txHash)] = &txInfo{tx: miniBlockRewardTxs[index], txShardInfo: txShardInfo} - } - rtp.rewardTxsForBlock.mutTxsForBlock.Unlock() - - return nil + if miniBlock.Type != block.RewardsBlock { + return process.ErrWrongTypeInMiniBlock + } + + miniBlockRewardTxs, miniBlockTxHashes, err := rtp.getAllRewardTxsFromMiniBlock(miniBlock, haveTime) + if err != nil { + return err + } + + for index := range miniBlockRewardTxs { + if !haveTime() { + return process.ErrTimeIsOut + } + + err = rtp.rewardsProcessor.ProcessRewardTransaction(miniBlockRewardTxs[index]) + if err != nil { + return err + } + } + + txShardInfo := &txShardInfo{senderShardID: miniBlock.SenderShardID, receiverShardID: miniBlock.ReceiverShardID} + + rtp.rewardTxsForBlock.mutTxsForBlock.Lock() + for index, txHash := range miniBlockTxHashes { + 
rtp.rewardTxsForBlock.txHashAndInfo[string(txHash)] = &txInfo{tx: miniBlockRewardTxs[index], txShardInfo: txShardInfo} + } + rtp.rewardTxsForBlock.mutTxsForBlock.Unlock() + + return nil } // CreateMarshalizedData marshalizes reward transaction hashes and and saves them into a new structure func (rtp *rewardTxPreprocessor) CreateMarshalizedData(txHashes [][]byte) ([][]byte, error) { - marshaledRewardTxs, err := rtp.createMarshalizedData(txHashes, &rtp.rewardTxsForBlock) - if err != nil { - return nil, err - } + marshaledRewardTxs, err := rtp.createMarshalizedData(txHashes, &rtp.rewardTxsForBlock) + if err != nil { + return nil, err + } - return marshaledRewardTxs, nil + return marshaledRewardTxs, nil } // GetAllCurrentUsedTxs returns all the reward transactions used at current creation / processing func (rtp *rewardTxPreprocessor) GetAllCurrentUsedTxs() map[string]data.TransactionHandler { - rewardTxPool := make(map[string]data.TransactionHandler) + rewardTxPool := make(map[string]data.TransactionHandler) - rtp.rewardTxsForBlock.mutTxsForBlock.RLock() - for txHash, txInfo := range rtp.rewardTxsForBlock.txHashAndInfo { - rewardTxPool[txHash] = txInfo.tx - } - rtp.rewardTxsForBlock.mutTxsForBlock.RUnlock() + rtp.rewardTxsForBlock.mutTxsForBlock.RLock() + for txHash, txInfo := range rtp.rewardTxsForBlock.txHashAndInfo { + rewardTxPool[txHash] = txInfo.tx + } + rtp.rewardTxsForBlock.mutTxsForBlock.RUnlock() - return rewardTxPool + return rewardTxPool } // IsInterfaceNil returns true if there is no value under the interface func (rtp *rewardTxPreprocessor) IsInterfaceNil() bool { - if rtp == nil { - return true - } - return false + if rtp == nil { + return true + } + return false } diff --git a/process/block/preprocess/rewardsHandler.go b/process/block/preprocess/rewardsHandler.go index ebccc90bec7..d000d5e8a2f 100644 --- a/process/block/preprocess/rewardsHandler.go +++ b/process/block/preprocess/rewardsHandler.go @@ -1,17 +1,17 @@ package preprocess import ( - "math/big" - "sync" - - "github.com/ElrondNetwork/elrond-go/core" - "github.com/ElrondNetwork/elrond-go/data" - "github.com/ElrondNetwork/elrond-go/data/block" - "github.com/ElrondNetwork/elrond-go/data/rewardTx" - "github.com/ElrondNetwork/elrond-go/hashing" - "github.com/ElrondNetwork/elrond-go/marshal" - "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/sharding" + "math/big" + "sync" + + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/sharding" ) const communityPercentage = 0.1 // 1 = 100%, 0 = 0% @@ -22,177 +22,177 @@ const burnPercentage = 0.5 // 1 = 100%, 0 = 0% var rewardValue = big.NewInt(1000) type rewardsHandler struct { - address process.SpecialAddressHandler - shardCoordinator sharding.Coordinator - hasher hashing.Hasher - marshalizer marshal.Marshalizer - mut sync.Mutex - accumulatedFees *big.Int - - rewardTxsFromBlock map[string]*rewardTx.RewardTx + address process.SpecialAddressHandler + shardCoordinator sharding.Coordinator + hasher hashing.Hasher + marshalizer marshal.Marshalizer + mut sync.Mutex + accumulatedFees *big.Int + + rewardTxsFromBlock map[string]*rewardTx.RewardTx } // NewRewardTxHandler constructor for the reward transaction handler func NewRewardTxHandler( - 
address process.SpecialAddressHandler, - shardCoordinator sharding.Coordinator, - hasher hashing.Hasher, - marshalizer marshal.Marshalizer, + address process.SpecialAddressHandler, + shardCoordinator sharding.Coordinator, + hasher hashing.Hasher, + marshalizer marshal.Marshalizer, ) (*rewardsHandler, error) { - if address == nil || address.IsInterfaceNil() { - return nil, process.ErrNilSpecialAddressHandler - } - if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { - return nil, process.ErrNilShardCoordinator - } - if hasher == nil || hasher.IsInterfaceNil() { - return nil, process.ErrNilHasher - } - if marshalizer == nil || marshalizer.IsInterfaceNil() { - return nil, process.ErrNilMarshalizer - } - - rtxh := &rewardsHandler{ - address: address, - shardCoordinator: shardCoordinator, - hasher: hasher, - marshalizer: marshalizer, - } - rtxh.accumulatedFees = big.NewInt(0) - rtxh.rewardTxsFromBlock = make(map[string]*rewardTx.RewardTx) - - return rtxh, nil + if address == nil || address.IsInterfaceNil() { + return nil, process.ErrNilSpecialAddressHandler + } + if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { + return nil, process.ErrNilShardCoordinator + } + if hasher == nil || hasher.IsInterfaceNil() { + return nil, process.ErrNilHasher + } + if marshalizer == nil || marshalizer.IsInterfaceNil() { + return nil, process.ErrNilMarshalizer + } + + rtxh := &rewardsHandler{ + address: address, + shardCoordinator: shardCoordinator, + hasher: hasher, + marshalizer: marshalizer, + } + rtxh.accumulatedFees = big.NewInt(0) + rtxh.rewardTxsFromBlock = make(map[string]*rewardTx.RewardTx) + + return rtxh, nil } // SaveCurrentIntermediateTxToStorage saves current cached data into storage - already saved for txs func (rtxh *rewardsHandler) SaveCurrentIntermediateTxToStorage() error { - //TODO implement me - save only created accumulatedFees - return nil + //TODO implement me - save only created accumulatedFees + return nil } // AddIntermediateTransactions adds intermediate transactions to local cache func (rtxh *rewardsHandler) AddIntermediateTransactions(txs []data.TransactionHandler) error { - return nil + return nil } // CreateAllInterMiniBlocks creates miniblocks from process transactions func (rtxh *rewardsHandler) CreateAllInterMiniBlocks() map[uint32]*block.MiniBlock { - calculatedRewardTxs := rtxh.CreateAllUTxs() - - miniBlocks := make(map[uint32]*block.MiniBlock) - for _, rTx := range calculatedRewardTxs { - dstShId, err := rtxh.address.ShardIdForAddress(rTx.GetRecvAddress()) - if err != nil { - log.Debug(err.Error()) - continue - } - - txHash, err := core.CalculateHash(rtxh.marshalizer, rtxh.hasher, rTx) - if err != nil { - log.Debug(err.Error()) - continue - } - - var ok bool - var mb *block.MiniBlock - if mb, ok = miniBlocks[dstShId]; !ok { - mb = &block.MiniBlock{ - ReceiverShardID: dstShId, - SenderShardID: rtxh.shardCoordinator.SelfId(), - Type: block.RewardsBlock, - } - } - - mb.TxHashes = append(mb.TxHashes, txHash) - miniBlocks[dstShId] = mb - } - - return miniBlocks + calculatedRewardTxs := rtxh.CreateAllUTxs() + + miniBlocks := make(map[uint32]*block.MiniBlock) + for _, rTx := range calculatedRewardTxs { + dstShId, err := rtxh.address.ShardIdForAddress(rTx.GetRecvAddress()) + if err != nil { + log.Debug(err.Error()) + continue + } + + txHash, err := core.CalculateHash(rtxh.marshalizer, rtxh.hasher, rTx) + if err != nil { + log.Debug(err.Error()) + continue + } + + var ok bool + var mb *block.MiniBlock + if mb, ok = miniBlocks[dstShId]; !ok { + mb = 
&block.MiniBlock{ + ReceiverShardID: dstShId, + SenderShardID: rtxh.shardCoordinator.SelfId(), + Type: block.RewardsBlock, + } + } + + mb.TxHashes = append(mb.TxHashes, txHash) + miniBlocks[dstShId] = mb + } + + return miniBlocks } // VerifyInterMiniBlocks verifies if transaction fees were correctly handled for the block func (rtxh *rewardsHandler) VerifyInterMiniBlocks(body block.Body) error { - err := rtxh.VerifyCreatedUTxs() - rtxh.CleanProcessedUTxs() + err := rtxh.VerifyCreatedUTxs() + rtxh.CleanProcessedUTxs() - return err + return err } // CreateBlockStarted does the cleanup before creating a new block func (rtxh *rewardsHandler) CreateBlockStarted() { - rtxh.CleanProcessedUTxs() + rtxh.CleanProcessedUTxs() } // CleanProcessedUTxs deletes the cached data func (rtxh *rewardsHandler) CleanProcessedUTxs() { - rtxh.mut.Lock() - rtxh.accumulatedFees = big.NewInt(0) - rtxh.rewardTxsFromBlock = make(map[string]*rewardTx.RewardTx) - rtxh.mut.Unlock() + rtxh.mut.Lock() + rtxh.accumulatedFees = big.NewInt(0) + rtxh.rewardTxsFromBlock = make(map[string]*rewardTx.RewardTx) + rtxh.mut.Unlock() } // AddRewardTxFromBlock adds an existing reward transaction from block into local cache func (rtxh *rewardsHandler) AddRewardTxFromBlock(tx data.TransactionHandler) { - currRewardTx, ok := tx.(*rewardTx.RewardTx) - if !ok { - log.Error(process.ErrWrongTypeAssertion.Error()) - return - } - - rtxh.mut.Lock() - rtxh.rewardTxsFromBlock[string(tx.GetRecvAddress())] = currRewardTx - rtxh.mut.Unlock() + currRewardTx, ok := tx.(*rewardTx.RewardTx) + if !ok { + log.Error(process.ErrWrongTypeAssertion.Error()) + return + } + + rtxh.mut.Lock() + rtxh.rewardTxsFromBlock[string(tx.GetRecvAddress())] = currRewardTx + rtxh.mut.Unlock() } // ProcessTransactionFee adds the tx cost to the accumulated amount func (rtxh *rewardsHandler) ProcessTransactionFee(cost *big.Int) { - if cost == nil { - log.Debug(process.ErrNilValue.Error()) - return - } - - rtxh.mut.Lock() - rtxh.accumulatedFees = rtxh.accumulatedFees.Add(rtxh.accumulatedFees, cost) - rtxh.mut.Unlock() + if cost == nil { + log.Debug(process.ErrNilValue.Error()) + return + } + + rtxh.mut.Lock() + rtxh.accumulatedFees = rtxh.accumulatedFees.Add(rtxh.accumulatedFees, cost) + rtxh.mut.Unlock() } func getPercentageOfValue(value *big.Int, percentage float64) *big.Int { - x := new(big.Float).SetInt(value) - y := big.NewFloat(percentage) + x := new(big.Float).SetInt(value) + y := big.NewFloat(percentage) - z := new(big.Float).Mul(x, y) + z := new(big.Float).Mul(x, y) - op := big.NewInt(0) - result, _ := z.Int(op) + op := big.NewInt(0) + result, _ := z.Int(op) - return result + return result } func (rtxh *rewardsHandler) createLeaderTx() *rewardTx.RewardTx { - currTx := &rewardTx.RewardTx{} + currTx := &rewardTx.RewardTx{} - currTx.Value = getPercentageOfValue(rtxh.accumulatedFees, leaderPercentage) - currTx.RcvAddr = rtxh.address.LeaderAddress() + currTx.Value = getPercentageOfValue(rtxh.accumulatedFees, leaderPercentage) + currTx.RcvAddr = rtxh.address.LeaderAddress() - return currTx + return currTx } func (rtxh *rewardsHandler) createBurnTx() *rewardTx.RewardTx { - currTx := &rewardTx.RewardTx{} + currTx := &rewardTx.RewardTx{} - currTx.Value = getPercentageOfValue(rtxh.accumulatedFees, burnPercentage) - currTx.RcvAddr = rtxh.address.BurnAddress() + currTx.Value = getPercentageOfValue(rtxh.accumulatedFees, burnPercentage) + currTx.RcvAddr = rtxh.address.BurnAddress() - return currTx + return currTx } func (rtxh *rewardsHandler) createCommunityTx() *rewardTx.RewardTx { - 
currTx := &rewardTx.RewardTx{} + currTx := &rewardTx.RewardTx{} - currTx.Value = getPercentageOfValue(rtxh.accumulatedFees, communityPercentage) - currTx.RcvAddr = rtxh.address.ElrondCommunityAddress() + currTx.Value = getPercentageOfValue(rtxh.accumulatedFees, communityPercentage) + currTx.RcvAddr = rtxh.address.ElrondCommunityAddress() - return currTx + return currTx } // CreateAllUTxs creates all the needed reward transactions @@ -200,120 +200,120 @@ func (rtxh *rewardsHandler) createCommunityTx() *rewardTx.RewardTx { // to Elrond community fund. Fixed rewards for every validator in consensus are generated by the system. func (rtxh *rewardsHandler) CreateAllUTxs() []data.TransactionHandler { - rewardTxs := make([]data.TransactionHandler, 0) - rewardsFromFees := rtxh.createRewardTxsFromFee() - rewardsForConsensus := rtxh.createRewardTxsForConsensusGroup() + rewardTxs := make([]data.TransactionHandler, 0) + rewardsFromFees := rtxh.createRewardTxsFromFee() + rewardsForConsensus := rtxh.createRewardTxsForConsensusGroup() - rewardTxs = append(rewardTxs, rewardsFromFees...) - rewardTxs = append(rewardTxs, rewardsForConsensus...) + rewardTxs = append(rewardTxs, rewardsFromFees...) + rewardTxs = append(rewardTxs, rewardsForConsensus...) - return rewardTxs + return rewardTxs } func (rtxh *rewardsHandler) createRewardTxsFromFee() []data.TransactionHandler { - rtxh.mut.Lock() - defer rtxh.mut.Unlock() + rtxh.mut.Lock() + defer rtxh.mut.Unlock() - if rtxh.accumulatedFees.Cmp(big.NewInt(1)) < 0 { - rtxh.accumulatedFees = big.NewInt(0) - return nil - } + if rtxh.accumulatedFees.Cmp(big.NewInt(1)) < 0 { + rtxh.accumulatedFees = big.NewInt(0) + return nil + } - leaderTx := rtxh.createLeaderTx() - communityTx := rtxh.createCommunityTx() - burnTx := rtxh.createBurnTx() + leaderTx := rtxh.createLeaderTx() + communityTx := rtxh.createCommunityTx() + burnTx := rtxh.createBurnTx() - currFeeTxs := make([]data.TransactionHandler, 0) - currFeeTxs = append(currFeeTxs, leaderTx, communityTx, burnTx) + currFeeTxs := make([]data.TransactionHandler, 0) + currFeeTxs = append(currFeeTxs, leaderTx, communityTx, burnTx) - rtxh.accumulatedFees = big.NewInt(0) + rtxh.accumulatedFees = big.NewInt(0) - return currFeeTxs + return currFeeTxs } func (rtxh *rewardsHandler) createRewardTxsForConsensusGroup() []data.TransactionHandler { - consensusRewardAddresses := rtxh.address.ConsensusRewardAddresses() + consensusRewardAddresses := rtxh.address.ConsensusRewardAddresses() - consensusRewardTxs := make([]data.TransactionHandler, 0) - for _, address := range consensusRewardAddresses { - rTx := &rewardTx.RewardTx{} - rTx.Value = rewardValue - rTx.RcvAddr = []byte(address) + consensusRewardTxs := make([]data.TransactionHandler, 0) + for _, address := range consensusRewardAddresses { + rTx := &rewardTx.RewardTx{} + rTx.Value = rewardValue + rTx.RcvAddr = []byte(address) - consensusRewardTxs = append(consensusRewardTxs, rTx) - } + consensusRewardTxs = append(consensusRewardTxs, rTx) + } - return consensusRewardTxs + return consensusRewardTxs } // VerifyCreatedUTxs creates all fee txs from added values, than verifies if in block the values are the same func (rtxh *rewardsHandler) VerifyCreatedUTxs() error { - calculatedFeeTxs := rtxh.CreateAllUTxs() - - rtxh.mut.Lock() - defer rtxh.mut.Unlock() - - totalFeesFromBlock := big.NewInt(0) - for _, value := range rtxh.rewardTxsFromBlock { - totalFeesFromBlock = totalFeesFromBlock.Add(totalFeesFromBlock, value.Value) - } - - totalCalculatedFees := big.NewInt(0) - for _, value := range 
calculatedFeeTxs { - totalCalculatedFees = totalCalculatedFees.Add(totalCalculatedFees, value.GetValue()) - - txFromBlock, ok := rtxh.rewardTxsFromBlock[string(value.GetRecvAddress())] - if !ok { - return process.ErrTxsFeesNotFound - } - if txFromBlock.Value.Cmp(value.GetValue()) != 0 { - return process.ErrTxsFeesDoNotMatch - } - } - - if totalCalculatedFees.Cmp(totalFeesFromBlock) != 0 { - return process.ErrTotalTxsFeesDoNotMatch - } - - return nil + calculatedFeeTxs := rtxh.CreateAllUTxs() + + rtxh.mut.Lock() + defer rtxh.mut.Unlock() + + totalFeesFromBlock := big.NewInt(0) + for _, value := range rtxh.rewardTxsFromBlock { + totalFeesFromBlock = totalFeesFromBlock.Add(totalFeesFromBlock, value.Value) + } + + totalCalculatedFees := big.NewInt(0) + for _, value := range calculatedFeeTxs { + totalCalculatedFees = totalCalculatedFees.Add(totalCalculatedFees, value.GetValue()) + + txFromBlock, ok := rtxh.rewardTxsFromBlock[string(value.GetRecvAddress())] + if !ok { + return process.ErrTxsFeesNotFound + } + if txFromBlock.Value.Cmp(value.GetValue()) != 0 { + return process.ErrTxsFeesDoNotMatch + } + } + + if totalCalculatedFees.Cmp(totalFeesFromBlock) != 0 { + return process.ErrTotalTxsFeesDoNotMatch + } + + return nil } // CreateMarshalizedData creates the marshalized data for broadcasting purposes func (rtxh *rewardsHandler) CreateMarshalizedData(txHashes [][]byte) ([][]byte, error) { - // TODO: implement me + // TODO: implement me - return make([][]byte, 0), nil + return make([][]byte, 0), nil } // GetAllCurrentFinishedTxs returns the cached finalized transactions for current round func (rtxh *rewardsHandler) GetAllCurrentFinishedTxs() map[string]data.TransactionHandler { - rtxh.mut.Lock() - - rewardTxPool := make(map[string]data.TransactionHandler) - for txHash, txInfo := range rtxh.rewardTxsFromBlock { - - senderShard := txInfo.ShardId - receiverShard, err := rtxh.address.ShardIdForAddress(txInfo.RcvAddr) - if err != nil { - continue - } - if receiverShard != rtxh.shardCoordinator.SelfId() { - continue - } - if senderShard != rtxh.shardCoordinator.SelfId() { - continue - } - rewardTxPool[txHash] = txInfo - } - rtxh.mut.Unlock() - - return rewardTxPool + rtxh.mut.Lock() + + rewardTxPool := make(map[string]data.TransactionHandler) + for txHash, txInfo := range rtxh.rewardTxsFromBlock { + + senderShard := txInfo.ShardId + receiverShard, err := rtxh.address.ShardIdForAddress(txInfo.RcvAddr) + if err != nil { + continue + } + if receiverShard != rtxh.shardCoordinator.SelfId() { + continue + } + if senderShard != rtxh.shardCoordinator.SelfId() { + continue + } + rewardTxPool[txHash] = txInfo + } + rtxh.mut.Unlock() + + return rewardTxPool } // IsInterfaceNil returns true if there is no value under the interface func (rtxh *rewardsHandler) IsInterfaceNil() bool { - if rtxh == nil { - return true - } - return false + if rtxh == nil { + return true + } + return false } diff --git a/process/block/shardblock_test.go b/process/block/shardblock_test.go index f3181f31331..d0042755f08 100644 --- a/process/block/shardblock_test.go +++ b/process/block/shardblock_test.go @@ -1,4887 +1,4887 @@ package block_test import ( - "bytes" - "errors" - "fmt" - "reflect" - "sync" - "sync/atomic" - "testing" - "time" - - "github.com/ElrondNetwork/elrond-go/core" - "github.com/ElrondNetwork/elrond-go/core/indexer" - "github.com/ElrondNetwork/elrond-go/data" - "github.com/ElrondNetwork/elrond-go/data/block" - "github.com/ElrondNetwork/elrond-go/data/blockchain" - 
"github.com/ElrondNetwork/elrond-go/data/smartContractResult" - "github.com/ElrondNetwork/elrond-go/data/transaction" - "github.com/ElrondNetwork/elrond-go/dataRetriever" - "github.com/ElrondNetwork/elrond-go/hashing" - "github.com/ElrondNetwork/elrond-go/integrationTests" - "github.com/ElrondNetwork/elrond-go/marshal" - "github.com/ElrondNetwork/elrond-go/process" - blproc "github.com/ElrondNetwork/elrond-go/process/block" - "github.com/ElrondNetwork/elrond-go/process/coordinator" - "github.com/ElrondNetwork/elrond-go/process/factory/shard" - "github.com/ElrondNetwork/elrond-go/process/mock" - "github.com/ElrondNetwork/elrond-go/sharding" - "github.com/ElrondNetwork/elrond-go/storage" - "github.com/stretchr/testify/assert" + "bytes" + "errors" + "fmt" + "reflect" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/core/indexer" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/blockchain" + "github.com/ElrondNetwork/elrond-go/data/smartContractResult" + "github.com/ElrondNetwork/elrond-go/data/transaction" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/integrationTests" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/process" + blproc "github.com/ElrondNetwork/elrond-go/process/block" + "github.com/ElrondNetwork/elrond-go/process/coordinator" + "github.com/ElrondNetwork/elrond-go/process/factory/shard" + "github.com/ElrondNetwork/elrond-go/process/mock" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/storage" + "github.com/stretchr/testify/assert" ) //------- NewShardProcessor func initAccountsMock() *mock.AccountsStub { - rootHashCalled := func() ([]byte, error) { - return []byte("rootHash"), nil - } - return &mock.AccountsStub{ - RootHashCalled: rootHashCalled, - } + rootHashCalled := func() ([]byte, error) { + return []byte("rootHash"), nil + } + return &mock.AccountsStub{ + RootHashCalled: rootHashCalled, + } } func initBasicTestData() (*mock.PoolsHolderMock, *blockchain.BlockChain, []byte, block.Body, [][]byte, *mock.HasherMock, *mock.MarshalizerMock, error, []byte) { - tdp := mock.NewPoolsHolderMock() - txHash := []byte("tx_hash1") - randSeed := []byte("rand seed") - tdp.Transactions().AddData(txHash, &transaction.Transaction{}, process.ShardCacherIdentifier(1, 0)) - blkc := &blockchain.BlockChain{ - CurrentBlockHeader: &block.Header{ - Round: 1, - Nonce: 1, - RandSeed: randSeed, - }, - } - rootHash := []byte("rootHash") - body := make(block.Body, 0) - txHashes := make([][]byte, 0) - txHashes = append(txHashes, txHash) - miniblock := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 1, - TxHashes: txHashes, - } - body = append(body, &miniblock) - hasher := &mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} - mbbytes, _ := marshalizer.Marshal(miniblock) - mbHash := hasher.Compute(string(mbbytes)) - return tdp, blkc, rootHash, body, txHashes, hasher, marshalizer, nil, mbHash + tdp := mock.NewPoolsHolderMock() + txHash := []byte("tx_hash1") + randSeed := []byte("rand seed") + tdp.Transactions().AddData(txHash, &transaction.Transaction{}, process.ShardCacherIdentifier(1, 0)) + blkc := &blockchain.BlockChain{ + CurrentBlockHeader: &block.Header{ + Round: 1, + Nonce: 1, + RandSeed: randSeed, + }, + } + rootHash := []byte("rootHash") + body := 
make(block.Body, 0) + txHashes := make([][]byte, 0) + txHashes = append(txHashes, txHash) + miniblock := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 1, + TxHashes: txHashes, + } + body = append(body, &miniblock) + hasher := &mock.HasherMock{} + marshalizer := &mock.MarshalizerMock{} + mbbytes, _ := marshalizer.Marshal(miniblock) + mbHash := hasher.Compute(string(mbbytes)) + return tdp, blkc, rootHash, body, txHashes, hasher, marshalizer, nil, mbHash } func initBlockHeader(prevHash []byte, prevRandSeed []byte, rootHash []byte, mbHdrs []block.MiniBlockHeader) block.Header { - hdr := block.Header{ - Nonce: 2, - Round: 2, - PrevHash: prevHash, - PrevRandSeed: prevRandSeed, - Signature: []byte("signature"), - PubKeysBitmap: []byte("00110"), - ShardId: 0, - RootHash: rootHash, - MiniBlockHeaders: mbHdrs, - } - return hdr + hdr := block.Header{ + Nonce: 2, + Round: 2, + PrevHash: prevHash, + PrevRandSeed: prevRandSeed, + Signature: []byte("signature"), + PubKeysBitmap: []byte("00110"), + ShardId: 0, + RootHash: rootHash, + MiniBlockHeaders: mbHdrs, + } + return hdr } type methodFlags struct { - revertToSnapshotCalled bool - rootHashCalled bool + revertToSnapshotCalled bool + rootHashCalled bool } func defaultShardProcessor() (process.BlockProcessor, *methodFlags, error) { - // set accounts not dirty - flags := &methodFlags{} - journalLen := func() int { return 0 } - revertToSnapshot := func(snapshot int) error { - flags.revertToSnapshotCalled = true - return nil - } - rootHashCalled := func() ([]byte, error) { - flags.rootHashCalled = true - return []byte("rootHash"), nil - } - - accStub := initAccountsMock() - accStub.JournalLenCalled = journalLen - accStub.RevertToSnapshotCalled = revertToSnapshot - accStub.RootHashCalled = rootHashCalled - - tdp := initDataPool([]byte("tx_hash1")) - sp, err := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - accStub, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{ - ProbableHighestNonceCalled: func() uint64 { - return 0 - }, - GetHighestFinalBlockNonceCalled: func() uint64 { - return 0 - }, - }, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - return sp, flags, err + // set accounts not dirty + flags := &methodFlags{} + journalLen := func() int { return 0 } + revertToSnapshot := func(snapshot int) error { + flags.revertToSnapshotCalled = true + return nil + } + rootHashCalled := func() ([]byte, error) { + flags.rootHashCalled = true + return []byte("rootHash"), nil + } + + accStub := initAccountsMock() + accStub.JournalLenCalled = journalLen + accStub.RevertToSnapshotCalled = revertToSnapshot + accStub.RootHashCalled = rootHashCalled + + tdp := initDataPool([]byte("tx_hash1")) + sp, err := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + accStub, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{ + ProbableHighestNonceCalled: func() uint64 { + return 0 + }, + GetHighestFinalBlockNonceCalled: func() uint64 { + return 0 + }, + }, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + 
&mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + return sp, flags, err } //------- NewBlockProcessor func TestNewBlockProcessor_NilDataPoolShouldErr(t *testing.T) { - t.Parallel() - sp, err := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - nil, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - assert.Equal(t, process.ErrNilDataPoolHolder, err) - assert.Nil(t, sp) + t.Parallel() + sp, err := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + nil, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + assert.Equal(t, process.ErrNilDataPoolHolder, err) + assert.Nil(t, sp) } func TestNewShardProcessor_NilStoreShouldErr(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - sp, err := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - nil, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - assert.Equal(t, process.ErrNilStorage, err) - assert.Nil(t, sp) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + sp, err := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + nil, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + assert.Equal(t, process.ErrNilStorage, err) + assert.Nil(t, sp) } func TestNewShardProcessor_NilHasherShouldErr(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - sp, err := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - nil, - &mock.MarshalizerMock{}, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - assert.Equal(t, process.ErrNilHasher, err) - assert.Nil(t, sp) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + sp, err := blproc.NewShardProcessor( + 
&mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + nil, + &mock.MarshalizerMock{}, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + assert.Equal(t, process.ErrNilHasher, err) + assert.Nil(t, sp) } func TestNewShardProcessor_NilMarshalizerShouldWork(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - sp, err := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - nil, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - assert.Equal(t, process.ErrNilMarshalizer, err) - assert.Nil(t, sp) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + sp, err := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + nil, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + assert.Equal(t, process.ErrNilMarshalizer, err) + assert.Nil(t, sp) } func TestNewShardProcessor_NilAccountsAdapterShouldErr(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - sp, err := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - nil, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - assert.Equal(t, process.ErrNilAccountsAdapter, err) - assert.Nil(t, sp) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + sp, err := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + nil, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + assert.Equal(t, process.ErrNilAccountsAdapter, err) + assert.Nil(t, sp) } func TestNewShardProcessor_NilShardCoordinatorShouldErr(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - sp, err := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - initAccountsMock(), - nil, - mock.NewNodesCoordinatorMock(), - 
&mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - assert.Equal(t, process.ErrNilShardCoordinator, err) - assert.Nil(t, sp) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + sp, err := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + initAccountsMock(), + nil, + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + assert.Equal(t, process.ErrNilShardCoordinator, err) + assert.Nil(t, sp) } func TestNewShardProcessor_NilForkDetectorShouldErr(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - sp, err := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - nil, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - assert.Equal(t, process.ErrNilForkDetector, err) - assert.Nil(t, sp) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + sp, err := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + nil, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + assert.Equal(t, process.ErrNilForkDetector, err) + assert.Nil(t, sp) } func TestNewShardProcessor_NilBlocksTrackerShouldErr(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - sp, err := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - nil, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - assert.Equal(t, process.ErrNilBlocksTracker, err) - assert.Nil(t, sp) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + sp, err := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + nil, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + assert.Equal(t, 
process.ErrNilBlocksTracker, err) + assert.Nil(t, sp) } func TestNewShardProcessor_NilRequestTransactionHandlerShouldErr(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - sp, err := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - nil, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - assert.Equal(t, process.ErrNilRequestHandler, err) - assert.Nil(t, sp) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + sp, err := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + nil, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + assert.Equal(t, process.ErrNilRequestHandler, err) + assert.Nil(t, sp) } func TestNewShardProcessor_NilTransactionPoolShouldErr(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - tdp.TransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { - return nil - } - sp, err := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - assert.Equal(t, process.ErrNilTransactionPool, err) - assert.Nil(t, sp) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + tdp.TransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { + return nil + } + sp, err := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + assert.Equal(t, process.ErrNilTransactionPool, err) + assert.Nil(t, sp) } func TestNewShardProcessor_NilTxCoordinator(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - sp, err := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - nil, - &mock.Uint64ByteSliceConverterMock{}, - ) 
- assert.Equal(t, process.ErrNilTransactionCoordinator, err) - assert.Nil(t, sp) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + sp, err := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + nil, + &mock.Uint64ByteSliceConverterMock{}, + ) + assert.Equal(t, process.ErrNilTransactionCoordinator, err) + assert.Nil(t, sp) } func TestNewShardProcessor_NilUint64Converter(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - sp, err := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - nil, - ) - assert.Equal(t, process.ErrNilUint64Converter, err) - assert.Nil(t, sp) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + sp, err := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + nil, + ) + assert.Equal(t, process.ErrNilUint64Converter, err) + assert.Nil(t, sp) } func TestNewShardProcessor_OkValsShouldWork(t *testing.T) { - t.Parallel() + t.Parallel() - sp, _, err := defaultShardProcessor() - assert.Nil(t, err) - assert.NotNil(t, sp) + sp, _, err := defaultShardProcessor() + assert.Nil(t, err) + assert.NotNil(t, sp) } //------- ProcessBlock func TestShardProcessor_ProcessBlockWithNilBlockchainShouldErr(t *testing.T) { - t.Parallel() + t.Parallel() - sp, _, _ := defaultShardProcessor() - blk := make(block.Body, 0) - err := sp.ProcessBlock(nil, &block.Header{}, blk, haveTime) - assert.Equal(t, process.ErrNilBlockChain, err) + sp, _, _ := defaultShardProcessor() + blk := make(block.Body, 0) + err := sp.ProcessBlock(nil, &block.Header{}, blk, haveTime) + assert.Equal(t, process.ErrNilBlockChain, err) } func TestShardProcessor_ProcessBlockWithNilHeaderShouldErr(t *testing.T) { - t.Parallel() + t.Parallel() - sp, _, _ := defaultShardProcessor() - body := make(block.Body, 0) - err := sp.ProcessBlock(&blockchain.BlockChain{}, nil, body, haveTime) - assert.Equal(t, process.ErrNilBlockHeader, err) + sp, _, _ := defaultShardProcessor() + body := make(block.Body, 0) + err := sp.ProcessBlock(&blockchain.BlockChain{}, nil, body, haveTime) + assert.Equal(t, process.ErrNilBlockHeader, err) } func TestShardProcessor_ProcessBlockWithNilBlockBodyShouldErr(t *testing.T) { - t.Parallel() + t.Parallel() - sp, _, _ := defaultShardProcessor() - err := sp.ProcessBlock(&blockchain.BlockChain{}, &block.Header{}, nil, haveTime) - assert.Equal(t, process.ErrNilBlockBody, err) + sp, _, _ := 
defaultShardProcessor() + err := sp.ProcessBlock(&blockchain.BlockChain{}, &block.Header{}, nil, haveTime) + assert.Equal(t, process.ErrNilBlockBody, err) } func TestShardProcessor_ProcessBlockWithNilHaveTimeFuncShouldErr(t *testing.T) { - t.Parallel() + t.Parallel() - sp, _, _ := defaultShardProcessor() - blk := make(block.Body, 0) - err := sp.ProcessBlock(&blockchain.BlockChain{}, &block.Header{}, blk, nil) - assert.Equal(t, process.ErrNilHaveTimeHandler, err) + sp, _, _ := defaultShardProcessor() + blk := make(block.Body, 0) + err := sp.ProcessBlock(&blockchain.BlockChain{}, &block.Header{}, blk, nil) + assert.Equal(t, process.ErrNilHaveTimeHandler, err) } func TestShardProcessor_ProcessWithDirtyAccountShouldErr(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - // set accounts dirty - journalLen := func() int { return 3 } - revToSnapshot := func(snapshot int) error { return nil } - blkc := &blockchain.BlockChain{} - hdr := block.Header{ - Nonce: 1, - PubKeysBitmap: []byte("0100101"), - PrevHash: []byte(""), - PrevRandSeed: []byte("rand seed"), - Signature: []byte("signature"), - RootHash: []byte("roothash"), - } - body := make(block.Body, 0) - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{ - JournalLenCalled: journalLen, - RevertToSnapshotCalled: revToSnapshot, - }, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - // should return err - err := sp.ProcessBlock(blkc, &hdr, body, haveTime) - assert.NotNil(t, err) - assert.Equal(t, process.ErrAccountStateDirty, err) + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + // set accounts dirty + journalLen := func() int { return 3 } + revToSnapshot := func(snapshot int) error { return nil } + blkc := &blockchain.BlockChain{} + hdr := block.Header{ + Nonce: 1, + PubKeysBitmap: []byte("0100101"), + PrevHash: []byte(""), + PrevRandSeed: []byte("rand seed"), + Signature: []byte("signature"), + RootHash: []byte("roothash"), + } + body := make(block.Body, 0) + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.AccountsStub{ + JournalLenCalled: journalLen, + RevertToSnapshotCalled: revToSnapshot, + }, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + // should return err + err := sp.ProcessBlock(blkc, &hdr, body, haveTime) + assert.NotNil(t, err) + assert.Equal(t, process.ErrAccountStateDirty, err) } func TestShardProcessor_ProcessBlockHeaderBodyMismatchShouldErr(t *testing.T) { - t.Parallel() - - txHash := []byte("tx_hash1") - blkc := &blockchain.BlockChain{} - hdr := block.Header{ - Nonce: 1, - PrevHash: []byte(""), - PrevRandSeed: []byte("rand seed"), - Signature: []byte("signature"), - PubKeysBitmap: []byte("00110"), - ShardId: 0, - RootHash: []byte("rootHash"), - } - body := make(block.Body, 0) - txHashes := 
make([][]byte, 0) - txHashes = append(txHashes, txHash) - miniblock := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 0, - TxHashes: txHashes, - } - body = append(body, &miniblock) - - sp, _, _ := defaultShardProcessor() - - // should return err - err := sp.ProcessBlock(blkc, &hdr, body, haveTime) - assert.Equal(t, process.ErrHeaderBodyMismatch, err) -} + t.Parallel() -func TestShardProcessor_ProcessBlockWithInvalidTransactionShouldErr(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - txHash := []byte("tx_hash1") - blkc := &blockchain.BlockChain{} - - body := make(block.Body, 0) - txHashes := make([][]byte, 0) - txHashes = append(txHashes, txHash) - miniblock := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 0, - TxHashes: txHashes, - } - body = append(body, &miniblock) - - hasher := &mock.HasherStub{} - marshalizer := &mock.MarshalizerMock{} - - mbbytes, _ := marshalizer.Marshal(miniblock) - mbHash := hasher.Compute(string(mbbytes)) - mbHdr := block.MiniBlockHeader{ - SenderShardID: 0, - ReceiverShardID: 0, - TxCount: uint32(len(txHashes)), - Hash: mbHash} - mbHdrs := make([]block.MiniBlockHeader, 0) - mbHdrs = append(mbHdrs, mbHdr) - - hdr := block.Header{ - Nonce: 1, - PrevHash: []byte(""), - PrevRandSeed: []byte("rand seed"), - Signature: []byte("signature"), - PubKeysBitmap: []byte("00110"), - ShardId: 0, - RootHash: []byte("rootHash"), - MiniBlockHeaders: mbHdrs, - } - // set accounts not dirty - journalLen := func() int { return 0 } - revertToSnapshot := func(snapshot int) error { return nil } - rootHashCalled := func() ([]byte, error) { - return []byte("rootHash"), nil - } - - accounts := &mock.AccountsStub{ - JournalLenCalled: journalLen, - RevertToSnapshotCalled: revertToSnapshot, - RootHashCalled: rootHashCalled, - } - factory, _ := shard.NewPreProcessorsContainerFactory( - mock.NewMultiShardsCoordinatorMock(5), - initStore(), - marshalizer, - hasher, - tdp, - &mock.AddressConverterMock{}, - accounts, - &mock.RequestHandlerMock{}, - &mock.TxProcessorMock{ - ProcessTransactionCalled: func(transaction *transaction.Transaction, round uint64) error { - return process.ErrHigherNonceInTransaction - }, - }, - &mock.SCProcessorMock{}, - &mock.SmartContractResultsProcessorMock{}, - &mock.RewardTxProcessorMock{}, - ) - container, _ := factory.Create() - - tc, err := coordinator.NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(3), - accounts, - tdp, - &mock.RequestHandlerMock{}, - container, - &mock.InterimProcessorContainerMock{}, - ) - - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - hasher, - marshalizer, - &mock.AccountsStub{ - JournalLenCalled: journalLen, - RevertToSnapshotCalled: revertToSnapshot, - RootHashCalled: rootHashCalled, - }, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - tc, - &mock.Uint64ByteSliceConverterMock{}, - ) - - // should return err - err = sp.ProcessBlock(blkc, &hdr, body, haveTime) - assert.Equal(t, process.ErrHigherNonceInTransaction, err) -} + txHash := []byte("tx_hash1") + blkc := &blockchain.BlockChain{} + hdr := block.Header{ + Nonce: 1, + PrevHash: []byte(""), + PrevRandSeed: []byte("rand seed"), + Signature: []byte("signature"), + PubKeysBitmap: []byte("00110"), + ShardId: 0, + RootHash: []byte("rootHash"), + } + body := 
make(block.Body, 0) + txHashes := make([][]byte, 0) + txHashes = append(txHashes, txHash) + miniblock := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 0, + TxHashes: txHashes, + } + body = append(body, &miniblock) -func TestShardProcessor_ProcessWithHeaderNotFirstShouldErr(t *testing.T) { - t.Parallel() - - sp, _, _ := defaultShardProcessor() - hdr := &block.Header{ - Nonce: 0, - Round: 1, - PubKeysBitmap: []byte("0100101"), - PrevHash: []byte(""), - PrevRandSeed: []byte("rand seed"), - Signature: []byte("signature"), - RootHash: []byte("root hash"), - } - body := make(block.Body, 0) - blkc := &blockchain.BlockChain{} - err := sp.ProcessBlock(blkc, hdr, body, haveTime) - assert.Equal(t, process.ErrWrongNonceInBlock, err) -} + sp, _, _ := defaultShardProcessor() -func TestShardProcessor_ProcessWithHeaderNotCorrectNonceShouldErr(t *testing.T) { - t.Parallel() - - sp, _, _ := defaultShardProcessor() - hdr := &block.Header{ - Nonce: 0, - Round: 1, - PubKeysBitmap: []byte("0100101"), - PrevHash: []byte(""), - PrevRandSeed: []byte("rand seed"), - Signature: []byte("signature"), - RootHash: []byte("root hash"), - } - body := make(block.Body, 0) - blkc := &blockchain.BlockChain{} - err := sp.ProcessBlock(blkc, hdr, body, haveTime) - assert.Equal(t, process.ErrWrongNonceInBlock, err) + // should return err + err := sp.ProcessBlock(blkc, &hdr, body, haveTime) + assert.Equal(t, process.ErrHeaderBodyMismatch, err) } -func TestShardProcessor_ProcessWithHeaderNotCorrectPrevHashShouldErr(t *testing.T) { - t.Parallel() - - randSeed := []byte("rand seed") - sp, _, _ := defaultShardProcessor() - hdr := &block.Header{ - Nonce: 1, - Round: 1, - PubKeysBitmap: []byte("0100101"), - PrevHash: []byte("zzz"), - PrevRandSeed: randSeed, - Signature: []byte("signature"), - RootHash: []byte("root hash"), - } - body := make(block.Body, 0) - blkc := &blockchain.BlockChain{ - CurrentBlockHeader: &block.Header{ - Nonce: 0, - RandSeed: randSeed, - }, - } - err := sp.ProcessBlock(blkc, hdr, body, haveTime) - assert.Equal(t, process.ErrBlockHashDoesNotMatch, err) -} +func TestShardProcessor_ProcessBlockWithInvalidTransactionShouldErr(t *testing.T) { + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + txHash := []byte("tx_hash1") + blkc := &blockchain.BlockChain{} + + body := make(block.Body, 0) + txHashes := make([][]byte, 0) + txHashes = append(txHashes, txHash) + miniblock := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 0, + TxHashes: txHashes, + } + body = append(body, &miniblock) -func TestShardProcessor_ProcessBlockWithErrOnProcessBlockTransactionsCallShouldRevertState(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - txHash := []byte("tx_hash1") - randSeed := []byte("rand seed") - blkc := &blockchain.BlockChain{ - CurrentBlockHeader: &block.Header{ - Nonce: 0, - RandSeed: randSeed, - }, - } - body := make(block.Body, 0) - txHashes := make([][]byte, 0) - txHashes = append(txHashes, txHash) - miniblock := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 0, - TxHashes: txHashes, - } - body = append(body, &miniblock) - - hasher := &mock.HasherStub{} - marshalizer := &mock.MarshalizerMock{} - - mbbytes, _ := marshalizer.Marshal(miniblock) - mbHash := hasher.Compute(string(mbbytes)) - mbHdr := block.MiniBlockHeader{ - SenderShardID: 0, - ReceiverShardID: 0, - TxCount: uint32(len(txHashes)), - Hash: mbHash} - mbHdrs := make([]block.MiniBlockHeader, 0) - mbHdrs = append(mbHdrs, mbHdr) - - hdr := block.Header{ - Round: 1, - Nonce: 1, - PrevHash: []byte(""), - 
PrevRandSeed: randSeed, - Signature: []byte("signature"), - PubKeysBitmap: []byte("00110"), - ShardId: 0, - RootHash: []byte("rootHash"), - MiniBlockHeaders: mbHdrs, - } - - // set accounts not dirty - journalLen := func() int { return 0 } - wasCalled := false - revertToSnapshot := func(snapshot int) error { - wasCalled = true - return nil - } - rootHashCalled := func() ([]byte, error) { - return []byte("rootHash"), nil - } - - err := errors.New("process block transaction error") - txProcess := func(transaction *transaction.Transaction, round uint64) error { - return err - } - - shardCoordinator := mock.NewMultiShardsCoordinatorMock(3) - tpm := &mock.TxProcessorMock{ProcessTransactionCalled: txProcess} - store := &mock.ChainStorerMock{} - accounts := &mock.AccountsStub{ - JournalLenCalled: journalLen, - RevertToSnapshotCalled: revertToSnapshot, - RootHashCalled: rootHashCalled, - } - factory, _ := shard.NewPreProcessorsContainerFactory( - mock.NewMultiShardsCoordinatorMock(3), - store, - marshalizer, - hasher, - tdp, - &mock.AddressConverterMock{}, - accounts, - &mock.RequestHandlerMock{}, - tpm, - &mock.SCProcessorMock{}, - &mock.SmartContractResultsProcessorMock{}, - &mock.RewardTxProcessorMock{}, - ) - container, _ := factory.Create() - - tc, _ := coordinator.NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(3), - accounts, - tdp, - &mock.RequestHandlerMock{}, - container, - &mock.InterimProcessorContainerMock{}, - ) - - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - store, - hasher, - &mock.MarshalizerMock{}, - accounts, - shardCoordinator, - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{ - ProbableHighestNonceCalled: func() uint64 { - return 0 - }, - GetHighestFinalBlockNonceCalled: func() uint64 { - return 0 - }, - }, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - tc, - &mock.Uint64ByteSliceConverterMock{}, - ) - - // should return err - err2 := sp.ProcessBlock(blkc, &hdr, body, haveTime) - assert.Equal(t, err, err2) - assert.True(t, wasCalled) -} + hasher := &mock.HasherStub{} + marshalizer := &mock.MarshalizerMock{} -func TestShardProcessor_ProcessBlockWithErrOnVerifyStateRootCallShouldRevertState(t *testing.T) { - t.Parallel() - - randSeed := []byte("rand seed") - txHash := []byte("tx_hash1") - blkc := &blockchain.BlockChain{ - CurrentBlockHeader: &block.Header{ - Nonce: 0, - RandSeed: randSeed, - }, - } - body := make(block.Body, 0) - txHashes := make([][]byte, 0) - txHashes = append(txHashes, txHash) - miniblock := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 0, - TxHashes: txHashes, - } - body = append(body, &miniblock) - - hasher := &mock.HasherStub{} - marshalizer := &mock.MarshalizerMock{} - - mbbytes, _ := marshalizer.Marshal(miniblock) - mbHash := hasher.Compute(string(mbbytes)) - mbHdr := block.MiniBlockHeader{ - SenderShardID: 0, - ReceiverShardID: 0, - TxCount: uint32(len(txHashes)), - Hash: mbHash} - mbHdrs := make([]block.MiniBlockHeader, 0) - mbHdrs = append(mbHdrs, mbHdr) - - hdr := block.Header{ - Round: 1, - Nonce: 1, - PrevHash: []byte(""), - PrevRandSeed: randSeed, - Signature: []byte("signature"), - PubKeysBitmap: []byte("00110"), - ShardId: 0, - RootHash: []byte("rootHashX"), - MiniBlockHeaders: mbHdrs, - } - - sp, flags, _ := defaultShardProcessor() - // should return err - err := sp.ProcessBlock(blkc, &hdr, body, haveTime) - assert.Equal(t, process.ErrRootStateMissmatch, err) - 
assert.True(t, flags.revertToSnapshotCalled) -} + mbbytes, _ := marshalizer.Marshal(miniblock) + mbHash := hasher.Compute(string(mbbytes)) + mbHdr := block.MiniBlockHeader{ + SenderShardID: 0, + ReceiverShardID: 0, + TxCount: uint32(len(txHashes)), + Hash: mbHash} + mbHdrs := make([]block.MiniBlockHeader, 0) + mbHdrs = append(mbHdrs, mbHdr) + + hdr := block.Header{ + Nonce: 1, + PrevHash: []byte(""), + PrevRandSeed: []byte("rand seed"), + Signature: []byte("signature"), + PubKeysBitmap: []byte("00110"), + ShardId: 0, + RootHash: []byte("rootHash"), + MiniBlockHeaders: mbHdrs, + } + // set accounts not dirty + journalLen := func() int { return 0 } + revertToSnapshot := func(snapshot int) error { return nil } + rootHashCalled := func() ([]byte, error) { + return []byte("rootHash"), nil + } -func TestShardProcessor_ProcessBlockOnlyIntraShardShouldPass(t *testing.T) { - t.Parallel() - - randSeed := []byte("rand seed") - txHash := []byte("tx_hash1") - blkc := &blockchain.BlockChain{ - CurrentBlockHeader: &block.Header{ - Nonce: 0, - RandSeed: randSeed, - }, - } - rootHash := []byte("rootHash") - body := make(block.Body, 0) - txHashes := make([][]byte, 0) - txHashes = append(txHashes, txHash) - miniblock := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 0, - TxHashes: txHashes, - } - body = append(body, &miniblock) - - hasher := &mock.HasherStub{} - marshalizer := &mock.MarshalizerMock{} - - mbbytes, _ := marshalizer.Marshal(miniblock) - mbHash := hasher.Compute(string(mbbytes)) - mbHdr := block.MiniBlockHeader{ - SenderShardID: 0, - ReceiverShardID: 0, - TxCount: uint32(len(txHashes)), - Hash: mbHash} - mbHdrs := make([]block.MiniBlockHeader, 0) - mbHdrs = append(mbHdrs, mbHdr) - - hdr := block.Header{ - Round: 1, - Nonce: 1, - PrevHash: []byte(""), - PrevRandSeed: randSeed, - Signature: []byte("signature"), - PubKeysBitmap: []byte("00110"), - ShardId: 0, - RootHash: rootHash, - MiniBlockHeaders: mbHdrs, - } - - sp, flags, _ := defaultShardProcessor() - - // should return err - err := sp.ProcessBlock(blkc, &hdr, body, haveTime) - assert.Nil(t, err) - assert.False(t, flags.revertToSnapshotCalled) -} + accounts := &mock.AccountsStub{ + JournalLenCalled: journalLen, + RevertToSnapshotCalled: revertToSnapshot, + RootHashCalled: rootHashCalled, + } + factory, _ := shard.NewPreProcessorsContainerFactory( + mock.NewMultiShardsCoordinatorMock(5), + initStore(), + marshalizer, + hasher, + tdp, + &mock.AddressConverterMock{}, + accounts, + &mock.RequestHandlerMock{}, + &mock.TxProcessorMock{ + ProcessTransactionCalled: func(transaction *transaction.Transaction, round uint64) error { + return process.ErrHigherNonceInTransaction + }, + }, + &mock.SCProcessorMock{}, + &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + ) + container, _ := factory.Create() -func TestShardProcessor_ProcessBlockCrossShardWithoutMetaShouldFail(t *testing.T) { - t.Parallel() - - randSeed := []byte("rand seed") - tdp := initDataPool([]byte("tx_hash1")) - txHash := []byte("tx_hash1") - blkc := &blockchain.BlockChain{ - CurrentBlockHeader: &block.Header{ - Nonce: 0, - RandSeed: randSeed, - }, - } - rootHash := []byte("rootHash") - body := make(block.Body, 0) - txHashes := make([][]byte, 0) - txHashes = append(txHashes, txHash) - miniblock := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 1, - TxHashes: txHashes, - } - body = append(body, &miniblock) - - shardCoordinator := mock.NewMultiShardsCoordinatorMock(3) - tx := &transaction.Transaction{} - tdp.Transactions().AddData(txHash, tx, 
shardCoordinator.CommunicationIdentifier(0)) - - hasher := &mock.HasherStub{} - marshalizer := &mock.MarshalizerMock{} - - mbbytes, _ := marshalizer.Marshal(miniblock) - mbHash := hasher.Compute(string(mbbytes)) - mbHdr := block.MiniBlockHeader{ - ReceiverShardID: 0, - SenderShardID: 1, - TxCount: uint32(len(txHashes)), - Hash: mbHash} - mbHdrs := make([]block.MiniBlockHeader, 0) - mbHdrs = append(mbHdrs, mbHdr) - - hdr := block.Header{ - Round: 1, - Nonce: 1, - PrevHash: []byte(""), - PrevRandSeed: randSeed, - Signature: []byte("signature"), - PubKeysBitmap: []byte("00110"), - ShardId: 0, - RootHash: rootHash, - MiniBlockHeaders: mbHdrs, - } - // set accounts not dirty - journalLen := func() int { return 0 } - wasCalled := false - revertToSnapshot := func(snapshot int) error { - wasCalled = true - return nil - } - rootHashCalled := func() ([]byte, error) { - return rootHash, nil - } - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{ - JournalLenCalled: journalLen, - RevertToSnapshotCalled: revertToSnapshot, - RootHashCalled: rootHashCalled, - }, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - // should return err - err := sp.ProcessBlock(blkc, &hdr, body, haveTime) - assert.Equal(t, process.ErrCrossShardMBWithoutConfirmationFromMeta, err) - assert.False(t, wasCalled) -} + tc, err := coordinator.NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(3), + accounts, + tdp, + &mock.RequestHandlerMock{}, + container, + &mock.InterimProcessorContainerMock{}, + ) -func TestShardProcessor_ProcessBlockCrossShardWithMetaShouldPass(t *testing.T) { - t.Parallel() - - tdp, blkc, rootHash, body, txHashes, hasher, marshalizer, _, mbHash := initBasicTestData() - mbHdr := block.MiniBlockHeader{ - ReceiverShardID: 0, - SenderShardID: 1, - TxCount: uint32(len(txHashes)), - Hash: mbHash} - mbHdrs := make([]block.MiniBlockHeader, 0) - mbHdrs = append(mbHdrs, mbHdr) - - randSeed := []byte("rand seed") - lastHdr := blkc.GetCurrentBlockHeader() - prevHash, _ := core.CalculateHash(marshalizer, hasher, lastHdr) - hdr := initBlockHeader(prevHash, randSeed, rootHash, mbHdrs) - - shardMiniBlock := block.ShardMiniBlockHeader{ - ReceiverShardId: mbHdr.ReceiverShardID, - SenderShardId: mbHdr.SenderShardID, - TxCount: mbHdr.TxCount, - Hash: mbHdr.Hash, - } - shardMiniblockHdrs := make([]block.ShardMiniBlockHeader, 0) - shardMiniblockHdrs = append(shardMiniblockHdrs, shardMiniBlock) - shardHeader := block.ShardData{ - ShardMiniBlockHeaders: shardMiniblockHdrs, - } - shardHdrs := make([]block.ShardData, 0) - shardHdrs = append(shardHdrs, shardHeader) - - meta := block.MetaBlock{ - Nonce: 1, - ShardInfo: shardHdrs, - RandSeed: randSeed, - } - metaBytes, _ := marshalizer.Marshal(meta) - metaHash := hasher.Compute(string(metaBytes)) - - tdp.MetaBlocks().Put(metaHash, meta) - - meta = block.MetaBlock{ - Nonce: 2, - ShardInfo: make([]block.ShardData, 0), - PrevRandSeed: randSeed, - } - metaBytes, _ = marshalizer.Marshal(meta) - metaHash = hasher.Compute(string(metaBytes)) - - tdp.MetaBlocks().Put(metaHash, meta) - - // set accounts not dirty - journalLen := func() int { return 0 } - wasCalled := false - 
revertToSnapshot := func(snapshot int) error { - wasCalled = true - return nil - } - rootHashCalled := func() ([]byte, error) { - return rootHash, nil - } - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - hasher, - marshalizer, - &mock.AccountsStub{ - JournalLenCalled: journalLen, - RevertToSnapshotCalled: revertToSnapshot, - RootHashCalled: rootHashCalled, - }, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - // should return err - err := sp.ProcessBlock(blkc, &hdr, body, haveTime) - assert.Equal(t, process.ErrCrossShardMBWithoutConfirmationFromMeta, err) - assert.False(t, wasCalled) -} + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + hasher, + marshalizer, + &mock.AccountsStub{ + JournalLenCalled: journalLen, + RevertToSnapshotCalled: revertToSnapshot, + RootHashCalled: rootHashCalled, + }, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + tc, + &mock.Uint64ByteSliceConverterMock{}, + ) -func TestShardProcessor_ProcessBlockHaveTimeLessThanZeroShouldErr(t *testing.T) { - t.Parallel() - txHash := []byte("tx_hash1") - tdp := initDataPool(txHash) - - randSeed := []byte("rand seed") - blkc := &blockchain.BlockChain{ - CurrentBlockHeader: &block.Header{ - Nonce: 1, - RandSeed: randSeed, - }, - } - rootHash := []byte("rootHash") - body := make(block.Body, 0) - txHashes := make([][]byte, 0) - txHashes = append(txHashes, txHash) - miniblock := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 0, - TxHashes: txHashes, - } - body = append(body, &miniblock) - - hasher := &mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} - - mbbytes, _ := marshalizer.Marshal(miniblock) - mbHash := hasher.Compute(string(mbbytes)) - mbHdr := block.MiniBlockHeader{ - SenderShardID: 0, - ReceiverShardID: 0, - TxCount: uint32(len(txHashes)), - Hash: mbHash} - mbHdrs := make([]block.MiniBlockHeader, 0) - mbHdrs = append(mbHdrs, mbHdr) - - currHdr := blkc.GetCurrentBlockHeader() - preHash, _ := core.CalculateHash(marshalizer, hasher, currHdr) - hdr := block.Header{ - Round: 2, - Nonce: 2, - PrevHash: preHash, - PrevRandSeed: randSeed, - Signature: []byte("signature"), - PubKeysBitmap: []byte("00110"), - ShardId: 0, - RootHash: rootHash, - MiniBlockHeaders: mbHdrs, - } - genesisBlocks := createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)) - sp, _ := blproc.NewShardProcessorEmptyWith3shards(tdp, genesisBlocks) - haveTimeLessThanZero := func() time.Duration { - return -1 * time.Millisecond - } - - // should return err - err := sp.ProcessBlock(blkc, &hdr, body, haveTimeLessThanZero) - assert.Equal(t, process.ErrTimeIsOut, err) + // should return err + err = sp.ProcessBlock(blkc, &hdr, body, haveTime) + assert.Equal(t, process.ErrHigherNonceInTransaction, err) } -func TestShardProcessor_ProcessBlockWithMissingMetaHdrShouldErr(t *testing.T) { - t.Parallel() - - tdp, blkc, rootHash, body, txHashes, hasher, marshalizer, _, mbHash := initBasicTestData() - mbHdr := block.MiniBlockHeader{ - 
ReceiverShardID: 0, - SenderShardID: 1, - TxCount: uint32(len(txHashes)), - Hash: mbHash} - mbHdrs := make([]block.MiniBlockHeader, 0) - mbHdrs = append(mbHdrs, mbHdr) - - randSeed := []byte("rand seed") - lastHdr := blkc.GetCurrentBlockHeader() - prevHash, _ := core.CalculateHash(marshalizer, hasher, lastHdr) - hdr := initBlockHeader(prevHash, randSeed, rootHash, mbHdrs) - - shardMiniBlock := block.ShardMiniBlockHeader{ - ReceiverShardId: mbHdr.ReceiverShardID, - SenderShardId: mbHdr.SenderShardID, - TxCount: mbHdr.TxCount, - Hash: mbHdr.Hash, - } - shardMiniblockHdrs := make([]block.ShardMiniBlockHeader, 0) - shardMiniblockHdrs = append(shardMiniblockHdrs, shardMiniBlock) - shardHeader := block.ShardData{ - ShardMiniBlockHeaders: shardMiniblockHdrs, - } - shardHdrs := make([]block.ShardData, 0) - shardHdrs = append(shardHdrs, shardHeader) - - meta := block.MetaBlock{ - Nonce: 1, - ShardInfo: shardHdrs, - RandSeed: randSeed, - } - metaBytes, _ := marshalizer.Marshal(meta) - metaHash := hasher.Compute(string(metaBytes)) - - hdr.MetaBlockHashes = append(hdr.MetaBlockHashes, metaHash) - tdp.MetaBlocks().Put(metaHash, meta) - - meta = block.MetaBlock{ - Nonce: 2, - ShardInfo: make([]block.ShardData, 0), - PrevRandSeed: randSeed, - } - metaBytes, _ = marshalizer.Marshal(meta) - metaHash = hasher.Compute(string(metaBytes)) - - hdr.MetaBlockHashes = append(hdr.MetaBlockHashes, metaHash) - tdp.MetaBlocks().Put(metaHash, meta) - - // set accounts not dirty - journalLen := func() int { return 0 } - revertToSnapshot := func(snapshot int) error { - return nil - } - rootHashCalled := func() ([]byte, error) { - return rootHash, nil - } - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - hasher, - marshalizer, - &mock.AccountsStub{ - JournalLenCalled: journalLen, - RevertToSnapshotCalled: revertToSnapshot, - RootHashCalled: rootHashCalled, - }, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - // should return err - err := sp.ProcessBlock(blkc, &hdr, body, haveTime) - assert.Equal(t, process.ErrTimeIsOut, err) -} +func TestShardProcessor_ProcessWithHeaderNotFirstShouldErr(t *testing.T) { + t.Parallel() -func TestShardProcessor_ProcessBlockWithWrongMiniBlockHeaderShouldErr(t *testing.T) { - t.Parallel() - - txHash := []byte("tx_hash1") - tdp := initDataPool(txHash) - randSeed := []byte("rand seed") - blkc := &blockchain.BlockChain{ - CurrentBlockHeader: &block.Header{ - Nonce: 1, - RandSeed: randSeed, - }, - } - rootHash := []byte("rootHash") - body := make(block.Body, 0) - txHashes := make([][]byte, 0) - txHashes = append(txHashes, txHash) - miniblock := block.MiniBlock{ - ReceiverShardID: 1, - SenderShardID: 0, - TxHashes: txHashes, - } - body = append(body, &miniblock) - - hasher := &mock.HasherStub{} - marshalizer := &mock.MarshalizerMock{} - - mbbytes, _ := marshalizer.Marshal(miniblock) - mbHash := hasher.Compute(string(mbbytes)) - mbHdr := block.MiniBlockHeader{ - SenderShardID: 1, - ReceiverShardID: 0, - TxCount: uint32(len(txHashes)), - Hash: mbHash, - } - mbHdrs := make([]block.MiniBlockHeader, 0) - mbHdrs = append(mbHdrs, mbHdr) - - lastHdr := blkc.GetCurrentBlockHeader() - prevHash, _ := core.CalculateHash(marshalizer, hasher, lastHdr) - hdr := 
initBlockHeader(prevHash, randSeed, rootHash, mbHdrs) - - rootHashCalled := func() ([]byte, error) { - return rootHash, nil - } - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{ - RootHashCalled: rootHashCalled, - }, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - // should return err - err := sp.ProcessBlock(blkc, &hdr, body, haveTime) - assert.Equal(t, process.ErrHeaderBodyMismatch, err) + sp, _, _ := defaultShardProcessor() + hdr := &block.Header{ + Nonce: 0, + Round: 1, + PubKeysBitmap: []byte("0100101"), + PrevHash: []byte(""), + PrevRandSeed: []byte("rand seed"), + Signature: []byte("signature"), + RootHash: []byte("root hash"), + } + body := make(block.Body, 0) + blkc := &blockchain.BlockChain{} + err := sp.ProcessBlock(blkc, hdr, body, haveTime) + assert.Equal(t, process.ErrWrongNonceInBlock, err) } -//------- checkAndRequestIfMetaHeadersMissing -func TestShardProcessor_CheckAndRequestIfMetaHeadersMissingShouldErr(t *testing.T) { - t.Parallel() - - hdrNoncesRequestCalled := int32(0) - tdp, blkc, rootHash, body, txHashes, hasher, marshalizer, _, mbHash := initBasicTestData() - mbHdr := block.MiniBlockHeader{ - ReceiverShardID: 0, - SenderShardID: 1, - TxCount: uint32(len(txHashes)), - Hash: mbHash, - } - mbHdrs := make([]block.MiniBlockHeader, 0) - mbHdrs = append(mbHdrs, mbHdr) - - lastHdr := blkc.GetCurrentBlockHeader() - prevHash, _ := core.CalculateHash(marshalizer, hasher, lastHdr) - randSeed := []byte("rand seed") - - hdr := initBlockHeader(prevHash, randSeed, rootHash, mbHdrs) - - shardMiniBlock := block.ShardMiniBlockHeader{ - ReceiverShardId: mbHdr.ReceiverShardID, - SenderShardId: mbHdr.SenderShardID, - TxCount: mbHdr.TxCount, - Hash: mbHdr.Hash, - } - shardMiniblockHdrs := make([]block.ShardMiniBlockHeader, 0) - shardMiniblockHdrs = append(shardMiniblockHdrs, shardMiniBlock) - shardHeader := block.ShardData{ - ShardMiniBlockHeaders: shardMiniblockHdrs, - } - shardHdrs := make([]block.ShardData, 0) - shardHdrs = append(shardHdrs, shardHeader) - - meta := &block.MetaBlock{ - Nonce: 1, - ShardInfo: shardHdrs, - Round: 1, - RandSeed: randSeed, - } - metaBytes, _ := marshalizer.Marshal(meta) - metaHash := hasher.Compute(string(metaBytes)) - - hdr.MetaBlockHashes = append(hdr.MetaBlockHashes, metaHash) - tdp.MetaBlocks().Put(metaHash, meta) - - meta = &block.MetaBlock{ - Nonce: 2, - ShardInfo: make([]block.ShardData, 0), - Round: 2, - PrevRandSeed: randSeed, - } - metaBytes, _ = marshalizer.Marshal(meta) - metaHash = hasher.Compute(string(metaBytes)) - - tdp.MetaBlocks().Put(metaHash, meta) - - // set accounts not dirty - journalLen := func() int { return 0 } - revertToSnapshot := func(snapshot int) error { - return nil - } - rootHashCalled := func() ([]byte, error) { - return rootHash, nil - } - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - hasher, - marshalizer, - &mock.AccountsStub{ - JournalLenCalled: journalLen, - RevertToSnapshotCalled: revertToSnapshot, - RootHashCalled: rootHashCalled, - }, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - 
&mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{ - RequestHeaderHandlerByNonceCalled: func(destShardID uint32, nonce uint64) { - atomic.AddInt32(&hdrNoncesRequestCalled, 1) - }, - }, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - err := sp.ProcessBlock(blkc, &hdr, body, haveTime) - - sp.CheckAndRequestIfMetaHeadersMissing(2) - time.Sleep(100 * time.Millisecond) - assert.Equal(t, int32(1), atomic.LoadInt32(&hdrNoncesRequestCalled)) - assert.Equal(t, err, process.ErrTimeIsOut) -} +func TestShardProcessor_ProcessWithHeaderNotCorrectNonceShouldErr(t *testing.T) { + t.Parallel() -//-------- isMetaHeaderFinal -func TestShardProcessor_IsMetaHeaderFinalShouldPass(t *testing.T) { - t.Parallel() - - tdp := mock.NewPoolsHolderMock() - txHash := []byte("tx_hash1") - randSeed := []byte("rand seed") - tdp.Transactions().AddData(txHash, &transaction.Transaction{}, process.ShardCacherIdentifier(1, 0)) - blkc := &blockchain.BlockChain{ - CurrentBlockHeader: &block.Header{ - Nonce: 1, - RandSeed: randSeed, - }, - } - rootHash := []byte("rootHash") - body := make(block.Body, 0) - txHashes := make([][]byte, 0) - txHashes = append(txHashes, txHash) - miniblock := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 1, - TxHashes: txHashes, - } - body = append(body, &miniblock) - - hasher := &mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} - - mbbytes, _ := marshalizer.Marshal(miniblock) - mbHash := hasher.Compute(string(mbbytes)) - mbHdr := block.MiniBlockHeader{ - ReceiverShardID: 0, - SenderShardID: 1, - TxCount: uint32(len(txHashes)), - Hash: mbHash, - } - mbHdrs := make([]block.MiniBlockHeader, 0) - mbHdrs = append(mbHdrs, mbHdr) - - lastHdr := blkc.GetCurrentBlockHeader() - prevHash, _ := core.CalculateHash(marshalizer, hasher, lastHdr) - hdr := initBlockHeader(prevHash, randSeed, rootHash, mbHdrs) - - shardMiniBlock := block.ShardMiniBlockHeader{ - ReceiverShardId: mbHdr.ReceiverShardID, - SenderShardId: mbHdr.SenderShardID, - TxCount: mbHdr.TxCount, - Hash: mbHdr.Hash, - } - shardMiniblockHdrs := make([]block.ShardMiniBlockHeader, 0) - shardMiniblockHdrs = append(shardMiniblockHdrs, shardMiniBlock) - shardHeader := block.ShardData{ - ShardMiniBlockHeaders: shardMiniblockHdrs, - } - shardHdrs := make([]block.ShardData, 0) - shardHdrs = append(shardHdrs, shardHeader) - - meta := &block.MetaBlock{ - Nonce: 1, - ShardInfo: shardHdrs, - Round: 1, - RandSeed: randSeed, - } - metaBytes, _ := marshalizer.Marshal(meta) - metaHash := hasher.Compute(string(metaBytes)) - - hdr.MetaBlockHashes = append(hdr.MetaBlockHashes, metaHash) - tdp.MetaBlocks().Put(metaHash, meta) - - meta = &block.MetaBlock{ - Nonce: 2, - ShardInfo: make([]block.ShardData, 0), - Round: 2, - PrevHash: metaHash, - PrevRandSeed: randSeed, - } - metaBytes, _ = marshalizer.Marshal(meta) - metaHash = hasher.Compute(string(metaBytes)) - tdp.MetaBlocks().Put(metaHash, meta) - - genesisBlocks := createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)) - sp, _ := blproc.NewShardProcessorEmptyWith3shards(tdp, genesisBlocks) - - err := sp.ProcessBlock(blkc, &hdr, body, haveTime) - assert.Equal(t, process.ErrTimeIsOut, err) - res := sp.IsMetaHeaderFinal(&hdr, nil, 0) - assert.False(t, res) - res = sp.IsMetaHeaderFinal(nil, nil, 0) - assert.False(t, res) - - meta = &block.MetaBlock{ - Nonce: 1, - ShardInfo: shardHdrs, - Round: 1, - RandSeed: randSeed, - } - ordered, _ := 
sp.GetOrderedMetaBlocks(3) - res = sp.IsMetaHeaderFinal(meta, ordered, 0) - assert.True(t, res) + sp, _, _ := defaultShardProcessor() + hdr := &block.Header{ + Nonce: 0, + Round: 1, + PubKeysBitmap: []byte("0100101"), + PrevHash: []byte(""), + PrevRandSeed: []byte("rand seed"), + Signature: []byte("signature"), + RootHash: []byte("root hash"), + } + body := make(block.Body, 0) + blkc := &blockchain.BlockChain{} + err := sp.ProcessBlock(blkc, hdr, body, haveTime) + assert.Equal(t, process.ErrWrongNonceInBlock, err) } -//-------- requestFinalMissingHeaders -func TestShardProcessor_RequestFinalMissingHeaders(t *testing.T) { - t.Parallel() - - tdp := mock.NewPoolsHolderMock() - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{}, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - sp.SetCurrHighestMetaHdrNonce(1) - res := sp.RequestFinalMissingHeaders() - assert.Equal(t, res > 0, true) -} +func TestShardProcessor_ProcessWithHeaderNotCorrectPrevHashShouldErr(t *testing.T) { + t.Parallel() -//--------- verifyIncludedMetaBlocksFinality -func TestShardProcessor_CheckMetaHeadersValidityAndFinalityShouldPass(t *testing.T) { - t.Parallel() - - tdp := mock.NewPoolsHolderMock() - txHash := []byte("tx_hash1") - tdp.Transactions().AddData(txHash, &transaction.Transaction{}, process.ShardCacherIdentifier(1, 0)) - rootHash := []byte("rootHash") - body := make(block.Body, 0) - txHashes := make([][]byte, 0) - txHashes = append(txHashes, txHash) - miniblock := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 1, - TxHashes: txHashes, - } - body = append(body, &miniblock) - - hasher := &mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} - - mbbytes, _ := marshalizer.Marshal(miniblock) - mbHash := hasher.Compute(string(mbbytes)) - mbHdr := block.MiniBlockHeader{ - ReceiverShardID: 0, - SenderShardID: 1, - TxCount: uint32(len(txHashes)), - Hash: mbHash} - mbHdrs := make([]block.MiniBlockHeader, 0) - mbHdrs = append(mbHdrs, mbHdr) - - genesisBlocks := createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)) - lastHdr := genesisBlocks[0] - prevHash, _ := core.CalculateHash(marshalizer, hasher, lastHdr) - randSeed := []byte("rand seed") - hdr := initBlockHeader(prevHash, randSeed, rootHash, mbHdrs) - - shardMiniBlock := block.ShardMiniBlockHeader{ - ReceiverShardId: mbHdr.ReceiverShardID, - SenderShardId: mbHdr.SenderShardID, - TxCount: mbHdr.TxCount, - Hash: mbHdr.Hash, - } - shardMiniblockHdrs := make([]block.ShardMiniBlockHeader, 0) - shardMiniblockHdrs = append(shardMiniblockHdrs, shardMiniBlock) - shardHeader := block.ShardData{ - ShardMiniBlockHeaders: shardMiniblockHdrs, - } - shardHdrs := make([]block.ShardData, 0) - shardHdrs = append(shardHdrs, shardHeader) - - prevMeta := genesisBlocks[sharding.MetachainShardId] - prevHash, _ = core.CalculateHash(marshalizer, hasher, prevMeta) - meta := &block.MetaBlock{ - Nonce: 1, - ShardInfo: shardHdrs, - Round: 1, - PrevHash: prevHash, - PrevRandSeed: prevMeta.GetRandSeed(), - } - metaBytes, _ := marshalizer.Marshal(meta) - metaHash := hasher.Compute(string(metaBytes)) - hdr.MetaBlockHashes = append(hdr.MetaBlockHashes, metaHash) - - 
tdp.MetaBlocks().Put(metaHash, meta) - - prevHash, _ = core.CalculateHash(marshalizer, hasher, meta) - meta = &block.MetaBlock{ - Nonce: 2, - ShardInfo: make([]block.ShardData, 0), - Round: 2, - PrevHash: prevHash, - } - metaBytes, _ = marshalizer.Marshal(meta) - metaHash = hasher.Compute(string(metaBytes)) - - tdp.MetaBlocks().Put(metaHash, meta) - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - hasher, - marshalizer, - &mock.AccountsStub{}, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - genesisBlocks, - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - hdr.Round = 4 - - err := sp.CheckMetaHeadersValidityAndFinality(&hdr) - assert.Nil(t, err) + randSeed := []byte("rand seed") + sp, _, _ := defaultShardProcessor() + hdr := &block.Header{ + Nonce: 1, + Round: 1, + PubKeysBitmap: []byte("0100101"), + PrevHash: []byte("zzz"), + PrevRandSeed: randSeed, + Signature: []byte("signature"), + RootHash: []byte("root hash"), + } + body := make(block.Body, 0) + blkc := &blockchain.BlockChain{ + CurrentBlockHeader: &block.Header{ + Nonce: 0, + RandSeed: randSeed, + }, + } + err := sp.ProcessBlock(blkc, hdr, body, haveTime) + assert.Equal(t, process.ErrBlockHashDoesNotMatch, err) } -func TestShardProcessor_CheckMetaHeadersValidityAndFinalityShouldErr(t *testing.T) { - t.Parallel() +func TestShardProcessor_ProcessBlockWithErrOnProcessBlockTransactionsCallShouldRevertState(t *testing.T) { + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + txHash := []byte("tx_hash1") + randSeed := []byte("rand seed") + blkc := &blockchain.BlockChain{ + CurrentBlockHeader: &block.Header{ + Nonce: 0, + RandSeed: randSeed, + }, + } + body := make(block.Body, 0) + txHashes := make([][]byte, 0) + txHashes = append(txHashes, txHash) + miniblock := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 0, + TxHashes: txHashes, + } + body = append(body, &miniblock) - mbHdrs := make([]block.MiniBlockHeader, 0) - rootHash := []byte("rootHash") - txHash := []byte("txhash1") - txHashes := make([][]byte, 0) - txHashes = append(txHashes, txHash) + hasher := &mock.HasherStub{} + marshalizer := &mock.MarshalizerMock{} - tdp := mock.NewPoolsHolderMock() - genesisBlocks := createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)) - sp, _ := blproc.NewShardProcessorEmptyWith3shards(tdp, genesisBlocks) + mbbytes, _ := marshalizer.Marshal(miniblock) + mbHash := hasher.Compute(string(mbbytes)) + mbHdr := block.MiniBlockHeader{ + SenderShardID: 0, + ReceiverShardID: 0, + TxCount: uint32(len(txHashes)), + Hash: mbHash} + mbHdrs := make([]block.MiniBlockHeader, 0) + mbHdrs = append(mbHdrs, mbHdr) + + hdr := block.Header{ + Round: 1, + Nonce: 1, + PrevHash: []byte(""), + PrevRandSeed: randSeed, + Signature: []byte("signature"), + PubKeysBitmap: []byte("00110"), + ShardId: 0, + RootHash: []byte("rootHash"), + MiniBlockHeaders: mbHdrs, + } - lastHdr := genesisBlocks[0] - prevHash, _ := core.CalculateHash(&mock.MarshalizerMock{}, &mock.HasherMock{}, lastHdr) + // set accounts not dirty + journalLen := func() int { return 0 } + wasCalled := false + revertToSnapshot := func(snapshot int) error { + wasCalled = true + return nil + } + rootHashCalled := func() ([]byte, error) { + return []byte("rootHash"), nil + } - randSeed := []byte("rand seed") - hdr := initBlockHeader(prevHash, randSeed, rootHash, mbHdrs) 
+ err := errors.New("process block transaction error") + txProcess := func(transaction *transaction.Transaction, round uint64) error { + return err + } - hdr.MetaBlockHashes = append(hdr.MetaBlockHashes, []byte("meta")) - hdr.Round = 0 - err := sp.CheckMetaHeadersValidityAndFinality(&hdr) - assert.Equal(t, err, process.ErrNilMetaBlockHeader) -} + shardCoordinator := mock.NewMultiShardsCoordinatorMock(3) + tpm := &mock.TxProcessorMock{ProcessTransactionCalled: txProcess} + store := &mock.ChainStorerMock{} + accounts := &mock.AccountsStub{ + JournalLenCalled: journalLen, + RevertToSnapshotCalled: revertToSnapshot, + RootHashCalled: rootHashCalled, + } + factory, _ := shard.NewPreProcessorsContainerFactory( + mock.NewMultiShardsCoordinatorMock(3), + store, + marshalizer, + hasher, + tdp, + &mock.AddressConverterMock{}, + accounts, + &mock.RequestHandlerMock{}, + tpm, + &mock.SCProcessorMock{}, + &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + ) + container, _ := factory.Create() -//------- CommitBlock + tc, _ := coordinator.NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(3), + accounts, + tdp, + &mock.RequestHandlerMock{}, + container, + &mock.InterimProcessorContainerMock{}, + ) -func TestShardProcessor_CommitBlockNilBlockchainShouldErr(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - accounts := &mock.AccountsStub{} - accounts.RevertToSnapshotCalled = func(snapshot int) error { - return nil - } - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - accounts, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - blk := make(block.Body, 0) - - err := sp.CommitBlock(nil, &block.Header{}, blk) - assert.Equal(t, process.ErrNilBlockChain, err) -} + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + store, + hasher, + &mock.MarshalizerMock{}, + accounts, + shardCoordinator, + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{ + ProbableHighestNonceCalled: func() uint64 { + return 0 + }, + GetHighestFinalBlockNonceCalled: func() uint64 { + return 0 + }, + }, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + tc, + &mock.Uint64ByteSliceConverterMock{}, + ) -func TestShardProcessor_CommitBlockMarshalizerFailForHeaderShouldErr(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - rootHash := []byte("root hash to be tested") - accounts := &mock.AccountsStub{ - RootHashCalled: func() ([]byte, error) { - return rootHash, nil - }, - RevertToSnapshotCalled: func(snapshot int) error { - return nil - }, - } - errMarshalizer := errors.New("failure") - hdr := &block.Header{ - Nonce: 1, - Round: 1, - PubKeysBitmap: []byte("0100101"), - Signature: []byte("signature"), - RootHash: rootHash, - } - body := make(block.Body, 0) - marshalizer := &mock.MarshalizerStub{ - MarshalCalled: func(obj interface{}) (i []byte, e error) { - if reflect.DeepEqual(obj, hdr) { - return nil, errMarshalizer - } - - return []byte("obj"), nil - }, - } - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - 
tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - marshalizer, - accounts, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - blkc := createTestBlockchain() - - err := sp.CommitBlock(blkc, hdr, body) - assert.Equal(t, errMarshalizer, err) + // should return err + err2 := sp.ProcessBlock(blkc, &hdr, body, haveTime) + assert.Equal(t, err, err2) + assert.True(t, wasCalled) } -func TestShardProcessor_CommitBlockStorageFailsForHeaderShouldErr(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - errPersister := errors.New("failure") - wasCalled := false - rootHash := []byte("root hash to be tested") - accounts := &mock.AccountsStub{ - CommitCalled: func() ([]byte, error) { - return nil, nil - }, - RootHashCalled: func() ([]byte, error) { - return rootHash, nil - }, - RevertToSnapshotCalled: func(snapshot int) error { - return nil - }, - } - hdr := &block.Header{ - Nonce: 1, - Round: 1, - PubKeysBitmap: []byte("0100101"), - Signature: []byte("signature"), - RootHash: rootHash, - } - body := make(block.Body, 0) - hdrUnit := &mock.StorerStub{ - PutCalled: func(key, data []byte) error { - wasCalled = true - return errPersister - }, - HasCalled: func(key []byte) error { - return nil - }, - } - store := initStore() - store.AddStorer(dataRetriever.BlockHeaderUnit, hdrUnit) - - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - store, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - accounts, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{ - AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeader data.HeaderHandler, finalHeaderHash []byte) error { - return nil - }, - }, - &mock.BlocksTrackerMock{ - AddBlockCalled: func(headerHandler data.HeaderHandler) { - }, - UnnotarisedBlocksCalled: func() []data.HeaderHandler { - return make([]data.HeaderHandler, 0) - }, - }, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - blkc, _ := blockchain.NewBlockChain( - generateTestCache(), - ) - - _ = blkc.SetAppStatusHandler(&mock.AppStatusHandlerStub{ - SetUInt64ValueHandler: func(key string, value uint64) {}, - }) - - err := sp.CommitBlock(blkc, hdr, body) - assert.True(t, wasCalled) - assert.Nil(t, err) -} +func TestShardProcessor_ProcessBlockWithErrOnVerifyStateRootCallShouldRevertState(t *testing.T) { + t.Parallel() -func TestShardProcessor_CommitBlockStorageFailsForBodyShouldWork(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - wasCalled := false - errPersister := errors.New("failure") - rootHash := []byte("root hash to be tested") - accounts := &mock.AccountsStub{ - RootHashCalled: func() ([]byte, error) { - return rootHash, nil - }, - CommitCalled: func() (i []byte, e error) { - return nil, nil - }, - RevertToSnapshotCalled: func(snapshot int) error { - return nil - }, - } - hdr := &block.Header{ - Nonce: 1, - Round: 1, - PubKeysBitmap: []byte("0100101"), - Signature: []byte("signature"), - RootHash: rootHash, - } - mb := block.MiniBlock{} - body := make(block.Body, 0) - body = 
append(body, &mb) - - miniBlockUnit := &mock.StorerStub{ - PutCalled: func(key, data []byte) error { - wasCalled = true - return errPersister - }, - } - store := initStore() - store.AddStorer(dataRetriever.MiniBlockUnit, miniBlockUnit) - - sp, err := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - store, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - accounts, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{ - AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeader data.HeaderHandler, finalHeaderHash []byte) error { - return nil - }, - }, - &mock.BlocksTrackerMock{ - AddBlockCalled: func(headerHandler data.HeaderHandler) { - }, - UnnotarisedBlocksCalled: func() []data.HeaderHandler { - return make([]data.HeaderHandler, 0) - }, - }, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - assert.Nil(t, err) - - blkc, _ := blockchain.NewBlockChain( - generateTestCache(), - ) - - _ = blkc.SetAppStatusHandler(&mock.AppStatusHandlerStub{ - SetUInt64ValueHandler: func(key string, value uint64) {}, - }) - - err = sp.CommitBlock(blkc, hdr, body) - - assert.Nil(t, err) - assert.True(t, wasCalled) -} + randSeed := []byte("rand seed") + txHash := []byte("tx_hash1") + blkc := &blockchain.BlockChain{ + CurrentBlockHeader: &block.Header{ + Nonce: 0, + RandSeed: randSeed, + }, + } + body := make(block.Body, 0) + txHashes := make([][]byte, 0) + txHashes = append(txHashes, txHash) + miniblock := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 0, + TxHashes: txHashes, + } + body = append(body, &miniblock) -func TestShardProcessor_CommitBlockNilNoncesDataPoolShouldErr(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - rootHash := []byte("root hash to be tested") - accounts := &mock.AccountsStub{ - RootHashCalled: func() ([]byte, error) { - return rootHash, nil - }, - RevertToSnapshotCalled: func(snapshot int) error { - return nil - }, - } - hdr := &block.Header{ - Nonce: 1, - Round: 1, - PubKeysBitmap: []byte("0100101"), - Signature: []byte("signature"), - RootHash: rootHash, - } - body := make(block.Body, 0) - store := initStore() - - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - store, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - accounts, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - tdp.HeadersNoncesCalled = func() dataRetriever.Uint64SyncMapCacher { - return nil - } - blkc := createTestBlockchain() - err := sp.CommitBlock(blkc, hdr, body) - - assert.Equal(t, process.ErrNilDataPoolHolder, err) -} + hasher := &mock.HasherStub{} + marshalizer := &mock.MarshalizerMock{} -func TestShardProcessor_CommitBlockNoTxInPoolShouldErr(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - - txCache := &mock.CacherStub{ - PeekCalled: func(key []byte) (value interface{}, ok bool) { - return nil, false - }, - LenCalled: func() int { - return 0 - }, - } - tdp.TransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{ - 
ShardDataStoreCalled: func(id string) (c storage.Cacher) { - return txCache - }, - RemoveSetOfDataFromPoolCalled: func(keys [][]byte, id string) { - }, - SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { - if reflect.DeepEqual(key, []byte("tx1_hash")) { - return &transaction.Transaction{Nonce: 10}, true - } - return nil, false - }, - RegisterHandlerCalled: func(i func(key []byte)) { - - }, - } - } - - txHash := []byte("txHash") - rootHash := []byte("root hash") - hdrHash := []byte("header hash") - hdr := &block.Header{ - Nonce: 1, - Round: 1, - PubKeysBitmap: []byte("0100101"), - Signature: []byte("signature"), - RootHash: rootHash, - } - mb := block.MiniBlock{ - TxHashes: [][]byte{txHash}, - } - body := block.Body{&mb} - accounts := &mock.AccountsStub{ - CommitCalled: func() (i []byte, e error) { - return rootHash, nil - }, - RootHashCalled: func() ([]byte, error) { - return rootHash, nil - }, - RevertToSnapshotCalled: func(snapshot int) error { - return nil - }, - } - fd := &mock.ForkDetectorMock{ - AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeader data.HeaderHandler, finalHeaderHash []byte) error { - return nil - }, - } - hasher := &mock.HasherStub{} - hasher.ComputeCalled = func(s string) []byte { - return hdrHash - } - store := initStore() - - factory, _ := shard.NewPreProcessorsContainerFactory( - mock.NewMultiShardsCoordinatorMock(3), - initStore(), - &mock.MarshalizerMock{}, - &mock.HasherMock{}, - tdp, - &mock.AddressConverterMock{}, - initAccountsMock(), - &mock.RequestHandlerMock{}, - &mock.TxProcessorMock{}, - &mock.SCProcessorMock{}, - &mock.SmartContractResultsProcessorMock{}, - &mock.RewardTxProcessorMock{}, - ) - container, _ := factory.Create() - - tc, err := coordinator.NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(3), - initAccountsMock(), - tdp, - &mock.RequestHandlerMock{}, - container, - &mock.InterimProcessorContainerMock{}, - ) - - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - store, - hasher, - &mock.MarshalizerMock{}, - accounts, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - fd, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - tc, - &mock.Uint64ByteSliceConverterMock{}, - ) - - blkc := createTestBlockchain() - - err = sp.CommitBlock(blkc, hdr, body) - assert.Equal(t, process.ErrMissingTransaction, err) -} + mbbytes, _ := marshalizer.Marshal(miniblock) + mbHash := hasher.Compute(string(mbbytes)) + mbHdr := block.MiniBlockHeader{ + SenderShardID: 0, + ReceiverShardID: 0, + TxCount: uint32(len(txHashes)), + Hash: mbHash} + mbHdrs := make([]block.MiniBlockHeader, 0) + mbHdrs = append(mbHdrs, mbHdr) + + hdr := block.Header{ + Round: 1, + Nonce: 1, + PrevHash: []byte(""), + PrevRandSeed: randSeed, + Signature: []byte("signature"), + PubKeysBitmap: []byte("00110"), + ShardId: 0, + RootHash: []byte("rootHashX"), + MiniBlockHeaders: mbHdrs, + } -func TestShardProcessor_CommitBlockOkValsShouldWork(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - txHash := []byte("tx_hash1") - - rootHash := []byte("root hash") - hdrHash := []byte("header hash") - randSeed := []byte("rand seed") - - prevHdr := &block.Header{ - Nonce: 0, - Round: 0, - PubKeysBitmap: rootHash, - PrevHash: hdrHash, - Signature: rootHash, - RootHash: rootHash, - RandSeed: randSeed, - } - - hdr := &block.Header{ - Nonce: 1, - 
Round: 1, - PubKeysBitmap: rootHash, - PrevHash: hdrHash, - Signature: rootHash, - RootHash: rootHash, - PrevRandSeed: randSeed, - } - mb := block.MiniBlock{ - TxHashes: [][]byte{txHash}, - } - body := block.Body{&mb} - - mbHdr := block.MiniBlockHeader{ - TxCount: uint32(len(mb.TxHashes)), - Hash: hdrHash, - } - mbHdrs := make([]block.MiniBlockHeader, 0) - mbHdrs = append(mbHdrs, mbHdr) - hdr.MiniBlockHeaders = mbHdrs - - accounts := &mock.AccountsStub{ - CommitCalled: func() (i []byte, e error) { - return rootHash, nil - }, - RootHashCalled: func() ([]byte, error) { - return rootHash, nil - }, - } - forkDetectorAddCalled := false - fd := &mock.ForkDetectorMock{ - AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeader data.HeaderHandler, finalHeaderHash []byte) error { - if header == hdr { - forkDetectorAddCalled = true - return nil - } - - return errors.New("should have not got here") - }, - } - hasher := &mock.HasherStub{} - hasher.ComputeCalled = func(s string) []byte { - return hdrHash - } - store := initStore() - - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - store, - hasher, - &mock.MarshalizerMock{}, - accounts, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - fd, - &mock.BlocksTrackerMock{ - AddBlockCalled: func(headerHandler data.HeaderHandler) { - }, - UnnotarisedBlocksCalled: func() []data.HeaderHandler { - return make([]data.HeaderHandler, 0) - }, - }, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - blkc := createTestBlockchain() - blkc.GetCurrentBlockHeaderCalled = func() data.HeaderHandler { - return prevHdr - } - blkc.GetCurrentBlockHeaderHashCalled = func() []byte { - return hdrHash - } - err := sp.ProcessBlock(blkc, hdr, body, haveTime) - assert.Nil(t, err) - err = sp.CommitBlock(blkc, hdr, body) - assert.Nil(t, err) - assert.True(t, forkDetectorAddCalled) - assert.Equal(t, hdrHash, blkc.GetCurrentBlockHeaderHash()) - //this should sleep as there is an async call to display current header and block in CommitBlock - time.Sleep(time.Second) + sp, flags, _ := defaultShardProcessor() + // should return err + err := sp.ProcessBlock(blkc, &hdr, body, haveTime) + assert.Equal(t, process.ErrRootStateMissmatch, err) + assert.True(t, flags.revertToSnapshotCalled) } -func TestShardProcessor_CommitBlockCallsIndexerMethods(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - txHash := []byte("tx_hash1") - - rootHash := []byte("root hash") - hdrHash := []byte("header hash") - randSeed := []byte("rand seed") - - prevHdr := &block.Header{ - Nonce: 0, - Round: 0, - PubKeysBitmap: rootHash, - PrevHash: hdrHash, - Signature: rootHash, - RootHash: rootHash, - RandSeed: randSeed, - } - - hdr := &block.Header{ - Nonce: 1, - Round: 1, - PubKeysBitmap: rootHash, - PrevHash: hdrHash, - Signature: rootHash, - RootHash: rootHash, - PrevRandSeed: randSeed, - } - mb := block.MiniBlock{ - TxHashes: [][]byte{txHash}, - } - body := block.Body{&mb} - - mbHdr := block.MiniBlockHeader{ - TxCount: uint32(len(mb.TxHashes)), - Hash: hdrHash, - } - mbHdrs := make([]block.MiniBlockHeader, 0) - mbHdrs = append(mbHdrs, mbHdr) - hdr.MiniBlockHeaders = mbHdrs - - accounts := &mock.AccountsStub{ - CommitCalled: func() (i []byte, e error) { - return rootHash, nil - }, - RootHashCalled: func() ([]byte, error) { - return 
rootHash, nil - }, - } - fd := &mock.ForkDetectorMock{ - AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeader data.HeaderHandler, finalHeaderHash []byte) error { - return nil - }, - } - hasher := &mock.HasherStub{} - hasher.ComputeCalled = func(s string) []byte { - return hdrHash - } - store := initStore() - - var saveBlockCalled map[string]data.TransactionHandler - saveBlockCalledMutex := sync.Mutex{} - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{ - IndexerCalled: func() indexer.Indexer { - return &mock.IndexerMock{ - SaveBlockCalled: func(body data.BodyHandler, header data.HeaderHandler, txPool map[string]data.TransactionHandler) { - saveBlockCalledMutex.Lock() - saveBlockCalled = txPool - saveBlockCalledMutex.Unlock() - }, - } - }, - }, - tdp, - store, - hasher, - &mock.MarshalizerMock{}, - accounts, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - fd, - &mock.BlocksTrackerMock{ - AddBlockCalled: func(headerHandler data.HeaderHandler) { - }, - UnnotarisedBlocksCalled: func() []data.HeaderHandler { - return make([]data.HeaderHandler, 0) - }, - }, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{ - GetAllCurrentUsedTxsCalled: func(blockType block.Type) map[string]data.TransactionHandler { - switch blockType { - case block.TxBlock: - return map[string]data.TransactionHandler{ - "tx_1": &transaction.Transaction{Nonce: 1}, - "tx_2": &transaction.Transaction{Nonce: 2}, - } - case block.SmartContractResultBlock: - return map[string]data.TransactionHandler{ - "utx_1": &smartContractResult.SmartContractResult{Nonce: 1}, - "utx_2": &smartContractResult.SmartContractResult{Nonce: 2}, - } - default: - return nil - } - }, - }, - &mock.Uint64ByteSliceConverterMock{}, - ) - - blkc := createTestBlockchain() - blkc.GetCurrentBlockHeaderCalled = func() data.HeaderHandler { - return prevHdr - } - blkc.GetCurrentBlockHeaderHashCalled = func() []byte { - return hdrHash - } - err := sp.ProcessBlock(blkc, hdr, body, haveTime) - assert.Nil(t, err) - err = sp.CommitBlock(blkc, hdr, body) - assert.Nil(t, err) - - // Wait for the index block go routine to start - time.Sleep(time.Second * 2) - - saveBlockCalledMutex.Lock() - wasCalled := saveBlockCalled - saveBlockCalledMutex.Unlock() - - assert.Equal(t, 4, len(wasCalled)) -} +func TestShardProcessor_ProcessBlockOnlyIntraShardShouldPass(t *testing.T) { + t.Parallel() -func TestShardProcessor_CreateTxBlockBodyWithDirtyAccStateShouldErr(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - journalLen := func() int { return 3 } - revToSnapshot := func(snapshot int) error { return nil } - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{ - JournalLenCalled: journalLen, - RevertToSnapshotCalled: revToSnapshot, - }, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - bl, err := sp.CreateBlockBody(0, func() bool { return true }) - // nil block - assert.Nil(t, bl) - // error - assert.Equal(t, process.ErrAccountStateDirty, err) -} + 
randSeed := []byte("rand seed") + txHash := []byte("tx_hash1") + blkc := &blockchain.BlockChain{ + CurrentBlockHeader: &block.Header{ + Nonce: 0, + RandSeed: randSeed, + }, + } + rootHash := []byte("rootHash") + body := make(block.Body, 0) + txHashes := make([][]byte, 0) + txHashes = append(txHashes, txHash) + miniblock := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 0, + TxHashes: txHashes, + } + body = append(body, &miniblock) -func TestShardProcessor_CreateTxBlockBodyWithNoTimeShouldEmptyBlock(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - journalLen := func() int { return 0 } - rootHashfunc := func() ([]byte, error) { - return []byte("roothash"), nil - } - revToSnapshot := func(snapshot int) error { return nil } - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{ - JournalLenCalled: journalLen, - RootHashCalled: rootHashfunc, - RevertToSnapshotCalled: revToSnapshot, - }, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - haveTime := func() bool { - return false - } - bl, err := sp.CreateBlockBody(0, haveTime) - // no error - assert.Equal(t, process.ErrTimeIsOut, err) - // no miniblocks - assert.Nil(t, bl) -} + hasher := &mock.HasherStub{} + marshalizer := &mock.MarshalizerMock{} -func TestShardProcessor_CreateTxBlockBodyOK(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - journalLen := func() int { return 0 } - rootHashfunc := func() ([]byte, error) { - return []byte("roothash"), nil - } - haveTime := func() bool { - return true - } - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{ - JournalLenCalled: journalLen, - RootHashCalled: rootHashfunc, - }, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - blk, err := sp.CreateBlockBody(0, haveTime) - assert.NotNil(t, blk) - assert.Nil(t, err) -} + mbbytes, _ := marshalizer.Marshal(miniblock) + mbHash := hasher.Compute(string(mbbytes)) + mbHdr := block.MiniBlockHeader{ + SenderShardID: 0, + ReceiverShardID: 0, + TxCount: uint32(len(txHashes)), + Hash: mbHash} + mbHdrs := make([]block.MiniBlockHeader, 0) + mbHdrs = append(mbHdrs, mbHdr) + + hdr := block.Header{ + Round: 1, + Nonce: 1, + PrevHash: []byte(""), + PrevRandSeed: randSeed, + Signature: []byte("signature"), + PubKeysBitmap: []byte("00110"), + ShardId: 0, + RootHash: rootHash, + MiniBlockHeaders: mbHdrs, + } -//------- ComputeNewNoncePrevHash + sp, flags, _ := defaultShardProcessor() -func TestNode_ComputeNewNoncePrevHashShouldWork(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - marshalizer := &mock.MarshalizerStub{} - hasher := &mock.HasherStub{} - be, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - initStore(), - hasher, - marshalizer, - 
initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - hdr, txBlock := createTestHdrTxBlockBody() - marshalizer.MarshalCalled = func(obj interface{}) (bytes []byte, e error) { - if hdr == obj { - return []byte("hdrHeaderMarshalized"), nil - } - if reflect.DeepEqual(txBlock, obj) { - return []byte("txBlockBodyMarshalized"), nil - } - return nil, nil - } - hasher.ComputeCalled = func(s string) []byte { - if s == "hdrHeaderMarshalized" { - return []byte("hdr hash") - } - if s == "txBlockBodyMarshalized" { - return []byte("tx block body hash") - } - return nil - } - _, err := be.ComputeHeaderHash(hdr) - assert.Nil(t, err) + // should return err + err := sp.ProcessBlock(blkc, &hdr, body, haveTime) + assert.Nil(t, err) + assert.False(t, flags.revertToSnapshotCalled) } -func createTestHdrTxBlockBody() (*block.Header, block.Body) { - hasher := mock.HasherMock{} - hdr := &block.Header{ - Nonce: 1, - ShardId: 2, - Epoch: 3, - Round: 4, - TimeStamp: uint64(11223344), - PrevHash: hasher.Compute("prev hash"), - PubKeysBitmap: []byte{255, 0, 128}, - Signature: hasher.Compute("signature"), - RootHash: hasher.Compute("root hash"), - } - txBlock := block.Body{ - { - ReceiverShardID: 0, - SenderShardID: 0, - TxHashes: [][]byte{ - hasher.Compute("txHash_0_1"), - hasher.Compute("txHash_0_2"), - }, - }, - { - ReceiverShardID: 1, - SenderShardID: 0, - TxHashes: [][]byte{ - hasher.Compute("txHash_1_1"), - hasher.Compute("txHash_1_2"), - }, - }, - { - ReceiverShardID: 2, - SenderShardID: 0, - TxHashes: [][]byte{ - hasher.Compute("txHash_2_1"), - }, - }, - { - ReceiverShardID: 3, - SenderShardID: 0, - TxHashes: make([][]byte, 0), - }, - } - return hdr, txBlock -} +func TestShardProcessor_ProcessBlockCrossShardWithoutMetaShouldFail(t *testing.T) { + t.Parallel() -//------- ComputeNewNoncePrevHash + randSeed := []byte("rand seed") + tdp := initDataPool([]byte("tx_hash1")) + txHash := []byte("tx_hash1") + blkc := &blockchain.BlockChain{ + CurrentBlockHeader: &block.Header{ + Nonce: 0, + RandSeed: randSeed, + }, + } + rootHash := []byte("rootHash") + body := make(block.Body, 0) + txHashes := make([][]byte, 0) + txHashes = append(txHashes, txHash) + miniblock := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 1, + TxHashes: txHashes, + } + body = append(body, &miniblock) -func TestShardProcessor_DisplayLogInfo(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - hasher := mock.HasherMock{} - hdr, txBlock := createTestHdrTxBlockBody() - shardCoordinator := mock.NewMultiShardsCoordinatorMock(3) - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - initStore(), - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - initAccountsMock(), - shardCoordinator, - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(shardCoordinator), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - assert.NotNil(t, sp) - hdr.PrevHash = hasher.Compute("prev hash") - sp.DisplayLogInfo(hdr, txBlock, []byte("tx_hash1"), shardCoordinator.NumberOfShards(), shardCoordinator.SelfId(), tdp) -} + shardCoordinator := 
mock.NewMultiShardsCoordinatorMock(3) + tx := &transaction.Transaction{} + tdp.Transactions().AddData(txHash, tx, shardCoordinator.CommunicationIdentifier(0)) -func TestBlockProcessor_CreateBlockHeaderShouldNotReturnNil(t *testing.T) { - t.Parallel() - bp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - initDataPool([]byte("tx_hash1")), - initStore(), - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - mbHeaders, err := bp.CreateBlockHeader(nil, 0, func() bool { - return true - }) - assert.Nil(t, err) - assert.NotNil(t, mbHeaders) - assert.Equal(t, 0, len(mbHeaders.(*block.Header).MiniBlockHeaders)) -} + hasher := &mock.HasherStub{} + marshalizer := &mock.MarshalizerMock{} -func TestShardProcessor_CreateBlockHeaderShouldErrWhenMarshalizerErrors(t *testing.T) { - t.Parallel() - bp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - initDataPool([]byte("tx_hash1")), - initStore(), - &mock.HasherStub{}, - &mock.MarshalizerMock{Fail: true}, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - body := block.Body{ - { - ReceiverShardID: 1, - SenderShardID: 0, - TxHashes: make([][]byte, 0), - }, - { - ReceiverShardID: 2, - SenderShardID: 0, - TxHashes: make([][]byte, 0), - }, - { - ReceiverShardID: 3, - SenderShardID: 0, - TxHashes: make([][]byte, 0), - }, - } - mbHeaders, err := bp.CreateBlockHeader(body, 0, func() bool { - return true - }) - assert.NotNil(t, err) - assert.Nil(t, mbHeaders) -} + mbbytes, _ := marshalizer.Marshal(miniblock) + mbHash := hasher.Compute(string(mbbytes)) + mbHdr := block.MiniBlockHeader{ + ReceiverShardID: 0, + SenderShardID: 1, + TxCount: uint32(len(txHashes)), + Hash: mbHash} + mbHdrs := make([]block.MiniBlockHeader, 0) + mbHdrs = append(mbHdrs, mbHdr) + + hdr := block.Header{ + Round: 1, + Nonce: 1, + PrevHash: []byte(""), + PrevRandSeed: randSeed, + Signature: []byte("signature"), + PubKeysBitmap: []byte("00110"), + ShardId: 0, + RootHash: rootHash, + MiniBlockHeaders: mbHdrs, + } + // set accounts not dirty + journalLen := func() int { return 0 } + wasCalled := false + revertToSnapshot := func(snapshot int) error { + wasCalled = true + return nil + } + rootHashCalled := func() ([]byte, error) { + return rootHash, nil + } + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.AccountsStub{ + JournalLenCalled: journalLen, + RevertToSnapshotCalled: revertToSnapshot, + RootHashCalled: rootHashCalled, + }, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) -func 
TestShardProcessor_CreateBlockHeaderReturnsOK(t *testing.T) { - t.Parallel() - bp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - initDataPool([]byte("tx_hash1")), - initStore(), - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - body := block.Body{ - { - ReceiverShardID: 1, - SenderShardID: 0, - TxHashes: make([][]byte, 0), - }, - { - ReceiverShardID: 2, - SenderShardID: 0, - TxHashes: make([][]byte, 0), - }, - { - ReceiverShardID: 3, - SenderShardID: 0, - TxHashes: make([][]byte, 0), - }, - } - mbHeaders, err := bp.CreateBlockHeader(body, 0, func() bool { - return true - }) - assert.Nil(t, err) - assert.Equal(t, len(body), len(mbHeaders.(*block.Header).MiniBlockHeaders)) + // should return err + err := sp.ProcessBlock(blkc, &hdr, body, haveTime) + assert.Equal(t, process.ErrCrossShardMBWithoutConfirmationFromMeta, err) + assert.False(t, wasCalled) } -func TestShardProcessor_CommitBlockShouldRevertAccountStateWhenErr(t *testing.T) { - t.Parallel() - // set accounts dirty - journalEntries := 3 - revToSnapshot := func(snapshot int) error { - journalEntries = 0 - return nil - } - bp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - initDataPool([]byte("tx_hash1")), - initStore(), - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{ - RevertToSnapshotCalled: revToSnapshot, - }, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - err := bp.CommitBlock(nil, nil, nil) - assert.NotNil(t, err) - assert.Equal(t, 0, journalEntries) -} +func TestShardProcessor_ProcessBlockCrossShardWithMetaShouldPass(t *testing.T) { + t.Parallel() -func TestShardProcessor_MarshalizedDataToBroadcastShouldWork(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - txHash0 := []byte("txHash0") - mb0 := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 0, - TxHashes: [][]byte{txHash0}, - } - txHash1 := []byte("txHash1") - mb1 := block.MiniBlock{ - ReceiverShardID: 1, - SenderShardID: 0, - TxHashes: [][]byte{txHash1}, - } - body := make(block.Body, 0) - body = append(body, &mb0) - body = append(body, &mb1) - body = append(body, &mb0) - body = append(body, &mb1) - marshalizer := &mock.MarshalizerMock{ - Fail: false, - } - - factory, _ := shard.NewPreProcessorsContainerFactory( - mock.NewMultiShardsCoordinatorMock(3), - initStore(), - marshalizer, - &mock.HasherMock{}, - tdp, - &mock.AddressConverterMock{}, - initAccountsMock(), - &mock.RequestHandlerMock{}, - &mock.TxProcessorMock{}, - &mock.SCProcessorMock{}, - &mock.SmartContractResultsProcessorMock{}, - &mock.RewardTxProcessorMock{}, - ) - container, _ := factory.Create() - - tc, err := coordinator.NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(3), - initAccountsMock(), - tdp, - &mock.RequestHandlerMock{}, - container, - &mock.InterimProcessorContainerMock{}, - ) - - sp, _ := blproc.NewShardProcessor( - 
&mock.ServiceContainerMock{}, - tdp, - initStore(), - &mock.HasherStub{}, - marshalizer, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - tc, - &mock.Uint64ByteSliceConverterMock{}, - ) - msh, mstx, err := sp.MarshalizedDataToBroadcast(&block.Header{}, body) - assert.Nil(t, err) - assert.NotNil(t, msh) - assert.NotNil(t, mstx) - _, found := msh[0] - assert.False(t, found) - - expectedBody := make(block.Body, 0) - err = marshalizer.Unmarshal(&expectedBody, msh[1]) - assert.Nil(t, err) - assert.Equal(t, len(expectedBody), 2) - assert.Equal(t, &mb1, expectedBody[0]) - assert.Equal(t, &mb1, expectedBody[1]) -} + tdp, blkc, rootHash, body, txHashes, hasher, marshalizer, _, mbHash := initBasicTestData() + mbHdr := block.MiniBlockHeader{ + ReceiverShardID: 0, + SenderShardID: 1, + TxCount: uint32(len(txHashes)), + Hash: mbHash} + mbHdrs := make([]block.MiniBlockHeader, 0) + mbHdrs = append(mbHdrs, mbHdr) + + randSeed := []byte("rand seed") + lastHdr := blkc.GetCurrentBlockHeader() + prevHash, _ := core.CalculateHash(marshalizer, hasher, lastHdr) + hdr := initBlockHeader(prevHash, randSeed, rootHash, mbHdrs) + + shardMiniBlock := block.ShardMiniBlockHeader{ + ReceiverShardId: mbHdr.ReceiverShardID, + SenderShardId: mbHdr.SenderShardID, + TxCount: mbHdr.TxCount, + Hash: mbHdr.Hash, + } + shardMiniblockHdrs := make([]block.ShardMiniBlockHeader, 0) + shardMiniblockHdrs = append(shardMiniblockHdrs, shardMiniBlock) + shardHeader := block.ShardData{ + ShardMiniBlockHeaders: shardMiniblockHdrs, + } + shardHdrs := make([]block.ShardData, 0) + shardHdrs = append(shardHdrs, shardHeader) -func TestShardProcessor_MarshalizedDataWrongType(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - marshalizer := &mock.MarshalizerMock{ - Fail: false, - } - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - initStore(), - &mock.HasherStub{}, - marshalizer, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - wr := &wrongBody{} - msh, mstx, err := sp.MarshalizedDataToBroadcast(&block.Header{}, wr) - assert.Equal(t, process.ErrWrongTypeAssertion, err) - assert.Nil(t, msh) - assert.Nil(t, mstx) -} + meta := block.MetaBlock{ + Nonce: 1, + ShardInfo: shardHdrs, + RandSeed: randSeed, + } + metaBytes, _ := marshalizer.Marshal(meta) + metaHash := hasher.Compute(string(metaBytes)) -func TestShardProcessor_MarshalizedDataNilInput(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - marshalizer := &mock.MarshalizerMock{ - Fail: false, - } - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - initStore(), - &mock.HasherStub{}, - marshalizer, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) 
- msh, mstx, err := sp.MarshalizedDataToBroadcast(nil, nil) - assert.Equal(t, process.ErrNilMiniBlocks, err) - assert.Nil(t, msh) - assert.Nil(t, mstx) -} + tdp.MetaBlocks().Put(metaHash, meta) -func TestShardProcessor_MarshalizedDataMarshalWithoutSuccess(t *testing.T) { - t.Parallel() - wasCalled := false - tdp := initDataPool([]byte("tx_hash1")) - txHash0 := []byte("txHash0") - mb0 := block.MiniBlock{ - ReceiverShardID: 1, - SenderShardID: 0, - TxHashes: [][]byte{txHash0}, - } - body := make(block.Body, 0) - body = append(body, &mb0) - marshalizer := &mock.MarshalizerStub{ - MarshalCalled: func(obj interface{}) ([]byte, error) { - wasCalled = true - return nil, process.ErrMarshalWithoutSuccess - }, - } - - factory, _ := shard.NewPreProcessorsContainerFactory( - mock.NewMultiShardsCoordinatorMock(3), - initStore(), - marshalizer, - &mock.HasherMock{}, - tdp, - &mock.AddressConverterMock{}, - initAccountsMock(), - &mock.RequestHandlerMock{}, - &mock.TxProcessorMock{}, - &mock.SCProcessorMock{}, - &mock.SmartContractResultsProcessorMock{}, - &mock.RewardTxProcessorMock{}, - ) - container, _ := factory.Create() - - tc, err := coordinator.NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(3), - initAccountsMock(), - tdp, - &mock.RequestHandlerMock{}, - container, - &mock.InterimProcessorContainerMock{}, - ) - - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - initStore(), - &mock.HasherStub{}, - marshalizer, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - tc, - &mock.Uint64ByteSliceConverterMock{}, - ) - - msh, mstx, err := sp.MarshalizedDataToBroadcast(&block.Header{}, body) - assert.Nil(t, err) - assert.True(t, wasCalled) - assert.Equal(t, 0, len(msh)) - assert.Equal(t, 0, len(mstx)) -} + meta = block.MetaBlock{ + Nonce: 2, + ShardInfo: make([]block.ShardData, 0), + PrevRandSeed: randSeed, + } + metaBytes, _ = marshalizer.Marshal(meta) + metaHash = hasher.Compute(string(metaBytes)) -//------- receivedMetaBlock + tdp.MetaBlocks().Put(metaHash, meta) -func TestShardProcessor_ReceivedMetaBlockShouldRequestMissingMiniBlocks(t *testing.T) { - t.Parallel() - - hasher := mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} - dataPool := mock.NewPoolsHolderMock() - - //we will have a metablock that will return 3 miniblock hashes - //1 miniblock hash will be in cache - //2 will be requested on network - - miniBlockHash1 := []byte("miniblock hash 1 found in cache") - miniBlockHash2 := []byte("miniblock hash 2") - miniBlockHash3 := []byte("miniblock hash 3") - - metaBlock := mock.HeaderHandlerStub{ - GetMiniBlockHeadersWithDstCalled: func(destId uint32) map[string]uint32 { - return map[string]uint32{ - string(miniBlockHash1): 0, - string(miniBlockHash2): 0, - string(miniBlockHash3): 0, - } - }, - } - - //put this metaBlock inside datapool - metaBlockHash := []byte("metablock hash") - dataPool.MetaBlocks().Put(metaBlockHash, &metaBlock) - //put the existing miniblock inside datapool - dataPool.MiniBlocks().Put(miniBlockHash1, &block.MiniBlock{}) - - miniBlockHash1Requested := int32(0) - miniBlockHash2Requested := int32(0) - miniBlockHash3Requested := int32(0) - - requestHandler := &mock.RequestHandlerMock{RequestMiniBlockHandlerCalled: func(destShardID uint32, miniblockHash []byte) { - if bytes.Equal(miniBlockHash1, 
miniblockHash) { - atomic.AddInt32(&miniBlockHash1Requested, 1) - } - if bytes.Equal(miniBlockHash2, miniblockHash) { - atomic.AddInt32(&miniBlockHash2Requested, 1) - } - if bytes.Equal(miniBlockHash3, miniblockHash) { - atomic.AddInt32(&miniBlockHash3Requested, 1) - } - }} - - tc, _ := coordinator.NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(3), - initAccountsMock(), - dataPool, - requestHandler, - &mock.PreProcessorContainerMock{}, - &mock.InterimProcessorContainerMock{}, - ) - - bp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - dataPool, - initStore(), - hasher, - marshalizer, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - requestHandler, - tc, - &mock.Uint64ByteSliceConverterMock{}, - ) - bp.ReceivedMetaBlock(metaBlockHash) - - //we have to wait to be sure txHash1Requested is not incremented by a late call - time.Sleep(time.Second) - - assert.Equal(t, int32(0), atomic.LoadInt32(&miniBlockHash1Requested)) - assert.Equal(t, int32(1), atomic.LoadInt32(&miniBlockHash2Requested)) - assert.Equal(t, int32(1), atomic.LoadInt32(&miniBlockHash2Requested)) -} + // set accounts not dirty + journalLen := func() int { return 0 } + wasCalled := false + revertToSnapshot := func(snapshot int) error { + wasCalled = true + return nil + } + rootHashCalled := func() ([]byte, error) { + return rootHash, nil + } + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + hasher, + marshalizer, + &mock.AccountsStub{ + JournalLenCalled: journalLen, + RevertToSnapshotCalled: revertToSnapshot, + RootHashCalled: rootHashCalled, + }, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) -//--------- receivedMetaBlockNoMissingMiniBlocks -func TestShardProcessor_ReceivedMetaBlockNoMissingMiniBlocksShouldPass(t *testing.T) { - t.Parallel() - - hasher := mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} - dataPool := mock.NewPoolsHolderMock() - - //we will have a metablock that will return 3 miniblock hashes - //1 miniblock hash will be in cache - //2 will be requested on network - - miniBlockHash1 := []byte("miniblock hash 1 found in cache") - - metaBlock := mock.HeaderHandlerStub{ - GetMiniBlockHeadersWithDstCalled: func(destId uint32) map[string]uint32 { - return map[string]uint32{ - string(miniBlockHash1): 0, - } - }, - } - - //put this metaBlock inside datapool - metaBlockHash := []byte("metablock hash") - dataPool.MetaBlocks().Put(metaBlockHash, &metaBlock) - //put the existing miniblock inside datapool - dataPool.MiniBlocks().Put(miniBlockHash1, &block.MiniBlock{}) - - noOfMissingMiniBlocks := int32(0) - - requestHandler := &mock.RequestHandlerMock{RequestMiniBlockHandlerCalled: func(destShardID uint32, miniblockHash []byte) { - atomic.AddInt32(&noOfMissingMiniBlocks, 1) - }} - - tc, _ := coordinator.NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(3), - initAccountsMock(), - dataPool, - requestHandler, - &mock.PreProcessorContainerMock{}, - &mock.InterimProcessorContainerMock{}, - ) - - sp, _ := 
blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - dataPool, - initStore(), - hasher, - marshalizer, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - requestHandler, - tc, - &mock.Uint64ByteSliceConverterMock{}, - ) - sp.ReceivedMetaBlock(metaBlockHash) - assert.Equal(t, int32(0), atomic.LoadInt32(&noOfMissingMiniBlocks)) + // should return err + err := sp.ProcessBlock(blkc, &hdr, body, haveTime) + assert.Equal(t, process.ErrCrossShardMBWithoutConfirmationFromMeta, err) + assert.False(t, wasCalled) } -//--------- createAndProcessCrossMiniBlocksDstMe -func TestShardProcessor_CreateAndProcessCrossMiniBlocksDstMe(t *testing.T) { - t.Parallel() - - tdp := mock.NewPoolsHolderMock() - txHash := []byte("tx_hash1") - tdp.Transactions().AddData(txHash, &transaction.Transaction{}, process.ShardCacherIdentifier(1, 0)) - body := make(block.Body, 0) - txHashes := make([][]byte, 0) - txHashes = append(txHashes, txHash) - miniblock := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 1, - TxHashes: txHashes, - } - body = append(body, &miniblock) - - hasher := &mock.HasherStub{} - marshalizer := &mock.MarshalizerMock{} - - mbbytes, _ := marshalizer.Marshal(miniblock) - mbHash := hasher.Compute(string(mbbytes)) - mbHdr := block.MiniBlockHeader{ - ReceiverShardID: 0, - SenderShardID: 1, - TxCount: uint32(len(txHashes)), - Hash: mbHash} - mbHdrs := make([]block.MiniBlockHeader, 0) - mbHdrs = append(mbHdrs, mbHdr) - - shardMiniBlock := block.ShardMiniBlockHeader{ - ReceiverShardId: mbHdr.ReceiverShardID, - SenderShardId: mbHdr.SenderShardID, - TxCount: mbHdr.TxCount, - Hash: mbHdr.Hash, - } - shardMiniblockHdrs := make([]block.ShardMiniBlockHeader, 0) - shardMiniblockHdrs = append(shardMiniblockHdrs, shardMiniBlock) - shardHeader := block.ShardData{ - ShardMiniBlockHeaders: shardMiniblockHdrs, - } - shardHdrs := make([]block.ShardData, 0) - shardHdrs = append(shardHdrs, shardHeader) - - meta := &block.MetaBlock{ - Nonce: 1, - ShardInfo: make([]block.ShardData, 0), - Round: 1, - PrevRandSeed: []byte("roothash"), - } - metaBytes, _ := marshalizer.Marshal(meta) - metaHash := hasher.Compute(string(metaBytes)) - - tdp.MetaBlocks().Put(metaHash, meta) - - haveTimeTrue := func() bool { - return true - } - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{}, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - miniBlockSlice, usedMetaHdrsHashes, noOfTxs, err := sp.CreateAndProcessCrossMiniBlocksDstMe(3, 2, 2, haveTimeTrue) - assert.Equal(t, err == nil, true) - assert.Equal(t, len(miniBlockSlice) == 0, true) - assert.Equal(t, len(usedMetaHdrsHashes) == 0, true) - assert.Equal(t, noOfTxs, uint32(0)) -} +func TestShardProcessor_ProcessBlockHaveTimeLessThanZeroShouldErr(t *testing.T) { + t.Parallel() + txHash := []byte("tx_hash1") + tdp := initDataPool(txHash) + + randSeed := []byte("rand seed") + blkc := &blockchain.BlockChain{ + CurrentBlockHeader: &block.Header{ + Nonce: 1, + RandSeed: randSeed, 
+ }, + } + rootHash := []byte("rootHash") + body := make(block.Body, 0) + txHashes := make([][]byte, 0) + txHashes = append(txHashes, txHash) + miniblock := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 0, + TxHashes: txHashes, + } + body = append(body, &miniblock) -func TestShardProcessor_NewShardProcessorWrongTypeOfStartHeaderShouldErrWrongTypeAssertion(t *testing.T) { - t.Parallel() - - tdp := mock.NewPoolsHolderMock() - txHash := []byte(nil) - tdp.Transactions().AddData(txHash, &transaction.Transaction{}, process.ShardCacherIdentifier(1, 0)) - - startHeaders := createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)) - - startHeaders[sharding.MetachainShardId] = &block.Header{} - - sp, err := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{}, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - startHeaders, - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - assert.Nil(t, sp) - assert.Equal(t, process.ErrWrongTypeAssertion, err) -} + hasher := &mock.HasherMock{} + marshalizer := &mock.MarshalizerMock{} -func TestShardProcessor_CreateAndProcessCrossMiniBlocksDstMeProcessPartOfMiniBlocksInMetaBlock(t *testing.T) { - t.Parallel() - - haveTimeTrue := func() bool { - return true - } - tdp := mock.NewPoolsHolderMock() - destShardId := uint32(2) - - hasher := &mock.HasherStub{} - marshalizer := &mock.MarshalizerMock{} - miniblocks := make([]*block.MiniBlock, 6) - - txHash := []byte("txhash") - txHashes := make([][]byte, 0) - txHashes = append(txHashes, txHash) - - miniblock1 := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 1, - TxHashes: txHashes, - } - miniblock2 := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 2, - TxHashes: txHashes, - } - - miniBlocks := make([]block.MiniBlock, 0) - miniBlocks = append(miniBlocks, miniblock1, miniblock2) - - destShards := []uint32{1, 3, 4} - for i := 0; i < 6; i++ { - miniblocks[i], _ = createDummyMiniBlock(fmt.Sprintf("tx hash %d", i), marshalizer, hasher, destShardId, destShards[i/2]) - } - - //put 2 metablocks in pool - meta := &block.MetaBlock{ - Nonce: 1, - ShardInfo: createShardData(hasher, marshalizer, miniBlocks), - Round: 1, - PrevRandSeed: []byte("roothash"), - } - - mb1Hash := []byte("meta block 1") - tdp.MetaBlocks().Put( - mb1Hash, - meta, - ) - - meta = &block.MetaBlock{ - Nonce: 2, - ShardInfo: createShardData(hasher, marshalizer, miniBlocks), - Round: 2, - } - - mb2Hash := []byte("meta block 2") - tdp.MetaBlocks().Put( - mb2Hash, - meta, - ) - - meta = &block.MetaBlock{ - Nonce: 3, - ShardInfo: make([]block.ShardData, 0), - Round: 3, - PrevRandSeed: []byte("roothash"), - } - - mb3Hash := []byte("meta block 3") - tdp.MetaBlocks().Put( - mb3Hash, - meta, - ) - - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{}, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - miniBlocksReturned, usedMetaHdrsHashes, 
nrTxAdded, err := sp.CreateAndProcessCrossMiniBlocksDstMe(3, 2, 2, haveTimeTrue) - - assert.Equal(t, 0, len(miniBlocksReturned)) - assert.Equal(t, 0, len(usedMetaHdrsHashes)) - assert.Equal(t, uint32(0), nrTxAdded) - assert.Nil(t, err) + mbbytes, _ := marshalizer.Marshal(miniblock) + mbHash := hasher.Compute(string(mbbytes)) + mbHdr := block.MiniBlockHeader{ + SenderShardID: 0, + ReceiverShardID: 0, + TxCount: uint32(len(txHashes)), + Hash: mbHash} + mbHdrs := make([]block.MiniBlockHeader, 0) + mbHdrs = append(mbHdrs, mbHdr) + + currHdr := blkc.GetCurrentBlockHeader() + preHash, _ := core.CalculateHash(marshalizer, hasher, currHdr) + hdr := block.Header{ + Round: 2, + Nonce: 2, + PrevHash: preHash, + PrevRandSeed: randSeed, + Signature: []byte("signature"), + PubKeysBitmap: []byte("00110"), + ShardId: 0, + RootHash: rootHash, + MiniBlockHeaders: mbHdrs, + } + genesisBlocks := createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)) + sp, _ := blproc.NewShardProcessorEmptyWith3shards(tdp, genesisBlocks) + haveTimeLessThanZero := func() time.Duration { + return -1 * time.Millisecond + } + + // should return err + err := sp.ProcessBlock(blkc, &hdr, body, haveTimeLessThanZero) + assert.Equal(t, process.ErrTimeIsOut, err) } -//------- createMiniBlocks +func TestShardProcessor_ProcessBlockWithMissingMetaHdrShouldErr(t *testing.T) { + t.Parallel() -func TestShardProcessor_CreateMiniBlocksShouldWorkWithIntraShardTxs(t *testing.T) { - t.Parallel() - - hasher := mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} - dataPool := mock.NewPoolsHolderMock() - - //we will have a 3 txs in pool - - txHash1 := []byte("tx hash 1") - txHash2 := []byte("tx hash 2") - txHash3 := []byte("tx hash 3") - - senderShardId := uint32(0) - receiverShardId := uint32(0) - - tx1Nonce := uint64(45) - tx2Nonce := uint64(46) - tx3Nonce := uint64(47) - - //put the existing tx inside datapool - cacheId := process.ShardCacherIdentifier(senderShardId, receiverShardId) - dataPool.Transactions().AddData(txHash1, &transaction.Transaction{ - Nonce: tx1Nonce, - Data: string(txHash1), - }, cacheId) - dataPool.Transactions().AddData(txHash2, &transaction.Transaction{ - Nonce: tx2Nonce, - Data: string(txHash2), - }, cacheId) - dataPool.Transactions().AddData(txHash3, &transaction.Transaction{ - Nonce: tx3Nonce, - Data: string(txHash3), - }, cacheId) - - tx1ExecutionResult := uint64(0) - tx2ExecutionResult := uint64(0) - tx3ExecutionResult := uint64(0) - - txProcessorMock := &mock.TxProcessorMock{ - ProcessTransactionCalled: func(transaction *transaction.Transaction, round uint64) error { - //execution, in this context, means moving the tx nonce to itx corresponding execution result variable - if transaction.Data == string(txHash1) { - tx1ExecutionResult = transaction.Nonce - } - if transaction.Data == string(txHash2) { - tx2ExecutionResult = transaction.Nonce - } - if transaction.Data == string(txHash3) { - tx3ExecutionResult = transaction.Nonce - } - - return nil - }, - } - shardCoordinator := mock.NewMultiShardsCoordinatorMock(3) - accntAdapter := &mock.AccountsStub{ - RevertToSnapshotCalled: func(snapshot int) error { - assert.Fail(t, "revert should have not been called") - return nil - }, - JournalLenCalled: func() int { - return 0 - }, - } - - factory, _ := shard.NewPreProcessorsContainerFactory( - shardCoordinator, - initStore(), - marshalizer, - hasher, - dataPool, - &mock.AddressConverterMock{}, - accntAdapter, - &mock.RequestHandlerMock{}, - txProcessorMock, - &mock.SCProcessorMock{}, - 
&mock.SmartContractResultsProcessorMock{}, - &mock.RewardTxProcessorMock{}, - ) - container, _ := factory.Create() - - tc, err := coordinator.NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(3), - accntAdapter, - dataPool, - &mock.RequestHandlerMock{}, - container, - &mock.InterimProcessorContainerMock{}, - ) - - bp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - dataPool, - initStore(), - hasher, - marshalizer, - accntAdapter, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - tc, - &mock.Uint64ByteSliceConverterMock{}, - ) - - blockBody, err := bp.CreateMiniBlocks(1, 15000, 0, func() bool { return true }) - - assert.Nil(t, err) - //testing execution - assert.Equal(t, tx1Nonce, tx1ExecutionResult) - assert.Equal(t, tx2Nonce, tx2ExecutionResult) - assert.Equal(t, tx3Nonce, tx3ExecutionResult) - //one miniblock output - assert.Equal(t, 1, len(blockBody)) - //miniblock should have 3 txs - assert.Equal(t, 3, len(blockBody[0].TxHashes)) - //testing all 3 hashes are present in block body - assert.True(t, isInTxHashes(txHash1, blockBody[0].TxHashes)) - assert.True(t, isInTxHashes(txHash2, blockBody[0].TxHashes)) - assert.True(t, isInTxHashes(txHash3, blockBody[0].TxHashes)) -} + tdp, blkc, rootHash, body, txHashes, hasher, marshalizer, _, mbHash := initBasicTestData() + mbHdr := block.MiniBlockHeader{ + ReceiverShardID: 0, + SenderShardID: 1, + TxCount: uint32(len(txHashes)), + Hash: mbHash} + mbHdrs := make([]block.MiniBlockHeader, 0) + mbHdrs = append(mbHdrs, mbHdr) + + randSeed := []byte("rand seed") + lastHdr := blkc.GetCurrentBlockHeader() + prevHash, _ := core.CalculateHash(marshalizer, hasher, lastHdr) + hdr := initBlockHeader(prevHash, randSeed, rootHash, mbHdrs) + + shardMiniBlock := block.ShardMiniBlockHeader{ + ReceiverShardId: mbHdr.ReceiverShardID, + SenderShardId: mbHdr.SenderShardID, + TxCount: mbHdr.TxCount, + Hash: mbHdr.Hash, + } + shardMiniblockHdrs := make([]block.ShardMiniBlockHeader, 0) + shardMiniblockHdrs = append(shardMiniblockHdrs, shardMiniBlock) + shardHeader := block.ShardData{ + ShardMiniBlockHeaders: shardMiniblockHdrs, + } + shardHdrs := make([]block.ShardData, 0) + shardHdrs = append(shardHdrs, shardHeader) -func TestShardProcessor_GetProcessedMetaBlockFromPoolShouldWork(t *testing.T) { - t.Parallel() - - //we have 3 metablocks in pool each containing 2 miniblocks. 
- //blockbody will have 2 + 1 miniblocks from 2 out of the 3 metablocks - //The test should remove only one metablock - - destShardId := uint32(2) - - hasher := mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} - dataPool := mock.NewPoolsHolderMock() - - miniblocks := make([]*block.MiniBlock, 6) - miniblockHashes := make([][]byte, 6) - - destShards := []uint32{1, 3, 4} - for i := 0; i < 6; i++ { - mb, hash := createDummyMiniBlock(fmt.Sprintf("tx hash %d", i), marshalizer, hasher, destShardId, destShards[i/2]) - miniblocks[i] = mb - miniblockHashes[i] = hash - } - - //put 3 metablocks in pool - mb1Hash := []byte("meta block 1") - dataPool.MetaBlocks().Put( - mb1Hash, - createDummyMetaBlock(destShardId, destShards[0], miniblockHashes[0], miniblockHashes[1]), - ) - mb2Hash := []byte("meta block 2") - dataPool.MetaBlocks().Put( - mb2Hash, - createDummyMetaBlock(destShardId, destShards[1], miniblockHashes[2], miniblockHashes[3]), - ) - mb3Hash := []byte("meta block 3") - dataPool.MetaBlocks().Put( - mb3Hash, - createDummyMetaBlock(destShardId, destShards[2], miniblockHashes[4], miniblockHashes[5]), - ) - - shardCoordinator := mock.NewMultipleShardsCoordinatorMock() - shardCoordinator.CurrentShard = destShardId - shardCoordinator.SetNoShards(destShardId + 1) - - bp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - dataPool, - &mock.ChainStorerMock{}, - hasher, - marshalizer, - initAccountsMock(), - shardCoordinator, - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{ - GetHighestFinalBlockNonceCalled: func() uint64 { - return 0 - }, - }, - &mock.BlocksTrackerMock{ - RemoveNotarisedBlocksCalled: func(headerHandler data.HeaderHandler) error { - return nil - }, - }, - createGenesisBlocks(shardCoordinator), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - //create block body with first 3 miniblocks from miniblocks var - blockBody := block.Body{miniblocks[0], miniblocks[1], miniblocks[2]} - - hashes := make([][]byte, 0) - hashes = append(hashes, mb1Hash) - hashes = append(hashes, mb2Hash) - hashes = append(hashes, mb3Hash) - blockHeader := &block.Header{MetaBlockHashes: hashes} - - _, err := bp.GetProcessedMetaBlocksFromPool(blockBody, blockHeader) - - assert.Nil(t, err) - //check WasMiniBlockProcessed for remaining metablocks - metaBlock2Recov, _ := dataPool.MetaBlocks().Get(mb2Hash) - assert.True(t, (metaBlock2Recov.(data.HeaderHandler)).GetMiniBlockProcessed(miniblockHashes[2])) - assert.False(t, (metaBlock2Recov.(data.HeaderHandler)).GetMiniBlockProcessed(miniblockHashes[3])) - - metaBlock3Recov, _ := dataPool.MetaBlocks().Get(mb3Hash) - assert.False(t, (metaBlock3Recov.(data.HeaderHandler)).GetMiniBlockProcessed(miniblockHashes[4])) - assert.False(t, (metaBlock3Recov.(data.HeaderHandler)).GetMiniBlockProcessed(miniblockHashes[5])) -} + meta := block.MetaBlock{ + Nonce: 1, + ShardInfo: shardHdrs, + RandSeed: randSeed, + } + metaBytes, _ := marshalizer.Marshal(meta) + metaHash := hasher.Compute(string(metaBytes)) -func TestBlockProcessor_RestoreBlockIntoPoolsShouldErrNilBlockHeader(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - - be, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - initStore(), - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - 
&mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - err := be.RestoreBlockIntoPools(nil, nil) - assert.NotNil(t, err) - assert.Equal(t, process.ErrNilBlockHeader, err) -} + hdr.MetaBlockHashes = append(hdr.MetaBlockHashes, metaHash) + tdp.MetaBlocks().Put(metaHash, meta) -func TestBlockProcessor_RestoreBlockIntoPoolsShouldErrNilTxBlockBody(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - initStore(), - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - err := sp.RestoreBlockIntoPools(&block.Header{}, nil) - assert.NotNil(t, err) - assert.Equal(t, err, process.ErrNilTxBlockBody) -} + meta = block.MetaBlock{ + Nonce: 2, + ShardInfo: make([]block.ShardData, 0), + PrevRandSeed: randSeed, + } + metaBytes, _ = marshalizer.Marshal(meta) + metaHash = hasher.Compute(string(metaBytes)) -func TestShardProcessor_RestoreBlockIntoPoolsShouldWork(t *testing.T) { - t.Parallel() - - txHash := []byte("tx hash 1") - - dataPool := mock.NewPoolsHolderMock() - marshalizerMock := &mock.MarshalizerMock{} - hasherMock := &mock.HasherStub{} - - body := make(block.Body, 0) - tx := transaction.Transaction{Nonce: 1} - buffTx, _ := marshalizerMock.Marshal(tx) - - store := &mock.ChainStorerMock{ - GetAllCalled: func(unitType dataRetriever.UnitType, keys [][]byte) (map[string][]byte, error) { - m := make(map[string][]byte, 0) - m[string(txHash)] = buffTx - return m, nil - }, - GetStorerCalled: func(unitType dataRetriever.UnitType) storage.Storer { - return &mock.StorerStub{ - RemoveCalled: func(key []byte) error { - return nil - }, - } - }, - } - - factory, _ := shard.NewPreProcessorsContainerFactory( - mock.NewMultiShardsCoordinatorMock(3), - store, - marshalizerMock, - hasherMock, - dataPool, - &mock.AddressConverterMock{}, - initAccountsMock(), - &mock.RequestHandlerMock{}, - &mock.TxProcessorMock{}, - &mock.SCProcessorMock{}, - &mock.SmartContractResultsProcessorMock{}, - &mock.RewardTxProcessorMock{}, - ) - container, _ := factory.Create() - - tc, err := coordinator.NewTransactionCoordinator( - mock.NewMultiShardsCoordinatorMock(3), - initAccountsMock(), - dataPool, - &mock.RequestHandlerMock{}, - container, - &mock.InterimProcessorContainerMock{}, - ) - - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - dataPool, - store, - hasherMock, - marshalizerMock, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - tc, - &mock.Uint64ByteSliceConverterMock{}, - ) - - txHashes := make([][]byte, 0) - txHashes = append(txHashes, txHash) - miniblock := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 1, - TxHashes: txHashes, - } - body = append(body, &miniblock) - - miniblockHash := []byte("mini block hash 1") - hasherMock.ComputeCalled = func(s string) []byte { - 
return miniblockHash - } - - metablockHash := []byte("meta block hash 1") - metablockHeader := createDummyMetaBlock(0, 1, miniblockHash) - metablockHeader.SetMiniBlockProcessed(metablockHash, true) - dataPool.MetaBlocks().Put( - metablockHash, - metablockHeader, - ) - - err = sp.RestoreBlockIntoPools(&block.Header{}, body) - - miniblockFromPool, _ := dataPool.MiniBlocks().Get(miniblockHash) - txFromPool, _ := dataPool.Transactions().SearchFirstData(txHash) - metablockFromPool, _ := dataPool.MetaBlocks().Get(metablockHash) - metablock := metablockFromPool.(*block.MetaBlock) - assert.Nil(t, err) - assert.Equal(t, &miniblock, miniblockFromPool) - assert.Equal(t, &tx, txFromPool) - assert.Equal(t, false, metablock.GetMiniBlockProcessed(miniblockHash)) -} + hdr.MetaBlockHashes = append(hdr.MetaBlockHashes, metaHash) + tdp.MetaBlocks().Put(metaHash, meta) -func TestShardProcessor_DecodeBlockBody(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - marshalizerMock := &mock.MarshalizerMock{} - sp, err := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - marshalizerMock, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - body := make(block.Body, 0) - body = append(body, &block.MiniBlock{ReceiverShardID: 69}) - message, err := marshalizerMock.Marshal(body) - assert.Nil(t, err) - - dcdBlk := sp.DecodeBlockBody(nil) - assert.Nil(t, dcdBlk) - - dcdBlk = sp.DecodeBlockBody(message) - assert.Equal(t, body, dcdBlk) - assert.Equal(t, uint32(69), body[0].ReceiverShardID) -} + // set accounts not dirty + journalLen := func() int { return 0 } + revertToSnapshot := func(snapshot int) error { + return nil + } + rootHashCalled := func() ([]byte, error) { + return rootHash, nil + } + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + hasher, + marshalizer, + &mock.AccountsStub{ + JournalLenCalled: journalLen, + RevertToSnapshotCalled: revertToSnapshot, + RootHashCalled: rootHashCalled, + }, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) -func TestShardProcessor_DecodeBlockHeader(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - marshalizerMock := &mock.MarshalizerMock{} - sp, err := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - tdp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - marshalizerMock, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - hdr := &block.Header{} - hdr.Nonce = 1 - hdr.TimeStamp = uint64(0) - hdr.Signature = []byte("A") - message, err := marshalizerMock.Marshal(hdr) - assert.Nil(t, err) - - message, err 
= marshalizerMock.Marshal(hdr) - assert.Nil(t, err) - - dcdHdr := sp.DecodeBlockHeader(nil) - assert.Nil(t, dcdHdr) - - dcdHdr = sp.DecodeBlockHeader(message) - assert.Equal(t, hdr, dcdHdr) - assert.Equal(t, []byte("A"), dcdHdr.GetSignature()) + // should return err + err := sp.ProcessBlock(blkc, &hdr, body, haveTime) + assert.Equal(t, process.ErrTimeIsOut, err) } -func TestShardProcessor_IsHdrConstructionValid(t *testing.T) { - t.Parallel() - - hasher := mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} - dataPool := initDataPool([]byte("tx_hash1")) - - shardNr := uint32(5) - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - dataPool, - &mock.ChainStorerMock{}, - hasher, - marshalizer, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(shardNr), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(shardNr)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - prevRandSeed := []byte("prevrand") - currRandSeed := []byte("currrand") - notarizedHdrs := sp.NotarizedHdrs() - lastHdr := &block.MetaBlock{Round: 9, - Nonce: 44, - RandSeed: prevRandSeed} - notarizedHdrs[sharding.MetachainShardId] = append(notarizedHdrs[sharding.MetachainShardId], lastHdr) - - //put the existing headers inside datapool - - //header shard 0 - prevHash, _ := sp.ComputeHeaderHash(sp.LastNotarizedHdrForShard(sharding.MetachainShardId).(*block.MetaBlock)) - prevHdr := &block.MetaBlock{ - Round: 10, - Nonce: 45, - PrevRandSeed: prevRandSeed, - RandSeed: currRandSeed, - PrevHash: prevHash, - RootHash: []byte("prevRootHash")} - - prevHash, _ = sp.ComputeHeaderHash(prevHdr) - currHdr := &block.MetaBlock{ - Round: 11, - Nonce: 46, - PrevRandSeed: currRandSeed, - RandSeed: []byte("nextrand"), - PrevHash: prevHash, - RootHash: []byte("currRootHash")} - - err := sp.IsHdrConstructionValid(nil, prevHdr) - assert.Equal(t, err, process.ErrNilBlockHeader) - - err = sp.IsHdrConstructionValid(currHdr, nil) - assert.Equal(t, err, process.ErrNilBlockHeader) - - currHdr.Nonce = 0 - err = sp.IsHdrConstructionValid(currHdr, prevHdr) - assert.Equal(t, err, process.ErrWrongNonceInBlock) - - currHdr.Nonce = 0 - prevHdr.Nonce = 0 - err = sp.IsHdrConstructionValid(currHdr, prevHdr) - assert.Equal(t, err, process.ErrRootStateMissmatch) - - currHdr.Nonce = 0 - prevHdr.Nonce = 0 - prevHdr.RootHash = nil - err = sp.IsHdrConstructionValid(currHdr, prevHdr) - assert.Nil(t, err) - - currHdr.Nonce = 46 - prevHdr.Nonce = 45 - prevHdr.Round = currHdr.Round + 1 - err = sp.IsHdrConstructionValid(currHdr, prevHdr) - assert.Equal(t, err, process.ErrLowerRoundInOtherChainBlock) - - prevHdr.Round = currHdr.Round - 1 - currHdr.Nonce = prevHdr.Nonce + 2 - err = sp.IsHdrConstructionValid(currHdr, prevHdr) - assert.Equal(t, err, process.ErrWrongNonceInBlock) - - currHdr.Nonce = prevHdr.Nonce + 1 - prevHdr.RandSeed = []byte("randomwrong") - err = sp.IsHdrConstructionValid(currHdr, prevHdr) - assert.Equal(t, err, process.ErrRandSeedMismatch) - - prevHdr.RandSeed = currRandSeed - currHdr.PrevHash = []byte("wronghash") - err = sp.IsHdrConstructionValid(currHdr, prevHdr) - assert.Equal(t, err, process.ErrHashDoesNotMatchInOtherChainBlock) - - currHdr.PrevHash = prevHash - prevHdr.RootHash = []byte("prevRootHash") - err = sp.IsHdrConstructionValid(currHdr, prevHdr) - assert.Nil(t, err) -} +func 
TestShardProcessor_ProcessBlockWithWrongMiniBlockHeaderShouldErr(t *testing.T) { + t.Parallel() -func TestShardProcessor_RemoveAndSaveLastNotarizedMetaHdrNoDstMB(t *testing.T) { - t.Parallel() - - hasher := mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} - dataPool := mock.NewPoolsHolderMock() - forkDetector := &mock.ForkDetectorMock{} - highNonce := uint64(500) - forkDetector.GetHighestFinalBlockNonceCalled = func() uint64 { - return highNonce - } - - putCalledNr := 0 - store := &mock.ChainStorerMock{ - PutCalled: func(unitType dataRetriever.UnitType, key []byte, value []byte) error { - putCalledNr++ - return nil - }, - } - - shardNr := uint32(5) - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - dataPool, - store, - hasher, - marshalizer, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(shardNr), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - forkDetector, - &mock.BlocksTrackerMock{ - RemoveNotarisedBlocksCalled: func(headerHandler data.HeaderHandler) error { - return nil - }, - UnnotarisedBlocksCalled: func() []data.HeaderHandler { - return make([]data.HeaderHandler, 0) - }, - }, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(shardNr)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - prevRandSeed := []byte("prevrand") - currRandSeed := []byte("currrand") - notarizedHdrs := sp.NotarizedHdrs() - firstNonce := uint64(44) - - lastHdr := &block.MetaBlock{Round: 9, - Nonce: firstNonce, - RandSeed: prevRandSeed} - notarizedHdrs[sharding.MetachainShardId] = append(notarizedHdrs[sharding.MetachainShardId], lastHdr) - - //put the existing headers inside datapool - - //header shard 0 - prevHash, _ := sp.ComputeHeaderHash(sp.LastNotarizedHdrForShard(sharding.MetachainShardId).(*block.MetaBlock)) - prevHdr := &block.MetaBlock{ - Round: 10, - Nonce: 45, - PrevRandSeed: prevRandSeed, - RandSeed: currRandSeed, - PrevHash: prevHash, - RootHash: []byte("prevRootHash")} - - prevHash, _ = sp.ComputeHeaderHash(prevHdr) - currHdr := &block.MetaBlock{ - Round: 11, - Nonce: 46, - PrevRandSeed: currRandSeed, - RandSeed: []byte("nextrand"), - PrevHash: prevHash, - RootHash: []byte("currRootHash")} - currHash, _ := sp.ComputeHeaderHash(currHdr) - prevHash, _ = sp.ComputeHeaderHash(prevHdr) - - shardHdr := &block.Header{Round: 15} - shardBlock := block.Body{} - - blockHeader := &block.Header{} - - // test header not in pool and defer called - processedMetaHdrs, err := sp.GetProcessedMetaBlocksFromPool(shardBlock, blockHeader) - assert.Nil(t, err) - - err = sp.SaveLastNotarizedHeader(sharding.MetachainShardId, processedMetaHdrs) - assert.Nil(t, err) - - err = sp.RemoveProcessedMetablocksFromPool(processedMetaHdrs) - assert.Nil(t, err) - assert.Equal(t, 0, putCalledNr) - - notarizedHdrs = sp.NotarizedHdrs() - assert.Equal(t, firstNonce, sp.LastNotarizedHdrForShard(sharding.MetachainShardId).GetNonce()) - assert.Equal(t, 0, len(processedMetaHdrs)) - - // wrong header type in pool and defer called - dataPool.MetaBlocks().Put(currHash, shardHdr) - - hashes := make([][]byte, 0) - hashes = append(hashes, currHash) - blockHeader = &block.Header{MetaBlockHashes: hashes} - - processedMetaHdrs, err = sp.GetProcessedMetaBlocksFromPool(shardBlock, blockHeader) - assert.Equal(t, nil, err) - - err = sp.SaveLastNotarizedHeader(sharding.MetachainShardId, processedMetaHdrs) - assert.Nil(t, err) - - err = sp.RemoveProcessedMetablocksFromPool(processedMetaHdrs) - assert.Nil(t, err) - 
assert.Equal(t, 0, putCalledNr) - - notarizedHdrs = sp.NotarizedHdrs() - assert.Equal(t, firstNonce, sp.LastNotarizedHdrForShard(sharding.MetachainShardId).GetNonce()) - - // put headers in pool - dataPool.MetaBlocks().Put(currHash, currHdr) - dataPool.MetaBlocks().Put(prevHash, prevHdr) - - hashes = make([][]byte, 0) - hashes = append(hashes, currHash) - hashes = append(hashes, prevHash) - blockHeader = &block.Header{MetaBlockHashes: hashes} - - processedMetaHdrs, err = sp.GetProcessedMetaBlocksFromPool(shardBlock, blockHeader) - assert.Nil(t, err) - - err = sp.SaveLastNotarizedHeader(sharding.MetachainShardId, processedMetaHdrs) - assert.Nil(t, err) - - err = sp.RemoveProcessedMetablocksFromPool(processedMetaHdrs) - assert.Nil(t, err) - assert.Equal(t, 4, putCalledNr) - - assert.Equal(t, currHdr, sp.LastNotarizedHdrForShard(sharding.MetachainShardId)) -} + txHash := []byte("tx_hash1") + tdp := initDataPool(txHash) + randSeed := []byte("rand seed") + blkc := &blockchain.BlockChain{ + CurrentBlockHeader: &block.Header{ + Nonce: 1, + RandSeed: randSeed, + }, + } + rootHash := []byte("rootHash") + body := make(block.Body, 0) + txHashes := make([][]byte, 0) + txHashes = append(txHashes, txHash) + miniblock := block.MiniBlock{ + ReceiverShardID: 1, + SenderShardID: 0, + TxHashes: txHashes, + } + body = append(body, &miniblock) -func createShardData(hasher hashing.Hasher, marshalizer marshal.Marshalizer, miniBlocks []block.MiniBlock) []block.ShardData { - shardData := make([]block.ShardData, len(miniBlocks)) - for i := 0; i < len(miniBlocks); i++ { - marshaled, _ := marshalizer.Marshal(miniBlocks[i]) - hashed := hasher.Compute(string(marshaled)) - - shardMBHeader := block.ShardMiniBlockHeader{ - ReceiverShardId: miniBlocks[i].ReceiverShardID, - SenderShardId: miniBlocks[i].SenderShardID, - TxCount: uint32(len(miniBlocks[i].TxHashes)), - Hash: hashed, - } - shardMBHeaders := make([]block.ShardMiniBlockHeader, 0) - shardMBHeaders = append(shardMBHeaders, shardMBHeader) - - shardData[0].ShardId = miniBlocks[i].SenderShardID - shardData[0].TxCount = 10 - shardData[0].HeaderHash = []byte("headerHash") - shardData[0].ShardMiniBlockHeaders = shardMBHeaders - } - - return shardData -} + hasher := &mock.HasherStub{} + marshalizer := &mock.MarshalizerMock{} -func TestShardProcessor_RemoveAndSaveLastNotarizedMetaHdrNotAllMBFinished(t *testing.T) { - t.Parallel() - - hasher := mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} - dataPool := mock.NewPoolsHolderMock() - forkDetector := &mock.ForkDetectorMock{} - highNonce := uint64(500) - forkDetector.GetHighestFinalBlockNonceCalled = func() uint64 { - return highNonce - } - - putCalledNr := 0 - store := &mock.ChainStorerMock{ - PutCalled: func(unitType dataRetriever.UnitType, key []byte, value []byte) error { - putCalledNr++ - return nil - }, - } - - shardNr := uint32(5) - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - dataPool, - store, - hasher, - marshalizer, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(shardNr), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - forkDetector, - &mock.BlocksTrackerMock{ - RemoveNotarisedBlocksCalled: func(headerHandler data.HeaderHandler) error { - return nil - }, - UnnotarisedBlocksCalled: func() []data.HeaderHandler { - return make([]data.HeaderHandler, 0) - }, - }, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(shardNr)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - 
prevRandSeed := []byte("prevrand") - currRandSeed := []byte("currrand") - notarizedHdrs := sp.NotarizedHdrs() - firstNonce := uint64(44) - - lastHdr := &block.MetaBlock{Round: 9, - Nonce: firstNonce, - RandSeed: prevRandSeed} - notarizedHdrs[sharding.MetachainShardId] = append(notarizedHdrs[sharding.MetachainShardId], lastHdr) - - shardBlock := make(block.Body, 0) - txHash := []byte("txhash") - txHashes := make([][]byte, 0) - txHashes = append(txHashes, txHash) - miniblock1 := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 1, - TxHashes: txHashes, - } - miniblock2 := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 2, - TxHashes: txHashes, - } - miniblock3 := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 3, - TxHashes: txHashes, - } - miniblock4 := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 4, - TxHashes: txHashes, - } - shardBlock = append(shardBlock, &miniblock1, &miniblock2, &miniblock3) - - miniBlocks := make([]block.MiniBlock, 0) - miniBlocks = append(miniBlocks, miniblock1, miniblock2) - //header shard 0 - prevHash, _ := sp.ComputeHeaderHash(sp.LastNotarizedHdrForShard(sharding.MetachainShardId).(*block.MetaBlock)) - prevHdr := &block.MetaBlock{ - Round: 10, - Nonce: 45, - PrevRandSeed: prevRandSeed, - RandSeed: currRandSeed, - PrevHash: prevHash, - RootHash: []byte("prevRootHash"), - ShardInfo: createShardData(hasher, marshalizer, miniBlocks)} - - miniBlocks = make([]block.MiniBlock, 0) - miniBlocks = append(miniBlocks, miniblock3, miniblock4) - prevHash, _ = sp.ComputeHeaderHash(prevHdr) - currHdr := &block.MetaBlock{ - Round: 11, - Nonce: 46, - PrevRandSeed: currRandSeed, - RandSeed: []byte("nextrand"), - PrevHash: prevHash, - RootHash: []byte("currRootHash"), - ShardInfo: createShardData(hasher, marshalizer, miniBlocks)} - currHash, _ := sp.ComputeHeaderHash(currHdr) - prevHash, _ = sp.ComputeHeaderHash(prevHdr) - - // put headers in pool - dataPool.MetaBlocks().Put(currHash, currHdr) - dataPool.MetaBlocks().Put(prevHash, prevHdr) - - hashes := make([][]byte, 0) - hashes = append(hashes, currHash) - hashes = append(hashes, prevHash) - blockHeader := &block.Header{MetaBlockHashes: hashes} - - processedMetaHdrs, err := sp.GetProcessedMetaBlocksFromPool(shardBlock, blockHeader) - assert.Nil(t, err) - - err = sp.SaveLastNotarizedHeader(sharding.MetachainShardId, processedMetaHdrs) - assert.Nil(t, err) - - err = sp.RemoveProcessedMetablocksFromPool(processedMetaHdrs) - assert.Nil(t, err) - assert.Equal(t, 2, putCalledNr) - - assert.Equal(t, prevHdr, sp.LastNotarizedHdrForShard(sharding.MetachainShardId)) -} + mbbytes, _ := marshalizer.Marshal(miniblock) + mbHash := hasher.Compute(string(mbbytes)) + mbHdr := block.MiniBlockHeader{ + SenderShardID: 1, + ReceiverShardID: 0, + TxCount: uint32(len(txHashes)), + Hash: mbHash, + } + mbHdrs := make([]block.MiniBlockHeader, 0) + mbHdrs = append(mbHdrs, mbHdr) -func TestShardProcessor_RemoveAndSaveLastNotarizedMetaHdrAllMBFinished(t *testing.T) { - t.Parallel() - - hasher := mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} - dataPool := mock.NewPoolsHolderMock() - forkDetector := &mock.ForkDetectorMock{} - highNonce := uint64(500) - forkDetector.GetHighestFinalBlockNonceCalled = func() uint64 { - return highNonce - } - putCalledNr := 0 - store := &mock.ChainStorerMock{ - PutCalled: func(unitType dataRetriever.UnitType, key []byte, value []byte) error { - putCalledNr++ - return nil - }, - } - - shardNr := uint32(5) - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - dataPool, - 
store, - hasher, - marshalizer, - initAccountsMock(), - mock.NewMultiShardsCoordinatorMock(shardNr), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - forkDetector, - &mock.BlocksTrackerMock{ - RemoveNotarisedBlocksCalled: func(headerHandler data.HeaderHandler) error { - return nil - }, - UnnotarisedBlocksCalled: func() []data.HeaderHandler { - return make([]data.HeaderHandler, 0) - }, - }, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(shardNr)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - prevRandSeed := []byte("prevrand") - currRandSeed := []byte("currrand") - notarizedHdrs := sp.NotarizedHdrs() - firstNonce := uint64(44) - - lastHdr := &block.MetaBlock{Round: 9, - Nonce: firstNonce, - RandSeed: prevRandSeed} - notarizedHdrs[sharding.MetachainShardId] = append(notarizedHdrs[sharding.MetachainShardId], lastHdr) - - shardBlock := make(block.Body, 0) - txHash := []byte("txhash") - txHashes := make([][]byte, 0) - txHashes = append(txHashes, txHash) - miniblock1 := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 1, - TxHashes: txHashes, - } - miniblock2 := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 2, - TxHashes: txHashes, - } - miniblock3 := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 3, - TxHashes: txHashes, - } - miniblock4 := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 4, - TxHashes: txHashes, - } - shardBlock = append(shardBlock, &miniblock1, &miniblock2, &miniblock3, &miniblock4) - - miniBlocks := make([]block.MiniBlock, 0) - miniBlocks = append(miniBlocks, miniblock1, miniblock2) - //header shard 0 - prevHash, _ := sp.ComputeHeaderHash(sp.LastNotarizedHdrForShard(sharding.MetachainShardId).(*block.MetaBlock)) - prevHdr := &block.MetaBlock{ - Round: 10, - Nonce: 45, - PrevRandSeed: prevRandSeed, - RandSeed: currRandSeed, - PrevHash: prevHash, - RootHash: []byte("prevRootHash"), - ShardInfo: createShardData(hasher, marshalizer, miniBlocks)} - - miniBlocks = make([]block.MiniBlock, 0) - miniBlocks = append(miniBlocks, miniblock3, miniblock4) - prevHash, _ = sp.ComputeHeaderHash(prevHdr) - currHdr := &block.MetaBlock{ - Round: 11, - Nonce: 46, - PrevRandSeed: currRandSeed, - RandSeed: []byte("nextrand"), - PrevHash: prevHash, - RootHash: []byte("currRootHash"), - ShardInfo: createShardData(hasher, marshalizer, miniBlocks)} - currHash, _ := sp.ComputeHeaderHash(currHdr) - prevHash, _ = sp.ComputeHeaderHash(prevHdr) - - // put headers in pool - dataPool.MetaBlocks().Put(currHash, currHdr) - dataPool.MetaBlocks().Put(prevHash, prevHdr) - dataPool.MetaBlocks().Put([]byte("shouldNotRemove"), &block.MetaBlock{ - Round: 12, - PrevRandSeed: []byte("nextrand"), - PrevHash: currHash, - Nonce: 47}) - - hashes := make([][]byte, 0) - hashes = append(hashes, currHash) - hashes = append(hashes, prevHash) - blockHeader := &block.Header{MetaBlockHashes: hashes} - - processedMetaHdrs, err := sp.GetProcessedMetaBlocksFromPool(shardBlock, blockHeader) - assert.Nil(t, err) - assert.Equal(t, 2, len(processedMetaHdrs)) - - err = sp.SaveLastNotarizedHeader(sharding.MetachainShardId, processedMetaHdrs) - assert.Nil(t, err) - - err = sp.RemoveProcessedMetablocksFromPool(processedMetaHdrs) - assert.Nil(t, err) - assert.Equal(t, 4, putCalledNr) - - assert.Equal(t, currHdr, sp.LastNotarizedHdrForShard(sharding.MetachainShardId)) -} + lastHdr := blkc.GetCurrentBlockHeader() + prevHash, _ := core.CalculateHash(marshalizer, hasher, lastHdr) + hdr := 
initBlockHeader(prevHash, randSeed, rootHash, mbHdrs) -func createOneHeaderOneBody() (*block.Header, block.Body) { - txHash := []byte("tx_hash1") - rootHash := []byte("rootHash") - body := make(block.Body, 0) - txHashes := make([][]byte, 0) - txHashes = append(txHashes, txHash) - miniblock := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 1, - TxHashes: txHashes, - } - body = append(body, &miniblock) - - hasher := &mock.HasherStub{} - marshalizer := &mock.MarshalizerMock{} - - mbbytes, _ := marshalizer.Marshal(miniblock) - mbHash := hasher.Compute(string(mbbytes)) - mbHdr := block.MiniBlockHeader{ - ReceiverShardID: 0, - SenderShardID: 1, - TxCount: uint32(len(txHashes)), - Hash: mbHash} - mbHdrs := make([]block.MiniBlockHeader, 0) - mbHdrs = append(mbHdrs, mbHdr) - - hdr := &block.Header{ - Nonce: 1, - PrevHash: []byte(""), - Signature: []byte("signature"), - PubKeysBitmap: []byte("00110"), - ShardId: 0, - RootHash: rootHash, - MiniBlockHeaders: mbHdrs, - } - - return hdr, body -} + rootHashCalled := func() ([]byte, error) { + return rootHash, nil + } + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.AccountsStub{ + RootHashCalled: rootHashCalled, + }, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) -func TestShardProcessor_CheckHeaderBodyCorrelationReceiverMissmatch(t *testing.T) { - t.Parallel() - - hdr, body := createOneHeaderOneBody() - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - initDataPool([]byte("tx_hash1")), - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{}, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - hdr.MiniBlockHeaders[0].ReceiverShardID = body[0].ReceiverShardID + 1 - err := sp.CheckHeaderBodyCorrelation(hdr, body) - assert.Equal(t, process.ErrHeaderBodyMismatch, err) + // should return err + err := sp.ProcessBlock(blkc, &hdr, body, haveTime) + assert.Equal(t, process.ErrHeaderBodyMismatch, err) } -func TestShardProcessor_CheckHeaderBodyCorrelationSenderMissmatch(t *testing.T) { - t.Parallel() - - hdr, body := createOneHeaderOneBody() - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - initDataPool([]byte("tx_hash1")), - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{}, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - hdr.MiniBlockHeaders[0].SenderShardID = body[0].SenderShardID + 1 - err := sp.CheckHeaderBodyCorrelation(hdr, body) - assert.Equal(t, process.ErrHeaderBodyMismatch, err) -} +//------- checkAndRequestIfMetaHeadersMissing +func 
TestShardProcessor_CheckAndRequestIfMetaHeadersMissingShouldErr(t *testing.T) { + t.Parallel() -func TestShardProcessor_CheckHeaderBodyCorrelationTxCountMissmatch(t *testing.T) { - t.Parallel() - - hdr, body := createOneHeaderOneBody() - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - initDataPool([]byte("tx_hash1")), - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{}, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - hdr.MiniBlockHeaders[0].TxCount = uint32(len(body[0].TxHashes) + 1) - err := sp.CheckHeaderBodyCorrelation(hdr, body) - assert.Equal(t, process.ErrHeaderBodyMismatch, err) -} + hdrNoncesRequestCalled := int32(0) + tdp, blkc, rootHash, body, txHashes, hasher, marshalizer, _, mbHash := initBasicTestData() + mbHdr := block.MiniBlockHeader{ + ReceiverShardID: 0, + SenderShardID: 1, + TxCount: uint32(len(txHashes)), + Hash: mbHash, + } + mbHdrs := make([]block.MiniBlockHeader, 0) + mbHdrs = append(mbHdrs, mbHdr) -func TestShardProcessor_CheckHeaderBodyCorrelationHashMissmatch(t *testing.T) { - t.Parallel() - - hdr, body := createOneHeaderOneBody() - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - initDataPool([]byte("tx_hash1")), - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{}, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - hdr.MiniBlockHeaders[0].Hash = []byte("wrongHash") - err := sp.CheckHeaderBodyCorrelation(hdr, body) - assert.Equal(t, process.ErrHeaderBodyMismatch, err) -} + lastHdr := blkc.GetCurrentBlockHeader() + prevHash, _ := core.CalculateHash(marshalizer, hasher, lastHdr) + randSeed := []byte("rand seed") -func TestShardProcessor_CheckHeaderBodyCorrelationShouldPass(t *testing.T) { - t.Parallel() - - hdr, body := createOneHeaderOneBody() - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - initDataPool([]byte("tx_hash1")), - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{}, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - err := sp.CheckHeaderBodyCorrelation(hdr, body) - assert.Nil(t, err) + hdr := initBlockHeader(prevHash, randSeed, rootHash, mbHdrs) + + shardMiniBlock := block.ShardMiniBlockHeader{ + ReceiverShardId: mbHdr.ReceiverShardID, + SenderShardId: mbHdr.SenderShardID, + TxCount: mbHdr.TxCount, + Hash: mbHdr.Hash, + } + shardMiniblockHdrs := make([]block.ShardMiniBlockHeader, 0) + shardMiniblockHdrs = append(shardMiniblockHdrs, shardMiniBlock) + shardHeader := block.ShardData{ + ShardMiniBlockHeaders: shardMiniblockHdrs, + } + shardHdrs := make([]block.ShardData, 0) + shardHdrs 
= append(shardHdrs, shardHeader) + + meta := &block.MetaBlock{ + Nonce: 1, + ShardInfo: shardHdrs, + Round: 1, + RandSeed: randSeed, + } + metaBytes, _ := marshalizer.Marshal(meta) + metaHash := hasher.Compute(string(metaBytes)) + + hdr.MetaBlockHashes = append(hdr.MetaBlockHashes, metaHash) + tdp.MetaBlocks().Put(metaHash, meta) + + meta = &block.MetaBlock{ + Nonce: 2, + ShardInfo: make([]block.ShardData, 0), + Round: 2, + PrevRandSeed: randSeed, + } + metaBytes, _ = marshalizer.Marshal(meta) + metaHash = hasher.Compute(string(metaBytes)) + + tdp.MetaBlocks().Put(metaHash, meta) + + // set accounts not dirty + journalLen := func() int { return 0 } + revertToSnapshot := func(snapshot int) error { + return nil + } + rootHashCalled := func() ([]byte, error) { + return rootHash, nil + } + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + hasher, + marshalizer, + &mock.AccountsStub{ + JournalLenCalled: journalLen, + RevertToSnapshotCalled: revertToSnapshot, + RootHashCalled: rootHashCalled, + }, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{ + RequestHeaderHandlerByNonceCalled: func(destShardID uint32, nonce uint64) { + atomic.AddInt32(&hdrNoncesRequestCalled, 1) + }, + }, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + err := sp.ProcessBlock(blkc, &hdr, body, haveTime) + + sp.CheckAndRequestIfMetaHeadersMissing(2) + time.Sleep(100 * time.Millisecond) + assert.Equal(t, int32(1), atomic.LoadInt32(&hdrNoncesRequestCalled)) + assert.Equal(t, err, process.ErrTimeIsOut) } -func TestShardProcessor_RestoreMetaBlockIntoPoolShouldPass(t *testing.T) { +//-------- isMetaHeaderFinal +func TestShardProcessor_IsMetaHeaderFinalShouldPass(t *testing.T) { t.Parallel() - marshalizer := &mock.MarshalizerMock{} - - poolFake := mock.NewPoolsHolderMock() - - metaBlock := block.MetaBlock{ - Nonce: 1, - ShardInfo: make([]block.ShardData, 0), - } - - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - poolFake, - &mock.ChainStorerMock{ - GetCalled: func(unitType dataRetriever.UnitType, key []byte) ([]byte, error) { - return marshalizer.Marshal(&metaBlock) - }, - GetStorerCalled: func(unitType dataRetriever.UnitType) storage.Storer { - return &mock.StorerStub{ - RemoveCalled: func(key []byte) error { - return nil - }, - } - }, - }, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{}, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - miniblockHashes := make(map[string]uint32, 0) - - meta := block.MetaBlock{ - Nonce: 1, - ShardInfo: make([]block.ShardData, 0), - } - hasher := &mock.HasherStub{} - - metaBytes, _ := marshalizer.Marshal(meta) - hasher.ComputeCalled = func(s string) []byte { - return []byte("cool") - } - metaHash := hasher.Compute(string(metaBytes)) - metablockHashes := make([][]byte, 0) - metablockHashes = append(metablockHashes, metaHash) - - metaBlockRestored, ok := poolFake.MetaBlocks().Get(metaHash) - - assert.Equal(t, nil, metaBlockRestored) - assert.False(t, ok) - - err := 
sp.RestoreMetaBlockIntoPool(miniblockHashes, metablockHashes) - - metaBlockRestored, _ = poolFake.MetaBlocks().Get(metaHash) - - assert.Equal(t, &metaBlock, metaBlockRestored) - assert.Nil(t, err) -} + tdp := mock.NewPoolsHolderMock() + txHash := []byte("tx_hash1") + randSeed := []byte("rand seed") + tdp.Transactions().AddData(txHash, &transaction.Transaction{}, process.ShardCacherIdentifier(1, 0)) + blkc := &blockchain.BlockChain{ + CurrentBlockHeader: &block.Header{ + Nonce: 1, + RandSeed: randSeed, + }, + } + rootHash := []byte("rootHash") + body := make(block.Body, 0) + txHashes := make([][]byte, 0) + txHashes = append(txHashes, txHash) + miniblock := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 1, + TxHashes: txHashes, + } + body = append(body, &miniblock) -func TestShardPreprocessor_getAllMiniBlockDstMeFromMetaShouldPass(t *testing.T) { - t.Parallel() - - marshalizer := &mock.MarshalizerMock{} - - txHash := []byte("tx_hash1") - txHashes := make([][]byte, 0) - txHashes = append(txHashes, txHash) - miniblock := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 1, - TxHashes: txHashes, - } - hasher := &mock.HasherStub{} - - mbbytes, _ := marshalizer.Marshal(miniblock) - mbHash := hasher.Compute(string(mbbytes)) - - shardMiniBlock := block.ShardMiniBlockHeader{ - ReceiverShardId: 0, - SenderShardId: 2, - TxCount: uint32(len(txHashes)), - Hash: mbHash, - } - shardMiniblockHdrs := make([]block.ShardMiniBlockHeader, 0) - shardMiniblockHdrs = append(shardMiniblockHdrs, shardMiniBlock) - shardHeader := block.ShardData{ - ShardId: 1, - ShardMiniBlockHeaders: shardMiniblockHdrs, - } - shardHdrs := make([]block.ShardData, 0) - shardHdrs = append(shardHdrs, shardHeader) - - idp := initDataPool([]byte("tx_hash1")) - idp.MetaBlocksCalled = func() storage.Cacher { - return &mock.CacherStub{ - GetCalled: func(key []byte) (value interface{}, ok bool) { - if reflect.DeepEqual(key, []byte("tx1_hash")) { - return &transaction.Transaction{Nonce: 10}, true - } - return nil, false - }, - KeysCalled: func() [][]byte { - return nil - }, - LenCalled: func() int { - return 0 - }, - PeekCalled: func(key []byte) (value interface{}, ok bool) { - return &block.MetaBlock{ - Nonce: 1, - Round: 1, - ShardInfo: shardHdrs, - }, true - }, - PutCalled: func(key []byte, value interface{}) (evicted bool) { - return true - }, - RegisterHandlerCalled: func(i func(key []byte)) {}, - } - } - - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - idp, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{}, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - meta := block.MetaBlock{ - Nonce: 0, - ShardInfo: make([]block.ShardData, 0), - } - - metaBytes, _ := marshalizer.Marshal(meta) - hasher.ComputeCalled = func(s string) []byte { - return []byte("cool") - } - metaHash := hasher.Compute(string(metaBytes)) - metablockHashes := make([][]byte, 0) - metablockHashes = append(metablockHashes, metaHash) - - orderedMetaBlocks, err := sp.GetAllMiniBlockDstMeFromMeta(1, metablockHashes) - - assert.Equal(t, 1, len(orderedMetaBlocks)) - assert.Equal(t, orderedMetaBlocks[""], metaHash) - assert.Nil(t, err) -} + hasher := &mock.HasherMock{} + marshalizer := 
&mock.MarshalizerMock{} -func TestShardProcessor_GetHighestHdrForOwnShardFromMetachainNothingToProcess(t *testing.T) { - t.Parallel() - - dataPool := initDataPool([]byte("tx_hash1")) - - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - dataPool, - &mock.ChainStorerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{}, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - hdr, _, err := sp.GetHighestHdrForOwnShardFromMetachain(0) - - assert.Nil(t, err) - assert.NotNil(t, hdr) - assert.Equal(t, uint64(0), hdr.GetNonce()) -} + mbbytes, _ := marshalizer.Marshal(miniblock) + mbHash := hasher.Compute(string(mbbytes)) + mbHdr := block.MiniBlockHeader{ + ReceiverShardID: 0, + SenderShardID: 1, + TxCount: uint32(len(txHashes)), + Hash: mbHash, + } + mbHdrs := make([]block.MiniBlockHeader, 0) + mbHdrs = append(mbHdrs, mbHdr) + + lastHdr := blkc.GetCurrentBlockHeader() + prevHash, _ := core.CalculateHash(marshalizer, hasher, lastHdr) + hdr := initBlockHeader(prevHash, randSeed, rootHash, mbHdrs) + + shardMiniBlock := block.ShardMiniBlockHeader{ + ReceiverShardId: mbHdr.ReceiverShardID, + SenderShardId: mbHdr.SenderShardID, + TxCount: mbHdr.TxCount, + Hash: mbHdr.Hash, + } + shardMiniblockHdrs := make([]block.ShardMiniBlockHeader, 0) + shardMiniblockHdrs = append(shardMiniblockHdrs, shardMiniBlock) + shardHeader := block.ShardData{ + ShardMiniBlockHeaders: shardMiniblockHdrs, + } + shardHdrs := make([]block.ShardData, 0) + shardHdrs = append(shardHdrs, shardHeader) -func TestShardProcessor_GetHighestHdrForOwnShardFromMetachaiMetaHdrsWithoutOwnHdr(t *testing.T) { - t.Parallel() - - dataPool := integrationTests.CreateTestShardDataPool(nil) - store := initStore() - hasher := &mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} - genesisBlocks := createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)) - - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - dataPool, - store, - hasher, - marshalizer, - &mock.AccountsStub{}, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - genesisBlocks, - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - shardInfo := make([]block.ShardData, 0) - shardInfo = append(shardInfo, block.ShardData{HeaderHash: []byte("hash"), ShardId: 1}) - _ = dataPool.Headers().Put([]byte("hash"), &block.Header{ShardId: 0, Nonce: 1}) - - prevMetaHdr := genesisBlocks[sharding.MetachainShardId] - prevHash, _ := core.CalculateHash(marshalizer, hasher, prevMetaHdr) - currMetaHdr := &block.MetaBlock{ - Nonce: 1, - Epoch: 0, - Round: 1, - PrevHash: prevHash, - PrevRandSeed: prevMetaHdr.GetRandSeed(), - RandSeed: prevMetaHdr.GetRandSeed(), - ShardInfo: shardInfo, - } - currHash, _ := core.CalculateHash(marshalizer, hasher, currMetaHdr) - _ = dataPool.MetaBlocks().Put(currHash, currMetaHdr) - - prevMetaHdr = currMetaHdr - prevHash, _ = core.CalculateHash(marshalizer, hasher, prevMetaHdr) - currMetaHdr = &block.MetaBlock{ - Nonce: 2, - Epoch: 0, - Round: 2, - PrevHash: prevHash, - PrevRandSeed: prevMetaHdr.GetRandSeed(), - RandSeed: prevMetaHdr.GetRandSeed(), - 
ShardInfo: shardInfo, - } - currHash, _ = core.CalculateHash(marshalizer, hasher, currMetaHdr) - _ = dataPool.MetaBlocks().Put(currHash, currMetaHdr) - - hdr, _, err := sp.GetHighestHdrForOwnShardFromMetachain(4) - - assert.Nil(t, err) - assert.NotNil(t, hdr) - assert.Equal(t, uint64(0), hdr.GetNonce()) + meta := &block.MetaBlock{ + Nonce: 1, + ShardInfo: shardHdrs, + Round: 1, + RandSeed: randSeed, + } + metaBytes, _ := marshalizer.Marshal(meta) + metaHash := hasher.Compute(string(metaBytes)) + + hdr.MetaBlockHashes = append(hdr.MetaBlockHashes, metaHash) + tdp.MetaBlocks().Put(metaHash, meta) + + meta = &block.MetaBlock{ + Nonce: 2, + ShardInfo: make([]block.ShardData, 0), + Round: 2, + PrevHash: metaHash, + PrevRandSeed: randSeed, + } + metaBytes, _ = marshalizer.Marshal(meta) + metaHash = hasher.Compute(string(metaBytes)) + tdp.MetaBlocks().Put(metaHash, meta) + + genesisBlocks := createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)) + sp, _ := blproc.NewShardProcessorEmptyWith3shards(tdp, genesisBlocks) + + err := sp.ProcessBlock(blkc, &hdr, body, haveTime) + assert.Equal(t, process.ErrTimeIsOut, err) + res := sp.IsMetaHeaderFinal(&hdr, nil, 0) + assert.False(t, res) + res = sp.IsMetaHeaderFinal(nil, nil, 0) + assert.False(t, res) + + meta = &block.MetaBlock{ + Nonce: 1, + ShardInfo: shardHdrs, + Round: 1, + RandSeed: randSeed, + } + ordered, _ := sp.GetOrderedMetaBlocks(3) + res = sp.IsMetaHeaderFinal(meta, ordered, 0) + assert.True(t, res) } -func TestShardProcessor_GetHighestHdrForOwnShardFromMetachaiMetaHdrsWithOwnHdrButNotStored(t *testing.T) { - t.Parallel() - - dataPool := integrationTests.CreateTestShardDataPool(nil) - store := initStore() - hasher := &mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} - genesisBlocks := createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)) - - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - dataPool, - store, - hasher, - marshalizer, - &mock.AccountsStub{}, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - genesisBlocks, - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - shardInfo := make([]block.ShardData, 0) - shardInfo = append(shardInfo, block.ShardData{HeaderHash: []byte("hash"), ShardId: 0}) - - prevMetaHdr := genesisBlocks[sharding.MetachainShardId] - prevHash, _ := core.CalculateHash(marshalizer, hasher, prevMetaHdr) - currMetaHdr := &block.MetaBlock{ - Nonce: 1, - Epoch: 0, - Round: 1, - PrevHash: prevHash, - PrevRandSeed: prevMetaHdr.GetRandSeed(), - RandSeed: prevMetaHdr.GetRandSeed(), - ShardInfo: shardInfo, - } - currHash, _ := core.CalculateHash(marshalizer, hasher, currMetaHdr) - _ = dataPool.MetaBlocks().Put(currHash, currMetaHdr) - - prevMetaHdr = currMetaHdr - prevHash, _ = core.CalculateHash(marshalizer, hasher, prevMetaHdr) - currMetaHdr = &block.MetaBlock{ - Nonce: 2, - Epoch: 0, - Round: 2, - PrevHash: prevHash, - PrevRandSeed: prevMetaHdr.GetRandSeed(), - RandSeed: prevMetaHdr.GetRandSeed(), - ShardInfo: shardInfo, - } - currHash, _ = core.CalculateHash(marshalizer, hasher, currMetaHdr) - _ = dataPool.MetaBlocks().Put(currHash, currMetaHdr) - - hdr, _, err := sp.GetHighestHdrForOwnShardFromMetachain(4) - - assert.Nil(t, err) - assert.NotNil(t, hdr) - assert.Equal(t, uint64(0), hdr.GetNonce()) +//-------- requestFinalMissingHeaders +func TestShardProcessor_RequestFinalMissingHeaders(t *testing.T) { + 
t.Parallel() + + tdp := mock.NewPoolsHolderMock() + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.AccountsStub{}, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + sp.SetCurrHighestMetaHdrNonce(1) + res := sp.RequestFinalMissingHeaders() + assert.Equal(t, res > 0, true) } -func TestShardProcessor_GetHighestHdrForOwnShardFromMetachaiMetaHdrsWithOwnHdrStored(t *testing.T) { - t.Parallel() - - dataPool := integrationTests.CreateTestShardDataPool(nil) - store := initStore() - hasher := &mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} - genesisBlocks := createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)) - - sp, _ := blproc.NewShardProcessor( - &mock.ServiceContainerMock{}, - dataPool, - store, - hasher, - marshalizer, - &mock.AccountsStub{}, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.ForkDetectorMock{}, - &mock.BlocksTrackerMock{}, - genesisBlocks, - &mock.RequestHandlerMock{}, - &mock.TransactionCoordinatorMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - ownHdr := &block.Header{ - Nonce: 1, - Round: 1, - } - ownHash, _ := core.CalculateHash(marshalizer, hasher, ownHdr) - _ = dataPool.Headers().Put(ownHash, ownHdr) - - shardInfo := make([]block.ShardData, 0) - shardInfo = append(shardInfo, block.ShardData{HeaderHash: ownHash, ShardId: 0}) - - prevMetaHdr := genesisBlocks[sharding.MetachainShardId] - prevHash, _ := core.CalculateHash(marshalizer, hasher, prevMetaHdr) - currMetaHdr := &block.MetaBlock{ - Nonce: 1, - Epoch: 0, - Round: 1, - PrevHash: prevHash, - PrevRandSeed: prevMetaHdr.GetRandSeed(), - RandSeed: prevMetaHdr.GetRandSeed(), - ShardInfo: shardInfo, - } - currHash, _ := core.CalculateHash(marshalizer, hasher, currMetaHdr) - _ = dataPool.MetaBlocks().Put(currHash, currMetaHdr) - - ownHdr = &block.Header{ - Nonce: 2, - Round: 2, - } - ownHash, _ = core.CalculateHash(marshalizer, hasher, ownHdr) - mrsOwnHdr, _ := marshalizer.Marshal(ownHdr) - _ = store.Put(dataRetriever.BlockHeaderUnit, ownHash, mrsOwnHdr) - - shardInfo = make([]block.ShardData, 0) - shardInfo = append(shardInfo, block.ShardData{HeaderHash: ownHash, ShardId: 0}) - - prevMetaHdr = currMetaHdr - prevHash, _ = core.CalculateHash(marshalizer, hasher, prevMetaHdr) - currMetaHdr = &block.MetaBlock{ - Nonce: 2, - Epoch: 0, - Round: 2, - PrevHash: prevHash, - PrevRandSeed: prevMetaHdr.GetRandSeed(), - RandSeed: prevMetaHdr.GetRandSeed(), - ShardInfo: shardInfo, - } - currHash, _ = core.CalculateHash(marshalizer, hasher, currMetaHdr) - _ = dataPool.MetaBlocks().Put(currHash, currMetaHdr) - - prevMetaHdr = currMetaHdr - prevHash, _ = core.CalculateHash(marshalizer, hasher, prevMetaHdr) - currMetaHdr = &block.MetaBlock{ - Nonce: 3, - Epoch: 0, - Round: 3, - PrevHash: prevHash, - PrevRandSeed: prevMetaHdr.GetRandSeed(), - RandSeed: prevMetaHdr.GetRandSeed(), - } - currHash, _ = core.CalculateHash(marshalizer, hasher, currMetaHdr) - _ = dataPool.MetaBlocks().Put(currHash, currMetaHdr) - - hdr, _, err := sp.GetHighestHdrForOwnShardFromMetachain(4) - - assert.Nil(t, err) - assert.NotNil(t, hdr) - assert.Equal(t, ownHdr.GetNonce(), 
hdr.GetNonce()) +//--------- verifyIncludedMetaBlocksFinality +func TestShardProcessor_CheckMetaHeadersValidityAndFinalityShouldPass(t *testing.T) { + t.Parallel() + + tdp := mock.NewPoolsHolderMock() + txHash := []byte("tx_hash1") + tdp.Transactions().AddData(txHash, &transaction.Transaction{}, process.ShardCacherIdentifier(1, 0)) + rootHash := []byte("rootHash") + body := make(block.Body, 0) + txHashes := make([][]byte, 0) + txHashes = append(txHashes, txHash) + miniblock := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 1, + TxHashes: txHashes, + } + body = append(body, &miniblock) + + hasher := &mock.HasherMock{} + marshalizer := &mock.MarshalizerMock{} + + mbbytes, _ := marshalizer.Marshal(miniblock) + mbHash := hasher.Compute(string(mbbytes)) + mbHdr := block.MiniBlockHeader{ + ReceiverShardID: 0, + SenderShardID: 1, + TxCount: uint32(len(txHashes)), + Hash: mbHash} + mbHdrs := make([]block.MiniBlockHeader, 0) + mbHdrs = append(mbHdrs, mbHdr) + + genesisBlocks := createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)) + lastHdr := genesisBlocks[0] + prevHash, _ := core.CalculateHash(marshalizer, hasher, lastHdr) + randSeed := []byte("rand seed") + hdr := initBlockHeader(prevHash, randSeed, rootHash, mbHdrs) + + shardMiniBlock := block.ShardMiniBlockHeader{ + ReceiverShardId: mbHdr.ReceiverShardID, + SenderShardId: mbHdr.SenderShardID, + TxCount: mbHdr.TxCount, + Hash: mbHdr.Hash, + } + shardMiniblockHdrs := make([]block.ShardMiniBlockHeader, 0) + shardMiniblockHdrs = append(shardMiniblockHdrs, shardMiniBlock) + shardHeader := block.ShardData{ + ShardMiniBlockHeaders: shardMiniblockHdrs, + } + shardHdrs := make([]block.ShardData, 0) + shardHdrs = append(shardHdrs, shardHeader) + + prevMeta := genesisBlocks[sharding.MetachainShardId] + prevHash, _ = core.CalculateHash(marshalizer, hasher, prevMeta) + meta := &block.MetaBlock{ + Nonce: 1, + ShardInfo: shardHdrs, + Round: 1, + PrevHash: prevHash, + PrevRandSeed: prevMeta.GetRandSeed(), + } + metaBytes, _ := marshalizer.Marshal(meta) + metaHash := hasher.Compute(string(metaBytes)) + hdr.MetaBlockHashes = append(hdr.MetaBlockHashes, metaHash) + + tdp.MetaBlocks().Put(metaHash, meta) + + prevHash, _ = core.CalculateHash(marshalizer, hasher, meta) + meta = &block.MetaBlock{ + Nonce: 2, + ShardInfo: make([]block.ShardData, 0), + Round: 2, + PrevHash: prevHash, + } + metaBytes, _ = marshalizer.Marshal(meta) + metaHash = hasher.Compute(string(metaBytes)) + + tdp.MetaBlocks().Put(metaHash, meta) + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + hasher, + marshalizer, + &mock.AccountsStub{}, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + genesisBlocks, + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + hdr.Round = 4 + + err := sp.CheckMetaHeadersValidityAndFinality(&hdr) + assert.Nil(t, err) +} + +func TestShardProcessor_CheckMetaHeadersValidityAndFinalityShouldErr(t *testing.T) { + t.Parallel() + + mbHdrs := make([]block.MiniBlockHeader, 0) + rootHash := []byte("rootHash") + txHash := []byte("txhash1") + txHashes := make([][]byte, 0) + txHashes = append(txHashes, txHash) + + tdp := mock.NewPoolsHolderMock() + genesisBlocks := createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)) + sp, _ := blproc.NewShardProcessorEmptyWith3shards(tdp, genesisBlocks) + + lastHdr := genesisBlocks[0] + 
prevHash, _ := core.CalculateHash(&mock.MarshalizerMock{}, &mock.HasherMock{}, lastHdr) + + randSeed := []byte("rand seed") + hdr := initBlockHeader(prevHash, randSeed, rootHash, mbHdrs) + + hdr.MetaBlockHashes = append(hdr.MetaBlockHashes, []byte("meta")) + hdr.Round = 0 + err := sp.CheckMetaHeadersValidityAndFinality(&hdr) + assert.Equal(t, err, process.ErrNilMetaBlockHeader) +} + +//------- CommitBlock + +func TestShardProcessor_CommitBlockNilBlockchainShouldErr(t *testing.T) { + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + accounts := &mock.AccountsStub{} + accounts.RevertToSnapshotCalled = func(snapshot int) error { + return nil + } + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + accounts, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + blk := make(block.Body, 0) + + err := sp.CommitBlock(nil, &block.Header{}, blk) + assert.Equal(t, process.ErrNilBlockChain, err) +} + +func TestShardProcessor_CommitBlockMarshalizerFailForHeaderShouldErr(t *testing.T) { + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + rootHash := []byte("root hash to be tested") + accounts := &mock.AccountsStub{ + RootHashCalled: func() ([]byte, error) { + return rootHash, nil + }, + RevertToSnapshotCalled: func(snapshot int) error { + return nil + }, + } + errMarshalizer := errors.New("failure") + hdr := &block.Header{ + Nonce: 1, + Round: 1, + PubKeysBitmap: []byte("0100101"), + Signature: []byte("signature"), + RootHash: rootHash, + } + body := make(block.Body, 0) + marshalizer := &mock.MarshalizerStub{ + MarshalCalled: func(obj interface{}) (i []byte, e error) { + if reflect.DeepEqual(obj, hdr) { + return nil, errMarshalizer + } + + return []byte("obj"), nil + }, + } + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + marshalizer, + accounts, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + blkc := createTestBlockchain() + + err := sp.CommitBlock(blkc, hdr, body) + assert.Equal(t, errMarshalizer, err) +} + +func TestShardProcessor_CommitBlockStorageFailsForHeaderShouldErr(t *testing.T) { + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + errPersister := errors.New("failure") + wasCalled := false + rootHash := []byte("root hash to be tested") + accounts := &mock.AccountsStub{ + CommitCalled: func() ([]byte, error) { + return nil, nil + }, + RootHashCalled: func() ([]byte, error) { + return rootHash, nil + }, + RevertToSnapshotCalled: func(snapshot int) error { + return nil + }, + } + hdr := &block.Header{ + Nonce: 1, + Round: 1, + PubKeysBitmap: []byte("0100101"), + Signature: []byte("signature"), + RootHash: rootHash, + } + body := make(block.Body, 0) + hdrUnit := &mock.StorerStub{ + PutCalled: func(key, data []byte) error { + wasCalled = true + return errPersister + }, + HasCalled: func(key []byte) error { + return nil + }, + 
} + store := initStore() + store.AddStorer(dataRetriever.BlockHeaderUnit, hdrUnit) + + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + store, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + accounts, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{ + AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeader data.HeaderHandler, finalHeaderHash []byte) error { + return nil + }, + }, + &mock.BlocksTrackerMock{ + AddBlockCalled: func(headerHandler data.HeaderHandler) { + }, + UnnotarisedBlocksCalled: func() []data.HeaderHandler { + return make([]data.HeaderHandler, 0) + }, + }, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + blkc, _ := blockchain.NewBlockChain( + generateTestCache(), + ) + + _ = blkc.SetAppStatusHandler(&mock.AppStatusHandlerStub{ + SetUInt64ValueHandler: func(key string, value uint64) {}, + }) + + err := sp.CommitBlock(blkc, hdr, body) + assert.True(t, wasCalled) + assert.Nil(t, err) +} + +func TestShardProcessor_CommitBlockStorageFailsForBodyShouldWork(t *testing.T) { + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + wasCalled := false + errPersister := errors.New("failure") + rootHash := []byte("root hash to be tested") + accounts := &mock.AccountsStub{ + RootHashCalled: func() ([]byte, error) { + return rootHash, nil + }, + CommitCalled: func() (i []byte, e error) { + return nil, nil + }, + RevertToSnapshotCalled: func(snapshot int) error { + return nil + }, + } + hdr := &block.Header{ + Nonce: 1, + Round: 1, + PubKeysBitmap: []byte("0100101"), + Signature: []byte("signature"), + RootHash: rootHash, + } + mb := block.MiniBlock{} + body := make(block.Body, 0) + body = append(body, &mb) + + miniBlockUnit := &mock.StorerStub{ + PutCalled: func(key, data []byte) error { + wasCalled = true + return errPersister + }, + } + store := initStore() + store.AddStorer(dataRetriever.MiniBlockUnit, miniBlockUnit) + + sp, err := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + store, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + accounts, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{ + AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeader data.HeaderHandler, finalHeaderHash []byte) error { + return nil + }, + }, + &mock.BlocksTrackerMock{ + AddBlockCalled: func(headerHandler data.HeaderHandler) { + }, + UnnotarisedBlocksCalled: func() []data.HeaderHandler { + return make([]data.HeaderHandler, 0) + }, + }, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + assert.Nil(t, err) + + blkc, _ := blockchain.NewBlockChain( + generateTestCache(), + ) + + _ = blkc.SetAppStatusHandler(&mock.AppStatusHandlerStub{ + SetUInt64ValueHandler: func(key string, value uint64) {}, + }) + + err = sp.CommitBlock(blkc, hdr, body) + + assert.Nil(t, err) + assert.True(t, wasCalled) +} + +func TestShardProcessor_CommitBlockNilNoncesDataPoolShouldErr(t *testing.T) { + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + rootHash := []byte("root hash to be tested") + accounts := &mock.AccountsStub{ + RootHashCalled: func() 
([]byte, error) { + return rootHash, nil + }, + RevertToSnapshotCalled: func(snapshot int) error { + return nil + }, + } + hdr := &block.Header{ + Nonce: 1, + Round: 1, + PubKeysBitmap: []byte("0100101"), + Signature: []byte("signature"), + RootHash: rootHash, + } + body := make(block.Body, 0) + store := initStore() + + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + store, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + accounts, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + tdp.HeadersNoncesCalled = func() dataRetriever.Uint64SyncMapCacher { + return nil + } + blkc := createTestBlockchain() + err := sp.CommitBlock(blkc, hdr, body) + + assert.Equal(t, process.ErrNilDataPoolHolder, err) +} + +func TestShardProcessor_CommitBlockNoTxInPoolShouldErr(t *testing.T) { + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + + txCache := &mock.CacherStub{ + PeekCalled: func(key []byte) (value interface{}, ok bool) { + return nil, false + }, + LenCalled: func() int { + return 0 + }, + } + tdp.TransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{ + ShardDataStoreCalled: func(id string) (c storage.Cacher) { + return txCache + }, + RemoveSetOfDataFromPoolCalled: func(keys [][]byte, id string) { + }, + SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { + if reflect.DeepEqual(key, []byte("tx1_hash")) { + return &transaction.Transaction{Nonce: 10}, true + } + return nil, false + }, + RegisterHandlerCalled: func(i func(key []byte)) { + + }, + } + } + + txHash := []byte("txHash") + rootHash := []byte("root hash") + hdrHash := []byte("header hash") + hdr := &block.Header{ + Nonce: 1, + Round: 1, + PubKeysBitmap: []byte("0100101"), + Signature: []byte("signature"), + RootHash: rootHash, + } + mb := block.MiniBlock{ + TxHashes: [][]byte{txHash}, + } + body := block.Body{&mb} + accounts := &mock.AccountsStub{ + CommitCalled: func() (i []byte, e error) { + return rootHash, nil + }, + RootHashCalled: func() ([]byte, error) { + return rootHash, nil + }, + RevertToSnapshotCalled: func(snapshot int) error { + return nil + }, + } + fd := &mock.ForkDetectorMock{ + AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeader data.HeaderHandler, finalHeaderHash []byte) error { + return nil + }, + } + hasher := &mock.HasherStub{} + hasher.ComputeCalled = func(s string) []byte { + return hdrHash + } + store := initStore() + + factory, _ := shard.NewPreProcessorsContainerFactory( + mock.NewMultiShardsCoordinatorMock(3), + initStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + tdp, + &mock.AddressConverterMock{}, + initAccountsMock(), + &mock.RequestHandlerMock{}, + &mock.TxProcessorMock{}, + &mock.SCProcessorMock{}, + &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + ) + container, _ := factory.Create() + + tc, err := coordinator.NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(3), + initAccountsMock(), + tdp, + &mock.RequestHandlerMock{}, + container, + &mock.InterimProcessorContainerMock{}, + ) + + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + store, + hasher, + &mock.MarshalizerMock{}, + accounts, + 
mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + fd, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + tc, + &mock.Uint64ByteSliceConverterMock{}, + ) + + blkc := createTestBlockchain() + + err = sp.CommitBlock(blkc, hdr, body) + assert.Equal(t, process.ErrMissingTransaction, err) +} + +func TestShardProcessor_CommitBlockOkValsShouldWork(t *testing.T) { + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + txHash := []byte("tx_hash1") + + rootHash := []byte("root hash") + hdrHash := []byte("header hash") + randSeed := []byte("rand seed") + + prevHdr := &block.Header{ + Nonce: 0, + Round: 0, + PubKeysBitmap: rootHash, + PrevHash: hdrHash, + Signature: rootHash, + RootHash: rootHash, + RandSeed: randSeed, + } + + hdr := &block.Header{ + Nonce: 1, + Round: 1, + PubKeysBitmap: rootHash, + PrevHash: hdrHash, + Signature: rootHash, + RootHash: rootHash, + PrevRandSeed: randSeed, + } + mb := block.MiniBlock{ + TxHashes: [][]byte{txHash}, + } + body := block.Body{&mb} + + mbHdr := block.MiniBlockHeader{ + TxCount: uint32(len(mb.TxHashes)), + Hash: hdrHash, + } + mbHdrs := make([]block.MiniBlockHeader, 0) + mbHdrs = append(mbHdrs, mbHdr) + hdr.MiniBlockHeaders = mbHdrs + + accounts := &mock.AccountsStub{ + CommitCalled: func() (i []byte, e error) { + return rootHash, nil + }, + RootHashCalled: func() ([]byte, error) { + return rootHash, nil + }, + } + forkDetectorAddCalled := false + fd := &mock.ForkDetectorMock{ + AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeader data.HeaderHandler, finalHeaderHash []byte) error { + if header == hdr { + forkDetectorAddCalled = true + return nil + } + + return errors.New("should have not got here") + }, + } + hasher := &mock.HasherStub{} + hasher.ComputeCalled = func(s string) []byte { + return hdrHash + } + store := initStore() + + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + store, + hasher, + &mock.MarshalizerMock{}, + accounts, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + fd, + &mock.BlocksTrackerMock{ + AddBlockCalled: func(headerHandler data.HeaderHandler) { + }, + UnnotarisedBlocksCalled: func() []data.HeaderHandler { + return make([]data.HeaderHandler, 0) + }, + }, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + blkc := createTestBlockchain() + blkc.GetCurrentBlockHeaderCalled = func() data.HeaderHandler { + return prevHdr + } + blkc.GetCurrentBlockHeaderHashCalled = func() []byte { + return hdrHash + } + err := sp.ProcessBlock(blkc, hdr, body, haveTime) + assert.Nil(t, err) + err = sp.CommitBlock(blkc, hdr, body) + assert.Nil(t, err) + assert.True(t, forkDetectorAddCalled) + assert.Equal(t, hdrHash, blkc.GetCurrentBlockHeaderHash()) + //this should sleep as there is an async call to display current header and block in CommitBlock + time.Sleep(time.Second) +} + +func TestShardProcessor_CommitBlockCallsIndexerMethods(t *testing.T) { + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + txHash := []byte("tx_hash1") + + rootHash := []byte("root hash") + hdrHash := []byte("header hash") + randSeed := []byte("rand seed") + + prevHdr := &block.Header{ + Nonce: 0, + Round: 0, + PubKeysBitmap: rootHash, + PrevHash: hdrHash, + Signature: 
rootHash, + RootHash: rootHash, + RandSeed: randSeed, + } + + hdr := &block.Header{ + Nonce: 1, + Round: 1, + PubKeysBitmap: rootHash, + PrevHash: hdrHash, + Signature: rootHash, + RootHash: rootHash, + PrevRandSeed: randSeed, + } + mb := block.MiniBlock{ + TxHashes: [][]byte{txHash}, + } + body := block.Body{&mb} + + mbHdr := block.MiniBlockHeader{ + TxCount: uint32(len(mb.TxHashes)), + Hash: hdrHash, + } + mbHdrs := make([]block.MiniBlockHeader, 0) + mbHdrs = append(mbHdrs, mbHdr) + hdr.MiniBlockHeaders = mbHdrs + + accounts := &mock.AccountsStub{ + CommitCalled: func() (i []byte, e error) { + return rootHash, nil + }, + RootHashCalled: func() ([]byte, error) { + return rootHash, nil + }, + } + fd := &mock.ForkDetectorMock{ + AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeader data.HeaderHandler, finalHeaderHash []byte) error { + return nil + }, + } + hasher := &mock.HasherStub{} + hasher.ComputeCalled = func(s string) []byte { + return hdrHash + } + store := initStore() + + var saveBlockCalled map[string]data.TransactionHandler + saveBlockCalledMutex := sync.Mutex{} + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{ + IndexerCalled: func() indexer.Indexer { + return &mock.IndexerMock{ + SaveBlockCalled: func(body data.BodyHandler, header data.HeaderHandler, txPool map[string]data.TransactionHandler) { + saveBlockCalledMutex.Lock() + saveBlockCalled = txPool + saveBlockCalledMutex.Unlock() + }, + } + }, + }, + tdp, + store, + hasher, + &mock.MarshalizerMock{}, + accounts, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + fd, + &mock.BlocksTrackerMock{ + AddBlockCalled: func(headerHandler data.HeaderHandler) { + }, + UnnotarisedBlocksCalled: func() []data.HeaderHandler { + return make([]data.HeaderHandler, 0) + }, + }, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{ + GetAllCurrentUsedTxsCalled: func(blockType block.Type) map[string]data.TransactionHandler { + switch blockType { + case block.TxBlock: + return map[string]data.TransactionHandler{ + "tx_1": &transaction.Transaction{Nonce: 1}, + "tx_2": &transaction.Transaction{Nonce: 2}, + } + case block.SmartContractResultBlock: + return map[string]data.TransactionHandler{ + "utx_1": &smartContractResult.SmartContractResult{Nonce: 1}, + "utx_2": &smartContractResult.SmartContractResult{Nonce: 2}, + } + default: + return nil + } + }, + }, + &mock.Uint64ByteSliceConverterMock{}, + ) + + blkc := createTestBlockchain() + blkc.GetCurrentBlockHeaderCalled = func() data.HeaderHandler { + return prevHdr + } + blkc.GetCurrentBlockHeaderHashCalled = func() []byte { + return hdrHash + } + err := sp.ProcessBlock(blkc, hdr, body, haveTime) + assert.Nil(t, err) + err = sp.CommitBlock(blkc, hdr, body) + assert.Nil(t, err) + + // Wait for the index block go routine to start + time.Sleep(time.Second * 2) + + saveBlockCalledMutex.Lock() + wasCalled := saveBlockCalled + saveBlockCalledMutex.Unlock() + + assert.Equal(t, 4, len(wasCalled)) +} + +func TestShardProcessor_CreateTxBlockBodyWithDirtyAccStateShouldErr(t *testing.T) { + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + journalLen := func() int { return 3 } + revToSnapshot := func(snapshot int) error { return nil } + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.AccountsStub{ 
+			JournalLenCalled:       journalLen,
+			RevertToSnapshotCalled: revToSnapshot,
+		},
+		mock.NewMultiShardsCoordinatorMock(3),
+		mock.NewNodesCoordinatorMock(),
+		&mock.SpecialAddressHandlerMock{},
+		&mock.ForkDetectorMock{},
+		&mock.BlocksTrackerMock{},
+		createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)),
+		&mock.RequestHandlerMock{},
+		&mock.TransactionCoordinatorMock{},
+		&mock.Uint64ByteSliceConverterMock{},
+	)
+	bl, err := sp.CreateBlockBody(0, func() bool { return true })
+	// nil block
+	assert.Nil(t, bl)
+	// account state is dirty error
+	assert.Equal(t, process.ErrAccountStateDirty, err)
+}
+
+func TestShardProcessor_CreateTxBlockBodyWithNoTimeShouldEmptyBlock(t *testing.T) {
+	t.Parallel()
+	tdp := initDataPool([]byte("tx_hash1"))
+	journalLen := func() int { return 0 }
+	rootHashfunc := func() ([]byte, error) {
+		return []byte("roothash"), nil
+	}
+	revToSnapshot := func(snapshot int) error { return nil }
+	sp, _ := blproc.NewShardProcessor(
+		&mock.ServiceContainerMock{},
+		tdp,
+		&mock.ChainStorerMock{},
+		&mock.HasherStub{},
+		&mock.MarshalizerMock{},
+		&mock.AccountsStub{
+			JournalLenCalled:       journalLen,
+			RootHashCalled:         rootHashfunc,
+			RevertToSnapshotCalled: revToSnapshot,
+		},
+		mock.NewMultiShardsCoordinatorMock(3),
+		mock.NewNodesCoordinatorMock(),
+		&mock.SpecialAddressHandlerMock{},
+		&mock.ForkDetectorMock{},
+		&mock.BlocksTrackerMock{},
+		createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)),
+		&mock.RequestHandlerMock{},
+		&mock.TransactionCoordinatorMock{},
+		&mock.Uint64ByteSliceConverterMock{},
+	)
+	haveTime := func() bool {
+		return false
+	}
+	bl, err := sp.CreateBlockBody(0, haveTime)
+	// time is out error
+	assert.Equal(t, process.ErrTimeIsOut, err)
+	// no miniblocks
+	assert.Nil(t, bl)
+}
+
+func TestShardProcessor_CreateTxBlockBodyOK(t *testing.T) {
+	t.Parallel()
+	tdp := initDataPool([]byte("tx_hash1"))
+	journalLen := func() int { return 0 }
+	rootHashfunc := func() ([]byte, error) {
+		return []byte("roothash"), nil
+	}
+	haveTime := func() bool {
+		return true
+	}
+	sp, _ := blproc.NewShardProcessor(
+		&mock.ServiceContainerMock{},
+		tdp,
+		&mock.ChainStorerMock{},
+		&mock.HasherStub{},
+		&mock.MarshalizerMock{},
+		&mock.AccountsStub{
+			JournalLenCalled: journalLen,
+			RootHashCalled:   rootHashfunc,
+		},
+		mock.NewMultiShardsCoordinatorMock(3),
+		mock.NewNodesCoordinatorMock(),
+		&mock.SpecialAddressHandlerMock{},
+		&mock.ForkDetectorMock{},
+		&mock.BlocksTrackerMock{},
+		createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)),
+		&mock.RequestHandlerMock{},
+		&mock.TransactionCoordinatorMock{},
+		&mock.Uint64ByteSliceConverterMock{},
+	)
+	blk, err := sp.CreateBlockBody(0, haveTime)
+	assert.NotNil(t, blk)
+	assert.Nil(t, err)
+}
+
+//------- ComputeNewNoncePrevHash
+
+func TestNode_ComputeNewNoncePrevHashShouldWork(t *testing.T) {
+	t.Parallel()
+	tdp := initDataPool([]byte("tx_hash1"))
+	marshalizer := &mock.MarshalizerStub{}
+	hasher := &mock.HasherStub{}
+	be, _ := blproc.NewShardProcessor(
+		&mock.ServiceContainerMock{},
+		tdp,
+		initStore(),
+		hasher,
+		marshalizer,
+		initAccountsMock(),
+		mock.NewMultiShardsCoordinatorMock(3),
+		mock.NewNodesCoordinatorMock(),
+		&mock.SpecialAddressHandlerMock{},
+		&mock.ForkDetectorMock{},
+		&mock.BlocksTrackerMock{},
+		createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)),
+		&mock.RequestHandlerMock{},
+		&mock.TransactionCoordinatorMock{},
+		&mock.Uint64ByteSliceConverterMock{},
+	)
+	hdr, txBlock := createTestHdrTxBlockBody()
+	marshalizer.MarshalCalled = func(obj interface{}) (bytes []byte, e error) {
+		if hdr == obj {
+
return []byte("hdrHeaderMarshalized"), nil + } + if reflect.DeepEqual(txBlock, obj) { + return []byte("txBlockBodyMarshalized"), nil + } + return nil, nil + } + hasher.ComputeCalled = func(s string) []byte { + if s == "hdrHeaderMarshalized" { + return []byte("hdr hash") + } + if s == "txBlockBodyMarshalized" { + return []byte("tx block body hash") + } + return nil + } + _, err := be.ComputeHeaderHash(hdr) + assert.Nil(t, err) +} + +func createTestHdrTxBlockBody() (*block.Header, block.Body) { + hasher := mock.HasherMock{} + hdr := &block.Header{ + Nonce: 1, + ShardId: 2, + Epoch: 3, + Round: 4, + TimeStamp: uint64(11223344), + PrevHash: hasher.Compute("prev hash"), + PubKeysBitmap: []byte{255, 0, 128}, + Signature: hasher.Compute("signature"), + RootHash: hasher.Compute("root hash"), + } + txBlock := block.Body{ + { + ReceiverShardID: 0, + SenderShardID: 0, + TxHashes: [][]byte{ + hasher.Compute("txHash_0_1"), + hasher.Compute("txHash_0_2"), + }, + }, + { + ReceiverShardID: 1, + SenderShardID: 0, + TxHashes: [][]byte{ + hasher.Compute("txHash_1_1"), + hasher.Compute("txHash_1_2"), + }, + }, + { + ReceiverShardID: 2, + SenderShardID: 0, + TxHashes: [][]byte{ + hasher.Compute("txHash_2_1"), + }, + }, + { + ReceiverShardID: 3, + SenderShardID: 0, + TxHashes: make([][]byte, 0), + }, + } + return hdr, txBlock +} + +//------- ComputeNewNoncePrevHash + +func TestShardProcessor_DisplayLogInfo(t *testing.T) { + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + hasher := mock.HasherMock{} + hdr, txBlock := createTestHdrTxBlockBody() + shardCoordinator := mock.NewMultiShardsCoordinatorMock(3) + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + initStore(), + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + initAccountsMock(), + shardCoordinator, + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(shardCoordinator), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + assert.NotNil(t, sp) + hdr.PrevHash = hasher.Compute("prev hash") + sp.DisplayLogInfo(hdr, txBlock, []byte("tx_hash1"), shardCoordinator.NumberOfShards(), shardCoordinator.SelfId(), tdp) +} + +func TestBlockProcessor_CreateBlockHeaderShouldNotReturnNil(t *testing.T) { + t.Parallel() + bp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + initDataPool([]byte("tx_hash1")), + initStore(), + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + mbHeaders, err := bp.CreateBlockHeader(nil, 0, func() bool { + return true + }) + assert.Nil(t, err) + assert.NotNil(t, mbHeaders) + assert.Equal(t, 0, len(mbHeaders.(*block.Header).MiniBlockHeaders)) +} + +func TestShardProcessor_CreateBlockHeaderShouldErrWhenMarshalizerErrors(t *testing.T) { + t.Parallel() + bp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + initDataPool([]byte("tx_hash1")), + initStore(), + &mock.HasherStub{}, + &mock.MarshalizerMock{Fail: true}, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + 
&mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + body := block.Body{ + { + ReceiverShardID: 1, + SenderShardID: 0, + TxHashes: make([][]byte, 0), + }, + { + ReceiverShardID: 2, + SenderShardID: 0, + TxHashes: make([][]byte, 0), + }, + { + ReceiverShardID: 3, + SenderShardID: 0, + TxHashes: make([][]byte, 0), + }, + } + mbHeaders, err := bp.CreateBlockHeader(body, 0, func() bool { + return true + }) + assert.NotNil(t, err) + assert.Nil(t, mbHeaders) +} + +func TestShardProcessor_CreateBlockHeaderReturnsOK(t *testing.T) { + t.Parallel() + bp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + initDataPool([]byte("tx_hash1")), + initStore(), + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + body := block.Body{ + { + ReceiverShardID: 1, + SenderShardID: 0, + TxHashes: make([][]byte, 0), + }, + { + ReceiverShardID: 2, + SenderShardID: 0, + TxHashes: make([][]byte, 0), + }, + { + ReceiverShardID: 3, + SenderShardID: 0, + TxHashes: make([][]byte, 0), + }, + } + mbHeaders, err := bp.CreateBlockHeader(body, 0, func() bool { + return true + }) + assert.Nil(t, err) + assert.Equal(t, len(body), len(mbHeaders.(*block.Header).MiniBlockHeaders)) +} + +func TestShardProcessor_CommitBlockShouldRevertAccountStateWhenErr(t *testing.T) { + t.Parallel() + // set accounts dirty + journalEntries := 3 + revToSnapshot := func(snapshot int) error { + journalEntries = 0 + return nil + } + bp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + initDataPool([]byte("tx_hash1")), + initStore(), + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.AccountsStub{ + RevertToSnapshotCalled: revToSnapshot, + }, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + err := bp.CommitBlock(nil, nil, nil) + assert.NotNil(t, err) + assert.Equal(t, 0, journalEntries) +} + +func TestShardProcessor_MarshalizedDataToBroadcastShouldWork(t *testing.T) { + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + txHash0 := []byte("txHash0") + mb0 := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 0, + TxHashes: [][]byte{txHash0}, + } + txHash1 := []byte("txHash1") + mb1 := block.MiniBlock{ + ReceiverShardID: 1, + SenderShardID: 0, + TxHashes: [][]byte{txHash1}, + } + body := make(block.Body, 0) + body = append(body, &mb0) + body = append(body, &mb1) + body = append(body, &mb0) + body = append(body, &mb1) + marshalizer := &mock.MarshalizerMock{ + Fail: false, + } + + factory, _ := shard.NewPreProcessorsContainerFactory( + mock.NewMultiShardsCoordinatorMock(3), + initStore(), + marshalizer, + &mock.HasherMock{}, + tdp, + &mock.AddressConverterMock{}, + initAccountsMock(), + &mock.RequestHandlerMock{}, + &mock.TxProcessorMock{}, + &mock.SCProcessorMock{}, + &mock.SmartContractResultsProcessorMock{}, + 
&mock.RewardTxProcessorMock{}, + ) + container, _ := factory.Create() + + tc, err := coordinator.NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(3), + initAccountsMock(), + tdp, + &mock.RequestHandlerMock{}, + container, + &mock.InterimProcessorContainerMock{}, + ) + + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + initStore(), + &mock.HasherStub{}, + marshalizer, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + tc, + &mock.Uint64ByteSliceConverterMock{}, + ) + msh, mstx, err := sp.MarshalizedDataToBroadcast(&block.Header{}, body) + assert.Nil(t, err) + assert.NotNil(t, msh) + assert.NotNil(t, mstx) + _, found := msh[0] + assert.False(t, found) + + expectedBody := make(block.Body, 0) + err = marshalizer.Unmarshal(&expectedBody, msh[1]) + assert.Nil(t, err) + assert.Equal(t, len(expectedBody), 2) + assert.Equal(t, &mb1, expectedBody[0]) + assert.Equal(t, &mb1, expectedBody[1]) +} + +func TestShardProcessor_MarshalizedDataWrongType(t *testing.T) { + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + marshalizer := &mock.MarshalizerMock{ + Fail: false, + } + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + initStore(), + &mock.HasherStub{}, + marshalizer, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + wr := &wrongBody{} + msh, mstx, err := sp.MarshalizedDataToBroadcast(&block.Header{}, wr) + assert.Equal(t, process.ErrWrongTypeAssertion, err) + assert.Nil(t, msh) + assert.Nil(t, mstx) +} + +func TestShardProcessor_MarshalizedDataNilInput(t *testing.T) { + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + marshalizer := &mock.MarshalizerMock{ + Fail: false, + } + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + initStore(), + &mock.HasherStub{}, + marshalizer, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + msh, mstx, err := sp.MarshalizedDataToBroadcast(nil, nil) + assert.Equal(t, process.ErrNilMiniBlocks, err) + assert.Nil(t, msh) + assert.Nil(t, mstx) +} + +func TestShardProcessor_MarshalizedDataMarshalWithoutSuccess(t *testing.T) { + t.Parallel() + wasCalled := false + tdp := initDataPool([]byte("tx_hash1")) + txHash0 := []byte("txHash0") + mb0 := block.MiniBlock{ + ReceiverShardID: 1, + SenderShardID: 0, + TxHashes: [][]byte{txHash0}, + } + body := make(block.Body, 0) + body = append(body, &mb0) + marshalizer := &mock.MarshalizerStub{ + MarshalCalled: func(obj interface{}) ([]byte, error) { + wasCalled = true + return nil, process.ErrMarshalWithoutSuccess + }, + } + + factory, _ := shard.NewPreProcessorsContainerFactory( + mock.NewMultiShardsCoordinatorMock(3), + initStore(), + marshalizer, + &mock.HasherMock{}, + tdp, + 
&mock.AddressConverterMock{}, + initAccountsMock(), + &mock.RequestHandlerMock{}, + &mock.TxProcessorMock{}, + &mock.SCProcessorMock{}, + &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + ) + container, _ := factory.Create() + + tc, err := coordinator.NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(3), + initAccountsMock(), + tdp, + &mock.RequestHandlerMock{}, + container, + &mock.InterimProcessorContainerMock{}, + ) + + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + initStore(), + &mock.HasherStub{}, + marshalizer, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + tc, + &mock.Uint64ByteSliceConverterMock{}, + ) + + msh, mstx, err := sp.MarshalizedDataToBroadcast(&block.Header{}, body) + assert.Nil(t, err) + assert.True(t, wasCalled) + assert.Equal(t, 0, len(msh)) + assert.Equal(t, 0, len(mstx)) +} + +//------- receivedMetaBlock + +func TestShardProcessor_ReceivedMetaBlockShouldRequestMissingMiniBlocks(t *testing.T) { + t.Parallel() + + hasher := mock.HasherMock{} + marshalizer := &mock.MarshalizerMock{} + dataPool := mock.NewPoolsHolderMock() + + //we will have a metablock that will return 3 miniblock hashes + //1 miniblock hash will be in cache + //2 will be requested on network + + miniBlockHash1 := []byte("miniblock hash 1 found in cache") + miniBlockHash2 := []byte("miniblock hash 2") + miniBlockHash3 := []byte("miniblock hash 3") + + metaBlock := mock.HeaderHandlerStub{ + GetMiniBlockHeadersWithDstCalled: func(destId uint32) map[string]uint32 { + return map[string]uint32{ + string(miniBlockHash1): 0, + string(miniBlockHash2): 0, + string(miniBlockHash3): 0, + } + }, + } + + //put this metaBlock inside datapool + metaBlockHash := []byte("metablock hash") + dataPool.MetaBlocks().Put(metaBlockHash, &metaBlock) + //put the existing miniblock inside datapool + dataPool.MiniBlocks().Put(miniBlockHash1, &block.MiniBlock{}) + + miniBlockHash1Requested := int32(0) + miniBlockHash2Requested := int32(0) + miniBlockHash3Requested := int32(0) + + requestHandler := &mock.RequestHandlerMock{RequestMiniBlockHandlerCalled: func(destShardID uint32, miniblockHash []byte) { + if bytes.Equal(miniBlockHash1, miniblockHash) { + atomic.AddInt32(&miniBlockHash1Requested, 1) + } + if bytes.Equal(miniBlockHash2, miniblockHash) { + atomic.AddInt32(&miniBlockHash2Requested, 1) + } + if bytes.Equal(miniBlockHash3, miniblockHash) { + atomic.AddInt32(&miniBlockHash3Requested, 1) + } + }} + + tc, _ := coordinator.NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(3), + initAccountsMock(), + dataPool, + requestHandler, + &mock.PreProcessorContainerMock{}, + &mock.InterimProcessorContainerMock{}, + ) + + bp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + dataPool, + initStore(), + hasher, + marshalizer, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + requestHandler, + tc, + &mock.Uint64ByteSliceConverterMock{}, + ) + bp.ReceivedMetaBlock(metaBlockHash) + + //we have to wait to be sure txHash1Requested is not incremented by a late call + time.Sleep(time.Second) + + assert.Equal(t, 
int32(0), atomic.LoadInt32(&miniBlockHash1Requested))
+	assert.Equal(t, int32(1), atomic.LoadInt32(&miniBlockHash2Requested))
+	assert.Equal(t, int32(1), atomic.LoadInt32(&miniBlockHash3Requested))
+}
+
+//--------- receivedMetaBlockNoMissingMiniBlocks
+func TestShardProcessor_ReceivedMetaBlockNoMissingMiniBlocksShouldPass(t *testing.T) {
+	t.Parallel()
+
+	hasher := mock.HasherMock{}
+	marshalizer := &mock.MarshalizerMock{}
+	dataPool := mock.NewPoolsHolderMock()
+
+	//we will have a metablock that returns a single miniblock hash
+	//the miniblock is already in cache, so nothing should be requested on the network
+
+	miniBlockHash1 := []byte("miniblock hash 1 found in cache")
+
+	metaBlock := mock.HeaderHandlerStub{
+		GetMiniBlockHeadersWithDstCalled: func(destId uint32) map[string]uint32 {
+			return map[string]uint32{
+				string(miniBlockHash1): 0,
+			}
+		},
+	}
+
+	//put this metaBlock inside datapool
+	metaBlockHash := []byte("metablock hash")
+	dataPool.MetaBlocks().Put(metaBlockHash, &metaBlock)
+	//put the existing miniblock inside datapool
+	dataPool.MiniBlocks().Put(miniBlockHash1, &block.MiniBlock{})
+
+	noOfMissingMiniBlocks := int32(0)
+
+	requestHandler := &mock.RequestHandlerMock{RequestMiniBlockHandlerCalled: func(destShardID uint32, miniblockHash []byte) {
+		atomic.AddInt32(&noOfMissingMiniBlocks, 1)
+	}}
+
+	tc, _ := coordinator.NewTransactionCoordinator(
+		mock.NewMultiShardsCoordinatorMock(3),
+		initAccountsMock(),
+		dataPool,
+		requestHandler,
+		&mock.PreProcessorContainerMock{},
+		&mock.InterimProcessorContainerMock{},
+	)
+
+	sp, _ := blproc.NewShardProcessor(
+		&mock.ServiceContainerMock{},
+		dataPool,
+		initStore(),
+		hasher,
+		marshalizer,
+		initAccountsMock(),
+		mock.NewMultiShardsCoordinatorMock(3),
+		mock.NewNodesCoordinatorMock(),
+		&mock.SpecialAddressHandlerMock{},
+		&mock.ForkDetectorMock{},
+		&mock.BlocksTrackerMock{},
+		createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)),
+		requestHandler,
+		tc,
+		&mock.Uint64ByteSliceConverterMock{},
+	)
+	sp.ReceivedMetaBlock(metaBlockHash)
+	assert.Equal(t, int32(0), atomic.LoadInt32(&noOfMissingMiniBlocks))
+}
+
+//--------- createAndProcessCrossMiniBlocksDstMe
+func TestShardProcessor_CreateAndProcessCrossMiniBlocksDstMe(t *testing.T) {
+	t.Parallel()
+
+	tdp := mock.NewPoolsHolderMock()
+	txHash := []byte("tx_hash1")
+	tdp.Transactions().AddData(txHash, &transaction.Transaction{}, process.ShardCacherIdentifier(1, 0))
+	body := make(block.Body, 0)
+	txHashes := make([][]byte, 0)
+	txHashes = append(txHashes, txHash)
+	miniblock := block.MiniBlock{
+		ReceiverShardID: 0,
+		SenderShardID:   1,
+		TxHashes:        txHashes,
+	}
+	body = append(body, &miniblock)
+
+	hasher := &mock.HasherStub{}
+	marshalizer := &mock.MarshalizerMock{}
+
+	mbbytes, _ := marshalizer.Marshal(miniblock)
+	mbHash := hasher.Compute(string(mbbytes))
+	mbHdr := block.MiniBlockHeader{
+		ReceiverShardID: 0,
+		SenderShardID:   1,
+		TxCount:         uint32(len(txHashes)),
+		Hash:            mbHash}
+	mbHdrs := make([]block.MiniBlockHeader, 0)
+	mbHdrs = append(mbHdrs, mbHdr)
+
+	shardMiniBlock := block.ShardMiniBlockHeader{
+		ReceiverShardId: mbHdr.ReceiverShardID,
+		SenderShardId:   mbHdr.SenderShardID,
+		TxCount:         mbHdr.TxCount,
+		Hash:            mbHdr.Hash,
+	}
+	shardMiniblockHdrs := make([]block.ShardMiniBlockHeader, 0)
+	shardMiniblockHdrs = append(shardMiniblockHdrs, shardMiniBlock)
+	shardHeader := block.ShardData{
+		ShardMiniBlockHeaders: shardMiniblockHdrs,
+	}
+	shardHdrs := make([]block.ShardData, 0)
+	shardHdrs = append(shardHdrs, shardHeader)
+
+	meta := &block.MetaBlock{
+		Nonce:     1,
ShardInfo: make([]block.ShardData, 0), + Round: 1, + PrevRandSeed: []byte("roothash"), + } + metaBytes, _ := marshalizer.Marshal(meta) + metaHash := hasher.Compute(string(metaBytes)) + + tdp.MetaBlocks().Put(metaHash, meta) + + haveTimeTrue := func() bool { + return true + } + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.AccountsStub{}, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + miniBlockSlice, usedMetaHdrsHashes, noOfTxs, err := sp.CreateAndProcessCrossMiniBlocksDstMe(3, 2, 2, haveTimeTrue) + assert.Equal(t, err == nil, true) + assert.Equal(t, len(miniBlockSlice) == 0, true) + assert.Equal(t, len(usedMetaHdrsHashes) == 0, true) + assert.Equal(t, noOfTxs, uint32(0)) +} + +func TestShardProcessor_NewShardProcessorWrongTypeOfStartHeaderShouldErrWrongTypeAssertion(t *testing.T) { + t.Parallel() + + tdp := mock.NewPoolsHolderMock() + txHash := []byte(nil) + tdp.Transactions().AddData(txHash, &transaction.Transaction{}, process.ShardCacherIdentifier(1, 0)) + + startHeaders := createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)) + + startHeaders[sharding.MetachainShardId] = &block.Header{} + + sp, err := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.AccountsStub{}, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + startHeaders, + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + assert.Nil(t, sp) + assert.Equal(t, process.ErrWrongTypeAssertion, err) +} + +func TestShardProcessor_CreateAndProcessCrossMiniBlocksDstMeProcessPartOfMiniBlocksInMetaBlock(t *testing.T) { + t.Parallel() + + haveTimeTrue := func() bool { + return true + } + tdp := mock.NewPoolsHolderMock() + destShardId := uint32(2) + + hasher := &mock.HasherStub{} + marshalizer := &mock.MarshalizerMock{} + miniblocks := make([]*block.MiniBlock, 6) + + txHash := []byte("txhash") + txHashes := make([][]byte, 0) + txHashes = append(txHashes, txHash) + + miniblock1 := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 1, + TxHashes: txHashes, + } + miniblock2 := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 2, + TxHashes: txHashes, + } + + miniBlocks := make([]block.MiniBlock, 0) + miniBlocks = append(miniBlocks, miniblock1, miniblock2) + + destShards := []uint32{1, 3, 4} + for i := 0; i < 6; i++ { + miniblocks[i], _ = createDummyMiniBlock(fmt.Sprintf("tx hash %d", i), marshalizer, hasher, destShardId, destShards[i/2]) + } + + //put 2 metablocks in pool + meta := &block.MetaBlock{ + Nonce: 1, + ShardInfo: createShardData(hasher, marshalizer, miniBlocks), + Round: 1, + PrevRandSeed: []byte("roothash"), + } + + mb1Hash := []byte("meta block 1") + tdp.MetaBlocks().Put( + mb1Hash, + meta, + ) + + meta = &block.MetaBlock{ + Nonce: 2, + ShardInfo: createShardData(hasher, marshalizer, miniBlocks), + Round: 2, + } + + mb2Hash := []byte("meta block 2") + tdp.MetaBlocks().Put( + mb2Hash, + meta, + ) + + meta = &block.MetaBlock{ + Nonce: 3, 
+ ShardInfo: make([]block.ShardData, 0), + Round: 3, + PrevRandSeed: []byte("roothash"), + } + + mb3Hash := []byte("meta block 3") + tdp.MetaBlocks().Put( + mb3Hash, + meta, + ) + + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.AccountsStub{}, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + miniBlocksReturned, usedMetaHdrsHashes, nrTxAdded, err := sp.CreateAndProcessCrossMiniBlocksDstMe(3, 2, 2, haveTimeTrue) + + assert.Equal(t, 0, len(miniBlocksReturned)) + assert.Equal(t, 0, len(usedMetaHdrsHashes)) + assert.Equal(t, uint32(0), nrTxAdded) + assert.Nil(t, err) +} + +//------- createMiniBlocks + +func TestShardProcessor_CreateMiniBlocksShouldWorkWithIntraShardTxs(t *testing.T) { + t.Parallel() + + hasher := mock.HasherMock{} + marshalizer := &mock.MarshalizerMock{} + dataPool := mock.NewPoolsHolderMock() + + //we will have a 3 txs in pool + + txHash1 := []byte("tx hash 1") + txHash2 := []byte("tx hash 2") + txHash3 := []byte("tx hash 3") + + senderShardId := uint32(0) + receiverShardId := uint32(0) + + tx1Nonce := uint64(45) + tx2Nonce := uint64(46) + tx3Nonce := uint64(47) + + //put the existing tx inside datapool + cacheId := process.ShardCacherIdentifier(senderShardId, receiverShardId) + dataPool.Transactions().AddData(txHash1, &transaction.Transaction{ + Nonce: tx1Nonce, + Data: string(txHash1), + }, cacheId) + dataPool.Transactions().AddData(txHash2, &transaction.Transaction{ + Nonce: tx2Nonce, + Data: string(txHash2), + }, cacheId) + dataPool.Transactions().AddData(txHash3, &transaction.Transaction{ + Nonce: tx3Nonce, + Data: string(txHash3), + }, cacheId) + + tx1ExecutionResult := uint64(0) + tx2ExecutionResult := uint64(0) + tx3ExecutionResult := uint64(0) + + txProcessorMock := &mock.TxProcessorMock{ + ProcessTransactionCalled: func(transaction *transaction.Transaction, round uint64) error { + //execution, in this context, means moving the tx nonce to itx corresponding execution result variable + if transaction.Data == string(txHash1) { + tx1ExecutionResult = transaction.Nonce + } + if transaction.Data == string(txHash2) { + tx2ExecutionResult = transaction.Nonce + } + if transaction.Data == string(txHash3) { + tx3ExecutionResult = transaction.Nonce + } + + return nil + }, + } + shardCoordinator := mock.NewMultiShardsCoordinatorMock(3) + accntAdapter := &mock.AccountsStub{ + RevertToSnapshotCalled: func(snapshot int) error { + assert.Fail(t, "revert should have not been called") + return nil + }, + JournalLenCalled: func() int { + return 0 + }, + } + + factory, _ := shard.NewPreProcessorsContainerFactory( + shardCoordinator, + initStore(), + marshalizer, + hasher, + dataPool, + &mock.AddressConverterMock{}, + accntAdapter, + &mock.RequestHandlerMock{}, + txProcessorMock, + &mock.SCProcessorMock{}, + &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + ) + container, _ := factory.Create() + + tc, err := coordinator.NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(3), + accntAdapter, + dataPool, + &mock.RequestHandlerMock{}, + container, + &mock.InterimProcessorContainerMock{}, + ) + + bp, _ := blproc.NewShardProcessor( + 
&mock.ServiceContainerMock{}, + dataPool, + initStore(), + hasher, + marshalizer, + accntAdapter, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + tc, + &mock.Uint64ByteSliceConverterMock{}, + ) + + blockBody, err := bp.CreateMiniBlocks(1, 15000, 0, func() bool { return true }) + + assert.Nil(t, err) + //testing execution + assert.Equal(t, tx1Nonce, tx1ExecutionResult) + assert.Equal(t, tx2Nonce, tx2ExecutionResult) + assert.Equal(t, tx3Nonce, tx3ExecutionResult) + //one miniblock output + assert.Equal(t, 1, len(blockBody)) + //miniblock should have 3 txs + assert.Equal(t, 3, len(blockBody[0].TxHashes)) + //testing all 3 hashes are present in block body + assert.True(t, isInTxHashes(txHash1, blockBody[0].TxHashes)) + assert.True(t, isInTxHashes(txHash2, blockBody[0].TxHashes)) + assert.True(t, isInTxHashes(txHash3, blockBody[0].TxHashes)) +} + +func TestShardProcessor_GetProcessedMetaBlockFromPoolShouldWork(t *testing.T) { + t.Parallel() + + //we have 3 metablocks in pool each containing 2 miniblocks. + //blockbody will have 2 + 1 miniblocks from 2 out of the 3 metablocks + //The test should remove only one metablock + + destShardId := uint32(2) + + hasher := mock.HasherMock{} + marshalizer := &mock.MarshalizerMock{} + dataPool := mock.NewPoolsHolderMock() + + miniblocks := make([]*block.MiniBlock, 6) + miniblockHashes := make([][]byte, 6) + + destShards := []uint32{1, 3, 4} + for i := 0; i < 6; i++ { + mb, hash := createDummyMiniBlock(fmt.Sprintf("tx hash %d", i), marshalizer, hasher, destShardId, destShards[i/2]) + miniblocks[i] = mb + miniblockHashes[i] = hash + } + + //put 3 metablocks in pool + mb1Hash := []byte("meta block 1") + dataPool.MetaBlocks().Put( + mb1Hash, + createDummyMetaBlock(destShardId, destShards[0], miniblockHashes[0], miniblockHashes[1]), + ) + mb2Hash := []byte("meta block 2") + dataPool.MetaBlocks().Put( + mb2Hash, + createDummyMetaBlock(destShardId, destShards[1], miniblockHashes[2], miniblockHashes[3]), + ) + mb3Hash := []byte("meta block 3") + dataPool.MetaBlocks().Put( + mb3Hash, + createDummyMetaBlock(destShardId, destShards[2], miniblockHashes[4], miniblockHashes[5]), + ) + + shardCoordinator := mock.NewMultipleShardsCoordinatorMock() + shardCoordinator.CurrentShard = destShardId + shardCoordinator.SetNoShards(destShardId + 1) + + bp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + dataPool, + &mock.ChainStorerMock{}, + hasher, + marshalizer, + initAccountsMock(), + shardCoordinator, + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{ + GetHighestFinalBlockNonceCalled: func() uint64 { + return 0 + }, + }, + &mock.BlocksTrackerMock{ + RemoveNotarisedBlocksCalled: func(headerHandler data.HeaderHandler) error { + return nil + }, + }, + createGenesisBlocks(shardCoordinator), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + //create block body with first 3 miniblocks from miniblocks var + blockBody := block.Body{miniblocks[0], miniblocks[1], miniblocks[2]} + + hashes := make([][]byte, 0) + hashes = append(hashes, mb1Hash) + hashes = append(hashes, mb2Hash) + hashes = append(hashes, mb3Hash) + blockHeader := &block.Header{MetaBlockHashes: hashes} + + _, err := bp.GetProcessedMetaBlocksFromPool(blockBody, blockHeader) + + 
assert.Nil(t, err) + //check WasMiniBlockProcessed for remaining metablocks + metaBlock2Recov, _ := dataPool.MetaBlocks().Get(mb2Hash) + assert.True(t, (metaBlock2Recov.(data.HeaderHandler)).GetMiniBlockProcessed(miniblockHashes[2])) + assert.False(t, (metaBlock2Recov.(data.HeaderHandler)).GetMiniBlockProcessed(miniblockHashes[3])) + + metaBlock3Recov, _ := dataPool.MetaBlocks().Get(mb3Hash) + assert.False(t, (metaBlock3Recov.(data.HeaderHandler)).GetMiniBlockProcessed(miniblockHashes[4])) + assert.False(t, (metaBlock3Recov.(data.HeaderHandler)).GetMiniBlockProcessed(miniblockHashes[5])) +} + +func TestBlockProcessor_RestoreBlockIntoPoolsShouldErrNilBlockHeader(t *testing.T) { + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + + be, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + initStore(), + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + err := be.RestoreBlockIntoPools(nil, nil) + assert.NotNil(t, err) + assert.Equal(t, process.ErrNilBlockHeader, err) +} + +func TestBlockProcessor_RestoreBlockIntoPoolsShouldErrNilTxBlockBody(t *testing.T) { + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + initStore(), + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + err := sp.RestoreBlockIntoPools(&block.Header{}, nil) + assert.NotNil(t, err) + assert.Equal(t, err, process.ErrNilTxBlockBody) +} + +func TestShardProcessor_RestoreBlockIntoPoolsShouldWork(t *testing.T) { + t.Parallel() + + txHash := []byte("tx hash 1") + + dataPool := mock.NewPoolsHolderMock() + marshalizerMock := &mock.MarshalizerMock{} + hasherMock := &mock.HasherStub{} + + body := make(block.Body, 0) + tx := transaction.Transaction{Nonce: 1} + buffTx, _ := marshalizerMock.Marshal(tx) + + store := &mock.ChainStorerMock{ + GetAllCalled: func(unitType dataRetriever.UnitType, keys [][]byte) (map[string][]byte, error) { + m := make(map[string][]byte, 0) + m[string(txHash)] = buffTx + return m, nil + }, + GetStorerCalled: func(unitType dataRetriever.UnitType) storage.Storer { + return &mock.StorerStub{ + RemoveCalled: func(key []byte) error { + return nil + }, + } + }, + } + + factory, _ := shard.NewPreProcessorsContainerFactory( + mock.NewMultiShardsCoordinatorMock(3), + store, + marshalizerMock, + hasherMock, + dataPool, + &mock.AddressConverterMock{}, + initAccountsMock(), + &mock.RequestHandlerMock{}, + &mock.TxProcessorMock{}, + &mock.SCProcessorMock{}, + &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + ) + container, _ := factory.Create() + + tc, err := coordinator.NewTransactionCoordinator( + mock.NewMultiShardsCoordinatorMock(3), + initAccountsMock(), + dataPool, + &mock.RequestHandlerMock{}, + container, + &mock.InterimProcessorContainerMock{}, + ) + + sp, _ := 
blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + dataPool, + store, + hasherMock, + marshalizerMock, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + tc, + &mock.Uint64ByteSliceConverterMock{}, + ) + + txHashes := make([][]byte, 0) + txHashes = append(txHashes, txHash) + miniblock := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 1, + TxHashes: txHashes, + } + body = append(body, &miniblock) + + miniblockHash := []byte("mini block hash 1") + hasherMock.ComputeCalled = func(s string) []byte { + return miniblockHash + } + + metablockHash := []byte("meta block hash 1") + metablockHeader := createDummyMetaBlock(0, 1, miniblockHash) + metablockHeader.SetMiniBlockProcessed(metablockHash, true) + dataPool.MetaBlocks().Put( + metablockHash, + metablockHeader, + ) + + err = sp.RestoreBlockIntoPools(&block.Header{}, body) + + miniblockFromPool, _ := dataPool.MiniBlocks().Get(miniblockHash) + txFromPool, _ := dataPool.Transactions().SearchFirstData(txHash) + metablockFromPool, _ := dataPool.MetaBlocks().Get(metablockHash) + metablock := metablockFromPool.(*block.MetaBlock) + assert.Nil(t, err) + assert.Equal(t, &miniblock, miniblockFromPool) + assert.Equal(t, &tx, txFromPool) + assert.Equal(t, false, metablock.GetMiniBlockProcessed(miniblockHash)) +} + +func TestShardProcessor_DecodeBlockBody(t *testing.T) { + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + marshalizerMock := &mock.MarshalizerMock{} + sp, err := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + marshalizerMock, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + body := make(block.Body, 0) + body = append(body, &block.MiniBlock{ReceiverShardID: 69}) + message, err := marshalizerMock.Marshal(body) + assert.Nil(t, err) + + dcdBlk := sp.DecodeBlockBody(nil) + assert.Nil(t, dcdBlk) + + dcdBlk = sp.DecodeBlockBody(message) + assert.Equal(t, body, dcdBlk) + assert.Equal(t, uint32(69), body[0].ReceiverShardID) +} + +func TestShardProcessor_DecodeBlockHeader(t *testing.T) { + t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + marshalizerMock := &mock.MarshalizerMock{} + sp, err := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + tdp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + marshalizerMock, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + hdr := &block.Header{} + hdr.Nonce = 1 + hdr.TimeStamp = uint64(0) + hdr.Signature = []byte("A") + message, err := marshalizerMock.Marshal(hdr) + assert.Nil(t, err) + + message, err = marshalizerMock.Marshal(hdr) + assert.Nil(t, err) + + dcdHdr := sp.DecodeBlockHeader(nil) + assert.Nil(t, dcdHdr) + + dcdHdr = 
sp.DecodeBlockHeader(message) + assert.Equal(t, hdr, dcdHdr) + assert.Equal(t, []byte("A"), dcdHdr.GetSignature()) +} + +func TestShardProcessor_IsHdrConstructionValid(t *testing.T) { + t.Parallel() + + hasher := mock.HasherMock{} + marshalizer := &mock.MarshalizerMock{} + dataPool := initDataPool([]byte("tx_hash1")) + + shardNr := uint32(5) + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + dataPool, + &mock.ChainStorerMock{}, + hasher, + marshalizer, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(shardNr), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(shardNr)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + prevRandSeed := []byte("prevrand") + currRandSeed := []byte("currrand") + notarizedHdrs := sp.NotarizedHdrs() + lastHdr := &block.MetaBlock{Round: 9, + Nonce: 44, + RandSeed: prevRandSeed} + notarizedHdrs[sharding.MetachainShardId] = append(notarizedHdrs[sharding.MetachainShardId], lastHdr) + + //put the existing headers inside datapool + + //header shard 0 + prevHash, _ := sp.ComputeHeaderHash(sp.LastNotarizedHdrForShard(sharding.MetachainShardId).(*block.MetaBlock)) + prevHdr := &block.MetaBlock{ + Round: 10, + Nonce: 45, + PrevRandSeed: prevRandSeed, + RandSeed: currRandSeed, + PrevHash: prevHash, + RootHash: []byte("prevRootHash")} + + prevHash, _ = sp.ComputeHeaderHash(prevHdr) + currHdr := &block.MetaBlock{ + Round: 11, + Nonce: 46, + PrevRandSeed: currRandSeed, + RandSeed: []byte("nextrand"), + PrevHash: prevHash, + RootHash: []byte("currRootHash")} + + err := sp.IsHdrConstructionValid(nil, prevHdr) + assert.Equal(t, err, process.ErrNilBlockHeader) + + err = sp.IsHdrConstructionValid(currHdr, nil) + assert.Equal(t, err, process.ErrNilBlockHeader) + + currHdr.Nonce = 0 + err = sp.IsHdrConstructionValid(currHdr, prevHdr) + assert.Equal(t, err, process.ErrWrongNonceInBlock) + + currHdr.Nonce = 0 + prevHdr.Nonce = 0 + err = sp.IsHdrConstructionValid(currHdr, prevHdr) + assert.Equal(t, err, process.ErrRootStateMissmatch) + + currHdr.Nonce = 0 + prevHdr.Nonce = 0 + prevHdr.RootHash = nil + err = sp.IsHdrConstructionValid(currHdr, prevHdr) + assert.Nil(t, err) + + currHdr.Nonce = 46 + prevHdr.Nonce = 45 + prevHdr.Round = currHdr.Round + 1 + err = sp.IsHdrConstructionValid(currHdr, prevHdr) + assert.Equal(t, err, process.ErrLowerRoundInOtherChainBlock) + + prevHdr.Round = currHdr.Round - 1 + currHdr.Nonce = prevHdr.Nonce + 2 + err = sp.IsHdrConstructionValid(currHdr, prevHdr) + assert.Equal(t, err, process.ErrWrongNonceInBlock) + + currHdr.Nonce = prevHdr.Nonce + 1 + prevHdr.RandSeed = []byte("randomwrong") + err = sp.IsHdrConstructionValid(currHdr, prevHdr) + assert.Equal(t, err, process.ErrRandSeedMismatch) + + prevHdr.RandSeed = currRandSeed + currHdr.PrevHash = []byte("wronghash") + err = sp.IsHdrConstructionValid(currHdr, prevHdr) + assert.Equal(t, err, process.ErrHashDoesNotMatchInOtherChainBlock) + + currHdr.PrevHash = prevHash + prevHdr.RootHash = []byte("prevRootHash") + err = sp.IsHdrConstructionValid(currHdr, prevHdr) + assert.Nil(t, err) +} + +func TestShardProcessor_RemoveAndSaveLastNotarizedMetaHdrNoDstMB(t *testing.T) { + t.Parallel() + + hasher := mock.HasherMock{} + marshalizer := &mock.MarshalizerMock{} + dataPool := mock.NewPoolsHolderMock() + forkDetector := &mock.ForkDetectorMock{} + highNonce := uint64(500) + 
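+	// the final nonce reported below is well above any metablock nonce used in this test,
+	// so every processed metablock is treated as final and can be saved and removed from the pool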
forkDetector.GetHighestFinalBlockNonceCalled = func() uint64 { + return highNonce + } + + putCalledNr := 0 + store := &mock.ChainStorerMock{ + PutCalled: func(unitType dataRetriever.UnitType, key []byte, value []byte) error { + putCalledNr++ + return nil + }, + } + + shardNr := uint32(5) + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + dataPool, + store, + hasher, + marshalizer, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(shardNr), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + forkDetector, + &mock.BlocksTrackerMock{ + RemoveNotarisedBlocksCalled: func(headerHandler data.HeaderHandler) error { + return nil + }, + UnnotarisedBlocksCalled: func() []data.HeaderHandler { + return make([]data.HeaderHandler, 0) + }, + }, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(shardNr)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + prevRandSeed := []byte("prevrand") + currRandSeed := []byte("currrand") + notarizedHdrs := sp.NotarizedHdrs() + firstNonce := uint64(44) + + lastHdr := &block.MetaBlock{Round: 9, + Nonce: firstNonce, + RandSeed: prevRandSeed} + notarizedHdrs[sharding.MetachainShardId] = append(notarizedHdrs[sharding.MetachainShardId], lastHdr) + + //put the existing headers inside datapool + + //header shard 0 + prevHash, _ := sp.ComputeHeaderHash(sp.LastNotarizedHdrForShard(sharding.MetachainShardId).(*block.MetaBlock)) + prevHdr := &block.MetaBlock{ + Round: 10, + Nonce: 45, + PrevRandSeed: prevRandSeed, + RandSeed: currRandSeed, + PrevHash: prevHash, + RootHash: []byte("prevRootHash")} + + prevHash, _ = sp.ComputeHeaderHash(prevHdr) + currHdr := &block.MetaBlock{ + Round: 11, + Nonce: 46, + PrevRandSeed: currRandSeed, + RandSeed: []byte("nextrand"), + PrevHash: prevHash, + RootHash: []byte("currRootHash")} + currHash, _ := sp.ComputeHeaderHash(currHdr) + prevHash, _ = sp.ComputeHeaderHash(prevHdr) + + shardHdr := &block.Header{Round: 15} + shardBlock := block.Body{} + + blockHeader := &block.Header{} + + // test header not in pool and defer called + processedMetaHdrs, err := sp.GetProcessedMetaBlocksFromPool(shardBlock, blockHeader) + assert.Nil(t, err) + + err = sp.SaveLastNotarizedHeader(sharding.MetachainShardId, processedMetaHdrs) + assert.Nil(t, err) + + err = sp.RemoveProcessedMetablocksFromPool(processedMetaHdrs) + assert.Nil(t, err) + assert.Equal(t, 0, putCalledNr) + + notarizedHdrs = sp.NotarizedHdrs() + assert.Equal(t, firstNonce, sp.LastNotarizedHdrForShard(sharding.MetachainShardId).GetNonce()) + assert.Equal(t, 0, len(processedMetaHdrs)) + + // wrong header type in pool and defer called + dataPool.MetaBlocks().Put(currHash, shardHdr) + + hashes := make([][]byte, 0) + hashes = append(hashes, currHash) + blockHeader = &block.Header{MetaBlockHashes: hashes} + + processedMetaHdrs, err = sp.GetProcessedMetaBlocksFromPool(shardBlock, blockHeader) + assert.Equal(t, nil, err) + + err = sp.SaveLastNotarizedHeader(sharding.MetachainShardId, processedMetaHdrs) + assert.Nil(t, err) + + err = sp.RemoveProcessedMetablocksFromPool(processedMetaHdrs) + assert.Nil(t, err) + assert.Equal(t, 0, putCalledNr) + + notarizedHdrs = sp.NotarizedHdrs() + assert.Equal(t, firstNonce, sp.LastNotarizedHdrForShard(sharding.MetachainShardId).GetNonce()) + + // put headers in pool + dataPool.MetaBlocks().Put(currHash, currHdr) + dataPool.MetaBlocks().Put(prevHash, prevHdr) + + hashes = make([][]byte, 0) + hashes = append(hashes, currHash) + hashes = append(hashes, 
prevHash) + blockHeader = &block.Header{MetaBlockHashes: hashes} + + processedMetaHdrs, err = sp.GetProcessedMetaBlocksFromPool(shardBlock, blockHeader) + assert.Nil(t, err) + + err = sp.SaveLastNotarizedHeader(sharding.MetachainShardId, processedMetaHdrs) + assert.Nil(t, err) + + err = sp.RemoveProcessedMetablocksFromPool(processedMetaHdrs) + assert.Nil(t, err) + assert.Equal(t, 4, putCalledNr) + + assert.Equal(t, currHdr, sp.LastNotarizedHdrForShard(sharding.MetachainShardId)) +} + +func createShardData(hasher hashing.Hasher, marshalizer marshal.Marshalizer, miniBlocks []block.MiniBlock) []block.ShardData { + shardData := make([]block.ShardData, len(miniBlocks)) + for i := 0; i < len(miniBlocks); i++ { + marshaled, _ := marshalizer.Marshal(miniBlocks[i]) + hashed := hasher.Compute(string(marshaled)) + + shardMBHeader := block.ShardMiniBlockHeader{ + ReceiverShardId: miniBlocks[i].ReceiverShardID, + SenderShardId: miniBlocks[i].SenderShardID, + TxCount: uint32(len(miniBlocks[i].TxHashes)), + Hash: hashed, + } + shardMBHeaders := make([]block.ShardMiniBlockHeader, 0) + shardMBHeaders = append(shardMBHeaders, shardMBHeader) + + shardData[0].ShardId = miniBlocks[i].SenderShardID + shardData[0].TxCount = 10 + shardData[0].HeaderHash = []byte("headerHash") + shardData[0].ShardMiniBlockHeaders = shardMBHeaders + } + + return shardData +} + +func TestShardProcessor_RemoveAndSaveLastNotarizedMetaHdrNotAllMBFinished(t *testing.T) { + t.Parallel() + + hasher := mock.HasherMock{} + marshalizer := &mock.MarshalizerMock{} + dataPool := mock.NewPoolsHolderMock() + forkDetector := &mock.ForkDetectorMock{} + highNonce := uint64(500) + forkDetector.GetHighestFinalBlockNonceCalled = func() uint64 { + return highNonce + } + + putCalledNr := 0 + store := &mock.ChainStorerMock{ + PutCalled: func(unitType dataRetriever.UnitType, key []byte, value []byte) error { + putCalledNr++ + return nil + }, + } + + shardNr := uint32(5) + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + dataPool, + store, + hasher, + marshalizer, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(shardNr), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + forkDetector, + &mock.BlocksTrackerMock{ + RemoveNotarisedBlocksCalled: func(headerHandler data.HeaderHandler) error { + return nil + }, + UnnotarisedBlocksCalled: func() []data.HeaderHandler { + return make([]data.HeaderHandler, 0) + }, + }, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(shardNr)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + prevRandSeed := []byte("prevrand") + currRandSeed := []byte("currrand") + notarizedHdrs := sp.NotarizedHdrs() + firstNonce := uint64(44) + + lastHdr := &block.MetaBlock{Round: 9, + Nonce: firstNonce, + RandSeed: prevRandSeed} + notarizedHdrs[sharding.MetachainShardId] = append(notarizedHdrs[sharding.MetachainShardId], lastHdr) + + shardBlock := make(block.Body, 0) + txHash := []byte("txhash") + txHashes := make([][]byte, 0) + txHashes = append(txHashes, txHash) + miniblock1 := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 1, + TxHashes: txHashes, + } + miniblock2 := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 2, + TxHashes: txHashes, + } + miniblock3 := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 3, + TxHashes: txHashes, + } + miniblock4 := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 4, + TxHashes: txHashes, + } + shardBlock = append(shardBlock, &miniblock1, &miniblock2, 
&miniblock3) + + miniBlocks := make([]block.MiniBlock, 0) + miniBlocks = append(miniBlocks, miniblock1, miniblock2) + //header shard 0 + prevHash, _ := sp.ComputeHeaderHash(sp.LastNotarizedHdrForShard(sharding.MetachainShardId).(*block.MetaBlock)) + prevHdr := &block.MetaBlock{ + Round: 10, + Nonce: 45, + PrevRandSeed: prevRandSeed, + RandSeed: currRandSeed, + PrevHash: prevHash, + RootHash: []byte("prevRootHash"), + ShardInfo: createShardData(hasher, marshalizer, miniBlocks)} + + miniBlocks = make([]block.MiniBlock, 0) + miniBlocks = append(miniBlocks, miniblock3, miniblock4) + prevHash, _ = sp.ComputeHeaderHash(prevHdr) + currHdr := &block.MetaBlock{ + Round: 11, + Nonce: 46, + PrevRandSeed: currRandSeed, + RandSeed: []byte("nextrand"), + PrevHash: prevHash, + RootHash: []byte("currRootHash"), + ShardInfo: createShardData(hasher, marshalizer, miniBlocks)} + currHash, _ := sp.ComputeHeaderHash(currHdr) + prevHash, _ = sp.ComputeHeaderHash(prevHdr) + + // put headers in pool + dataPool.MetaBlocks().Put(currHash, currHdr) + dataPool.MetaBlocks().Put(prevHash, prevHdr) + + hashes := make([][]byte, 0) + hashes = append(hashes, currHash) + hashes = append(hashes, prevHash) + blockHeader := &block.Header{MetaBlockHashes: hashes} + + processedMetaHdrs, err := sp.GetProcessedMetaBlocksFromPool(shardBlock, blockHeader) + assert.Nil(t, err) + + err = sp.SaveLastNotarizedHeader(sharding.MetachainShardId, processedMetaHdrs) + assert.Nil(t, err) + + err = sp.RemoveProcessedMetablocksFromPool(processedMetaHdrs) + assert.Nil(t, err) + assert.Equal(t, 2, putCalledNr) + + assert.Equal(t, prevHdr, sp.LastNotarizedHdrForShard(sharding.MetachainShardId)) +} + +func TestShardProcessor_RemoveAndSaveLastNotarizedMetaHdrAllMBFinished(t *testing.T) { + t.Parallel() + + hasher := mock.HasherMock{} + marshalizer := &mock.MarshalizerMock{} + dataPool := mock.NewPoolsHolderMock() + forkDetector := &mock.ForkDetectorMock{} + highNonce := uint64(500) + forkDetector.GetHighestFinalBlockNonceCalled = func() uint64 { + return highNonce + } + putCalledNr := 0 + store := &mock.ChainStorerMock{ + PutCalled: func(unitType dataRetriever.UnitType, key []byte, value []byte) error { + putCalledNr++ + return nil + }, + } + + shardNr := uint32(5) + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + dataPool, + store, + hasher, + marshalizer, + initAccountsMock(), + mock.NewMultiShardsCoordinatorMock(shardNr), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + forkDetector, + &mock.BlocksTrackerMock{ + RemoveNotarisedBlocksCalled: func(headerHandler data.HeaderHandler) error { + return nil + }, + UnnotarisedBlocksCalled: func() []data.HeaderHandler { + return make([]data.HeaderHandler, 0) + }, + }, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(shardNr)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + prevRandSeed := []byte("prevrand") + currRandSeed := []byte("currrand") + notarizedHdrs := sp.NotarizedHdrs() + firstNonce := uint64(44) + + lastHdr := &block.MetaBlock{Round: 9, + Nonce: firstNonce, + RandSeed: prevRandSeed} + notarizedHdrs[sharding.MetachainShardId] = append(notarizedHdrs[sharding.MetachainShardId], lastHdr) + + shardBlock := make(block.Body, 0) + txHash := []byte("txhash") + txHashes := make([][]byte, 0) + txHashes = append(txHashes, txHash) + miniblock1 := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 1, + TxHashes: txHashes, + } + miniblock2 := block.MiniBlock{ + ReceiverShardID: 0, + 
SenderShardID: 2, + TxHashes: txHashes, + } + miniblock3 := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 3, + TxHashes: txHashes, + } + miniblock4 := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 4, + TxHashes: txHashes, + } + shardBlock = append(shardBlock, &miniblock1, &miniblock2, &miniblock3, &miniblock4) + + miniBlocks := make([]block.MiniBlock, 0) + miniBlocks = append(miniBlocks, miniblock1, miniblock2) + //header shard 0 + prevHash, _ := sp.ComputeHeaderHash(sp.LastNotarizedHdrForShard(sharding.MetachainShardId).(*block.MetaBlock)) + prevHdr := &block.MetaBlock{ + Round: 10, + Nonce: 45, + PrevRandSeed: prevRandSeed, + RandSeed: currRandSeed, + PrevHash: prevHash, + RootHash: []byte("prevRootHash"), + ShardInfo: createShardData(hasher, marshalizer, miniBlocks)} + + miniBlocks = make([]block.MiniBlock, 0) + miniBlocks = append(miniBlocks, miniblock3, miniblock4) + prevHash, _ = sp.ComputeHeaderHash(prevHdr) + currHdr := &block.MetaBlock{ + Round: 11, + Nonce: 46, + PrevRandSeed: currRandSeed, + RandSeed: []byte("nextrand"), + PrevHash: prevHash, + RootHash: []byte("currRootHash"), + ShardInfo: createShardData(hasher, marshalizer, miniBlocks)} + currHash, _ := sp.ComputeHeaderHash(currHdr) + prevHash, _ = sp.ComputeHeaderHash(prevHdr) + + // put headers in pool + dataPool.MetaBlocks().Put(currHash, currHdr) + dataPool.MetaBlocks().Put(prevHash, prevHdr) + dataPool.MetaBlocks().Put([]byte("shouldNotRemove"), &block.MetaBlock{ + Round: 12, + PrevRandSeed: []byte("nextrand"), + PrevHash: currHash, + Nonce: 47}) + + hashes := make([][]byte, 0) + hashes = append(hashes, currHash) + hashes = append(hashes, prevHash) + blockHeader := &block.Header{MetaBlockHashes: hashes} + + processedMetaHdrs, err := sp.GetProcessedMetaBlocksFromPool(shardBlock, blockHeader) + assert.Nil(t, err) + assert.Equal(t, 2, len(processedMetaHdrs)) + + err = sp.SaveLastNotarizedHeader(sharding.MetachainShardId, processedMetaHdrs) + assert.Nil(t, err) + + err = sp.RemoveProcessedMetablocksFromPool(processedMetaHdrs) + assert.Nil(t, err) + assert.Equal(t, 4, putCalledNr) + + assert.Equal(t, currHdr, sp.LastNotarizedHdrForShard(sharding.MetachainShardId)) +} + +func createOneHeaderOneBody() (*block.Header, block.Body) { + txHash := []byte("tx_hash1") + rootHash := []byte("rootHash") + body := make(block.Body, 0) + txHashes := make([][]byte, 0) + txHashes = append(txHashes, txHash) + miniblock := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 1, + TxHashes: txHashes, + } + body = append(body, &miniblock) + + hasher := &mock.HasherStub{} + marshalizer := &mock.MarshalizerMock{} + + mbbytes, _ := marshalizer.Marshal(miniblock) + mbHash := hasher.Compute(string(mbbytes)) + mbHdr := block.MiniBlockHeader{ + ReceiverShardID: 0, + SenderShardID: 1, + TxCount: uint32(len(txHashes)), + Hash: mbHash} + mbHdrs := make([]block.MiniBlockHeader, 0) + mbHdrs = append(mbHdrs, mbHdr) + + hdr := &block.Header{ + Nonce: 1, + PrevHash: []byte(""), + Signature: []byte("signature"), + PubKeysBitmap: []byte("00110"), + ShardId: 0, + RootHash: rootHash, + MiniBlockHeaders: mbHdrs, + } + + return hdr, body +} + +func TestShardProcessor_CheckHeaderBodyCorrelationReceiverMissmatch(t *testing.T) { + t.Parallel() + + hdr, body := createOneHeaderOneBody() + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + initDataPool([]byte("tx_hash1")), + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.AccountsStub{}, + mock.NewMultiShardsCoordinatorMock(3), + 
mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + hdr.MiniBlockHeaders[0].ReceiverShardID = body[0].ReceiverShardID + 1 + err := sp.CheckHeaderBodyCorrelation(hdr, body) + assert.Equal(t, process.ErrHeaderBodyMismatch, err) +} + +func TestShardProcessor_CheckHeaderBodyCorrelationSenderMissmatch(t *testing.T) { + t.Parallel() + + hdr, body := createOneHeaderOneBody() + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + initDataPool([]byte("tx_hash1")), + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.AccountsStub{}, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + hdr.MiniBlockHeaders[0].SenderShardID = body[0].SenderShardID + 1 + err := sp.CheckHeaderBodyCorrelation(hdr, body) + assert.Equal(t, process.ErrHeaderBodyMismatch, err) +} + +func TestShardProcessor_CheckHeaderBodyCorrelationTxCountMissmatch(t *testing.T) { + t.Parallel() + + hdr, body := createOneHeaderOneBody() + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + initDataPool([]byte("tx_hash1")), + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.AccountsStub{}, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + hdr.MiniBlockHeaders[0].TxCount = uint32(len(body[0].TxHashes) + 1) + err := sp.CheckHeaderBodyCorrelation(hdr, body) + assert.Equal(t, process.ErrHeaderBodyMismatch, err) +} + +func TestShardProcessor_CheckHeaderBodyCorrelationHashMissmatch(t *testing.T) { + t.Parallel() + + hdr, body := createOneHeaderOneBody() + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + initDataPool([]byte("tx_hash1")), + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.AccountsStub{}, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + hdr.MiniBlockHeaders[0].Hash = []byte("wrongHash") + err := sp.CheckHeaderBodyCorrelation(hdr, body) + assert.Equal(t, process.ErrHeaderBodyMismatch, err) +} + +func TestShardProcessor_CheckHeaderBodyCorrelationShouldPass(t *testing.T) { + t.Parallel() + + hdr, body := createOneHeaderOneBody() + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + initDataPool([]byte("tx_hash1")), + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.AccountsStub{}, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + 
&mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + err := sp.CheckHeaderBodyCorrelation(hdr, body) + assert.Nil(t, err) +} + +func TestShardProcessor_RestoreMetaBlockIntoPoolShouldPass(t *testing.T) { + t.Parallel() + + marshalizer := &mock.MarshalizerMock{} + + poolFake := mock.NewPoolsHolderMock() + + metaBlock := block.MetaBlock{ + Nonce: 1, + ShardInfo: make([]block.ShardData, 0), + } + + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + poolFake, + &mock.ChainStorerMock{ + GetCalled: func(unitType dataRetriever.UnitType, key []byte) ([]byte, error) { + return marshalizer.Marshal(&metaBlock) + }, + GetStorerCalled: func(unitType dataRetriever.UnitType) storage.Storer { + return &mock.StorerStub{ + RemoveCalled: func(key []byte) error { + return nil + }, + } + }, + }, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.AccountsStub{}, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + miniblockHashes := make(map[string]uint32, 0) + + meta := block.MetaBlock{ + Nonce: 1, + ShardInfo: make([]block.ShardData, 0), + } + hasher := &mock.HasherStub{} + + metaBytes, _ := marshalizer.Marshal(meta) + hasher.ComputeCalled = func(s string) []byte { + return []byte("cool") + } + metaHash := hasher.Compute(string(metaBytes)) + metablockHashes := make([][]byte, 0) + metablockHashes = append(metablockHashes, metaHash) + + metaBlockRestored, ok := poolFake.MetaBlocks().Get(metaHash) + + assert.Equal(t, nil, metaBlockRestored) + assert.False(t, ok) + + err := sp.RestoreMetaBlockIntoPool(miniblockHashes, metablockHashes) + + metaBlockRestored, _ = poolFake.MetaBlocks().Get(metaHash) + + assert.Equal(t, &metaBlock, metaBlockRestored) + assert.Nil(t, err) +} + +func TestShardPreprocessor_getAllMiniBlockDstMeFromMetaShouldPass(t *testing.T) { + t.Parallel() + + marshalizer := &mock.MarshalizerMock{} + + txHash := []byte("tx_hash1") + txHashes := make([][]byte, 0) + txHashes = append(txHashes, txHash) + miniblock := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 1, + TxHashes: txHashes, + } + hasher := &mock.HasherStub{} + + mbbytes, _ := marshalizer.Marshal(miniblock) + mbHash := hasher.Compute(string(mbbytes)) + + shardMiniBlock := block.ShardMiniBlockHeader{ + ReceiverShardId: 0, + SenderShardId: 2, + TxCount: uint32(len(txHashes)), + Hash: mbHash, + } + shardMiniblockHdrs := make([]block.ShardMiniBlockHeader, 0) + shardMiniblockHdrs = append(shardMiniblockHdrs, shardMiniBlock) + shardHeader := block.ShardData{ + ShardId: 1, + ShardMiniBlockHeaders: shardMiniblockHdrs, + } + shardHdrs := make([]block.ShardData, 0) + shardHdrs = append(shardHdrs, shardHeader) + + idp := initDataPool([]byte("tx_hash1")) + idp.MetaBlocksCalled = func() storage.Cacher { + return &mock.CacherStub{ + GetCalled: func(key []byte) (value interface{}, ok bool) { + if reflect.DeepEqual(key, []byte("tx1_hash")) { + return &transaction.Transaction{Nonce: 10}, true + } + return nil, false + }, + KeysCalled: func() [][]byte { + return nil + }, + LenCalled: func() int { + return 0 + }, + PeekCalled: func(key []byte) (value interface{}, ok bool) { + return 
&block.MetaBlock{ + Nonce: 1, + Round: 1, + ShardInfo: shardHdrs, + }, true + }, + PutCalled: func(key []byte, value interface{}) (evicted bool) { + return true + }, + RegisterHandlerCalled: func(i func(key []byte)) {}, + } + } + + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + idp, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.AccountsStub{}, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + meta := block.MetaBlock{ + Nonce: 0, + ShardInfo: make([]block.ShardData, 0), + } + + metaBytes, _ := marshalizer.Marshal(meta) + hasher.ComputeCalled = func(s string) []byte { + return []byte("cool") + } + metaHash := hasher.Compute(string(metaBytes)) + metablockHashes := make([][]byte, 0) + metablockHashes = append(metablockHashes, metaHash) + + orderedMetaBlocks, err := sp.GetAllMiniBlockDstMeFromMeta(1, metablockHashes) + + assert.Equal(t, 1, len(orderedMetaBlocks)) + assert.Equal(t, orderedMetaBlocks[""], metaHash) + assert.Nil(t, err) +} + +func TestShardProcessor_GetHighestHdrForOwnShardFromMetachainNothingToProcess(t *testing.T) { + t.Parallel() + + dataPool := initDataPool([]byte("tx_hash1")) + + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + dataPool, + &mock.ChainStorerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + &mock.AccountsStub{}, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + hdr, _, err := sp.GetHighestHdrForOwnShardFromMetachain(0) + + assert.Nil(t, err) + assert.NotNil(t, hdr) + assert.Equal(t, uint64(0), hdr.GetNonce()) +} + +func TestShardProcessor_GetHighestHdrForOwnShardFromMetachainMetaHdrsWithoutOwnHdr(t *testing.T) { + t.Parallel() + + dataPool := integrationTests.CreateTestShardDataPool(nil) + store := initStore() + hasher := &mock.HasherMock{} + marshalizer := &mock.MarshalizerMock{} + genesisBlocks := createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)) + + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + dataPool, + store, + hasher, + marshalizer, + &mock.AccountsStub{}, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + genesisBlocks, + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + shardInfo := make([]block.ShardData, 0) + shardInfo = append(shardInfo, block.ShardData{HeaderHash: []byte("hash"), ShardId: 1}) + _ = dataPool.Headers().Put([]byte("hash"), &block.Header{ShardId: 0, Nonce: 1}) + + prevMetaHdr := genesisBlocks[sharding.MetachainShardId] + prevHash, _ := core.CalculateHash(marshalizer, hasher, prevMetaHdr) + currMetaHdr := &block.MetaBlock{ + Nonce: 1, + Epoch: 0, + Round: 1, + PrevHash: prevHash, + PrevRandSeed: prevMetaHdr.GetRandSeed(), + RandSeed: prevMetaHdr.GetRandSeed(), + ShardInfo: shardInfo, + } + currHash, _ := core.CalculateHash(marshalizer, hasher,
currMetaHdr) + _ = dataPool.MetaBlocks().Put(currHash, currMetaHdr) + + prevMetaHdr = currMetaHdr + prevHash, _ = core.CalculateHash(marshalizer, hasher, prevMetaHdr) + currMetaHdr = &block.MetaBlock{ + Nonce: 2, + Epoch: 0, + Round: 2, + PrevHash: prevHash, + PrevRandSeed: prevMetaHdr.GetRandSeed(), + RandSeed: prevMetaHdr.GetRandSeed(), + ShardInfo: shardInfo, + } + currHash, _ = core.CalculateHash(marshalizer, hasher, currMetaHdr) + _ = dataPool.MetaBlocks().Put(currHash, currMetaHdr) + + hdr, _, err := sp.GetHighestHdrForOwnShardFromMetachain(4) + + assert.Nil(t, err) + assert.NotNil(t, hdr) + assert.Equal(t, uint64(0), hdr.GetNonce()) +} + +func TestShardProcessor_GetHighestHdrForOwnShardFromMetachainMetaHdrsWithOwnHdrButNotStored(t *testing.T) { + t.Parallel() + + dataPool := integrationTests.CreateTestShardDataPool(nil) + store := initStore() + hasher := &mock.HasherMock{} + marshalizer := &mock.MarshalizerMock{} + genesisBlocks := createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)) + + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + dataPool, + store, + hasher, + marshalizer, + &mock.AccountsStub{}, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + genesisBlocks, + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + shardInfo := make([]block.ShardData, 0) + shardInfo = append(shardInfo, block.ShardData{HeaderHash: []byte("hash"), ShardId: 0}) + + prevMetaHdr := genesisBlocks[sharding.MetachainShardId] + prevHash, _ := core.CalculateHash(marshalizer, hasher, prevMetaHdr) + currMetaHdr := &block.MetaBlock{ + Nonce: 1, + Epoch: 0, + Round: 1, + PrevHash: prevHash, + PrevRandSeed: prevMetaHdr.GetRandSeed(), + RandSeed: prevMetaHdr.GetRandSeed(), + ShardInfo: shardInfo, + } + currHash, _ := core.CalculateHash(marshalizer, hasher, currMetaHdr) + _ = dataPool.MetaBlocks().Put(currHash, currMetaHdr) + + prevMetaHdr = currMetaHdr + prevHash, _ = core.CalculateHash(marshalizer, hasher, prevMetaHdr) + currMetaHdr = &block.MetaBlock{ + Nonce: 2, + Epoch: 0, + Round: 2, + PrevHash: prevHash, + PrevRandSeed: prevMetaHdr.GetRandSeed(), + RandSeed: prevMetaHdr.GetRandSeed(), + ShardInfo: shardInfo, + } + currHash, _ = core.CalculateHash(marshalizer, hasher, currMetaHdr) + _ = dataPool.MetaBlocks().Put(currHash, currMetaHdr) + + hdr, _, err := sp.GetHighestHdrForOwnShardFromMetachain(4) + + assert.Nil(t, err) + assert.NotNil(t, hdr) + assert.Equal(t, uint64(0), hdr.GetNonce()) +} + +func TestShardProcessor_GetHighestHdrForOwnShardFromMetachainMetaHdrsWithOwnHdrStored(t *testing.T) { + t.Parallel() + + dataPool := integrationTests.CreateTestShardDataPool(nil) + store := initStore() + hasher := &mock.HasherMock{} + marshalizer := &mock.MarshalizerMock{} + genesisBlocks := createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)) + + sp, _ := blproc.NewShardProcessor( + &mock.ServiceContainerMock{}, + dataPool, + store, + hasher, + marshalizer, + &mock.AccountsStub{}, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.ForkDetectorMock{}, + &mock.BlocksTrackerMock{}, + genesisBlocks, + &mock.RequestHandlerMock{}, + &mock.TransactionCoordinatorMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + + ownHdr := &block.Header{ + Nonce: 1, + Round: 1, + } + ownHash, _ := core.CalculateHash(marshalizer, hasher, ownHdr) + _ =
dataPool.Headers().Put(ownHash, ownHdr) + + shardInfo := make([]block.ShardData, 0) + shardInfo = append(shardInfo, block.ShardData{HeaderHash: ownHash, ShardId: 0}) + + prevMetaHdr := genesisBlocks[sharding.MetachainShardId] + prevHash, _ := core.CalculateHash(marshalizer, hasher, prevMetaHdr) + currMetaHdr := &block.MetaBlock{ + Nonce: 1, + Epoch: 0, + Round: 1, + PrevHash: prevHash, + PrevRandSeed: prevMetaHdr.GetRandSeed(), + RandSeed: prevMetaHdr.GetRandSeed(), + ShardInfo: shardInfo, + } + currHash, _ := core.CalculateHash(marshalizer, hasher, currMetaHdr) + _ = dataPool.MetaBlocks().Put(currHash, currMetaHdr) + + ownHdr = &block.Header{ + Nonce: 2, + Round: 2, + } + ownHash, _ = core.CalculateHash(marshalizer, hasher, ownHdr) + mrsOwnHdr, _ := marshalizer.Marshal(ownHdr) + _ = store.Put(dataRetriever.BlockHeaderUnit, ownHash, mrsOwnHdr) + + shardInfo = make([]block.ShardData, 0) + shardInfo = append(shardInfo, block.ShardData{HeaderHash: ownHash, ShardId: 0}) + + prevMetaHdr = currMetaHdr + prevHash, _ = core.CalculateHash(marshalizer, hasher, prevMetaHdr) + currMetaHdr = &block.MetaBlock{ + Nonce: 2, + Epoch: 0, + Round: 2, + PrevHash: prevHash, + PrevRandSeed: prevMetaHdr.GetRandSeed(), + RandSeed: prevMetaHdr.GetRandSeed(), + ShardInfo: shardInfo, + } + currHash, _ = core.CalculateHash(marshalizer, hasher, currMetaHdr) + _ = dataPool.MetaBlocks().Put(currHash, currMetaHdr) + + prevMetaHdr = currMetaHdr + prevHash, _ = core.CalculateHash(marshalizer, hasher, prevMetaHdr) + currMetaHdr = &block.MetaBlock{ + Nonce: 3, + Epoch: 0, + Round: 3, + PrevHash: prevHash, + PrevRandSeed: prevMetaHdr.GetRandSeed(), + RandSeed: prevMetaHdr.GetRandSeed(), + } + currHash, _ = core.CalculateHash(marshalizer, hasher, currMetaHdr) + _ = dataPool.MetaBlocks().Put(currHash, currMetaHdr) + + hdr, _, err := sp.GetHighestHdrForOwnShardFromMetachain(4) + + assert.Nil(t, err) + assert.NotNil(t, hdr) + assert.Equal(t, ownHdr.GetNonce(), hdr.GetNonce()) } func TestShardProcessor_RestoreMetaBlockIntoPoolVerifyMiniblocks(t *testing.T) { @@ -4900,9 +4900,9 @@ func TestShardProcessor_RestoreMetaBlockIntoPoolVerifyMiniblocks(t *testing.T) { &mock.HasherStub{}, &mock.MarshalizerMock{}, &mock.AccountsStub{}, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.ForkDetectorMock{}, &mock.BlocksTrackerMock{}, createGenesisBlocks(shardC), diff --git a/process/coordinator/process.go b/process/coordinator/process.go index 232b4712d94..6716ebc11e7 100644 --- a/process/coordinator/process.go +++ b/process/coordinator/process.go @@ -1,786 +1,786 @@ package coordinator import ( - "sort" - "sync" - "time" - - "github.com/ElrondNetwork/elrond-go/core/logger" - "github.com/ElrondNetwork/elrond-go/data" - "github.com/ElrondNetwork/elrond-go/data/block" - "github.com/ElrondNetwork/elrond-go/data/state" - "github.com/ElrondNetwork/elrond-go/dataRetriever" - "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/process/factory" - "github.com/ElrondNetwork/elrond-go/sharding" - "github.com/ElrondNetwork/elrond-go/storage" + "sort" + "sync" + "time" + + "github.com/ElrondNetwork/elrond-go/core/logger" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + 
"github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/factory" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/storage" ) type transactionCoordinator struct { - shardCoordinator sharding.Coordinator - accounts state.AccountsAdapter - miniBlockPool storage.Cacher + shardCoordinator sharding.Coordinator + accounts state.AccountsAdapter + miniBlockPool storage.Cacher - mutPreProcessor sync.RWMutex - txPreProcessors map[block.Type]process.PreProcessor - keysTxPreProcs []block.Type + mutPreProcessor sync.RWMutex + txPreProcessors map[block.Type]process.PreProcessor + keysTxPreProcs []block.Type - mutInterimProcessors sync.RWMutex - interimProcessors map[block.Type]process.IntermediateTransactionHandler - keysInterimProcs []block.Type + mutInterimProcessors sync.RWMutex + interimProcessors map[block.Type]process.IntermediateTransactionHandler + keysInterimProcs []block.Type - mutRequestedTxs sync.RWMutex - requestedTxs map[block.Type]int + mutRequestedTxs sync.RWMutex + requestedTxs map[block.Type]int - onRequestMiniBlock func(shardId uint32, mbHash []byte) + onRequestMiniBlock func(shardId uint32, mbHash []byte) } var log = logger.DefaultLogger() // NewTransactionCoordinator creates a transaction coordinator to run and coordinate preprocessors and processors func NewTransactionCoordinator( - shardCoordinator sharding.Coordinator, - accounts state.AccountsAdapter, - dataPool dataRetriever.PoolsHolder, - requestHandler process.RequestHandler, - preProcessors process.PreProcessorsContainer, - interProcessors process.IntermediateProcessorContainer, + shardCoordinator sharding.Coordinator, + accounts state.AccountsAdapter, + dataPool dataRetriever.PoolsHolder, + requestHandler process.RequestHandler, + preProcessors process.PreProcessorsContainer, + interProcessors process.IntermediateProcessorContainer, ) (*transactionCoordinator, error) { - if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { - return nil, process.ErrNilShardCoordinator - } - if accounts == nil || accounts.IsInterfaceNil() { - return nil, process.ErrNilAccountsAdapter - } - if dataPool == nil || dataPool.IsInterfaceNil() { - return nil, process.ErrNilDataPoolHolder - } - if requestHandler == nil || requestHandler.IsInterfaceNil() { - return nil, process.ErrNilRequestHandler - } - if interProcessors == nil || interProcessors.IsInterfaceNil() { - return nil, process.ErrNilIntermediateProcessorContainer - } - if preProcessors == nil || preProcessors.IsInterfaceNil() { - return nil, process.ErrNilPreProcessorsContainer - } - - tc := &transactionCoordinator{ - shardCoordinator: shardCoordinator, - accounts: accounts, - } - - tc.miniBlockPool = dataPool.MiniBlocks() - if tc.miniBlockPool == nil || tc.miniBlockPool.IsInterfaceNil() { - return nil, process.ErrNilMiniBlockPool - } - tc.miniBlockPool.RegisterHandler(tc.receivedMiniBlock) - - tc.onRequestMiniBlock = requestHandler.RequestMiniBlock - tc.requestedTxs = make(map[block.Type]int) - tc.txPreProcessors = make(map[block.Type]process.PreProcessor) - tc.interimProcessors = make(map[block.Type]process.IntermediateTransactionHandler) - - tc.keysTxPreProcs = preProcessors.Keys() - sort.Slice(tc.keysTxPreProcs, func(i, j int) bool { - return tc.keysTxPreProcs[i] < tc.keysTxPreProcs[j] - }) - for _, value := range tc.keysTxPreProcs { - preProc, err := preProcessors.Get(value) - if err != nil { - return nil, err - } - tc.txPreProcessors[value] = preProc - } - - tc.keysInterimProcs = 
interProcessors.Keys() - sort.Slice(tc.keysInterimProcs, func(i, j int) bool { - return tc.keysInterimProcs[i] < tc.keysInterimProcs[j] - }) - for _, value := range tc.keysInterimProcs { - interProc, err := interProcessors.Get(value) - if err != nil { - return nil, err - } - tc.interimProcessors[value] = interProc - } - - return tc, nil + if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { + return nil, process.ErrNilShardCoordinator + } + if accounts == nil || accounts.IsInterfaceNil() { + return nil, process.ErrNilAccountsAdapter + } + if dataPool == nil || dataPool.IsInterfaceNil() { + return nil, process.ErrNilDataPoolHolder + } + if requestHandler == nil || requestHandler.IsInterfaceNil() { + return nil, process.ErrNilRequestHandler + } + if interProcessors == nil || interProcessors.IsInterfaceNil() { + return nil, process.ErrNilIntermediateProcessorContainer + } + if preProcessors == nil || preProcessors.IsInterfaceNil() { + return nil, process.ErrNilPreProcessorsContainer + } + + tc := &transactionCoordinator{ + shardCoordinator: shardCoordinator, + accounts: accounts, + } + + tc.miniBlockPool = dataPool.MiniBlocks() + if tc.miniBlockPool == nil || tc.miniBlockPool.IsInterfaceNil() { + return nil, process.ErrNilMiniBlockPool + } + tc.miniBlockPool.RegisterHandler(tc.receivedMiniBlock) + + tc.onRequestMiniBlock = requestHandler.RequestMiniBlock + tc.requestedTxs = make(map[block.Type]int) + tc.txPreProcessors = make(map[block.Type]process.PreProcessor) + tc.interimProcessors = make(map[block.Type]process.IntermediateTransactionHandler) + + tc.keysTxPreProcs = preProcessors.Keys() + sort.Slice(tc.keysTxPreProcs, func(i, j int) bool { + return tc.keysTxPreProcs[i] < tc.keysTxPreProcs[j] + }) + for _, value := range tc.keysTxPreProcs { + preProc, err := preProcessors.Get(value) + if err != nil { + return nil, err + } + tc.txPreProcessors[value] = preProc + } + + tc.keysInterimProcs = interProcessors.Keys() + sort.Slice(tc.keysInterimProcs, func(i, j int) bool { + return tc.keysInterimProcs[i] < tc.keysInterimProcs[j] + }) + for _, value := range tc.keysInterimProcs { + interProc, err := interProcessors.Get(value) + if err != nil { + return nil, err + } + tc.interimProcessors[value] = interProc + } + + return tc, nil } // separateBodyByType creates a map of bodies according to type func (tc *transactionCoordinator) separateBodyByType(body block.Body) map[block.Type]block.Body { - separatedBodies := make(map[block.Type]block.Body) + separatedBodies := make(map[block.Type]block.Body) - for i := 0; i < len(body); i++ { - mb := body[i] + for i := 0; i < len(body); i++ { + mb := body[i] - if separatedBodies[mb.Type] == nil { - separatedBodies[mb.Type] = block.Body{} - } + if separatedBodies[mb.Type] == nil { + separatedBodies[mb.Type] = block.Body{} + } - separatedBodies[mb.Type] = append(separatedBodies[mb.Type], mb) - } + separatedBodies[mb.Type] = append(separatedBodies[mb.Type], mb) + } - return separatedBodies + return separatedBodies } // initRequestedTxs init the requested txs number func (tc *transactionCoordinator) initRequestedTxs() { - tc.mutRequestedTxs.Lock() - tc.requestedTxs = make(map[block.Type]int) - tc.mutRequestedTxs.Unlock() + tc.mutRequestedTxs.Lock() + tc.requestedTxs = make(map[block.Type]int) + tc.mutRequestedTxs.Unlock() } // RequestBlockTransactions verifies missing transaction and requests them func (tc *transactionCoordinator) RequestBlockTransactions(body block.Body) { - separatedBodies := tc.separateBodyByType(body) + separatedBodies := 
tc.separateBodyByType(body) - tc.initRequestedTxs() + tc.initRequestedTxs() - wg := sync.WaitGroup{} - wg.Add(len(separatedBodies)) + wg := sync.WaitGroup{} + wg.Add(len(separatedBodies)) - for key, value := range separatedBodies { - go func(blockType block.Type, blockBody block.Body) { - preproc := tc.getPreProcessor(blockType) - if preproc == nil { - wg.Done() - return - } - requestedTxs := preproc.RequestBlockTransactions(blockBody) + for key, value := range separatedBodies { + go func(blockType block.Type, blockBody block.Body) { + preproc := tc.getPreProcessor(blockType) + if preproc == nil { + wg.Done() + return + } + requestedTxs := preproc.RequestBlockTransactions(blockBody) - tc.mutRequestedTxs.Lock() - tc.requestedTxs[blockType] = requestedTxs - tc.mutRequestedTxs.Unlock() + tc.mutRequestedTxs.Lock() + tc.requestedTxs[blockType] = requestedTxs + tc.mutRequestedTxs.Unlock() - wg.Done() - }(key, value) - } + wg.Done() + }(key, value) + } - wg.Wait() + wg.Wait() } // IsDataPreparedForProcessing verifies if all the needed data is prepared func (tc *transactionCoordinator) IsDataPreparedForProcessing(haveTime func() time.Duration) error { - var errFound error - errMutex := sync.Mutex{} + var errFound error + errMutex := sync.Mutex{} - wg := sync.WaitGroup{} + wg := sync.WaitGroup{} - tc.mutRequestedTxs.RLock() - wg.Add(len(tc.requestedTxs)) + tc.mutRequestedTxs.RLock() + wg.Add(len(tc.requestedTxs)) - for key, value := range tc.requestedTxs { - go func(blockType block.Type, requestedTxs int) { - preproc := tc.getPreProcessor(blockType) - if preproc == nil { - wg.Done() + for key, value := range tc.requestedTxs { + go func(blockType block.Type, requestedTxs int) { + preproc := tc.getPreProcessor(blockType) + if preproc == nil { + wg.Done() - return - } + return + } - err := preproc.IsDataPrepared(requestedTxs, haveTime) - if err != nil { - log.Debug(err.Error()) + err := preproc.IsDataPrepared(requestedTxs, haveTime) + if err != nil { + log.Debug(err.Error()) - errMutex.Lock() - errFound = err - errMutex.Unlock() - } - wg.Done() - }(key, value) - } + errMutex.Lock() + errFound = err + errMutex.Unlock() + } + wg.Done() + }(key, value) + } - tc.mutRequestedTxs.RUnlock() - wg.Wait() + tc.mutRequestedTxs.RUnlock() + wg.Wait() - return errFound + return errFound } // SaveBlockDataToStorage saves the data from block body into storage units func (tc *transactionCoordinator) SaveBlockDataToStorage(body block.Body) error { - separatedBodies := tc.separateBodyByType(body) + separatedBodies := tc.separateBodyByType(body) - var errFound error - errMutex := sync.Mutex{} + var errFound error + errMutex := sync.Mutex{} - wg := sync.WaitGroup{} - // Length of body types + another go routine for the intermediate transactions - wg.Add(len(separatedBodies)) + wg := sync.WaitGroup{} + // Length of body types + another go routine for the intermediate transactions + wg.Add(len(separatedBodies)) - for key, value := range separatedBodies { - go func(blockType block.Type, blockBody block.Body) { - preproc := tc.getPreProcessor(blockType) - if preproc == nil { - wg.Done() - return - } + for key, value := range separatedBodies { + go func(blockType block.Type, blockBody block.Body) { + preproc := tc.getPreProcessor(blockType) + if preproc == nil { + wg.Done() + return + } - err := preproc.SaveTxBlockToStorage(blockBody) - if err != nil { - log.Debug(err.Error()) + err := preproc.SaveTxBlockToStorage(blockBody) + if err != nil { + log.Debug(err.Error()) - errMutex.Lock() - errFound = err - errMutex.Unlock() - } + 
errMutex.Lock() + errFound = err + errMutex.Unlock() + } - wg.Done() - }(key, value) - } + wg.Done() + }(key, value) + } - wg.Wait() + wg.Wait() - intermediatePreproc := tc.getInterimProcessor(block.SmartContractResultBlock) - if intermediatePreproc == nil { - return errFound - } + intermediatePreproc := tc.getInterimProcessor(block.SmartContractResultBlock) + if intermediatePreproc == nil { + return errFound + } - err := intermediatePreproc.SaveCurrentIntermediateTxToStorage() - if err != nil { - log.Debug(err.Error()) + err := intermediatePreproc.SaveCurrentIntermediateTxToStorage() + if err != nil { + log.Debug(err.Error()) - errMutex.Lock() - errFound = err - errMutex.Unlock() - } + errMutex.Lock() + errFound = err + errMutex.Unlock() + } - return errFound + return errFound } // RestoreBlockDataFromStorage restores block data from storage to pool func (tc *transactionCoordinator) RestoreBlockDataFromStorage(body block.Body) (int, map[int][][]byte, error) { - separatedBodies := tc.separateBodyByType(body) + separatedBodies := tc.separateBodyByType(body) - var errFound error - localMutex := sync.Mutex{} - totalRestoredTx := 0 - restoredMbHashes := make(map[int][][]byte) + var errFound error + localMutex := sync.Mutex{} + totalRestoredTx := 0 + restoredMbHashes := make(map[int][][]byte) - wg := sync.WaitGroup{} - wg.Add(len(separatedBodies)) + wg := sync.WaitGroup{} + wg.Add(len(separatedBodies)) - for key, value := range separatedBodies { - go func(blockType block.Type, blockBody block.Body) { - restoredMbs := make(map[int][]byte) + for key, value := range separatedBodies { + go func(blockType block.Type, blockBody block.Body) { + restoredMbs := make(map[int][]byte) - preproc := tc.getPreProcessor(blockType) - if preproc == nil { - wg.Done() - return - } + preproc := tc.getPreProcessor(blockType) + if preproc == nil { + wg.Done() + return + } - restoredTxs, restoredMbs, err := preproc.RestoreTxBlockIntoPools(blockBody, tc.miniBlockPool) - if err != nil { - log.Debug(err.Error()) + restoredTxs, restoredMbs, err := preproc.RestoreTxBlockIntoPools(blockBody, tc.miniBlockPool) + if err != nil { + log.Debug(err.Error()) - localMutex.Lock() - errFound = err - localMutex.Unlock() - } + localMutex.Lock() + errFound = err + localMutex.Unlock() + } - localMutex.Lock() - totalRestoredTx += restoredTxs + localMutex.Lock() + totalRestoredTx += restoredTxs - for shId, mbHash := range restoredMbs { - restoredMbHashes[shId] = append(restoredMbHashes[shId], mbHash) - } + for shId, mbHash := range restoredMbs { + restoredMbHashes[shId] = append(restoredMbHashes[shId], mbHash) + } - localMutex.Unlock() + localMutex.Unlock() - wg.Done() - }(key, value) - } + wg.Done() + }(key, value) + } - wg.Wait() + wg.Wait() - return totalRestoredTx, restoredMbHashes, errFound + return totalRestoredTx, restoredMbHashes, errFound } // RemoveBlockDataFromPool deletes block data from pools func (tc *transactionCoordinator) RemoveBlockDataFromPool(body block.Body) error { - separatedBodies := tc.separateBodyByType(body) + separatedBodies := tc.separateBodyByType(body) - var errFound error - errMutex := sync.Mutex{} + var errFound error + errMutex := sync.Mutex{} - wg := sync.WaitGroup{} - wg.Add(len(separatedBodies)) + wg := sync.WaitGroup{} + wg.Add(len(separatedBodies)) - for key, value := range separatedBodies { - go func(blockType block.Type, blockBody block.Body) { - preproc := tc.getPreProcessor(blockType) - if preproc == nil || preproc.IsInterfaceNil() { - wg.Done() - return - } + for key, value := range 
separatedBodies { + go func(blockType block.Type, blockBody block.Body) { + preproc := tc.getPreProcessor(blockType) + if preproc == nil || preproc.IsInterfaceNil() { + wg.Done() + return + } - err := preproc.RemoveTxBlockFromPools(blockBody, tc.miniBlockPool) - if err != nil { - log.Debug(err.Error()) + err := preproc.RemoveTxBlockFromPools(blockBody, tc.miniBlockPool) + if err != nil { + log.Debug(err.Error()) - errMutex.Lock() - errFound = err - errMutex.Unlock() - } - wg.Done() - }(key, value) - } + errMutex.Lock() + errFound = err + errMutex.Unlock() + } + wg.Done() + }(key, value) + } - wg.Wait() + wg.Wait() - return errFound + return errFound } // ProcessBlockTransaction processes transactions and updates state tries func (tc *transactionCoordinator) ProcessBlockTransaction( - body block.Body, - round uint64, - haveTime func() time.Duration, + body block.Body, + round uint64, + haveTime func() time.Duration, ) error { - separatedBodies := tc.separateBodyByType(body) - - // processing has to be done in order, as the order of different type of transactions over the same account is strict - for _, blockType := range tc.keysTxPreProcs { - if separatedBodies[blockType] == nil { - continue - } - - preproc := tc.getPreProcessor(blockType) - if preproc == nil || preproc.IsInterfaceNil() { - return process.ErrMissingPreProcessor - } - - err := preproc.ProcessBlockTransactions(separatedBodies[blockType], round, haveTime) - if err != nil { - return err - } - } - - return nil + separatedBodies := tc.separateBodyByType(body) + + // processing has to be done in order, as the order of different type of transactions over the same account is strict + for _, blockType := range tc.keysTxPreProcs { + if separatedBodies[blockType] == nil { + continue + } + + preproc := tc.getPreProcessor(blockType) + if preproc == nil || preproc.IsInterfaceNil() { + return process.ErrMissingPreProcessor + } + + err := preproc.ProcessBlockTransactions(separatedBodies[blockType], round, haveTime) + if err != nil { + return err + } + } + + return nil } // CreateMbsAndProcessCrossShardTransactionsDstMe creates miniblocks and processes cross shard transaction // with destination of current shard func (tc *transactionCoordinator) CreateMbsAndProcessCrossShardTransactionsDstMe( - hdr data.HeaderHandler, - maxTxRemaining uint32, - maxMbRemaining uint32, - round uint64, - haveTime func() bool, + hdr data.HeaderHandler, + maxTxRemaining uint32, + maxMbRemaining uint32, + round uint64, + haveTime func() bool, ) (block.MiniBlockSlice, uint32, bool) { - miniBlocks := make(block.MiniBlockSlice, 0) - nrTxAdded := uint32(0) - nrMBprocessed := 0 - - if hdr == nil || hdr.IsInterfaceNil() { - return miniBlocks, nrTxAdded, true - } - - crossMiniBlockHashes := hdr.GetMiniBlockHeadersWithDst(tc.shardCoordinator.SelfId()) - for key, senderShardId := range crossMiniBlockHashes { - if !haveTime() { - break - } - - if hdr.GetMiniBlockProcessed([]byte(key)) { - nrMBprocessed++ - continue - } - - miniVal, _ := tc.miniBlockPool.Peek([]byte(key)) - if miniVal == nil { - go tc.onRequestMiniBlock(senderShardId, []byte(key)) - continue - } - - miniBlock, ok := miniVal.(*block.MiniBlock) - if !ok { - continue - } - - preproc := tc.getPreProcessor(miniBlock.Type) - if preproc == nil || preproc.IsInterfaceNil() { - continue - } - - // overflow would happen if processing would continue - txOverFlow := nrTxAdded+uint32(len(miniBlock.TxHashes)) > maxTxRemaining - if txOverFlow { - return miniBlocks, nrTxAdded, false - } - - requestedTxs := 
preproc.RequestTransactionsForMiniBlock(*miniBlock) - if requestedTxs > 0 { - continue - } - - err := tc.processCompleteMiniBlock(preproc, miniBlock, round, haveTime) - if err != nil { - continue - } - - // all txs processed, add to processed miniblocks - miniBlocks = append(miniBlocks, miniBlock) - nrTxAdded = nrTxAdded + uint32(len(miniBlock.TxHashes)) - nrMBprocessed++ - - mbOverFlow := uint32(len(miniBlocks)) >= maxMbRemaining - if mbOverFlow { - return miniBlocks, nrTxAdded, false - } - } - - allMBsProcessed := nrMBprocessed == len(crossMiniBlockHashes) - return miniBlocks, nrTxAdded, allMBsProcessed + miniBlocks := make(block.MiniBlockSlice, 0) + nrTxAdded := uint32(0) + nrMBprocessed := 0 + + if hdr == nil || hdr.IsInterfaceNil() { + return miniBlocks, nrTxAdded, true + } + + crossMiniBlockHashes := hdr.GetMiniBlockHeadersWithDst(tc.shardCoordinator.SelfId()) + for key, senderShardId := range crossMiniBlockHashes { + if !haveTime() { + break + } + + if hdr.GetMiniBlockProcessed([]byte(key)) { + nrMBprocessed++ + continue + } + + miniVal, _ := tc.miniBlockPool.Peek([]byte(key)) + if miniVal == nil { + go tc.onRequestMiniBlock(senderShardId, []byte(key)) + continue + } + + miniBlock, ok := miniVal.(*block.MiniBlock) + if !ok { + continue + } + + preproc := tc.getPreProcessor(miniBlock.Type) + if preproc == nil || preproc.IsInterfaceNil() { + continue + } + + // overflow would happen if processing would continue + txOverFlow := nrTxAdded+uint32(len(miniBlock.TxHashes)) > maxTxRemaining + if txOverFlow { + return miniBlocks, nrTxAdded, false + } + + requestedTxs := preproc.RequestTransactionsForMiniBlock(*miniBlock) + if requestedTxs > 0 { + continue + } + + err := tc.processCompleteMiniBlock(preproc, miniBlock, round, haveTime) + if err != nil { + continue + } + + // all txs processed, add to processed miniblocks + miniBlocks = append(miniBlocks, miniBlock) + nrTxAdded = nrTxAdded + uint32(len(miniBlock.TxHashes)) + nrMBprocessed++ + + mbOverFlow := uint32(len(miniBlocks)) >= maxMbRemaining + if mbOverFlow { + return miniBlocks, nrTxAdded, false + } + } + + allMBsProcessed := nrMBprocessed == len(crossMiniBlockHashes) + return miniBlocks, nrTxAdded, allMBsProcessed } // CreateMbsAndProcessTransactionsFromMe creates miniblocks and processes transactions from pool func (tc *transactionCoordinator) CreateMbsAndProcessTransactionsFromMe( - maxTxSpaceRemained uint32, - maxMbSpaceRemained uint32, - round uint64, - haveTime func() bool, + maxTxSpaceRemained uint32, + maxMbSpaceRemained uint32, + round uint64, + haveTime func() bool, ) block.MiniBlockSlice { - txPreProc := tc.getPreProcessor(block.TxBlock) - if txPreProc == nil || txPreProc.IsInterfaceNil() { - return nil - } - - miniBlocks := make(block.MiniBlockSlice, 0) - txSpaceRemained := int(maxTxSpaceRemained) - - newMBAdded := true - for newMBAdded { - newMBAdded = false - - for shardId := uint32(0); shardId < tc.shardCoordinator.NumberOfShards(); shardId++ { - if txSpaceRemained <= 0 { - break - } - - mbSpaceRemained := int(maxMbSpaceRemained) - len(miniBlocks) - if mbSpaceRemained <= 0 { - break - } - - miniBlock, err := txPreProc.CreateAndProcessMiniBlock( - tc.shardCoordinator.SelfId(), - shardId, - txSpaceRemained, - haveTime, - round) - if err != nil { - continue - } - - if len(miniBlock.TxHashes) > 0 { - txSpaceRemained -= len(miniBlock.TxHashes) - miniBlocks = append(miniBlocks, miniBlock) - newMBAdded = true - } - } - } - - interMBs := tc.processAddedInterimTransactions() - if len(interMBs) > 0 { - miniBlocks = 
append(miniBlocks, interMBs...) - } - - rewardsMBs := tc.createRewardsMiniBlocks() - if len(rewardsMBs) > 0 { - miniBlocks = append(miniBlocks, rewardsMBs...) - } - - return miniBlocks + txPreProc := tc.getPreProcessor(block.TxBlock) + if txPreProc == nil || txPreProc.IsInterfaceNil() { + return nil + } + + miniBlocks := make(block.MiniBlockSlice, 0) + txSpaceRemained := int(maxTxSpaceRemained) + + newMBAdded := true + for newMBAdded { + newMBAdded = false + + for shardId := uint32(0); shardId < tc.shardCoordinator.NumberOfShards(); shardId++ { + if txSpaceRemained <= 0 { + break + } + + mbSpaceRemained := int(maxMbSpaceRemained) - len(miniBlocks) + if mbSpaceRemained <= 0 { + break + } + + miniBlock, err := txPreProc.CreateAndProcessMiniBlock( + tc.shardCoordinator.SelfId(), + shardId, + txSpaceRemained, + haveTime, + round) + if err != nil { + continue + } + + if len(miniBlock.TxHashes) > 0 { + txSpaceRemained -= len(miniBlock.TxHashes) + miniBlocks = append(miniBlocks, miniBlock) + newMBAdded = true + } + } + } + + interMBs := tc.processAddedInterimTransactions() + if len(interMBs) > 0 { + miniBlocks = append(miniBlocks, interMBs...) + } + + rewardsMBs := tc.createRewardsMiniBlocks() + if len(rewardsMBs) > 0 { + miniBlocks = append(miniBlocks, rewardsMBs...) + } + + return miniBlocks } func (tc *transactionCoordinator) createRewardsMiniBlocks() block.MiniBlockSlice { - // add rewards transactions to separate miniBlocks - interimProc := tc.getInterimProcessor(block.RewardsBlock) - if interimProc == nil { - return nil - } - - miniBlocks := make(block.MiniBlockSlice, 0) - rewardsMbs := interimProc.CreateAllInterMiniBlocks() - for _, mb := range rewardsMbs { - miniBlocks = append(miniBlocks, mb) - } - - return miniBlocks + // add rewards transactions to separate miniBlocks + interimProc := tc.getInterimProcessor(block.RewardsBlock) + if interimProc == nil { + return nil + } + + miniBlocks := make(block.MiniBlockSlice, 0) + rewardsMbs := interimProc.CreateAllInterMiniBlocks() + for _, mb := range rewardsMbs { + miniBlocks = append(miniBlocks, mb) + } + + return miniBlocks } func (tc *transactionCoordinator) processAddedInterimTransactions() block.MiniBlockSlice { - miniBlocks := make(block.MiniBlockSlice, 0) - - // processing has to be done in order, as the order of different type of transactions over the same account is strict - for _, blockType := range tc.keysInterimProcs { - if blockType == block.RewardsBlock { - // this has to be processed last - continue - } - - interimProc := tc.getInterimProcessor(blockType) - if interimProc == nil { - // this will never be reached as keysInterimProcs are the actual keys from the interimMap - continue - } - - currMbs := interimProc.CreateAllInterMiniBlocks() - for _, value := range currMbs { - miniBlocks = append(miniBlocks, value) - } - } - - return miniBlocks + miniBlocks := make(block.MiniBlockSlice, 0) + + // processing has to be done in order, as the order of different type of transactions over the same account is strict + for _, blockType := range tc.keysInterimProcs { + if blockType == block.RewardsBlock { + // this has to be processed last + continue + } + + interimProc := tc.getInterimProcessor(blockType) + if interimProc == nil { + // this will never be reached as keysInterimProcs are the actual keys from the interimMap + continue + } + + currMbs := interimProc.CreateAllInterMiniBlocks() + for _, value := range currMbs { + miniBlocks = append(miniBlocks, value) + } + } + + return miniBlocks } // CreateBlockStarted initializes necessary data 
for preprocessors at block create or block process func (tc *transactionCoordinator) CreateBlockStarted() { - tc.mutPreProcessor.RLock() - for _, value := range tc.txPreProcessors { - value.CreateBlockStarted() - } - tc.mutPreProcessor.RUnlock() - - tc.mutInterimProcessors.RLock() - for _, value := range tc.interimProcessors { - value.CreateBlockStarted() - } - tc.mutInterimProcessors.RUnlock() + tc.mutPreProcessor.RLock() + for _, value := range tc.txPreProcessors { + value.CreateBlockStarted() + } + tc.mutPreProcessor.RUnlock() + + tc.mutInterimProcessors.RLock() + for _, value := range tc.interimProcessors { + value.CreateBlockStarted() + } + tc.mutInterimProcessors.RUnlock() } func (tc *transactionCoordinator) getPreProcessor(blockType block.Type) process.PreProcessor { - tc.mutPreProcessor.RLock() - preprocessor, exists := tc.txPreProcessors[blockType] - tc.mutPreProcessor.RUnlock() + tc.mutPreProcessor.RLock() + preprocessor, exists := tc.txPreProcessors[blockType] + tc.mutPreProcessor.RUnlock() - if !exists { - return nil - } + if !exists { + return nil + } - return preprocessor + return preprocessor } func (tc *transactionCoordinator) getInterimProcessor(blockType block.Type) process.IntermediateTransactionHandler { - tc.mutInterimProcessors.RLock() - interProcessor, exists := tc.interimProcessors[blockType] - tc.mutInterimProcessors.RUnlock() + tc.mutInterimProcessors.RLock() + interProcessor, exists := tc.interimProcessors[blockType] + tc.mutInterimProcessors.RUnlock() - if !exists { - return nil - } + if !exists { + return nil + } - return interProcessor + return interProcessor } func createBroadcastTopic(shardC sharding.Coordinator, destShId uint32, mbType block.Type) (string, error) { - var baseTopic string - - switch mbType { - case block.TxBlock: - baseTopic = factory.TransactionTopic - case block.PeerBlock: - baseTopic = factory.PeerChBodyTopic - case block.SmartContractResultBlock: - baseTopic = factory.UnsignedTransactionTopic - case block.RewardsBlock: - baseTopic = factory.RewardsTransactionTopic - default: - return "", process.ErrUnknownBlockType - } - - transactionTopic := baseTopic + - shardC.CommunicationIdentifier(destShId) - - return transactionTopic, nil + var baseTopic string + + switch mbType { + case block.TxBlock: + baseTopic = factory.TransactionTopic + case block.PeerBlock: + baseTopic = factory.PeerChBodyTopic + case block.SmartContractResultBlock: + baseTopic = factory.UnsignedTransactionTopic + case block.RewardsBlock: + baseTopic = factory.RewardsTransactionTopic + default: + return "", process.ErrUnknownBlockType + } + + transactionTopic := baseTopic + + shardC.CommunicationIdentifier(destShId) + + return transactionTopic, nil } // CreateMarshalizedData creates marshalized data for broadcasting func (tc *transactionCoordinator) CreateMarshalizedData(body block.Body) (map[uint32]block.MiniBlockSlice, map[string][][]byte) { - mrsTxs := make(map[string][][]byte) - bodies := make(map[uint32]block.MiniBlockSlice) - - for i := 0; i < len(body); i++ { - miniblock := body[i] - receiverShardId := miniblock.ReceiverShardID - if receiverShardId == tc.shardCoordinator.SelfId() { // not taking into account miniblocks for current shard - continue - } - - broadcastTopic, err := createBroadcastTopic(tc.shardCoordinator, receiverShardId, miniblock.Type) - if err != nil { - log.Debug(err.Error()) - continue - } - - preproc := tc.getPreProcessor(miniblock.Type) - if preproc == nil || preproc.IsInterfaceNil() { - continue - } - - bodies[receiverShardId] = 
append(bodies[receiverShardId], miniblock) - - currMrsTxs, err := preproc.CreateMarshalizedData(miniblock.TxHashes) - if err != nil { - log.Debug(err.Error()) - continue - } - - if len(currMrsTxs) > 0 { - mrsTxs[broadcastTopic] = append(mrsTxs[broadcastTopic], currMrsTxs...) - } - - interimProc := tc.getInterimProcessor(miniblock.Type) - if interimProc == nil || interimProc.IsInterfaceNil() { - continue - } - - currMrsInterTxs, err := interimProc.CreateMarshalizedData(miniblock.TxHashes) - if err != nil { - log.Debug(err.Error()) - continue - } - - if len(currMrsInterTxs) > 0 { - mrsTxs[broadcastTopic] = append(mrsTxs[broadcastTopic], currMrsInterTxs...) - } - } - - return bodies, mrsTxs + mrsTxs := make(map[string][][]byte) + bodies := make(map[uint32]block.MiniBlockSlice) + + for i := 0; i < len(body); i++ { + miniblock := body[i] + receiverShardId := miniblock.ReceiverShardID + if receiverShardId == tc.shardCoordinator.SelfId() { // not taking into account miniblocks for current shard + continue + } + + broadcastTopic, err := createBroadcastTopic(tc.shardCoordinator, receiverShardId, miniblock.Type) + if err != nil { + log.Debug(err.Error()) + continue + } + + preproc := tc.getPreProcessor(miniblock.Type) + if preproc == nil || preproc.IsInterfaceNil() { + continue + } + + bodies[receiverShardId] = append(bodies[receiverShardId], miniblock) + + currMrsTxs, err := preproc.CreateMarshalizedData(miniblock.TxHashes) + if err != nil { + log.Debug(err.Error()) + continue + } + + if len(currMrsTxs) > 0 { + mrsTxs[broadcastTopic] = append(mrsTxs[broadcastTopic], currMrsTxs...) + } + + interimProc := tc.getInterimProcessor(miniblock.Type) + if interimProc == nil || interimProc.IsInterfaceNil() { + continue + } + + currMrsInterTxs, err := interimProc.CreateMarshalizedData(miniblock.TxHashes) + if err != nil { + log.Debug(err.Error()) + continue + } + + if len(currMrsInterTxs) > 0 { + mrsTxs[broadcastTopic] = append(mrsTxs[broadcastTopic], currMrsInterTxs...) 
+ } + } + + return bodies, mrsTxs } // GetAllCurrentUsedTxs returns the cached transaction data for current round func (tc *transactionCoordinator) GetAllCurrentUsedTxs(blockType block.Type) map[string]data.TransactionHandler { - txPool := make(map[string]data.TransactionHandler, 0) - interTxPool := make(map[string]data.TransactionHandler, 0) + txPool := make(map[string]data.TransactionHandler, 0) + interTxPool := make(map[string]data.TransactionHandler, 0) - preProc := tc.getPreProcessor(blockType) - if preProc != nil { - txPool = preProc.GetAllCurrentUsedTxs() - } + preProc := tc.getPreProcessor(blockType) + if preProc != nil { + txPool = preProc.GetAllCurrentUsedTxs() + } - interProc := tc.getInterimProcessor(blockType) - if interProc != nil { - interTxPool = interProc.GetAllCurrentFinishedTxs() - } + interProc := tc.getInterimProcessor(blockType) + if interProc != nil { + interTxPool = interProc.GetAllCurrentFinishedTxs() + } - for hash, tx := range interTxPool { - txPool[hash] = tx - } + for hash, tx := range interTxPool { + txPool[hash] = tx + } - return txPool + return txPool } // RequestMiniBlocks request miniblocks if missing func (tc *transactionCoordinator) RequestMiniBlocks(header data.HeaderHandler) { - if header == nil || header.IsInterfaceNil() { - return - } - - crossMiniBlockHashes := header.GetMiniBlockHeadersWithDst(tc.shardCoordinator.SelfId()) - for key, senderShardId := range crossMiniBlockHashes { - obj, _ := tc.miniBlockPool.Peek([]byte(key)) - if obj == nil { - go tc.onRequestMiniBlock(senderShardId, []byte(key)) - } - } + if header == nil || header.IsInterfaceNil() { + return + } + + crossMiniBlockHashes := header.GetMiniBlockHeadersWithDst(tc.shardCoordinator.SelfId()) + for key, senderShardId := range crossMiniBlockHashes { + obj, _ := tc.miniBlockPool.Peek([]byte(key)) + if obj == nil { + go tc.onRequestMiniBlock(senderShardId, []byte(key)) + } + } } // receivedMiniBlock is a callback function when a new miniblock was received // it will further ask for missing transactions func (tc *transactionCoordinator) receivedMiniBlock(miniBlockHash []byte) { - val, ok := tc.miniBlockPool.Peek(miniBlockHash) - if !ok { - return - } - - miniBlock, ok := val.(block.MiniBlock) - if !ok { - return - } - - preproc := tc.getPreProcessor(miniBlock.Type) - if preproc == nil || preproc.IsInterfaceNil() { - return - } - - _ = preproc.RequestTransactionsForMiniBlock(miniBlock) + val, ok := tc.miniBlockPool.Peek(miniBlockHash) + if !ok { + return + } + + miniBlock, ok := val.(block.MiniBlock) + if !ok { + return + } + + preproc := tc.getPreProcessor(miniBlock.Type) + if preproc == nil || preproc.IsInterfaceNil() { + return + } + + _ = preproc.RequestTransactionsForMiniBlock(miniBlock) } // processMiniBlockComplete - all transactions must be processed together, otherwise error func (tc *transactionCoordinator) processCompleteMiniBlock( - preproc process.PreProcessor, - miniBlock *block.MiniBlock, - round uint64, - haveTime func() bool, + preproc process.PreProcessor, + miniBlock *block.MiniBlock, + round uint64, + haveTime func() bool, ) error { - snapshot := tc.accounts.JournalLen() - err := preproc.ProcessMiniBlock(miniBlock, haveTime, round) - if err != nil { - log.Debug(err.Error()) - errAccountState := tc.accounts.RevertToSnapshot(snapshot) - if errAccountState != nil { - // TODO: evaluate if reloading the trie from disk will might solve the problem - log.Error(errAccountState.Error()) - } + snapshot := tc.accounts.JournalLen() + err := preproc.ProcessMiniBlock(miniBlock, 
haveTime, round) + if err != nil { + log.Debug(err.Error()) + errAccountState := tc.accounts.RevertToSnapshot(snapshot) + if errAccountState != nil { + // TODO: evaluate if reloading the trie from disk will might solve the problem + log.Error(errAccountState.Error()) + } - return err - } + return err + } - return nil + return nil } // VerifyCreatedBlockTransactions checks whether the created transactions are the same as the one proposed func (tc *transactionCoordinator) VerifyCreatedBlockTransactions(body block.Body) error { - tc.mutInterimProcessors.RLock() - defer tc.mutInterimProcessors.RUnlock() - errMutex := sync.Mutex{} - var errFound error - // TODO: think if it is good in parallel or it is needed in sequences - wg := sync.WaitGroup{} - wg.Add(len(tc.interimProcessors)) - - for key, interimProc := range tc.interimProcessors { - if key == block.RewardsBlock { - // this has to be processed last - wg.Done() - continue - } - - go func(intermediateProcessor process.IntermediateTransactionHandler) { - err := intermediateProcessor.VerifyInterMiniBlocks(body) - if err != nil { - errMutex.Lock() - errFound = err - errMutex.Unlock() - } - wg.Done() - }(interimProc) - } - - wg.Wait() - - if errFound != nil { - return errFound - } - - interimProc := tc.getInterimProcessor(block.RewardsBlock) - if interimProc == nil { - return nil - } - - return interimProc.VerifyInterMiniBlocks(body) + tc.mutInterimProcessors.RLock() + defer tc.mutInterimProcessors.RUnlock() + errMutex := sync.Mutex{} + var errFound error + // TODO: think if it is good in parallel or it is needed in sequences + wg := sync.WaitGroup{} + wg.Add(len(tc.interimProcessors)) + + for key, interimProc := range tc.interimProcessors { + if key == block.RewardsBlock { + // this has to be processed last + wg.Done() + continue + } + + go func(intermediateProcessor process.IntermediateTransactionHandler) { + err := intermediateProcessor.VerifyInterMiniBlocks(body) + if err != nil { + errMutex.Lock() + errFound = err + errMutex.Unlock() + } + wg.Done() + }(interimProc) + } + + wg.Wait() + + if errFound != nil { + return errFound + } + + interimProc := tc.getInterimProcessor(block.RewardsBlock) + if interimProc == nil { + return nil + } + + return interimProc.VerifyInterMiniBlocks(body) } // IsInterfaceNil returns true if there is no value under the interface func (tc *transactionCoordinator) IsInterfaceNil() bool { - if tc == nil { - return true - } - return false + if tc == nil { + return true + } + return false } diff --git a/process/factory/metachain/interceptorsContainerFactory.go b/process/factory/metachain/interceptorsContainerFactory.go index affa52cb1c1..b07dd0f535d 100644 --- a/process/factory/metachain/interceptorsContainerFactory.go +++ b/process/factory/metachain/interceptorsContainerFactory.go @@ -41,7 +41,7 @@ func NewInterceptorsContainerFactory( if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { return nil, process.ErrNilShardCoordinator } - if nodesCoordinator == nil || nodesCoordinator.IsInterfaceNil(){ + if nodesCoordinator == nil || nodesCoordinator.IsInterfaceNil() { return nil, process.ErrNilNodesCoordinator } if messenger == nil { diff --git a/process/factory/shard/intermediateProcessorsContainerFactory.go b/process/factory/shard/intermediateProcessorsContainerFactory.go index 43cd91f138e..a0016b3b0ee 100644 --- a/process/factory/shard/intermediateProcessorsContainerFactory.go +++ b/process/factory/shard/intermediateProcessorsContainerFactory.go @@ -18,7 +18,7 @@ type 
intermediateProcessorsContainerFactory struct { hasher hashing.Hasher addrConverter state.AddressConverter specialAddressHandler process.SpecialAddressHandler - store dataRetriever.StorageService + store dataRetriever.StorageService } // NewIntermediateProcessorsContainerFactory is responsible for creating a new intermediate processors factory object @@ -43,10 +43,10 @@ func NewIntermediateProcessorsContainerFactory( if addrConverter == nil || addrConverter.IsInterfaceNil() { return nil, process.ErrNilAddressConverter } - if specialAddressHandler == nil || specialAddressHandler.IsInterfaceNil(){ + if specialAddressHandler == nil || specialAddressHandler.IsInterfaceNil() { return nil, process.ErrNilSpecialAddressHandler } - if store == nil || store.IsInterfaceNil(){ + if store == nil || store.IsInterfaceNil() { return nil, process.ErrNilStorage } @@ -56,7 +56,7 @@ func NewIntermediateProcessorsContainerFactory( hasher: hasher, addrConverter: addrConverter, specialAddressHandler: specialAddressHandler, - store: store, + store: store, }, nil } @@ -117,4 +117,4 @@ func (ppcm *intermediateProcessorsContainerFactory) IsInterfaceNil() bool { return true } return false -} \ No newline at end of file +} diff --git a/process/factory/shard/preProcessorsContainerFactory.go b/process/factory/shard/preProcessorsContainerFactory.go index 87e38db556b..09bc0573f1b 100644 --- a/process/factory/shard/preProcessorsContainerFactory.go +++ b/process/factory/shard/preProcessorsContainerFactory.go @@ -73,9 +73,9 @@ func NewPreProcessorsContainerFactory( if scResultProcessor == nil || scResultProcessor.IsInterfaceNil() { return nil, process.ErrNilSmartContractResultProcessor } - if rewardsTxProcessor == nil || rewardsTxProcessor.IsInterfaceNil() { - return nil, process.ErrNilRewardsTxProcessor - } + if rewardsTxProcessor == nil || rewardsTxProcessor.IsInterfaceNil() { + return nil, process.ErrNilRewardsTxProcessor + } if requestHandler == nil || requestHandler.IsInterfaceNil() { return nil, process.ErrNilRequestHandler } @@ -180,8 +180,8 @@ func (ppcm *preProcessorsContainerFactory) createRewardsTransactionPreProcessor( // IsInterfaceNil returns true if there is no value under the interface func (ppcm *preProcessorsContainerFactory) IsInterfaceNil() bool { - if ppcm == nil { - return true - } - return false + if ppcm == nil { + return true + } + return false } diff --git a/process/interface.go b/process/interface.go index c7603d78f6b..a10ee84d65e 100644 --- a/process/interface.go +++ b/process/interface.go @@ -1,378 +1,378 @@ package process import ( - "math/big" - "time" - - "github.com/ElrondNetwork/elrond-go/data" - "github.com/ElrondNetwork/elrond-go/data/block" - "github.com/ElrondNetwork/elrond-go/data/rewardTx" - "github.com/ElrondNetwork/elrond-go/data/smartContractResult" - "github.com/ElrondNetwork/elrond-go/data/state" - "github.com/ElrondNetwork/elrond-go/data/transaction" - "github.com/ElrondNetwork/elrond-go/p2p" - "github.com/ElrondNetwork/elrond-go/process/smartContract/hooks" - "github.com/ElrondNetwork/elrond-go/sharding" - "github.com/ElrondNetwork/elrond-go/storage" - vmcommon "github.com/ElrondNetwork/elrond-vm-common" + "math/big" + "time" + + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" + "github.com/ElrondNetwork/elrond-go/data/smartContractResult" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/data/transaction" + 
"github.com/ElrondNetwork/elrond-go/p2p" + "github.com/ElrondNetwork/elrond-go/process/smartContract/hooks" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/storage" + vmcommon "github.com/ElrondNetwork/elrond-vm-common" ) // TransactionProcessor is the main interface for transaction execution engine type TransactionProcessor interface { - ProcessTransaction(transaction *transaction.Transaction, round uint64) error - IsInterfaceNil() bool + ProcessTransaction(transaction *transaction.Transaction, round uint64) error + IsInterfaceNil() bool } // RewardTransactionProcessor is the interface for reward transaction execution engine type RewardTransactionProcessor interface { - ProcessRewardTransaction(rewardTx *rewardTx.RewardTx) error - IsInterfaceNil() bool + ProcessRewardTransaction(rewardTx *rewardTx.RewardTx) error + IsInterfaceNil() bool } // SmartContractResultProcessor is the main interface for smart contract result execution engine type SmartContractResultProcessor interface { - ProcessSmartContractResult(scr *smartContractResult.SmartContractResult) error - IsInterfaceNil() bool + ProcessSmartContractResult(scr *smartContractResult.SmartContractResult) error + IsInterfaceNil() bool } // TxTypeHandler is an interface to calculate the transaction type type TxTypeHandler interface { - ComputeTransactionType(tx data.TransactionHandler) (TransactionType, error) - IsInterfaceNil() bool + ComputeTransactionType(tx data.TransactionHandler) (TransactionType, error) + IsInterfaceNil() bool } // TxValidator can determine if a provided transaction handler is valid or not from the process point of view type TxValidator interface { - IsTxValidForProcessing(txHandler data.TransactionHandler) bool - IsInterfaceNil() bool + IsTxValidForProcessing(txHandler data.TransactionHandler) bool + IsInterfaceNil() bool } // HeaderValidator can determine if a provided header handler is valid or not from the process point of view type HeaderValidator interface { - IsHeaderValidForProcessing(headerHandler data.HeaderHandler) bool - IsInterfaceNil() bool + IsHeaderValidForProcessing(headerHandler data.HeaderHandler) bool + IsInterfaceNil() bool } // TransactionCoordinator is an interface to coordinate transaction processing using multiple processors type TransactionCoordinator interface { - RequestMiniBlocks(header data.HeaderHandler) - RequestBlockTransactions(body block.Body) - IsDataPreparedForProcessing(haveTime func() time.Duration) error + RequestMiniBlocks(header data.HeaderHandler) + RequestBlockTransactions(body block.Body) + IsDataPreparedForProcessing(haveTime func() time.Duration) error - SaveBlockDataToStorage(body block.Body) error - RestoreBlockDataFromStorage(body block.Body) (int, map[int][][]byte, error) - RemoveBlockDataFromPool(body block.Body) error + SaveBlockDataToStorage(body block.Body) error + RestoreBlockDataFromStorage(body block.Body) (int, map[int][][]byte, error) + RemoveBlockDataFromPool(body block.Body) error - ProcessBlockTransaction(body block.Body, round uint64, haveTime func() time.Duration) error + ProcessBlockTransaction(body block.Body, round uint64, haveTime func() time.Duration) error - CreateBlockStarted() - CreateMbsAndProcessCrossShardTransactionsDstMe(header data.HeaderHandler, maxTxSpaceRemained uint32, maxMbSpaceRemained uint32, round uint64, haveTime func() bool) (block.MiniBlockSlice, uint32, bool) - CreateMbsAndProcessTransactionsFromMe(maxTxSpaceRemained uint32, maxMbSpaceRemained uint32, round uint64, haveTime func() bool) 
block.MiniBlockSlice + CreateBlockStarted() + CreateMbsAndProcessCrossShardTransactionsDstMe(header data.HeaderHandler, maxTxSpaceRemained uint32, maxMbSpaceRemained uint32, round uint64, haveTime func() bool) (block.MiniBlockSlice, uint32, bool) + CreateMbsAndProcessTransactionsFromMe(maxTxSpaceRemained uint32, maxMbSpaceRemained uint32, round uint64, haveTime func() bool) block.MiniBlockSlice - CreateMarshalizedData(body block.Body) (map[uint32]block.MiniBlockSlice, map[string][][]byte) + CreateMarshalizedData(body block.Body) (map[uint32]block.MiniBlockSlice, map[string][][]byte) - GetAllCurrentUsedTxs(blockType block.Type) map[string]data.TransactionHandler + GetAllCurrentUsedTxs(blockType block.Type) map[string]data.TransactionHandler - VerifyCreatedBlockTransactions(body block.Body) error - IsInterfaceNil() bool + VerifyCreatedBlockTransactions(body block.Body) error + IsInterfaceNil() bool } // SmartContractProcessor is the main interface for the smart contract caller engine type SmartContractProcessor interface { - ComputeTransactionType(tx *transaction.Transaction) (TransactionType, error) - ExecuteSmartContractTransaction(tx *transaction.Transaction, acntSrc, acntDst state.AccountHandler, round uint64) error - DeploySmartContract(tx *transaction.Transaction, acntSrc state.AccountHandler, round uint64) error - IsInterfaceNil() bool + ComputeTransactionType(tx *transaction.Transaction) (TransactionType, error) + ExecuteSmartContractTransaction(tx *transaction.Transaction, acntSrc, acntDst state.AccountHandler, round uint64) error + DeploySmartContract(tx *transaction.Transaction, acntSrc state.AccountHandler, round uint64) error + IsInterfaceNil() bool } // IntermediateTransactionHandler handles transactions which are not resolved in only one step type IntermediateTransactionHandler interface { - AddIntermediateTransactions(txs []data.TransactionHandler) error - CreateAllInterMiniBlocks() map[uint32]*block.MiniBlock - VerifyInterMiniBlocks(body block.Body) error - CreateMarshalizedData(txHashes [][]byte) ([][]byte, error) - SaveCurrentIntermediateTxToStorage() error - GetAllCurrentFinishedTxs() map[string]data.TransactionHandler - CreateBlockStarted() - IsInterfaceNil() bool + AddIntermediateTransactions(txs []data.TransactionHandler) error + CreateAllInterMiniBlocks() map[uint32]*block.MiniBlock + VerifyInterMiniBlocks(body block.Body) error + CreateMarshalizedData(txHashes [][]byte) ([][]byte, error) + SaveCurrentIntermediateTxToStorage() error + GetAllCurrentFinishedTxs() map[string]data.TransactionHandler + CreateBlockStarted() + IsInterfaceNil() bool } // TransactionVerifier interface validates if the transaction is good and if it should be processed type TransactionVerifier interface { - IsTransactionValid(tx data.TransactionHandler) error + IsTransactionValid(tx data.TransactionHandler) error } // UnsignedTxHandler creates and verifies unsigned transactions for current round type UnsignedTxHandler interface { - CleanProcessedUTxs() - ProcessTransactionFee(cost *big.Int) - CreateAllUTxs() []data.TransactionHandler - VerifyCreatedUTxs() error - AddRewardTxFromBlock(tx data.TransactionHandler) - IsInterfaceNil() bool + CleanProcessedUTxs() + ProcessTransactionFee(cost *big.Int) + CreateAllUTxs() []data.TransactionHandler + VerifyCreatedUTxs() error + AddRewardTxFromBlock(tx data.TransactionHandler) + IsInterfaceNil() bool } // SpecialAddressHandler responds with needed special addresses type SpecialAddressHandler interface { - SetElrondCommunityAddress(elrond []byte) - 
ElrondCommunityAddress() []byte - SetConsensusRewardAddresses(consensusRewardAddresses []string) - ConsensusRewardAddresses() []string - LeaderAddress() []byte - BurnAddress() []byte - ShardIdForAddress([]byte) (uint32, error) - IsInterfaceNil() bool + SetElrondCommunityAddress(elrond []byte) + ElrondCommunityAddress() []byte + SetConsensusRewardAddresses(consensusRewardAddresses []string) + ConsensusRewardAddresses() []string + LeaderAddress() []byte + BurnAddress() []byte + ShardIdForAddress([]byte) (uint32, error) + IsInterfaceNil() bool } // PreProcessor is an interface used to prepare and process transaction data type PreProcessor interface { - CreateBlockStarted() - IsDataPrepared(requestedTxs int, haveTime func() time.Duration) error + CreateBlockStarted() + IsDataPrepared(requestedTxs int, haveTime func() time.Duration) error - RemoveTxBlockFromPools(body block.Body, miniBlockPool storage.Cacher) error - RestoreTxBlockIntoPools(body block.Body, miniBlockPool storage.Cacher) (int, map[int][]byte, error) - SaveTxBlockToStorage(body block.Body) error + RemoveTxBlockFromPools(body block.Body, miniBlockPool storage.Cacher) error + RestoreTxBlockIntoPools(body block.Body, miniBlockPool storage.Cacher) (int, map[int][]byte, error) + SaveTxBlockToStorage(body block.Body) error - ProcessBlockTransactions(body block.Body, round uint64, haveTime func() time.Duration) error - RequestBlockTransactions(body block.Body) int + ProcessBlockTransactions(body block.Body, round uint64, haveTime func() time.Duration) error + RequestBlockTransactions(body block.Body) int - CreateMarshalizedData(txHashes [][]byte) ([][]byte, error) + CreateMarshalizedData(txHashes [][]byte) ([][]byte, error) - RequestTransactionsForMiniBlock(mb block.MiniBlock) int - ProcessMiniBlock(miniBlock *block.MiniBlock, haveTime func() bool, round uint64) error - CreateAndProcessMiniBlock(sndShardId, dstShardId uint32, spaceRemained int, haveTime func() bool, round uint64) (*block.MiniBlock, error) + RequestTransactionsForMiniBlock(mb block.MiniBlock) int + ProcessMiniBlock(miniBlock *block.MiniBlock, haveTime func() bool, round uint64) error + CreateAndProcessMiniBlock(sndShardId, dstShardId uint32, spaceRemained int, haveTime func() bool, round uint64) (*block.MiniBlock, error) - GetAllCurrentUsedTxs() map[string]data.TransactionHandler - IsInterfaceNil() bool + GetAllCurrentUsedTxs() map[string]data.TransactionHandler + IsInterfaceNil() bool } // BlockProcessor is the main interface for block execution engine type BlockProcessor interface { - ProcessBlock(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler, haveTime func() time.Duration) error - CommitBlock(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler) error - RevertAccountState() - CreateBlockBody(round uint64, haveTime func() bool) (data.BodyHandler, error) - RestoreBlockIntoPools(header data.HeaderHandler, body data.BodyHandler) error - CreateBlockHeader(body data.BodyHandler, round uint64, haveTime func() bool) (data.HeaderHandler, error) - MarshalizedDataToBroadcast(header data.HeaderHandler, body data.BodyHandler) (map[uint32][]byte, map[string][][]byte, error) - DecodeBlockBody(dta []byte) data.BodyHandler - DecodeBlockHeader(dta []byte) data.HeaderHandler - AddLastNotarizedHdr(shardId uint32, processedHdr data.HeaderHandler) - SetConsensusRewardAddresses(consensusRewardAddresses []string) - IsInterfaceNil() bool + ProcessBlock(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler, 
haveTime func() time.Duration) error + CommitBlock(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler) error + RevertAccountState() + CreateBlockBody(round uint64, haveTime func() bool) (data.BodyHandler, error) + RestoreBlockIntoPools(header data.HeaderHandler, body data.BodyHandler) error + CreateBlockHeader(body data.BodyHandler, round uint64, haveTime func() bool) (data.HeaderHandler, error) + MarshalizedDataToBroadcast(header data.HeaderHandler, body data.BodyHandler) (map[uint32][]byte, map[string][][]byte, error) + DecodeBlockBody(dta []byte) data.BodyHandler + DecodeBlockHeader(dta []byte) data.HeaderHandler + AddLastNotarizedHdr(shardId uint32, processedHdr data.HeaderHandler) + SetConsensusRewardAddresses(consensusRewardAddresses []string) + IsInterfaceNil() bool } // Checker provides functionality to checks the integrity and validity of a data structure type Checker interface { - // IntegrityAndValidity does both validity and integrity checks on the data structure - IntegrityAndValidity(coordinator sharding.Coordinator) error - // Integrity checks only the integrity of the data - Integrity(coordinator sharding.Coordinator) error - // IsInterfaceNil returns true if there is no value under the interface - IsInterfaceNil() bool + // IntegrityAndValidity does both validity and integrity checks on the data structure + IntegrityAndValidity(coordinator sharding.Coordinator) error + // Integrity checks only the integrity of the data + Integrity(coordinator sharding.Coordinator) error + // IsInterfaceNil returns true if there is no value under the interface + IsInterfaceNil() bool } // SigVerifier provides functionality to verify a signature of a signed data structure that holds also the verifying parameters type SigVerifier interface { - VerifySig() error + VerifySig() error } // SignedDataValidator provides functionality to check the validity and signature of a data structure type SignedDataValidator interface { - SigVerifier - Checker + SigVerifier + Checker } // HashAccesser interface provides functionality over hashable objects type HashAccesser interface { - SetHash([]byte) - Hash() []byte + SetHash([]byte) + Hash() []byte } // InterceptedBlockBody interface provides functionality over intercepted blocks type InterceptedBlockBody interface { - Checker - HashAccesser - GetUnderlyingObject() interface{} + Checker + HashAccesser + GetUnderlyingObject() interface{} } // Bootstrapper is an interface that defines the behaviour of a struct that is able // to synchronize the node type Bootstrapper interface { - AddSyncStateListener(func(isSyncing bool)) - ShouldSync() bool - StopSync() - StartSync() - IsInterfaceNil() bool + AddSyncStateListener(func(isSyncing bool)) + ShouldSync() bool + StopSync() + StartSync() + IsInterfaceNil() bool } // ForkDetector is an interface that defines the behaviour of a struct that is able // to detect forks type ForkDetector interface { - AddHeader(header data.HeaderHandler, headerHash []byte, state BlockHeaderState, finalHeader data.HeaderHandler, finalHeaderHash []byte) error - RemoveHeaders(nonce uint64, hash []byte) - CheckFork() (forkDetected bool, nonce uint64, hash []byte) - GetHighestFinalBlockNonce() uint64 - ProbableHighestNonce() uint64 - ResetProbableHighestNonceIfNeeded() - IsInterfaceNil() bool + AddHeader(header data.HeaderHandler, headerHash []byte, state BlockHeaderState, finalHeader data.HeaderHandler, finalHeaderHash []byte) error + RemoveHeaders(nonce uint64, hash []byte) + CheckFork() (forkDetected bool, 
nonce uint64, hash []byte) + GetHighestFinalBlockNonce() uint64 + ProbableHighestNonce() uint64 + ResetProbableHighestNonceIfNeeded() + IsInterfaceNil() bool } // InterceptorsContainer defines an interceptors holder data type with basic functionality type InterceptorsContainer interface { - Get(key string) (Interceptor, error) - Add(key string, val Interceptor) error - AddMultiple(keys []string, interceptors []Interceptor) error - Replace(key string, val Interceptor) error - Remove(key string) - Len() int - IsInterfaceNil() bool + Get(key string) (Interceptor, error) + Add(key string, val Interceptor) error + AddMultiple(keys []string, interceptors []Interceptor) error + Replace(key string, val Interceptor) error + Remove(key string) + Len() int + IsInterfaceNil() bool } // InterceptorsContainerFactory defines the functionality to create an interceptors container type InterceptorsContainerFactory interface { - Create() (InterceptorsContainer, error) - IsInterfaceNil() bool + Create() (InterceptorsContainer, error) + IsInterfaceNil() bool } // PreProcessorsContainer defines an PreProcessors holder data type with basic functionality type PreProcessorsContainer interface { - Get(key block.Type) (PreProcessor, error) - Add(key block.Type, val PreProcessor) error - AddMultiple(keys []block.Type, preprocessors []PreProcessor) error - Replace(key block.Type, val PreProcessor) error - Remove(key block.Type) - Len() int - Keys() []block.Type - IsInterfaceNil() bool + Get(key block.Type) (PreProcessor, error) + Add(key block.Type, val PreProcessor) error + AddMultiple(keys []block.Type, preprocessors []PreProcessor) error + Replace(key block.Type, val PreProcessor) error + Remove(key block.Type) + Len() int + Keys() []block.Type + IsInterfaceNil() bool } // PreProcessorsContainerFactory defines the functionality to create an PreProcessors container type PreProcessorsContainerFactory interface { - Create() (PreProcessorsContainer, error) - IsInterfaceNil() bool + Create() (PreProcessorsContainer, error) + IsInterfaceNil() bool } // IntermediateProcessorContainer defines an IntermediateProcessor holder data type with basic functionality type IntermediateProcessorContainer interface { - Get(key block.Type) (IntermediateTransactionHandler, error) - Add(key block.Type, val IntermediateTransactionHandler) error - AddMultiple(keys []block.Type, preprocessors []IntermediateTransactionHandler) error - Replace(key block.Type, val IntermediateTransactionHandler) error - Remove(key block.Type) - Len() int - Keys() []block.Type - IsInterfaceNil() bool + Get(key block.Type) (IntermediateTransactionHandler, error) + Add(key block.Type, val IntermediateTransactionHandler) error + AddMultiple(keys []block.Type, preprocessors []IntermediateTransactionHandler) error + Replace(key block.Type, val IntermediateTransactionHandler) error + Remove(key block.Type) + Len() int + Keys() []block.Type + IsInterfaceNil() bool } // IntermediateProcessorsContainerFactory defines the functionality to create an IntermediateProcessors container type IntermediateProcessorsContainerFactory interface { - Create() (IntermediateProcessorContainer, error) - IsInterfaceNil() bool + Create() (IntermediateProcessorContainer, error) + IsInterfaceNil() bool } // VirtualMachinesContainer defines a virtual machine holder data type with basic functionality type VirtualMachinesContainer interface { - Get(key []byte) (vmcommon.VMExecutionHandler, error) - Add(key []byte, val vmcommon.VMExecutionHandler) error - AddMultiple(keys [][]byte, vms 
[]vmcommon.VMExecutionHandler) error - Replace(key []byte, val vmcommon.VMExecutionHandler) error - Remove(key []byte) - Len() int - Keys() [][]byte - IsInterfaceNil() bool + Get(key []byte) (vmcommon.VMExecutionHandler, error) + Add(key []byte, val vmcommon.VMExecutionHandler) error + AddMultiple(keys [][]byte, vms []vmcommon.VMExecutionHandler) error + Replace(key []byte, val vmcommon.VMExecutionHandler) error + Remove(key []byte) + Len() int + Keys() [][]byte + IsInterfaceNil() bool } // VirtualMachinesContainerFactory defines the functionality to create a virtual machine container type VirtualMachinesContainerFactory interface { - Create() (VirtualMachinesContainer, error) - VMAccountsDB() *hooks.VMAccountsDB - IsInterfaceNil() bool + Create() (VirtualMachinesContainer, error) + VMAccountsDB() *hooks.VMAccountsDB + IsInterfaceNil() bool } // Interceptor defines what a data interceptor should do // It should also adhere to the p2p.MessageProcessor interface so it can wire to a p2p.Messenger type Interceptor interface { - ProcessReceivedMessage(message p2p.MessageP2P) error - IsInterfaceNil() bool + ProcessReceivedMessage(message p2p.MessageP2P) error + IsInterfaceNil() bool } // MessageHandler defines the functionality needed by structs to send data to other peers type MessageHandler interface { - ConnectedPeersOnTopic(topic string) []p2p.PeerID - SendToConnectedPeer(topic string, buff []byte, peerID p2p.PeerID) error - IsInterfaceNil() bool + ConnectedPeersOnTopic(topic string) []p2p.PeerID + SendToConnectedPeer(topic string, buff []byte, peerID p2p.PeerID) error + IsInterfaceNil() bool } // TopicHandler defines the functionality needed by structs to manage topics and message processors type TopicHandler interface { - HasTopic(name string) bool - CreateTopic(name string, createChannelForTopic bool) error - RegisterMessageProcessor(topic string, handler p2p.MessageProcessor) error + HasTopic(name string) bool + CreateTopic(name string, createChannelForTopic bool) error + RegisterMessageProcessor(topic string, handler p2p.MessageProcessor) error } // TopicMessageHandler defines the functionality needed by structs to manage topics, message processors and to send data // to other peers type TopicMessageHandler interface { - MessageHandler - TopicHandler + MessageHandler + TopicHandler } // DataPacker can split a large slice of byte slices in smaller packets type DataPacker interface { - PackDataInChunks(data [][]byte, limit int) ([][]byte, error) - IsInterfaceNil() bool + PackDataInChunks(data [][]byte, limit int) ([][]byte, error) + IsInterfaceNil() bool } // BlocksTracker defines the functionality to track all the notarised blocks type BlocksTracker interface { - UnnotarisedBlocks() []data.HeaderHandler - RemoveNotarisedBlocks(headerHandler data.HeaderHandler) error - AddBlock(headerHandler data.HeaderHandler) - SetBlockBroadcastRound(nonce uint64, round int64) - BlockBroadcastRound(nonce uint64) int64 - IsInterfaceNil() bool + UnnotarisedBlocks() []data.HeaderHandler + RemoveNotarisedBlocks(headerHandler data.HeaderHandler) error + AddBlock(headerHandler data.HeaderHandler) + SetBlockBroadcastRound(nonce uint64, round int64) + BlockBroadcastRound(nonce uint64) int64 + IsInterfaceNil() bool } // RequestHandler defines the methods through which request to data can be made type RequestHandler interface { - RequestHeaderByNonce(shardId uint32, nonce uint64) - RequestTransaction(shardId uint32, txHashes [][]byte) - RequestUnsignedTransactions(destShardID uint32, scrHashes [][]byte) - 
RequestRewardTransactions(destShardID uint32, txHashes [][]byte) - RequestMiniBlock(shardId uint32, miniblockHash []byte) - RequestHeader(shardId uint32, hash []byte) - IsInterfaceNil() bool + RequestHeaderByNonce(shardId uint32, nonce uint64) + RequestTransaction(shardId uint32, txHashes [][]byte) + RequestUnsignedTransactions(destShardID uint32, scrHashes [][]byte) + RequestRewardTransactions(destShardID uint32, txHashes [][]byte) + RequestMiniBlock(shardId uint32, miniblockHash []byte) + RequestHeader(shardId uint32, hash []byte) + IsInterfaceNil() bool } // ArgumentsParser defines the functionality to parse transaction data into arguments and code for smart contracts type ArgumentsParser interface { - GetArguments() ([]*big.Int, error) - GetCode() ([]byte, error) - GetFunction() (string, error) - ParseData(data string) error + GetArguments() ([]*big.Int, error) + GetCode() ([]byte, error) + GetFunction() (string, error) + ParseData(data string) error - CreateDataFromStorageUpdate(storageUpdates []*vmcommon.StorageUpdate) string - GetStorageUpdates(data string) ([]*vmcommon.StorageUpdate, error) - IsInterfaceNil() bool + CreateDataFromStorageUpdate(storageUpdates []*vmcommon.StorageUpdate) string + GetStorageUpdates(data string) ([]*vmcommon.StorageUpdate, error) + IsInterfaceNil() bool } // TemporaryAccountsHandler defines the functionality to create temporary accounts and pass to VM. // This holder will contain usually one account from shard X that calls a SC in shard Y // so when executing the code in shard Y, this impl will hold an ephemeral copy of the sender account from shard X type TemporaryAccountsHandler interface { - AddTempAccount(address []byte, balance *big.Int, nonce uint64) - CleanTempAccounts() - TempAccount(address []byte) state.AccountHandler - IsInterfaceNil() bool + AddTempAccount(address []byte, balance *big.Int, nonce uint64) + CleanTempAccounts() + TempAccount(address []byte) state.AccountHandler + IsInterfaceNil() bool } // BlockSizeThrottler defines the functionality of adapting the node to the network speed/latency when it should send a // block to its peers which should be received in a limited time frame type BlockSizeThrottler interface { - MaxItemsToAdd() uint32 - Add(round uint64, items uint32) - Succeed(round uint64) - ComputeMaxItems() - IsInterfaceNil() bool + MaxItemsToAdd() uint32 + Add(round uint64, items uint32) + Succeed(round uint64) + ComputeMaxItems() + IsInterfaceNil() bool } diff --git a/process/mock/blockProcessorMock.go b/process/mock/blockProcessorMock.go index 44e2351f895..4c46121639b 100644 --- a/process/mock/blockProcessorMock.go +++ b/process/mock/blockProcessorMock.go @@ -73,8 +73,8 @@ func (blProcMock BlockProcessorMock) SetConsensusRewardAddresses([]string) { // IsInterfaceNil returns true if there is no value under the interface func (blProcMock *BlockProcessorMock) IsInterfaceNil() bool { - if blProcMock == nil { - return true - } - return false + if blProcMock == nil { + return true + } + return false } diff --git a/process/mock/nodesCoordinatorMock.go b/process/mock/nodesCoordinatorMock.go index 9058235e740..c9bc56f6276 100644 --- a/process/mock/nodesCoordinatorMock.go +++ b/process/mock/nodesCoordinatorMock.go @@ -1,180 +1,180 @@ package mock import ( - "bytes" - "fmt" - "math/big" + "bytes" + "fmt" + "math/big" - "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/sharding" ) // NodesCoordinator defines the behaviour of a struct able to do validator group selection type NodesCoordinatorMock 
struct { - Validators map[uint32][]sharding.Validator - ShardConsensusSize uint32 - MetaConsensusSize uint32 - ShardId uint32 - NbShards uint32 - GetSelectedPublicKeysCalled func(selection []byte, shardId uint32) (publicKeys []string, err error) - GetValidatorsPublicKeysCalled func(randomness []byte, round uint64, shardId uint32) ([]string, error) - GetValidatorsRewardsAddressesCalled func(randomness []byte, round uint64, shardId uint32) ([]string, error) - LoadNodesPerShardsCalled func(nodes map[uint32][]sharding.Validator) error - ComputeValidatorsGroupCalled func(randomness []byte, round uint64, shardId uint32) (validatorsGroup []sharding.Validator, err error) - GetValidatorWithPublicKeyCalled func(publicKey []byte) (validator sharding.Validator, shardId uint32, err error) + Validators map[uint32][]sharding.Validator + ShardConsensusSize uint32 + MetaConsensusSize uint32 + ShardId uint32 + NbShards uint32 + GetSelectedPublicKeysCalled func(selection []byte, shardId uint32) (publicKeys []string, err error) + GetValidatorsPublicKeysCalled func(randomness []byte, round uint64, shardId uint32) ([]string, error) + GetValidatorsRewardsAddressesCalled func(randomness []byte, round uint64, shardId uint32) ([]string, error) + LoadNodesPerShardsCalled func(nodes map[uint32][]sharding.Validator) error + ComputeValidatorsGroupCalled func(randomness []byte, round uint64, shardId uint32) (validatorsGroup []sharding.Validator, err error) + GetValidatorWithPublicKeyCalled func(publicKey []byte) (validator sharding.Validator, shardId uint32, err error) } func NewNodesCoordinatorMock() *NodesCoordinatorMock { - nbShards := uint32(1) - nodesPerShard := 2 - validatorsMap := make(map[uint32][]sharding.Validator) - - for sh := uint32(0); sh < nbShards; sh++ { - validatorsList := make([]sharding.Validator, nodesPerShard) - for v := 0; v < nodesPerShard; v++ { - validatorsList[v], _ = sharding.NewValidator( - big.NewInt(10), - 1, - []byte(fmt.Sprintf("pubKey%d%d", sh, v)), - []byte(fmt.Sprintf("address%d%d", sh, v)), - ) - } - validatorsMap[sh] = validatorsList - } - - return &NodesCoordinatorMock{ - ShardConsensusSize: 1, - MetaConsensusSize: 1, - ShardId: 0, - NbShards: nbShards, - Validators: validatorsMap, - } + nbShards := uint32(1) + nodesPerShard := 2 + validatorsMap := make(map[uint32][]sharding.Validator) + + for sh := uint32(0); sh < nbShards; sh++ { + validatorsList := make([]sharding.Validator, nodesPerShard) + for v := 0; v < nodesPerShard; v++ { + validatorsList[v], _ = sharding.NewValidator( + big.NewInt(10), + 1, + []byte(fmt.Sprintf("pubKey%d%d", sh, v)), + []byte(fmt.Sprintf("address%d%d", sh, v)), + ) + } + validatorsMap[sh] = validatorsList + } + + return &NodesCoordinatorMock{ + ShardConsensusSize: 1, + MetaConsensusSize: 1, + ShardId: 0, + NbShards: nbShards, + Validators: validatorsMap, + } } func (ncm *NodesCoordinatorMock) GetSelectedPublicKeys(selection []byte, shardId uint32) (publicKeys []string, err error) { - if ncm.GetSelectedPublicKeysCalled != nil { - return ncm.GetSelectedPublicKeysCalled(selection, shardId) - } + if ncm.GetSelectedPublicKeysCalled != nil { + return ncm.GetSelectedPublicKeysCalled(selection, shardId) + } - if len(ncm.Validators) == 0 { - return nil, sharding.ErrNilInputNodesMap - } + if len(ncm.Validators) == 0 { + return nil, sharding.ErrNilInputNodesMap + } - pubKeys := make([]string, 0) + pubKeys := make([]string, 0) - for _, v := range ncm.Validators[shardId] { - pubKeys = append(pubKeys, string(v.PubKey())) - } + for _, v := range ncm.Validators[shardId] 
{ + pubKeys = append(pubKeys, string(v.PubKey())) + } - return pubKeys, nil + return pubKeys, nil } func (ncm *NodesCoordinatorMock) GetValidatorsPublicKeys( - randomness []byte, - round uint64, - shardId uint32, + randomness []byte, + round uint64, + shardId uint32, ) ([]string, error) { - if ncm.GetValidatorsPublicKeysCalled != nil { - return ncm.GetValidatorsPublicKeysCalled(randomness, round, shardId) - } + if ncm.GetValidatorsPublicKeysCalled != nil { + return ncm.GetValidatorsPublicKeysCalled(randomness, round, shardId) + } - validators, err := ncm.ComputeValidatorsGroup(randomness, round, shardId) - if err != nil { - return nil, err - } + validators, err := ncm.ComputeValidatorsGroup(randomness, round, shardId) + if err != nil { + return nil, err + } - valGrStr := make([]string, 0) + valGrStr := make([]string, 0) - for _, v := range validators { - valGrStr = append(valGrStr, string(v.PubKey())) - } + for _, v := range validators { + valGrStr = append(valGrStr, string(v.PubKey())) + } - return valGrStr, nil + return valGrStr, nil } func (ncm *NodesCoordinatorMock) GetValidatorsRewardsAddresses( - randomness []byte, - round uint64, - shardId uint32, + randomness []byte, + round uint64, + shardId uint32, ) ([]string, error) { - if ncm.GetValidatorsPublicKeysCalled != nil { - return ncm.GetValidatorsRewardsAddressesCalled(randomness, round, shardId) - } + if ncm.GetValidatorsPublicKeysCalled != nil { + return ncm.GetValidatorsRewardsAddressesCalled(randomness, round, shardId) + } - validators, err := ncm.ComputeValidatorsGroup(randomness, round, shardId) - if err != nil { - return nil, err - } + validators, err := ncm.ComputeValidatorsGroup(randomness, round, shardId) + if err != nil { + return nil, err + } - addresses := make([]string, 0) - for _, v := range validators { - addresses = append(addresses, string(v.Address())) - } + addresses := make([]string, 0) + for _, v := range validators { + addresses = append(addresses, string(v.Address())) + } - return addresses, nil + return addresses, nil } func (ncm *NodesCoordinatorMock) SetNodesPerShards(nodes map[uint32][]sharding.Validator) error { - if ncm.LoadNodesPerShardsCalled != nil { - return ncm.LoadNodesPerShardsCalled(nodes) - } + if ncm.LoadNodesPerShardsCalled != nil { + return ncm.LoadNodesPerShardsCalled(nodes) + } - if nodes == nil { - return sharding.ErrNilInputNodesMap - } + if nodes == nil { + return sharding.ErrNilInputNodesMap + } - ncm.Validators = nodes + ncm.Validators = nodes - return nil + return nil } func (ncm *NodesCoordinatorMock) ComputeValidatorsGroup( - randomess []byte, - round uint64, - shardId uint32, + randomess []byte, + round uint64, + shardId uint32, ) ([]sharding.Validator, error) { - var consensusSize uint32 + var consensusSize uint32 - if ncm.ComputeValidatorsGroupCalled != nil { - return ncm.ComputeValidatorsGroupCalled(randomess, round, shardId) - } + if ncm.ComputeValidatorsGroupCalled != nil { + return ncm.ComputeValidatorsGroupCalled(randomess, round, shardId) + } - if ncm.ShardId == sharding.MetachainShardId { - consensusSize = ncm.MetaConsensusSize - } else { - consensusSize = ncm.ShardConsensusSize - } + if ncm.ShardId == sharding.MetachainShardId { + consensusSize = ncm.MetaConsensusSize + } else { + consensusSize = ncm.ShardConsensusSize + } - if randomess == nil { - return nil, sharding.ErrNilRandomness - } + if randomess == nil { + return nil, sharding.ErrNilRandomness + } - validatorsGroup := make([]sharding.Validator, 0) + validatorsGroup := make([]sharding.Validator, 0) - for i := 
uint32(0); i < consensusSize; i++ { - validatorsGroup = append(validatorsGroup, ncm.Validators[shardId][i]) - } + for i := uint32(0); i < consensusSize; i++ { + validatorsGroup = append(validatorsGroup, ncm.Validators[shardId][i]) + } - return validatorsGroup, nil + return validatorsGroup, nil } func (ncm *NodesCoordinatorMock) GetValidatorWithPublicKey(publicKey []byte) (sharding.Validator, uint32, error) { - if ncm.GetValidatorWithPublicKeyCalled != nil { - return ncm.GetValidatorWithPublicKeyCalled(publicKey) - } - - if publicKey == nil { - return nil, 0, sharding.ErrNilPubKey - } - - for shardId, shardEligible := range ncm.Validators { - for i := 0; i < len(shardEligible); i++ { - if bytes.Equal(publicKey, shardEligible[i].PubKey()) { - return shardEligible[i], shardId, nil - } - } - } - - return nil, 0, sharding.ErrValidatorNotFound + if ncm.GetValidatorWithPublicKeyCalled != nil { + return ncm.GetValidatorWithPublicKeyCalled(publicKey) + } + + if publicKey == nil { + return nil, 0, sharding.ErrNilPubKey + } + + for shardId, shardEligible := range ncm.Validators { + for i := 0; i < len(shardEligible); i++ { + if bytes.Equal(publicKey, shardEligible[i].PubKey()) { + return shardEligible[i], shardId, nil + } + } + } + + return nil, 0, sharding.ErrValidatorNotFound } func (ncm *NodesCoordinatorMock) IsInterfaceNil() bool { diff --git a/process/mock/poolsHolderStub.go b/process/mock/poolsHolderStub.go index df7a40999d3..d189b57d055 100644 --- a/process/mock/poolsHolderStub.go +++ b/process/mock/poolsHolderStub.go @@ -50,8 +50,8 @@ func (phs *PoolsHolderStub) RewardTransactions() dataRetriever.ShardedDataCacher // IsInterfaceNil returns true if there is no value under the interface func (phs *PoolsHolderStub) IsInterfaceNil() bool { - if phs == nil { - return true - } - return false + if phs == nil { + return true + } + return false } diff --git a/process/mock/rewardTxProcessorMock.go b/process/mock/rewardTxProcessorMock.go index 1746f5108d9..883879e6b56 100644 --- a/process/mock/rewardTxProcessorMock.go +++ b/process/mock/rewardTxProcessorMock.go @@ -1,24 +1,24 @@ package mock import ( - "github.com/ElrondNetwork/elrond-go/data/rewardTx" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" ) type RewardTxProcessorMock struct { - ProcessRewardTransactionCalled func(rTx *rewardTx.RewardTx) error + ProcessRewardTransactionCalled func(rTx *rewardTx.RewardTx) error } func (scrp *RewardTxProcessorMock) ProcessRewardTransaction(rTx *rewardTx.RewardTx) error { - if scrp.ProcessRewardTransactionCalled == nil { - return nil - } + if scrp.ProcessRewardTransactionCalled == nil { + return nil + } - return scrp.ProcessRewardTransactionCalled(rTx) + return scrp.ProcessRewardTransactionCalled(rTx) } func (scrp *RewardTxProcessorMock) IsInterfaceNil() bool { - if scrp == nil { - return true - } - return false + if scrp == nil { + return true + } + return false } diff --git a/process/mock/specialAddressHandlerMock.go b/process/mock/specialAddressHandlerMock.go index 1b59991984d..cf38adf5835 100644 --- a/process/mock/specialAddressHandlerMock.go +++ b/process/mock/specialAddressHandlerMock.go @@ -58,4 +58,4 @@ func (sh *SpecialAddressHandlerMock) IsInterfaceNil() bool { return true } return false -} \ No newline at end of file +} diff --git a/process/mock/txTypeHandlerMock.go b/process/mock/txTypeHandlerMock.go index 06b13d78677..76cedc35360 100644 --- a/process/mock/txTypeHandlerMock.go +++ b/process/mock/txTypeHandlerMock.go @@ -23,4 +23,4 @@ func (th *TxTypeHandlerMock) IsInterfaceNil() bool { return 
true } return false -} \ No newline at end of file +} diff --git a/process/rewardTransaction/process.go b/process/rewardTransaction/process.go index ad2fcc0d849..f7e578bded0 100644 --- a/process/rewardTransaction/process.go +++ b/process/rewardTransaction/process.go @@ -1,101 +1,101 @@ package rewardTransaction import ( - "math/big" + "math/big" - "github.com/ElrondNetwork/elrond-go/data/rewardTx" - "github.com/ElrondNetwork/elrond-go/data/state" - "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/sharding" ) type rewardTxProcessor struct { - accounts state.AccountsAdapter - adrConv state.AddressConverter - shardCoordinator sharding.Coordinator + accounts state.AccountsAdapter + adrConv state.AddressConverter + shardCoordinator sharding.Coordinator } // NewRewardTxProcessor creates a rewardTxProcessor instance // TODO: add unit tests func NewRewardTxProcessor( - accountsDB state.AccountsAdapter, - adrConv state.AddressConverter, - coordinator sharding.Coordinator, + accountsDB state.AccountsAdapter, + adrConv state.AddressConverter, + coordinator sharding.Coordinator, ) (*rewardTxProcessor, error) { - if accountsDB == nil { - return nil, process.ErrNilAccountsAdapter - } - if adrConv == nil { - return nil, process.ErrNilAddressConverter - } - if coordinator == nil { - return nil, process.ErrNilShardCoordinator - } - - return &rewardTxProcessor{ - accounts: accountsDB, - adrConv: adrConv, - shardCoordinator: coordinator, - }, nil + if accountsDB == nil { + return nil, process.ErrNilAccountsAdapter + } + if adrConv == nil { + return nil, process.ErrNilAddressConverter + } + if coordinator == nil { + return nil, process.ErrNilShardCoordinator + } + + return &rewardTxProcessor{ + accounts: accountsDB, + adrConv: adrConv, + shardCoordinator: coordinator, + }, nil } func (rtp *rewardTxProcessor) getAccountFromAddress(address []byte) (state.AccountHandler, error) { - adrSrc, err := rtp.adrConv.CreateAddressFromPublicKeyBytes(address) - if err != nil { - return nil, err - } - - shardForCurrentNode := rtp.shardCoordinator.SelfId() - shardForSrc := rtp.shardCoordinator.ComputeId(adrSrc) - if shardForCurrentNode != shardForSrc { - return nil, nil - } - - acnt, err := rtp.accounts.GetAccountWithJournal(adrSrc) - if err != nil { - return nil, err - } - - return acnt, nil + adrSrc, err := rtp.adrConv.CreateAddressFromPublicKeyBytes(address) + if err != nil { + return nil, err + } + + shardForCurrentNode := rtp.shardCoordinator.SelfId() + shardForSrc := rtp.shardCoordinator.ComputeId(adrSrc) + if shardForCurrentNode != shardForSrc { + return nil, nil + } + + acnt, err := rtp.accounts.GetAccountWithJournal(adrSrc) + if err != nil { + return nil, err + } + + return acnt, nil } // ProcessRewardTransaction updates the account state from the reward transaction func (rtp *rewardTxProcessor) ProcessRewardTransaction(rTx *rewardTx.RewardTx) error { - if rTx == nil { - return process.ErrNilRewardTransaction - } - - accHandler, err := rtp.getAccountFromAddress(rTx.RcvAddr) - if err != nil { - return err - } - if accHandler == nil || accHandler.IsInterfaceNil() { - return process.ErrNilSCDestAccount - } - - rewardAcc, ok := accHandler.(*state.Account) - if !ok { - return process.ErrWrongTypeAssertion - } - - if rTx.Value == nil { - return process.ErrNilValueFromRewardTransaction - } - - 
operation := big.NewInt(0) - operation = operation.Add(rTx.Value, rewardAcc.Balance) - err = rewardAcc.SetBalanceWithJournal(operation) - if err != nil { - return err - } - - return nil + if rTx == nil { + return process.ErrNilRewardTransaction + } + + accHandler, err := rtp.getAccountFromAddress(rTx.RcvAddr) + if err != nil { + return err + } + if accHandler == nil || accHandler.IsInterfaceNil() { + return process.ErrNilSCDestAccount + } + + rewardAcc, ok := accHandler.(*state.Account) + if !ok { + return process.ErrWrongTypeAssertion + } + + if rTx.Value == nil { + return process.ErrNilValueFromRewardTransaction + } + + operation := big.NewInt(0) + operation = operation.Add(rTx.Value, rewardAcc.Balance) + err = rewardAcc.SetBalanceWithJournal(operation) + if err != nil { + return err + } + + return nil } // IsInterfaceNil returns true if there is no value under the interface func (rtp *rewardTxProcessor) IsInterfaceNil() bool { - if rtp == nil { - return true - } - return false + if rtp == nil { + return true + } + return false } diff --git a/process/transaction/process.go b/process/transaction/process.go index 798109a3e11..1ff05165f06 100644 --- a/process/transaction/process.go +++ b/process/transaction/process.go @@ -1,19 +1,19 @@ package transaction import ( - "bytes" - "math/big" - "sync" - - "github.com/ElrondNetwork/elrond-go/core/logger" - "github.com/ElrondNetwork/elrond-go/data" - "github.com/ElrondNetwork/elrond-go/data/rewardTx" - "github.com/ElrondNetwork/elrond-go/data/state" - "github.com/ElrondNetwork/elrond-go/data/transaction" - "github.com/ElrondNetwork/elrond-go/hashing" - "github.com/ElrondNetwork/elrond-go/marshal" - "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/sharding" + "bytes" + "math/big" + "sync" + + "github.com/ElrondNetwork/elrond-go/core/logger" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/data/transaction" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/sharding" ) var log = logger.DefaultLogger() @@ -28,391 +28,391 @@ var mutTxFee sync.RWMutex // txProcessor implements TransactionProcessor interface and can modify account states according to a transaction type txProcessor struct { - accounts state.AccountsAdapter - adrConv state.AddressConverter - hasher hashing.Hasher - scProcessor process.SmartContractProcessor - marshalizer marshal.Marshalizer - rewardTxHandler process.UnsignedTxHandler - shardCoordinator sharding.Coordinator - txTypeHandler process.TxTypeHandler + accounts state.AccountsAdapter + adrConv state.AddressConverter + hasher hashing.Hasher + scProcessor process.SmartContractProcessor + marshalizer marshal.Marshalizer + rewardTxHandler process.UnsignedTxHandler + shardCoordinator sharding.Coordinator + txTypeHandler process.TxTypeHandler } // NewTxProcessor creates a new txProcessor engine func NewTxProcessor( - accounts state.AccountsAdapter, - hasher hashing.Hasher, - addressConv state.AddressConverter, - marshalizer marshal.Marshalizer, - shardCoordinator sharding.Coordinator, - scProcessor process.SmartContractProcessor, - rewardTxHandler process.UnsignedTxHandler, - txTypeHandler process.TxTypeHandler, + accounts state.AccountsAdapter, + hasher hashing.Hasher, + addressConv state.AddressConverter, + marshalizer marshal.Marshalizer, + 
shardCoordinator sharding.Coordinator, + scProcessor process.SmartContractProcessor, + rewardTxHandler process.UnsignedTxHandler, + txTypeHandler process.TxTypeHandler, ) (*txProcessor, error) { - if accounts == nil || accounts.IsInterfaceNil() { - return nil, process.ErrNilAccountsAdapter - } - if hasher == nil || hasher.IsInterfaceNil() { - return nil, process.ErrNilHasher - } - if addressConv == nil || addressConv.IsInterfaceNil() { - return nil, process.ErrNilAddressConverter - } - if marshalizer == nil || marshalizer.IsInterfaceNil() { - return nil, process.ErrNilMarshalizer - } - if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { - return nil, process.ErrNilShardCoordinator - } - if scProcessor == nil || scProcessor.IsInterfaceNil() { - return nil, process.ErrNilSmartContractProcessor - } - if rewardTxHandler == nil || rewardTxHandler.IsInterfaceNil() { - return nil, process.ErrNilUnsignedTxHandler - } - if txTypeHandler == nil || txTypeHandler.IsInterfaceNil() { - return nil, process.ErrNilTxTypeHandler - } - - return &txProcessor{ - accounts: accounts, - hasher: hasher, - adrConv: addressConv, - marshalizer: marshalizer, - shardCoordinator: shardCoordinator, - scProcessor: scProcessor, - rewardTxHandler: rewardTxHandler, - txTypeHandler: txTypeHandler, - }, nil + if accounts == nil || accounts.IsInterfaceNil() { + return nil, process.ErrNilAccountsAdapter + } + if hasher == nil || hasher.IsInterfaceNil() { + return nil, process.ErrNilHasher + } + if addressConv == nil || addressConv.IsInterfaceNil() { + return nil, process.ErrNilAddressConverter + } + if marshalizer == nil || marshalizer.IsInterfaceNil() { + return nil, process.ErrNilMarshalizer + } + if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { + return nil, process.ErrNilShardCoordinator + } + if scProcessor == nil || scProcessor.IsInterfaceNil() { + return nil, process.ErrNilSmartContractProcessor + } + if rewardTxHandler == nil || rewardTxHandler.IsInterfaceNil() { + return nil, process.ErrNilUnsignedTxHandler + } + if txTypeHandler == nil || txTypeHandler.IsInterfaceNil() { + return nil, process.ErrNilTxTypeHandler + } + + return &txProcessor{ + accounts: accounts, + hasher: hasher, + adrConv: addressConv, + marshalizer: marshalizer, + shardCoordinator: shardCoordinator, + scProcessor: scProcessor, + rewardTxHandler: rewardTxHandler, + txTypeHandler: txTypeHandler, + }, nil } // ProcessTransaction modifies the account states in respect with the transaction data func (txProc *txProcessor) ProcessTransaction(tx *transaction.Transaction, roundIndex uint64) error { - if tx == nil || tx.IsInterfaceNil() { - return process.ErrNilTransaction - } - - adrSrc, adrDst, err := txProc.getAddresses(tx) - if err != nil { - return err - } - - acntSnd, err := txProc.getAccountFromAddress(adrSrc) - if err != nil { - return err - } - - err = txProc.checkTxValues(tx, acntSnd) - if err != nil { - return err - } - - txType, err := txProc.txTypeHandler.ComputeTransactionType(tx) - if err != nil { - return err - } - - switch txType { - case process.MoveBalance: - return txProc.processMoveBalance(tx, adrSrc, adrDst) - case process.SCDeployment: - return txProc.processSCDeployment(tx, adrSrc, roundIndex) - case process.SCInvoking: - return txProc.processSCInvoking(tx, adrSrc, adrDst, roundIndex) - case process.RewardTx: - return txProc.processRewardTx(tx, adrSrc) - } - - return process.ErrWrongTransaction + if tx == nil || tx.IsInterfaceNil() { + return process.ErrNilTransaction + } + + adrSrc, adrDst, err := 
txProc.getAddresses(tx) + if err != nil { + return err + } + + acntSnd, err := txProc.getAccountFromAddress(adrSrc) + if err != nil { + return err + } + + err = txProc.checkTxValues(tx, acntSnd) + if err != nil { + return err + } + + txType, err := txProc.txTypeHandler.ComputeTransactionType(tx) + if err != nil { + return err + } + + switch txType { + case process.MoveBalance: + return txProc.processMoveBalance(tx, adrSrc, adrDst) + case process.SCDeployment: + return txProc.processSCDeployment(tx, adrSrc, roundIndex) + case process.SCInvoking: + return txProc.processSCInvoking(tx, adrSrc, adrDst, roundIndex) + case process.RewardTx: + return txProc.processRewardTx(tx, adrSrc) + } + + return process.ErrWrongTransaction } func (txProc *txProcessor) processTxFee(tx *transaction.Transaction, acntSnd *state.Account) (*big.Int, error) { - if acntSnd == nil { - return nil, nil - } - - cost := big.NewInt(0) - cost = cost.Mul(big.NewInt(0).SetUint64(tx.GasPrice), big.NewInt(0).SetUint64(tx.GasLimit)) - - txDataLen := int64(len(tx.Data)) - mutTxFee.RLock() - minFee := big.NewInt(0) - minFee = minFee.Mul(big.NewInt(txDataLen), big.NewInt(0).SetUint64(minGasPrice)) - minFee = minFee.Add(minFee, big.NewInt(0).SetUint64(minTxFee)) - mutTxFee.RUnlock() - - if minFee.Cmp(cost) > 0 { - return nil, process.ErrNotEnoughFeeInTransactions - } - - if acntSnd.Balance.Cmp(cost) < 0 { - return nil, process.ErrInsufficientFunds - } - - operation := big.NewInt(0) - err := acntSnd.SetBalanceWithJournal(operation.Sub(acntSnd.Balance, cost)) - if err != nil { - return nil, err - } - - return cost, nil + if acntSnd == nil { + return nil, nil + } + + cost := big.NewInt(0) + cost = cost.Mul(big.NewInt(0).SetUint64(tx.GasPrice), big.NewInt(0).SetUint64(tx.GasLimit)) + + txDataLen := int64(len(tx.Data)) + mutTxFee.RLock() + minFee := big.NewInt(0) + minFee = minFee.Mul(big.NewInt(txDataLen), big.NewInt(0).SetUint64(minGasPrice)) + minFee = minFee.Add(minFee, big.NewInt(0).SetUint64(minTxFee)) + mutTxFee.RUnlock() + + if minFee.Cmp(cost) > 0 { + return nil, process.ErrNotEnoughFeeInTransactions + } + + if acntSnd.Balance.Cmp(cost) < 0 { + return nil, process.ErrInsufficientFunds + } + + operation := big.NewInt(0) + err := acntSnd.SetBalanceWithJournal(operation.Sub(acntSnd.Balance, cost)) + if err != nil { + return nil, err + } + + return cost, nil } func (txProc *txProcessor) processRewardTx( - tx data.TransactionHandler, - adrSrc state.AddressContainer, + tx data.TransactionHandler, + adrSrc state.AddressContainer, ) error { - rTx, ok := tx.(*rewardTx.RewardTx) - if !ok { - return process.ErrWrongTypeAssertion - } - - acntSrc, _, err := txProc.getAccounts(adrSrc, adrSrc) - if err != nil { - return err - } - - // is sender address in node shard - if acntSrc != nil { - op := big.NewInt(0) - err := acntSrc.SetBalanceWithJournal(op.Add(acntSrc.Balance, rTx.Value)) - if err != nil { - return err - } - } - - if rTx.ShardId == txProc.shardCoordinator.SelfId() { - txProc.rewardTxHandler.AddRewardTxFromBlock(rTx) - } - - return nil + rTx, ok := tx.(*rewardTx.RewardTx) + if !ok { + return process.ErrWrongTypeAssertion + } + + acntSrc, _, err := txProc.getAccounts(adrSrc, adrSrc) + if err != nil { + return err + } + + // is sender address in node shard + if acntSrc != nil { + op := big.NewInt(0) + err := acntSrc.SetBalanceWithJournal(op.Add(acntSrc.Balance, rTx.Value)) + if err != nil { + return err + } + } + + if rTx.ShardId == txProc.shardCoordinator.SelfId() { + txProc.rewardTxHandler.AddRewardTxFromBlock(rTx) + } + + return nil 
} func (txProc *txProcessor) processMoveBalance( - tx *transaction.Transaction, - adrSrc, adrDst state.AddressContainer, + tx *transaction.Transaction, + adrSrc, adrDst state.AddressContainer, ) error { - // getAccounts returns acntSrc not nil if the adrSrc is in the node shard, the same, acntDst will be not nil - // if adrDst is in the node shard. If an error occurs it will be signaled in err variable. - acntSrc, acntDst, err := txProc.getAccounts(adrSrc, adrDst) - if err != nil { - return err - } + // getAccounts returns acntSrc not nil if the adrSrc is in the node shard, the same, acntDst will be not nil + // if adrDst is in the node shard. If an error occurs it will be signaled in err variable. + acntSrc, acntDst, err := txProc.getAccounts(adrSrc, adrDst) + if err != nil { + return err + } - txFee, err := txProc.processTxFee(tx, acntSrc) - if err != nil { - return err - } + txFee, err := txProc.processTxFee(tx, acntSrc) + if err != nil { + return err + } - value := tx.Value + value := tx.Value - err = txProc.moveBalances(acntSrc, acntDst, value) - if err != nil { - return err - } + err = txProc.moveBalances(acntSrc, acntDst, value) + if err != nil { + return err + } - // is sender address in node shard - if acntSrc != nil { - err = txProc.increaseNonce(acntSrc) - if err != nil { - return err - } - } + // is sender address in node shard + if acntSrc != nil { + err = txProc.increaseNonce(acntSrc) + if err != nil { + return err + } + } - txProc.rewardTxHandler.ProcessTransactionFee(txFee) + txProc.rewardTxHandler.ProcessTransactionFee(txFee) - return nil + return nil } func (txProc *txProcessor) processSCDeployment( - tx *transaction.Transaction, - adrSrc state.AddressContainer, - roundIndex uint64, + tx *transaction.Transaction, + adrSrc state.AddressContainer, + roundIndex uint64, ) error { - // getAccounts returns acntSrc not nil if the adrSrc is in the node shard, the same, acntDst will be not nil - // if adrDst is in the node shard. If an error occurs it will be signaled in err variable. - acntSrc, err := txProc.getAccountFromAddress(adrSrc) - if err != nil { - return err - } - - err = txProc.scProcessor.DeploySmartContract(tx, acntSrc, roundIndex) - return err + // getAccounts returns acntSrc not nil if the adrSrc is in the node shard, the same, acntDst will be not nil + // if adrDst is in the node shard. If an error occurs it will be signaled in err variable. + acntSrc, err := txProc.getAccountFromAddress(adrSrc) + if err != nil { + return err + } + + err = txProc.scProcessor.DeploySmartContract(tx, acntSrc, roundIndex) + return err } func (txProc *txProcessor) processSCInvoking( - tx *transaction.Transaction, - adrSrc, adrDst state.AddressContainer, - roundIndex uint64, + tx *transaction.Transaction, + adrSrc, adrDst state.AddressContainer, + roundIndex uint64, ) error { - // getAccounts returns acntSrc not nil if the adrSrc is in the node shard, the same, acntDst will be not nil - // if adrDst is in the node shard. If an error occurs it will be signaled in err variable. - acntSrc, acntDst, err := txProc.getAccounts(adrSrc, adrDst) - if err != nil { - return err - } - - err = txProc.scProcessor.ExecuteSmartContractTransaction(tx, acntSrc, acntDst, roundIndex) - return err + // getAccounts returns acntSrc not nil if the adrSrc is in the node shard, the same, acntDst will be not nil + // if adrDst is in the node shard. If an error occurs it will be signaled in err variable. 
+ acntSrc, acntDst, err := txProc.getAccounts(adrSrc, adrDst) + if err != nil { + return err + } + + err = txProc.scProcessor.ExecuteSmartContractTransaction(tx, acntSrc, acntDst, roundIndex) + return err } func (txProc *txProcessor) getAddresses( - tx *transaction.Transaction, + tx *transaction.Transaction, ) (state.AddressContainer, state.AddressContainer, error) { - //for now we assume that the address = public key - adrSrc, err := txProc.adrConv.CreateAddressFromPublicKeyBytes(tx.SndAddr) - if err != nil { - return nil, nil, err - } - - adrDst, err := txProc.adrConv.CreateAddressFromPublicKeyBytes(tx.RcvAddr) - if err != nil { - return nil, nil, err - } - - return adrSrc, adrDst, nil + //for now we assume that the address = public key + adrSrc, err := txProc.adrConv.CreateAddressFromPublicKeyBytes(tx.SndAddr) + if err != nil { + return nil, nil, err + } + + adrDst, err := txProc.adrConv.CreateAddressFromPublicKeyBytes(tx.RcvAddr) + if err != nil { + return nil, nil, err + } + + return adrSrc, adrDst, nil } func (txProc *txProcessor) getAccounts( - adrSrc, adrDst state.AddressContainer, + adrSrc, adrDst state.AddressContainer, ) (*state.Account, *state.Account, error) { - var acntSrc, acntDst *state.Account + var acntSrc, acntDst *state.Account - shardForCurrentNode := txProc.shardCoordinator.SelfId() - shardForSrc := txProc.shardCoordinator.ComputeId(adrSrc) - shardForDst := txProc.shardCoordinator.ComputeId(adrDst) + shardForCurrentNode := txProc.shardCoordinator.SelfId() + shardForSrc := txProc.shardCoordinator.ComputeId(adrSrc) + shardForDst := txProc.shardCoordinator.ComputeId(adrDst) - srcInShard := shardForSrc == shardForCurrentNode - dstInShard := shardForDst == shardForCurrentNode + srcInShard := shardForSrc == shardForCurrentNode + dstInShard := shardForDst == shardForCurrentNode - if srcInShard && adrSrc == nil || - dstInShard && adrDst == nil { - return nil, nil, process.ErrNilAddressContainer - } + if srcInShard && adrSrc == nil || + dstInShard && adrDst == nil { + return nil, nil, process.ErrNilAddressContainer + } - if bytes.Equal(adrSrc.Bytes(), adrDst.Bytes()) { - acntWrp, err := txProc.accounts.GetAccountWithJournal(adrSrc) - if err != nil { - return nil, nil, err - } + if bytes.Equal(adrSrc.Bytes(), adrDst.Bytes()) { + acntWrp, err := txProc.accounts.GetAccountWithJournal(adrSrc) + if err != nil { + return nil, nil, err + } - account, ok := acntWrp.(*state.Account) - if !ok { - return nil, nil, process.ErrWrongTypeAssertion - } + account, ok := acntWrp.(*state.Account) + if !ok { + return nil, nil, process.ErrWrongTypeAssertion + } - return account, account, nil - } + return account, account, nil + } - if srcInShard { - acntSrcWrp, err := txProc.accounts.GetAccountWithJournal(adrSrc) - if err != nil { - return nil, nil, err - } + if srcInShard { + acntSrcWrp, err := txProc.accounts.GetAccountWithJournal(adrSrc) + if err != nil { + return nil, nil, err + } - account, ok := acntSrcWrp.(*state.Account) - if !ok { - return nil, nil, process.ErrWrongTypeAssertion - } + account, ok := acntSrcWrp.(*state.Account) + if !ok { + return nil, nil, process.ErrWrongTypeAssertion + } - acntSrc = account - } + acntSrc = account + } - if dstInShard { - acntDstWrp, err := txProc.accounts.GetAccountWithJournal(adrDst) - if err != nil { - return nil, nil, err - } + if dstInShard { + acntDstWrp, err := txProc.accounts.GetAccountWithJournal(adrDst) + if err != nil { + return nil, nil, err + } - account, ok := acntDstWrp.(*state.Account) - if !ok { - return nil, nil, 
process.ErrWrongTypeAssertion - } + account, ok := acntDstWrp.(*state.Account) + if !ok { + return nil, nil, process.ErrWrongTypeAssertion + } - acntDst = account - } + acntDst = account + } - return acntSrc, acntDst, nil + return acntSrc, acntDst, nil } func (txProc *txProcessor) getAccountFromAddress(adrSrc state.AddressContainer) (state.AccountHandler, error) { - shardForCurrentNode := txProc.shardCoordinator.SelfId() - shardForSrc := txProc.shardCoordinator.ComputeId(adrSrc) - if shardForCurrentNode != shardForSrc { - return nil, nil - } - - acnt, err := txProc.accounts.GetAccountWithJournal(adrSrc) - if err != nil { - return nil, err - } - - return acnt, nil + shardForCurrentNode := txProc.shardCoordinator.SelfId() + shardForSrc := txProc.shardCoordinator.ComputeId(adrSrc) + if shardForCurrentNode != shardForSrc { + return nil, nil + } + + acnt, err := txProc.accounts.GetAccountWithJournal(adrSrc) + if err != nil { + return nil, err + } + + return acnt, nil } func (txProc *txProcessor) checkTxValues(tx *transaction.Transaction, acntSnd state.AccountHandler) error { - if acntSnd == nil || acntSnd.IsInterfaceNil() { - // transaction was already done at sender shard - return nil - } - - if acntSnd.GetNonce() < tx.Nonce { - return process.ErrHigherNonceInTransaction - } - if acntSnd.GetNonce() > tx.Nonce { - return process.ErrLowerNonceInTransaction - } - - cost := big.NewInt(0) - cost = cost.Mul(big.NewInt(0).SetUint64(tx.GasPrice), big.NewInt(0).SetUint64(tx.GasLimit)) - cost = cost.Add(cost, tx.Value) - - if cost.Cmp(big.NewInt(0)) == 0 { - return nil - } - - stAcc, ok := acntSnd.(*state.Account) - if !ok { - return process.ErrWrongTypeAssertion - } - - if stAcc.Balance.Cmp(cost) < 0 { - return process.ErrInsufficientFunds - } - - return nil + if acntSnd == nil || acntSnd.IsInterfaceNil() { + // transaction was already done at sender shard + return nil + } + + if acntSnd.GetNonce() < tx.Nonce { + return process.ErrHigherNonceInTransaction + } + if acntSnd.GetNonce() > tx.Nonce { + return process.ErrLowerNonceInTransaction + } + + cost := big.NewInt(0) + cost = cost.Mul(big.NewInt(0).SetUint64(tx.GasPrice), big.NewInt(0).SetUint64(tx.GasLimit)) + cost = cost.Add(cost, tx.Value) + + if cost.Cmp(big.NewInt(0)) == 0 { + return nil + } + + stAcc, ok := acntSnd.(*state.Account) + if !ok { + return process.ErrWrongTypeAssertion + } + + if stAcc.Balance.Cmp(cost) < 0 { + return process.ErrInsufficientFunds + } + + return nil } func (txProc *txProcessor) moveBalances(acntSrc, acntDst *state.Account, - value *big.Int, + value *big.Int, ) error { - operation1 := big.NewInt(0) - operation2 := big.NewInt(0) - - // is sender address in node shard - if acntSrc != nil { - err := acntSrc.SetBalanceWithJournal(operation1.Sub(acntSrc.Balance, value)) - if err != nil { - return err - } - } - - // is receiver address in node shard - if acntDst != nil { - err := acntDst.SetBalanceWithJournal(operation2.Add(acntDst.Balance, value)) - if err != nil { - return err - } - } - - return nil + operation1 := big.NewInt(0) + operation2 := big.NewInt(0) + + // is sender address in node shard + if acntSrc != nil { + err := acntSrc.SetBalanceWithJournal(operation1.Sub(acntSrc.Balance, value)) + if err != nil { + return err + } + } + + // is receiver address in node shard + if acntDst != nil { + err := acntDst.SetBalanceWithJournal(operation2.Add(acntDst.Balance, value)) + if err != nil { + return err + } + } + + return nil } func (txProc *txProcessor) increaseNonce(acntSrc *state.Account) error { - return 
acntSrc.SetNonceWithJournal(acntSrc.Nonce + 1) + return acntSrc.SetNonceWithJournal(acntSrc.Nonce + 1) } // IsInterfaceNil returns true if there is no value under the interface diff --git a/sharding/mock/hasherMock.go b/sharding/mock/hasherMock.go index 2a917bdb13e..0218936b5c0 100644 --- a/sharding/mock/hasherMock.go +++ b/sharding/mock/hasherMock.go @@ -34,4 +34,4 @@ func (sha *HasherMock) IsInterfaceNil() bool { return true } return false -} \ No newline at end of file +} diff --git a/sharding/mock/hasherStub.go b/sharding/mock/hasherStub.go index 9589b18a98e..216fc9d9909 100644 --- a/sharding/mock/hasherStub.go +++ b/sharding/mock/hasherStub.go @@ -27,4 +27,4 @@ func (hs *HasherStub) IsInterfaceNil() bool { return true } return false -} \ No newline at end of file +} From a96d3514b49979e5e22c9f498b346b46a5b4454b Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Wed, 11 Sep 2019 12:16:49 +0300 Subject: [PATCH 095/234] consensus, process, integrationTests: goimports and rename --- consensus/mock/blockProcessorMock.go | 2 +- .../spos/commonSubround/subroundStartRound.go | 326 +++++++++--------- integrationTests/mock/blockProcessorMock.go | 2 +- .../transaction/interceptedResolvedTx_test.go | 2 +- .../testProcessorNodeWithMultisigner.go | 2 +- node/mock/blockProcessorStub.go | 2 +- process/block/metablock.go | 4 +- process/block/shardblock.go | 6 +- process/interface.go | 2 +- process/mock/blockProcessorMock.go | 2 +- 10 files changed, 175 insertions(+), 175 deletions(-) diff --git a/consensus/mock/blockProcessorMock.go b/consensus/mock/blockProcessorMock.go index 02e28afe9f5..7d782fe1078 100644 --- a/consensus/mock/blockProcessorMock.go +++ b/consensus/mock/blockProcessorMock.go @@ -67,7 +67,7 @@ func (blProcMock BlockProcessorMock) AddLastNotarizedHdr(shardId uint32, process blProcMock.AddLastNotarizedHdrCalled(shardId, processedHdr) } -func (blProcMock BlockProcessorMock) SetConsensusRewardAddresses(consensusRewardAddresses []string, round uint64) { +func (blProcMock BlockProcessorMock) SetConsensusData(consensusRewardAddresses []string, round uint64) { } // IsInterfaceNil returns true if there is no value under the interface diff --git a/consensus/spos/commonSubround/subroundStartRound.go b/consensus/spos/commonSubround/subroundStartRound.go index 3272ade3ce5..4dc9a782458 100644 --- a/consensus/spos/commonSubround/subroundStartRound.go +++ b/consensus/spos/commonSubround/subroundStartRound.go @@ -1,239 +1,239 @@ package commonSubround import ( - "encoding/hex" - "fmt" - "time" - - "github.com/ElrondNetwork/elrond-go/consensus/spos" - "github.com/ElrondNetwork/elrond-go/core" - "github.com/ElrondNetwork/elrond-go/core/logger" - "github.com/ElrondNetwork/elrond-go/statusHandler" + "encoding/hex" + "fmt" + "time" + + "github.com/ElrondNetwork/elrond-go/consensus/spos" + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/core/logger" + "github.com/ElrondNetwork/elrond-go/statusHandler" ) var log = logger.DefaultLogger() // SubroundStartRound defines the data needed by the subround StartRound type SubroundStartRound struct { - *spos.Subround - processingThresholdPercentage int - getSubroundName func(subroundId int) string - executeStoredMessages func() - broadcastUnnotarisedBlocks func() + *spos.Subround + processingThresholdPercentage int + getSubroundName func(subroundId int) string + executeStoredMessages func() + broadcastUnnotarisedBlocks func() - appStatusHandler core.AppStatusHandler + appStatusHandler core.AppStatusHandler } // NewSubroundStartRound creates 
a SubroundStartRound object func NewSubroundStartRound( - baseSubround *spos.Subround, - extend func(subroundId int), - processingThresholdPercentage int, - getSubroundName func(subroundId int) string, - executeStoredMessages func(), - broadcastUnnotarisedBlocks func(), + baseSubround *spos.Subround, + extend func(subroundId int), + processingThresholdPercentage int, + getSubroundName func(subroundId int) string, + executeStoredMessages func(), + broadcastUnnotarisedBlocks func(), ) (*SubroundStartRound, error) { - err := checkNewSubroundStartRoundParams( - baseSubround, - broadcastUnnotarisedBlocks, - ) - if err != nil { - return nil, err - } - - srStartRound := SubroundStartRound{ - baseSubround, - processingThresholdPercentage, - getSubroundName, - executeStoredMessages, - broadcastUnnotarisedBlocks, - statusHandler.NewNilStatusHandler(), - } - srStartRound.Job = srStartRound.doStartRoundJob - srStartRound.Check = srStartRound.doStartRoundConsensusCheck - srStartRound.Extend = extend - - return &srStartRound, nil + err := checkNewSubroundStartRoundParams( + baseSubround, + broadcastUnnotarisedBlocks, + ) + if err != nil { + return nil, err + } + + srStartRound := SubroundStartRound{ + baseSubround, + processingThresholdPercentage, + getSubroundName, + executeStoredMessages, + broadcastUnnotarisedBlocks, + statusHandler.NewNilStatusHandler(), + } + srStartRound.Job = srStartRound.doStartRoundJob + srStartRound.Check = srStartRound.doStartRoundConsensusCheck + srStartRound.Extend = extend + + return &srStartRound, nil } func checkNewSubroundStartRoundParams( - baseSubround *spos.Subround, - broadcastUnnotarisedBlocks func(), + baseSubround *spos.Subround, + broadcastUnnotarisedBlocks func(), ) error { - if baseSubround == nil { - return spos.ErrNilSubround - } - if baseSubround.ConsensusState == nil { - return spos.ErrNilConsensusState - } - if broadcastUnnotarisedBlocks == nil { - return spos.ErrNilBroadcastUnnotarisedBlocks - } - - err := spos.ValidateConsensusCore(baseSubround.ConsensusCoreHandler) - - return err + if baseSubround == nil { + return spos.ErrNilSubround + } + if baseSubround.ConsensusState == nil { + return spos.ErrNilConsensusState + } + if broadcastUnnotarisedBlocks == nil { + return spos.ErrNilBroadcastUnnotarisedBlocks + } + + err := spos.ValidateConsensusCore(baseSubround.ConsensusCoreHandler) + + return err } // SetAppStatusHandler method set appStatusHandler func (sr *SubroundStartRound) SetAppStatusHandler(ash core.AppStatusHandler) error { - if ash == nil || ash.IsInterfaceNil() { - return spos.ErrNilAppStatusHandler - } + if ash == nil || ash.IsInterfaceNil() { + return spos.ErrNilAppStatusHandler + } - sr.appStatusHandler = ash - return nil + sr.appStatusHandler = ash + return nil } // doStartRoundJob method does the job of the subround StartRound func (sr *SubroundStartRound) doStartRoundJob() bool { - sr.ResetConsensusState() - sr.RoundIndex = sr.Rounder().Index() - sr.RoundTimeStamp = sr.Rounder().TimeStamp() - return true + sr.ResetConsensusState() + sr.RoundIndex = sr.Rounder().Index() + sr.RoundTimeStamp = sr.Rounder().TimeStamp() + return true } // doStartRoundConsensusCheck method checks if the consensus is achieved in the subround StartRound func (sr *SubroundStartRound) doStartRoundConsensusCheck() bool { - if sr.RoundCanceled { - return false - } + if sr.RoundCanceled { + return false + } - if sr.Status(sr.Current()) == spos.SsFinished { - return true - } + if sr.Status(sr.Current()) == spos.SsFinished { + return true + } - if sr.initCurrentRound() 
{ - return true - } + if sr.initCurrentRound() { + return true + } - return false + return false } func (sr *SubroundStartRound) initCurrentRound() bool { - if sr.BootStrapper().ShouldSync() { // if node is not synchronized yet, it has to continue the bootstrapping mechanism - return false - } - sr.appStatusHandler.SetStringValue(core.MetricConsensusRoundState, "") + if sr.BootStrapper().ShouldSync() { // if node is not synchronized yet, it has to continue the bootstrapping mechanism + return false + } + sr.appStatusHandler.SetStringValue(core.MetricConsensusRoundState, "") - err := sr.generateNextConsensusGroup(sr.Rounder().Index()) - if err != nil { - log.Error(err.Error()) + err := sr.generateNextConsensusGroup(sr.Rounder().Index()) + if err != nil { + log.Error(err.Error()) - sr.RoundCanceled = true + sr.RoundCanceled = true - return false - } + return false + } - leader, err := sr.GetLeader() - if err != nil { - log.Error(err.Error()) + leader, err := sr.GetLeader() + if err != nil { + log.Error(err.Error()) - sr.RoundCanceled = true + sr.RoundCanceled = true - return false - } + return false + } - msg := "" - if leader == sr.SelfPubKey() { - sr.appStatusHandler.Increment(core.MetricCountLeader) - sr.appStatusHandler.SetStringValue(core.MetricConsensusRoundState, "proposed") - sr.appStatusHandler.SetStringValue(core.MetricConsensusState, "proposer") - msg = " (my turn)" - } + msg := "" + if leader == sr.SelfPubKey() { + sr.appStatusHandler.Increment(core.MetricCountLeader) + sr.appStatusHandler.SetStringValue(core.MetricConsensusRoundState, "proposed") + sr.appStatusHandler.SetStringValue(core.MetricConsensusState, "proposer") + msg = " (my turn)" + } - log.Info(fmt.Sprintf("%sStep 0: preparing for this round with leader %s%s\n", - sr.SyncTimer().FormattedCurrentTime(), core.GetTrimmedPk(hex.EncodeToString([]byte(leader))), msg)) + log.Info(fmt.Sprintf("%sStep 0: preparing for this round with leader %s%s\n", + sr.SyncTimer().FormattedCurrentTime(), core.GetTrimmedPk(hex.EncodeToString([]byte(leader))), msg)) - pubKeys := sr.ConsensusGroup() + pubKeys := sr.ConsensusGroup() - selfIndex, err := sr.SelfConsensusGroupIndex() - if err != nil { - log.Info(fmt.Sprintf("%scanceled round %d in subround %s, not in the consensus group\n", - sr.SyncTimer().FormattedCurrentTime(), sr.Rounder().Index(), sr.getSubroundName(sr.Current()))) + selfIndex, err := sr.SelfConsensusGroupIndex() + if err != nil { + log.Info(fmt.Sprintf("%scanceled round %d in subround %s, not in the consensus group\n", + sr.SyncTimer().FormattedCurrentTime(), sr.Rounder().Index(), sr.getSubroundName(sr.Current()))) - sr.RoundCanceled = true + sr.RoundCanceled = true - sr.appStatusHandler.SetStringValue(core.MetricConsensusState, "not in consensus group") + sr.appStatusHandler.SetStringValue(core.MetricConsensusState, "not in consensus group") - return false - } + return false + } - sr.appStatusHandler.Increment(core.MetricCountConsensus) - sr.appStatusHandler.SetStringValue(core.MetricConsensusState, "participant") + sr.appStatusHandler.Increment(core.MetricCountConsensus) + sr.appStatusHandler.SetStringValue(core.MetricConsensusState, "participant") - err = sr.MultiSigner().Reset(pubKeys, uint16(selfIndex)) - if err != nil { - log.Error(err.Error()) + err = sr.MultiSigner().Reset(pubKeys, uint16(selfIndex)) + if err != nil { + log.Error(err.Error()) - sr.RoundCanceled = true + sr.RoundCanceled = true - return false - } + return false + } - startTime := time.Time{} - startTime = sr.RoundTimeStamp - maxTime := 
sr.Rounder().TimeDuration() * time.Duration(sr.processingThresholdPercentage) / 100 - if sr.Rounder().RemainingTime(startTime, maxTime) < 0 { - log.Info(fmt.Sprintf("%scanceled round %d in subround %s, time is out\n", - sr.SyncTimer().FormattedCurrentTime(), sr.Rounder().Index(), sr.getSubroundName(sr.Current()))) + startTime := time.Time{} + startTime = sr.RoundTimeStamp + maxTime := sr.Rounder().TimeDuration() * time.Duration(sr.processingThresholdPercentage) / 100 + if sr.Rounder().RemainingTime(startTime, maxTime) < 0 { + log.Info(fmt.Sprintf("%scanceled round %d in subround %s, time is out\n", + sr.SyncTimer().FormattedCurrentTime(), sr.Rounder().Index(), sr.getSubroundName(sr.Current()))) - sr.RoundCanceled = true + sr.RoundCanceled = true - return false - } + return false + } - sr.SetStatus(sr.Current(), spos.SsFinished) + sr.SetStatus(sr.Current(), spos.SsFinished) - if leader == sr.SelfPubKey() { - //TODO: Should be analyzed if call of sr.broadcastUnnotarisedBlocks() is still necessary - } + if leader == sr.SelfPubKey() { + //TODO: Should be analyzed if call of sr.broadcastUnnotarisedBlocks() is still necessary + } - // execute stored messages which were received in this new round but before this initialisation - go sr.executeStoredMessages() + // execute stored messages which were received in this new round but before this initialisation + go sr.executeStoredMessages() - return true + return true } func (sr *SubroundStartRound) generateNextConsensusGroup(roundIndex int64) error { - currentHeader := sr.Blockchain().GetCurrentBlockHeader() - if currentHeader == nil { - currentHeader = sr.Blockchain().GetGenesisHeader() - if currentHeader == nil { - return spos.ErrNilHeader - } - } + currentHeader := sr.Blockchain().GetCurrentBlockHeader() + if currentHeader == nil { + currentHeader = sr.Blockchain().GetGenesisHeader() + if currentHeader == nil { + return spos.ErrNilHeader + } + } - randomSeed := currentHeader.GetRandSeed() + randomSeed := currentHeader.GetRandSeed() - log.Info(fmt.Sprintf("random source used to determine the next consensus group is: %s\n", - core.ToB64(randomSeed)), - ) + log.Info(fmt.Sprintf("random source used to determine the next consensus group is: %s\n", + core.ToB64(randomSeed)), + ) - shardId := sr.ShardCoordinator().SelfId() + shardId := sr.ShardCoordinator().SelfId() - nextConsensusGroup, rewardsAddresses, err := sr.GetNextConsensusGroup( - randomSeed, - uint64(sr.RoundIndex), - shardId, - sr.NodesCoordinator(), - ) - if err != nil { - return err - } + nextConsensusGroup, rewardsAddresses, err := sr.GetNextConsensusGroup( + randomSeed, + uint64(sr.RoundIndex), + shardId, + sr.NodesCoordinator(), + ) + if err != nil { + return err + } - log.Info(fmt.Sprintf("consensus group for round %d is formed by next validators:\n", - roundIndex)) + log.Info(fmt.Sprintf("consensus group for round %d is formed by next validators:\n", + roundIndex)) - for i := 0; i < len(nextConsensusGroup); i++ { - log.Info(fmt.Sprintf("%s", core.GetTrimmedPk(hex.EncodeToString([]byte(nextConsensusGroup[i]))))) - } + for i := 0; i < len(nextConsensusGroup); i++ { + log.Info(fmt.Sprintf("%s", core.GetTrimmedPk(hex.EncodeToString([]byte(nextConsensusGroup[i]))))) + } - log.Info(fmt.Sprintf("\n")) + log.Info(fmt.Sprintf("\n")) - sr.SetConsensusGroup(nextConsensusGroup) + sr.SetConsensusGroup(nextConsensusGroup) - sr.BlockProcessor().SetConsensusRewardAddresses(rewardsAddresses, uint64(sr.RoundIndex)) + sr.BlockProcessor().SetConsensusData(rewardsAddresses, uint64(sr.RoundIndex)) - return 
nil + return nil } diff --git a/integrationTests/mock/blockProcessorMock.go b/integrationTests/mock/blockProcessorMock.go index 67338af603c..95ff3d78bbf 100644 --- a/integrationTests/mock/blockProcessorMock.go +++ b/integrationTests/mock/blockProcessorMock.go @@ -93,7 +93,7 @@ func (blProcMock BlockProcessorMock) AddLastNotarizedHdr(shardId uint32, process blProcMock.AddLastNotarizedHdrCalled(shardId, processedHdr) } -func (blProcMock BlockProcessorMock) SetConsensusRewardAddresses(consensusRewardAddresses []string, round uint64) { +func (blProcMock BlockProcessorMock) SetConsensusData(consensusRewardAddresses []string, round uint64) { if blProcMock.SetConsensusRewardAddressesCalled != nil { blProcMock.SetConsensusRewardAddressesCalled(consensusRewardAddresses) } diff --git a/integrationTests/singleShard/transaction/interceptedResolvedTx_test.go b/integrationTests/singleShard/transaction/interceptedResolvedTx_test.go index 64e527f794c..76a0bbfbd7a 100644 --- a/integrationTests/singleShard/transaction/interceptedResolvedTx_test.go +++ b/integrationTests/singleShard/transaction/interceptedResolvedTx_test.go @@ -2,13 +2,13 @@ package transaction import ( "fmt" - "github.com/ElrondNetwork/elrond-go/data/rewardTx" "math/big" "reflect" "testing" "time" "github.com/ElrondNetwork/elrond-go/crypto/signing/kyber/singlesig" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" "github.com/ElrondNetwork/elrond-go/data/transaction" "github.com/ElrondNetwork/elrond-go/integrationTests" "github.com/ElrondNetwork/elrond-go/process" diff --git a/integrationTests/testProcessorNodeWithMultisigner.go b/integrationTests/testProcessorNodeWithMultisigner.go index b2853b81148..d99b7f7bd07 100644 --- a/integrationTests/testProcessorNodeWithMultisigner.go +++ b/integrationTests/testProcessorNodeWithMultisigner.go @@ -125,7 +125,7 @@ func ProposeBlockWithConsensusSignature( // set the consensus reward addresses for _, node := range nodesMap[shardId] { - node.BlockProcessor.SetConsensusRewardAddresses(adddresses, round) + node.BlockProcessor.SetConsensusData(adddresses, round) } consensusNodes := selectTestNodesForPubKeys(nodesMap[shardId], pubKeys) diff --git a/node/mock/blockProcessorStub.go b/node/mock/blockProcessorStub.go index 64560dbc1e7..04a97928844 100644 --- a/node/mock/blockProcessorStub.go +++ b/node/mock/blockProcessorStub.go @@ -72,7 +72,7 @@ func (blProcMock BlockProcessorStub) AddLastNotarizedHdr(shardId uint32, process blProcMock.AddLastNotarizedHdrCalled(shardId, processedHdr) } -func (blProcMock BlockProcessorStub) SetConsensusRewardAddresses([]string, uint64) { +func (blProcMock BlockProcessorStub) SetConsensusData([]string, uint64) { panic("implement me") } diff --git a/process/block/metablock.go b/process/block/metablock.go index 6abb60da97b..350ec578070 100644 --- a/process/block/metablock.go +++ b/process/block/metablock.go @@ -224,8 +224,8 @@ func (mp *metaProcessor) ProcessBlock( return nil } -// SetConsensusRewardAddresses - sets the reward addresses for the current consensus group -func (mp *metaProcessor) SetConsensusRewardAddresses(consensusRewardAddresses []string, round uint64) { +// SetConsensusData - sets the reward addresses for the current consensus group +func (mp *metaProcessor) SetConsensusData(consensusRewardAddresses []string, round uint64) { // TODO set the reward addresses for metachain consensus nodes } diff --git a/process/block/shardblock.go b/process/block/shardblock.go index 06a519011d7..0400a1fb57d 100644 --- a/process/block/shardblock.go +++ 
b/process/block/shardblock.go @@ -199,7 +199,7 @@ func (sp *shardProcessor) ProcessBlock( return err } - sp.SetConsensusRewardAddresses(consensusAddresses, headerHandler.GetRound()) + sp.SetConsensusData(consensusAddresses, headerHandler.GetRound()) sp.txCoordinator.CreateBlockStarted() sp.txCoordinator.RequestBlockTransactions(body) requestedMetaHdrs, requestedFinalMetaHdrs := sp.requestMetaHeaders(header) @@ -268,8 +268,8 @@ func (sp *shardProcessor) ProcessBlock( return nil } -// SetConsensusRewardAddresses - sets the reward addresses for the current consensus group -func (sp *shardProcessor) SetConsensusRewardAddresses(consensusRewardAddresses []string, round uint64) { +// SetConsensusData - sets the reward addresses for the current consensus group +func (sp *shardProcessor) SetConsensusData(consensusRewardAddresses []string, round uint64) { sp.specialAddressHandler.SetConsensusData(consensusRewardAddresses, round, 0) } diff --git a/process/interface.go b/process/interface.go index 53059f4c4d5..3f9a9d8e498 100644 --- a/process/interface.go +++ b/process/interface.go @@ -162,7 +162,7 @@ type BlockProcessor interface { DecodeBlockBody(dta []byte) data.BodyHandler DecodeBlockHeader(dta []byte) data.HeaderHandler AddLastNotarizedHdr(shardId uint32, processedHdr data.HeaderHandler) - SetConsensusRewardAddresses(consensusRewardAddresses []string, round uint64) + SetConsensusData(consensusRewardAddresses []string, round uint64) IsInterfaceNil() bool } diff --git a/process/mock/blockProcessorMock.go b/process/mock/blockProcessorMock.go index 3e3687761d0..a2127363b25 100644 --- a/process/mock/blockProcessorMock.go +++ b/process/mock/blockProcessorMock.go @@ -67,7 +67,7 @@ func (blProcMock BlockProcessorMock) AddLastNotarizedHdr(shardId uint32, process blProcMock.AddLastNotarizedHdrCalled(shardId, processedHdr) } -func (blProcMock BlockProcessorMock) SetConsensusRewardAddresses([]string, uint64) { +func (blProcMock BlockProcessorMock) SetConsensusData([]string, uint64) { panic("implement me") } From 116b14ec7e9a141181163210306341e669cbcb76 Mon Sep 17 00:00:00 2001 From: miiu96 Date: Thu, 12 Sep 2019 14:06:50 +0300 Subject: [PATCH 096/234] EN-3887 : clean txs pools branch --- go.sum | 2 - process/block/poolsclean.go | 127 ++++++++++++++++++ process/block/poolsclean_test.go | 216 +++++++++++++++++++++++++++++++ process/block/shardblock.go | 41 ++++-- process/interface.go | 6 + 5 files changed, 382 insertions(+), 10 deletions(-) create mode 100644 process/block/poolsclean.go create mode 100644 process/block/poolsclean_test.go diff --git a/go.sum b/go.sum index 5f960aa4f05..7feecd1ae9f 100644 --- a/go.sum +++ b/go.sum @@ -476,8 +476,6 @@ golang.org/x/crypto v0.0.0-20190225124518-7f87c0fbb88b/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5 h1:58fnuSXlxZmFdJyvtTFVmVhcMLU6v5fEb/ok4wyqtNU= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190618222545-ea8f1a30c443 h1:IcSOAf4PyMp3U3XbIEj1/xJ2BjNN2jWv7JoyOsMxXUU= golang.org/x/crypto v0.0.0-20190618222545-ea8f1a30c443/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto 
v0.0.0-20190829043050-9756ffdc2472 h1:Gv7RPwsi3eZ2Fgewe3CBsuOebPwO27PoXzRpJPsvSSM= diff --git a/process/block/poolsclean.go b/process/block/poolsclean.go new file mode 100644 index 00000000000..ac6b20ff6c7 --- /dev/null +++ b/process/block/poolsclean.go @@ -0,0 +1,127 @@ +package block + +import ( + "sync" + + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/data/state/addressConverters" + "github.com/ElrondNetwork/elrond-go/data/transaction" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/sharding" +) + +// TxPoolsCleaner represents a pools cleaner that check if a transaction should be in pool +type TxPoolsCleaner struct { + accounts state.AccountsAdapter + shardCoordinator sharding.Coordinator + dataPool dataRetriever.PoolsHolder + addrConverter *addressConverters.PlainAddressConverter + numRemovedTxs uint64 + mutNumRemovedTxs sync.RWMutex +} + +// NewTxsPoolsCleaner will return a new transaction pools cleaner +func NewTxsPoolsCleaner( + accounts state.AccountsAdapter, + shardCoordinator sharding.Coordinator, + dataPool dataRetriever.PoolsHolder, +) (*TxPoolsCleaner, error) { + if accounts == nil || accounts.IsInterfaceNil() { + return nil, process.ErrNilAccountsAdapter + } + if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { + return nil, process.ErrNilShardCoordinator + } + if dataPool == nil { + return nil, process.ErrNilDataPoolHolder + } + transactionPool := dataPool.Transactions() + if transactionPool == nil { + return nil, process.ErrNilTransactionPool + } + addrConverter, err := addressConverters.NewPlainAddressConverter(32, "0x") + if err != nil { + return nil, err + } + + return &TxPoolsCleaner{ + accounts: accounts, + shardCoordinator: shardCoordinator, + dataPool: dataPool, + addrConverter: addrConverter, + numRemovedTxs: 0, + }, nil +} + +// Clean will check if in pools exits transactions with nonce low that transaction sender account nonce +// and if tx have low nonce will be removed from pools +func (tpc *TxPoolsCleaner) Clean(haveTime func() bool) error { + if haveTime == nil { + return process.ErrNilHaveTimeHandler + } + + shardId := tpc.shardCoordinator.SelfId() + transactions := tpc.dataPool.Transactions() + numOfShards := tpc.shardCoordinator.NumberOfShards() + + for destShardId := uint32(0); destShardId < numOfShards; destShardId++ { + cacherId := process.ShardCacherIdentifier(shardId, destShardId) + txsPool := transactions.ShardDataStore(cacherId) + + for _, key := range txsPool.Keys() { + if !haveTime() { + return nil + } + + obj, ok := txsPool.Peek(key) + if !ok { + continue + } + + tx, ok := obj.(*transaction.Transaction) + if !ok { + continue + } + + sndAddr := tx.GetSndAddress() + addr, err := tpc.addrConverter.CreateAddressFromPublicKeyBytes(sndAddr) + if err != nil { + txsPool.Remove(key) + tpc.incrementNumRemovedTxs() + continue + } + + accountHandler, err := tpc.accounts.GetExistingAccount(addr) + if err != nil { + txsPool.Remove(key) + tpc.incrementNumRemovedTxs() + continue + } + + accountNonce := accountHandler.GetNonce() + txNonce := tx.Nonce + lowerNonceInTx := txNonce < accountNonce + if lowerNonceInTx { + txsPool.Remove(key) + tpc.incrementNumRemovedTxs() + } + } + } + + return nil +} + +func (tpc *TxPoolsCleaner) incrementNumRemovedTxs() { + tpc.mutNumRemovedTxs.Lock() + tpc.numRemovedTxs++ + tpc.mutNumRemovedTxs.Unlock() +} + +// NumRemovedTxs will return the number of removed txs from pools +func (tpc 
*TxPoolsCleaner) NumRemovedTxs() uint64 { + tpc.mutNumRemovedTxs.Lock() + defer tpc.mutNumRemovedTxs.Unlock() + + return tpc.numRemovedTxs +} diff --git a/process/block/poolsclean_test.go b/process/block/poolsclean_test.go new file mode 100644 index 00000000000..ec43f2f4796 --- /dev/null +++ b/process/block/poolsclean_test.go @@ -0,0 +1,216 @@ +package block_test + +import ( + "math/big" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go/data/smartContractResult" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/data/transaction" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/block" + "github.com/ElrondNetwork/elrond-go/process/mock" + "github.com/ElrondNetwork/elrond-go/storage" + "github.com/stretchr/testify/assert" +) + +func getAccAdapter(nonce uint64, balance *big.Int) *mock.AccountsStub { + accDB := &mock.AccountsStub{} + accDB.GetExistingAccountCalled = func(addressContainer state.AddressContainer) (handler state.AccountHandler, e error) { + return &state.Account{Nonce: nonce, Balance: balance}, nil + } + + return accDB +} + +func initDataPoolTransactions() *mock.PoolsHolderStub { + return &mock.PoolsHolderStub{ + TransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{ + RegisterHandlerCalled: func(i func(key []byte)) {}, + ShardDataStoreCalled: func(id string) (c storage.Cacher) { + return &mock.CacherStub{ + PeekCalled: func(key []byte) (value interface{}, ok bool) { + switch string(key) { + case "key1": + time.Sleep(time.Second) + return &transaction.Transaction{Nonce: 10}, true + case "key2": + return &transaction.Transaction{ + Nonce: 10, + SndAddr: []byte("address_address_address_address_"), + }, true + case "key3": + return &smartContractResult.SmartContractResult{}, true + default: + return nil, false + } + }, + KeysCalled: func() [][]byte { + return [][]byte{[]byte("key1"), []byte("key2"), []byte("key3"), []byte("key4")} + }, + LenCalled: func() int { + return 0 + }, + RemoveCalled: func(key []byte) { + return + }, + } + }, + } + }, + } +} + +func TestNewTxsPoolsCleaner_NilAccountsShouldErr(t *testing.T) { + t.Parallel() + + shardCoordinator := mock.NewOneShardCoordinatorMock() + tdp := initDataPool([]byte("test")) + txsPoolsCleaner, err := block.NewTxsPoolsCleaner(nil, shardCoordinator, tdp) + + assert.Nil(t, txsPoolsCleaner) + assert.Equal(t, process.ErrNilAccountsAdapter, err) +} + +func TestNewTxsPoolsCleaner_NilShardCoordinatorShouldErr(t *testing.T) { + t.Parallel() + + nonce := uint64(1) + balance := big.NewInt(1) + accounts := getAccAdapter(nonce, balance) + tdp := initDataPool([]byte("test")) + txsPoolsCleaner, err := block.NewTxsPoolsCleaner(accounts, nil, tdp) + + assert.Nil(t, txsPoolsCleaner) + assert.Equal(t, process.ErrNilShardCoordinator, err) +} + +func TestNewTxsPoolsCleaner_NilDataPoolShouldErr(t *testing.T) { + t.Parallel() + + nonce := uint64(1) + balance := big.NewInt(1) + accounts := getAccAdapter(nonce, balance) + shardCoordinator := mock.NewOneShardCoordinatorMock() + txsPoolsCleaner, err := block.NewTxsPoolsCleaner(accounts, shardCoordinator, nil) + + assert.Nil(t, txsPoolsCleaner) + assert.Equal(t, process.ErrNilDataPoolHolder, err) +} + +func TestNewTxsPoolsCleaner_NilTransactionPoolShouldErr(t *testing.T) { + t.Parallel() + + nonce := uint64(1) + balance := big.NewInt(1) + accounts := getAccAdapter(nonce, balance) + shardCoordinator := 
mock.NewOneShardCoordinatorMock() + tdp := &mock.PoolsHolderStub{ + TransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { + return nil + }, + } + txsPoolsCleaner, err := block.NewTxsPoolsCleaner(accounts, shardCoordinator, tdp) + + assert.Nil(t, txsPoolsCleaner) + assert.Equal(t, process.ErrNilTransactionPool, err) +} + +func TestNewTxsPoolsCleaner_ShouldWork(t *testing.T) { + t.Parallel() + + nonce := uint64(1) + balance := big.NewInt(1) + accounts := getAccAdapter(nonce, balance) + shardCoordinator := mock.NewOneShardCoordinatorMock() + tdp := initDataPool([]byte("test")) + txsPoolsCleaner, err := block.NewTxsPoolsCleaner(accounts, shardCoordinator, tdp) + + assert.NotNil(t, txsPoolsCleaner) + assert.Nil(t, err) +} + +func TestTxPoolsCleaner_CleanNilSenderAddrShouldRemoveTx(t *testing.T) { + t.Parallel() + + nonce := uint64(1) + balance := big.NewInt(1) + accounts := getAccAdapter(nonce, balance) + shardCoordinator := mock.NewOneShardCoordinatorMock() + tdp := initDataPoolTransactions() + txsPoolsCleaner, _ := block.NewTxsPoolsCleaner(accounts, shardCoordinator, tdp) + + startTime := time.Now() + haveTime := func() bool { + return time.Now().Sub(startTime).Seconds() < 1.0 + } + + err := txsPoolsCleaner.Clean(haveTime) + assert.Nil(t, err) + + numRemovedTxs := txsPoolsCleaner.NumRemovedTxs() + assert.Equal(t, uint64(1), numRemovedTxs) +} + +func TestTxPoolsCleaner_CleanAccountNotExistsShouldRemoveTx(t *testing.T) { + t.Parallel() + + accounts := &mock.AccountsStub{ + GetExistingAccountCalled: func(addressContainer state.AddressContainer) (handler state.AccountHandler, e error) { + return nil, state.ErrAccNotFound + }, + } + shardCoordinator := mock.NewOneShardCoordinatorMock() + tdp := initDataPoolTransactions() + txsPoolsCleaner, _ := block.NewTxsPoolsCleaner(accounts, shardCoordinator, tdp) + + startTime := time.Now() + haveTime := func() bool { + return time.Now().Sub(startTime).Seconds() < 2.0 + } + + err := txsPoolsCleaner.Clean(haveTime) + assert.Nil(t, err) + + numRemovedTxs := txsPoolsCleaner.NumRemovedTxs() + assert.Equal(t, uint64(2), numRemovedTxs) +} + +func TestTxPoolsCleaner_CleanLowerAccountNonceShouldRemoveTx(t *testing.T) { + t.Parallel() + + nonce := uint64(11) + balance := big.NewInt(1) + accounts := getAccAdapter(nonce, balance) + shardCoordinator := mock.NewOneShardCoordinatorMock() + tdp := initDataPoolTransactions() + txsPoolsCleaner, _ := block.NewTxsPoolsCleaner(accounts, shardCoordinator, tdp) + + startTime := time.Now() + haveTime := func() bool { + return time.Now().Sub(startTime).Seconds() < 2.0 + } + + err := txsPoolsCleaner.Clean(haveTime) + assert.Nil(t, err) + + numRemovedTxs := txsPoolsCleaner.NumRemovedTxs() + assert.Equal(t, uint64(2), numRemovedTxs) +} + +func TestTxPoolsCleaner_CleanNilHaveTimeShouldErr(t *testing.T) { + t.Parallel() + + nonce := uint64(11) + balance := big.NewInt(1) + accounts := getAccAdapter(nonce, balance) + shardCoordinator := mock.NewOneShardCoordinatorMock() + tdp := initDataPoolTransactions() + txsPoolsCleaner, _ := block.NewTxsPoolsCleaner(accounts, shardCoordinator, tdp) + + err := txsPoolsCleaner.Clean(nil) + assert.Equal(t, process.ErrNilHaveTimeHandler, err) +} diff --git a/process/block/shardblock.go b/process/block/shardblock.go index e90410d79b0..4a7f6e546c8 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -22,6 +22,8 @@ import ( "github.com/ElrondNetwork/elrond-go/statusHandler" ) +const cleaningTime = 1.0 + // shardProcessor implements shardProcessor interface and actually 
it tries to execute block type shardProcessor struct { *baseProcessor @@ -41,6 +43,8 @@ type shardProcessor struct { core serviceContainer.Core txCoordinator process.TransactionCoordinator txCounter *transactionCounter + + txsPoolsCleaner process.PoolsCleaner } // NewShardProcessor creates a new shardProcessor object @@ -107,15 +111,20 @@ func NewShardProcessor( return nil, err } - sp := shardProcessor{ - core: core, - baseProcessor: base, - dataPool: dataPool, - blocksTracker: blocksTracker, - txCoordinator: txCoordinator, - txCounter: NewTransactionCounter(), + txsPoolsCleaner, err := NewTxsPoolsCleaner(accounts, shardCoordinator, dataPool) + if err != nil { + return nil, err } + sp := shardProcessor{ + core: core, + baseProcessor: base, + dataPool: dataPool, + blocksTracker: blocksTracker, + txCoordinator: txCoordinator, + txCounter: NewTransactionCounter(), + txsPoolsCleaner: txsPoolsCleaner, + } sp.chRcvAllMetaHdrs = make(chan bool) transactionPool := sp.dataPool.Transactions() @@ -698,9 +707,10 @@ func (sp *shardProcessor) CommitBlock( } chainHandler.SetCurrentBlockHeaderHash(headerHash) - sp.indexBlockIfNeeded(bodyHandler, headerHandler) + sp.cleanTxsPools() + // write data to log go sp.txCounter.displayLogInfo( header, @@ -716,6 +726,21 @@ func (sp *shardProcessor) CommitBlock( return nil } +func (sp *shardProcessor) cleanTxsPools() { + go func() { + startTime := time.Now() + haveTime := func() bool { + return time.Now().Sub(startTime).Seconds() < cleaningTime + } + + errW := sp.txsPoolsCleaner.Clean(haveTime) + log.LogIfError(errW) + + numRemovedTxs := sp.txsPoolsCleaner.NumRemovedTxs() + log.Info(fmt.Sprintf("Total txs removed from pools with clean mechanism %d", numRemovedTxs)) + }() +} + // getHighestHdrForOwnShardFromMetachain calculates the highest shard header notarized by metachain func (sp *shardProcessor) getHighestHdrForOwnShardFromMetachain(round uint64) (*block.Header, []byte, error) { highestNonceOwnShIdHdr := &block.Header{} diff --git a/process/interface.go b/process/interface.go index 9c8cc8a9409..9383ac54168 100644 --- a/process/interface.go +++ b/process/interface.go @@ -356,3 +356,9 @@ type TxValidatorHandler interface { SenderAddress() state.AddressContainer TotalValue() *big.Int } + +// PoolsCleaner define the functionality that is needed for a pools cleaner +type PoolsCleaner interface { + Clean(haveTime func() bool) error + NumRemovedTxs() uint64 +} From 5e645bd38b4fd9162c4e590d62c05fb34a296872 Mon Sep 17 00:00:00 2001 From: miiu96 Date: Fri, 13 Sep 2019 10:19:58 +0300 Subject: [PATCH 097/234] EN-3887 : modify tests --- process/block/poolsclean_test.go | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/process/block/poolsclean_test.go b/process/block/poolsclean_test.go index ec43f2f4796..efc18e8bc50 100644 --- a/process/block/poolsclean_test.go +++ b/process/block/poolsclean_test.go @@ -136,6 +136,7 @@ func TestNewTxsPoolsCleaner_ShouldWork(t *testing.T) { func TestTxPoolsCleaner_CleanNilSenderAddrShouldRemoveTx(t *testing.T) { t.Parallel() + cleanDurationSeconds := 1.0 nonce := uint64(1) balance := big.NewInt(1) accounts := getAccAdapter(nonce, balance) @@ -145,7 +146,7 @@ func TestTxPoolsCleaner_CleanNilSenderAddrShouldRemoveTx(t *testing.T) { startTime := time.Now() haveTime := func() bool { - return time.Now().Sub(startTime).Seconds() < 1.0 + return time.Now().Sub(startTime).Seconds() < cleanDurationSeconds } err := txsPoolsCleaner.Clean(haveTime) @@ -158,6 +159,7 @@ func TestTxPoolsCleaner_CleanNilSenderAddrShouldRemoveTx(t 
*testing.T) { func TestTxPoolsCleaner_CleanAccountNotExistsShouldRemoveTx(t *testing.T) { t.Parallel() + cleanDurationSeconds := 2.0 accounts := &mock.AccountsStub{ GetExistingAccountCalled: func(addressContainer state.AddressContainer) (handler state.AccountHandler, e error) { return nil, state.ErrAccNotFound @@ -169,7 +171,7 @@ func TestTxPoolsCleaner_CleanAccountNotExistsShouldRemoveTx(t *testing.T) { startTime := time.Now() haveTime := func() bool { - return time.Now().Sub(startTime).Seconds() < 2.0 + return time.Now().Sub(startTime).Seconds() < cleanDurationSeconds } err := txsPoolsCleaner.Clean(haveTime) @@ -182,6 +184,7 @@ func TestTxPoolsCleaner_CleanAccountNotExistsShouldRemoveTx(t *testing.T) { func TestTxPoolsCleaner_CleanLowerAccountNonceShouldRemoveTx(t *testing.T) { t.Parallel() + cleanDurationSeconds := 2.0 nonce := uint64(11) balance := big.NewInt(1) accounts := getAccAdapter(nonce, balance) @@ -191,7 +194,7 @@ func TestTxPoolsCleaner_CleanLowerAccountNonceShouldRemoveTx(t *testing.T) { startTime := time.Now() haveTime := func() bool { - return time.Now().Sub(startTime).Seconds() < 2.0 + return time.Now().Sub(startTime).Seconds() < cleanDurationSeconds } err := txsPoolsCleaner.Clean(haveTime) From 26673c1363277b7b54d7cee01bfba9612bebdffb Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Fri, 13 Sep 2019 17:44:13 +0300 Subject: [PATCH 098/234] process, integrationTests: refactor transaction coordinator and rewards preprocessor --- cmd/node/factory/structs.go | 6 + data/address/specialAddresses.go | 4 +- .../shard/resolversContainerFactory.go | 1 + integrationTests/mock/blockProcessorMock.go | 30 ++-- .../smartContract/testInitilalizer.go | 2 + .../interceptedResolvedBulkTx_test.go | 2 +- .../transaction/interceptedResolvedTx_test.go | 2 +- integrationTests/testInitializer.go | 78 ++++----- integrationTests/testProcessorNode.go | 4 +- process/block/displayBlock.go | 2 +- process/block/metablock.go | 2 +- .../block/preprocess/rewardTxPreProcessor.go | 61 ++++++- process/block/preprocess/rewardsHandler.go | 13 +- .../block/preprocess/rewardsHandler_test.go | 5 +- .../block/preprocess/smartContractResults.go | 15 +- .../preprocess/smartContractResults_test.go | 2 +- process/block/preprocess/transactions.go | 50 +++++- process/block/preprocess/transactions_test.go | 2 +- process/block/shardblock_test.go | 13 +- process/coordinator/process.go | 159 +++++------------- process/coordinator/process_test.go | 9 + process/errors.go | 10 +- .../shard/preProcessorsContainerFactory.go | 7 + .../preProcessorsContainerFactory_test.go | 16 ++ process/interface.go | 9 +- process/mock/preprocessorMock.go | 19 ++- .../interceptedRewardTransaction.go | 8 +- process/rewardTransaction/interceptor.go | 14 +- 28 files changed, 329 insertions(+), 216 deletions(-) diff --git a/cmd/node/factory/structs.go b/cmd/node/factory/structs.go index 6606f9814b3..5194bf95ea6 100644 --- a/cmd/node/factory/structs.go +++ b/cmd/node/factory/structs.go @@ -1543,6 +1543,11 @@ func newShardBlockProcessorAndTracker( return nil, nil, process.ErrWrongTypeAssertion } + internalTransactionProducer, ok:= rewardsTxInterim.(process.InternalTransactionProducer) + if !ok{ + return nil, nil, process.ErrWrongTypeAssertion + } + scProcessor, err := smartContract.NewSmartContractProcessor( vmContainer, argsParser, @@ -1624,6 +1629,7 @@ func newShardBlockProcessorAndTracker( scProcessor, scProcessor, rewardsTxProcessor, + internalTransactionProducer, ) if err != nil { return nil, nil, err diff --git a/data/address/specialAddresses.go 
b/data/address/specialAddresses.go index 33cc044bf24..06ba4848e9f 100644 --- a/data/address/specialAddresses.go +++ b/data/address/specialAddresses.go @@ -30,10 +30,10 @@ func NewSpecialAddressHolder( if burnAddress == nil { return nil, data.ErrNilBurnAddress } - if adrConv == nil { + if adrConv == nil || adrConv.IsInterfaceNil() { return nil, data.ErrNilAddressConverter } - if shardCoordinator == nil { + if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { return nil, data.ErrNilShardCoordinator } diff --git a/dataRetriever/factory/shard/resolversContainerFactory.go b/dataRetriever/factory/shard/resolversContainerFactory.go index 8c24880ec80..eba1026d37d 100644 --- a/dataRetriever/factory/shard/resolversContainerFactory.go +++ b/dataRetriever/factory/shard/resolversContainerFactory.go @@ -108,6 +108,7 @@ func (rcf *resolversContainerFactory) Create() (dataRetriever.ResolversContainer if err != nil { return nil, err } + err = container.AddMultiple(keys, resolverSlice) if err != nil { return nil, err diff --git a/integrationTests/mock/blockProcessorMock.go b/integrationTests/mock/blockProcessorMock.go index 95ff3d78bbf..b5a75b87b35 100644 --- a/integrationTests/mock/blockProcessorMock.go +++ b/integrationTests/mock/blockProcessorMock.go @@ -10,19 +10,19 @@ import ( // BlockProcessorMock mocks the implementation for a blockProcessor type BlockProcessorMock struct { - NrCommitBlockCalled uint32 - Marshalizer marshal.Marshalizer - ProcessBlockCalled func(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler, haveTime func() time.Duration) error - CommitBlockCalled func(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler) error - RevertAccountStateCalled func() - CreateBlockCalled func(round uint64, haveTime func() bool) (data.BodyHandler, error) - RestoreBlockIntoPoolsCalled func(header data.HeaderHandler, body data.BodyHandler) error - CreateBlockHeaderCalled func(body data.BodyHandler, round uint64, haveTime func() bool) (data.HeaderHandler, error) - MarshalizedDataToBroadcastCalled func(header data.HeaderHandler, body data.BodyHandler) (map[uint32][]byte, map[string][][]byte, error) - DecodeBlockBodyCalled func(dta []byte) data.BodyHandler - DecodeBlockHeaderCalled func(dta []byte) data.HeaderHandler - AddLastNotarizedHdrCalled func(shardId uint32, processedHdr data.HeaderHandler) - SetConsensusRewardAddressesCalled func([]string) + NrCommitBlockCalled uint32 + Marshalizer marshal.Marshalizer + ProcessBlockCalled func(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler, haveTime func() time.Duration) error + CommitBlockCalled func(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler) error + RevertAccountStateCalled func() + CreateBlockCalled func(round uint64, haveTime func() bool) (data.BodyHandler, error) + RestoreBlockIntoPoolsCalled func(header data.HeaderHandler, body data.BodyHandler) error + CreateBlockHeaderCalled func(body data.BodyHandler, round uint64, haveTime func() bool) (data.HeaderHandler, error) + MarshalizedDataToBroadcastCalled func(header data.HeaderHandler, body data.BodyHandler) (map[uint32][]byte, map[string][][]byte, error) + DecodeBlockBodyCalled func(dta []byte) data.BodyHandler + DecodeBlockHeaderCalled func(dta []byte) data.HeaderHandler + AddLastNotarizedHdrCalled func(shardId uint32, processedHdr data.HeaderHandler) + SetConsensusDataCalled func([]string) } // ProcessBlock mocks pocessing a block @@ -94,8 +94,8 @@ func (blProcMock 
BlockProcessorMock) AddLastNotarizedHdr(shardId uint32, process } func (blProcMock BlockProcessorMock) SetConsensusData(consensusRewardAddresses []string, round uint64) { - if blProcMock.SetConsensusRewardAddressesCalled != nil { - blProcMock.SetConsensusRewardAddressesCalled(consensusRewardAddresses) + if blProcMock.SetConsensusDataCalled != nil { + blProcMock.SetConsensusDataCalled(consensusRewardAddresses) } } diff --git a/integrationTests/multiShard/smartContract/testInitilalizer.go b/integrationTests/multiShard/smartContract/testInitilalizer.go index 5849a4af9ba..e9434db265a 100644 --- a/integrationTests/multiShard/smartContract/testInitilalizer.go +++ b/integrationTests/multiShard/smartContract/testInitilalizer.go @@ -350,6 +350,7 @@ func createNetNode( scForwarder, _ := interimProcContainer.Get(dataBlock.SmartContractResultBlock) rewardsInter, _ := interimProcContainer.Get(dataBlock.RewardsBlock) rewardsHandler, _ := rewardsInter.(process.TransactionFeeHandler) + internalTxProducer, _ := rewardsInter.(process.InternalTransactionProducer) rewardProcessor, _ := rewardTransaction.NewRewardTxProcessor( accntAdapter, addrConv, @@ -401,6 +402,7 @@ func createNetNode( scProcessor, scProcessor, rewardProcessor, + internalTxProducer, ) container, _ := fact.Create() diff --git a/integrationTests/multiShard/transaction/interceptedResolvedBulkTx_test.go b/integrationTests/multiShard/transaction/interceptedResolvedBulkTx_test.go index 7e2b1cbacf2..50584434125 100644 --- a/integrationTests/multiShard/transaction/interceptedResolvedBulkTx_test.go +++ b/integrationTests/multiShard/transaction/interceptedResolvedBulkTx_test.go @@ -286,7 +286,7 @@ func TestNode_InMultiShardEnvRequestTxsShouldRequireOnlyFromTheOtherShard(t *tes recvTxs := make(map[int]map[string]struct{}) mutRecvTxs := sync.Mutex{} for i := 0; i < nodesPerShard; i++ { - dPool := integrationTests.CreateRequesterDataPool(t, recvTxs, &mutRecvTxs, i, uint32(maxShards)) + dPool := integrationTests.CreateRequesterDataPool(t, recvTxs, &mutRecvTxs, i) tn := integrationTests.NewTestProcessorNodeWithCustomDataPool( uint32(maxShards), diff --git a/integrationTests/singleShard/transaction/interceptedResolvedTx_test.go b/integrationTests/singleShard/transaction/interceptedResolvedTx_test.go index 76a0bbfbd7a..5b2f935dfce 100644 --- a/integrationTests/singleShard/transaction/interceptedResolvedTx_test.go +++ b/integrationTests/singleShard/transaction/interceptedResolvedTx_test.go @@ -131,7 +131,7 @@ func TestNode_RequestInterceptRewardTransactionWithMessenger(t *testing.T) { time.Sleep(time.Second) - //Step 1. Generate a signed transaction + //Step 1. 
Generate a reward Transaction tx := rewardTx.RewardTx{ Value: big.NewInt(0), RcvAddr: integrationTests.TestHasher.Compute("receiver"), diff --git a/integrationTests/testInitializer.go b/integrationTests/testInitializer.go index 2c5d2cb9023..a7370f5cbd7 100644 --- a/integrationTests/testInitializer.go +++ b/integrationTests/testInitializer.go @@ -88,27 +88,27 @@ func CreateMessengerWithKadDht(ctx context.Context, initialAddr string) p2p.Mess } // CreateTestShardDataPool creates a test data pool for shard nodes -func CreateTestShardDataPool(txPool dataRetriever.ShardedDataCacherNotifier, nbShards uint32) dataRetriever.PoolsHolder { +func CreateTestShardDataPool(txPool dataRetriever.ShardedDataCacherNotifier) dataRetriever.PoolsHolder { if txPool == nil { - txPool, _ = shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: nbShards}) + txPool, _ = shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: 1}) } - uTxPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: nbShards}) - rewardsTxPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 300, Type: storageUnit.LRUCache, Shards: nbShards}) - cacherCfg := storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache, Shards: nbShards} + uTxPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: 1}) + rewardsTxPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 300, Type: storageUnit.LRUCache, Shards: 1}) + cacherCfg := storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache, Shards: 1} hdrPool, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: nbShards} + cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: 1} hdrNoncesCacher, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) hdrNonces, _ := dataPool.NewNonceSyncMapCacher(hdrNoncesCacher, uint64ByteSlice.NewBigEndianConverter()) - cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: nbShards} + cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: 1} txBlockBody, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: nbShards} + cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: 1} peerChangeBlockBody, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: nbShards} + cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: 1} metaBlocks, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) dPool, _ := dataPool.NewShardedDataPool( @@ -906,6 +906,7 @@ func ComputeAndRequestMissingTransactions( } } +// ComputeAndRequestMissingRewardTxs computes the missing reward transactions for each node and requests them func ComputeAndRequestMissingRewardTxs( nodes []*TestProcessorNode, generatedDataHashes [][]byte, @@ -928,7 +929,6 @@ func getMissingTxsForNode(n *TestProcessorNode, generatedTxHashes [][]byte) [][] for i := 0; i < len(generatedTxHashes); i++ { _, ok := n.ShardDataPool.Transactions().SearchFirstData(generatedTxHashes[i]) 
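// A hedged sketch of the collect-then-request pattern these test helpers follow:
// gather the hashes that are absent from the local shard pool, then ask the shard's
// resolver for them. The names below (pool, resolver, hashes) are illustrative only
// and not part of this patch; the resolver call is assumed to accept a slice of hashes.
//
//	missing := make([][]byte, 0)
//	for _, hash := range hashes {
//		if _, ok := pool.SearchFirstData(hash); !ok {
//			missing = append(missing, hash)
//		}
//	}
//	_ = resolver.RequestDataFromHashArray(missing)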
if !ok { - //tx is still missing neededTxs = append(neededTxs, generatedTxHashes[i]) } } @@ -942,7 +942,6 @@ func getMissingRewardTxsForNode(n *TestProcessorNode, generatedTxHashes [][]byte for i := 0; i < len(generatedTxHashes); i++ { _, ok := n.ShardDataPool.RewardTransactions().SearchFirstData(generatedTxHashes[i]) if !ok { - //tx is still missing neededTxs = append(neededTxs, generatedTxHashes[i]) } } @@ -967,42 +966,33 @@ func requestMissingRewardTxs(n *TestProcessorNode, shardResolver uint32, neededD } // CreateRequesterDataPool creates a datapool with a mock txPool -func CreateRequesterDataPool( - t *testing.T, - recvTxs map[int]map[string]struct{}, - mutRecvTxs *sync.Mutex, - nodeIndex int, - nbShards uint32, -) dataRetriever.PoolsHolder { +func CreateRequesterDataPool(t *testing.T, recvTxs map[int]map[string]struct{}, mutRecvTxs *sync.Mutex, nodeIndex int, ) dataRetriever.PoolsHolder { //not allowed to request data from the same shard - return CreateTestShardDataPool( - &mock.ShardedDataStub{ - SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { - assert.Fail(t, "same-shard requesters should not be queried") - return nil, false - }, - ShardDataStoreCalled: func(cacheId string) (c storage.Cacher) { - assert.Fail(t, "same-shard requesters should not be queried") - return nil - }, - AddDataCalled: func(key []byte, data interface{}, cacheId string) { - mutRecvTxs.Lock() - defer mutRecvTxs.Unlock() - - txMap := recvTxs[nodeIndex] - if txMap == nil { - txMap = make(map[string]struct{}) - recvTxs[nodeIndex] = txMap - } + return CreateTestShardDataPool(&mock.ShardedDataStub{ + SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { + assert.Fail(t, "same-shard requesters should not be queried") + return nil, false + }, + ShardDataStoreCalled: func(cacheId string) (c storage.Cacher) { + assert.Fail(t, "same-shard requesters should not be queried") + return nil + }, + AddDataCalled: func(key []byte, data interface{}, cacheId string) { + mutRecvTxs.Lock() + defer mutRecvTxs.Unlock() + + txMap := recvTxs[nodeIndex] + if txMap == nil { + txMap = make(map[string]struct{}) + recvTxs[nodeIndex] = txMap + } - txMap[string(key)] = struct{}{} - }, - RegisterHandlerCalled: func(i func(key []byte)) { - }, + txMap[string(key)] = struct{}{} }, - nbShards, - ) + RegisterHandlerCalled: func(i func(key []byte)) { + }, + }) } // CreateResolversDataPool creates a datapool containing a given number of transactions @@ -1025,7 +1015,7 @@ func CreateResolversDataPool( txHashes[i] = txHash } - return CreateTestShardDataPool(txPool, shardCoordinator.NumberOfShards()), txHashes + return CreateTestShardDataPool(txPool), txHashes } func generateValidTx( diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index ef3d2b3ca90..bc8de186bc1 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -219,7 +219,7 @@ func (tpn *TestProcessorNode) initDataPools() { if tpn.ShardCoordinator.SelfId() == sharding.MetachainShardId { tpn.MetaDataPool = CreateTestMetaDataPool() } else { - tpn.ShardDataPool = CreateTestShardDataPool(nil, tpn.ShardCoordinator.NumberOfShards()) + tpn.ShardDataPool = CreateTestShardDataPool(nil) } } @@ -342,6 +342,7 @@ func (tpn *TestProcessorNode) initInnerProcessors() { tpn.ScrForwarder, _ = tpn.InterimProcContainer.Get(dataBlock.SmartContractResultBlock) rewardsInter, _ := tpn.InterimProcContainer.Get(dataBlock.RewardsBlock) rewardsHandler, _ := rewardsInter.(process.TransactionFeeHandler) 
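// The rewards intermediate processor is reused here under two interfaces
// (transaction fee handler and internal transaction producer). A hedged sketch
// of the checked form of that assertion, mirroring the production wiring in
// this patch (which returns process.ErrWrongTypeAssertion) rather than the
// test helper's ignored result; the single error return is illustrative only:
//
//	internalTxProducer, ok := rewardsInter.(process.InternalTransactionProducer)
//	if !ok {
//		return process.ErrWrongTypeAssertion
//	}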
+ internalTxProducer,_:= rewardsInter.(process.InternalTransactionProducer) tpn.RewardsProcessor, _ = rewardTransaction.NewRewardTxProcessor( tpn.AccntState, @@ -398,6 +399,7 @@ func (tpn *TestProcessorNode) initInnerProcessors() { tpn.ScProcessor, tpn.ScProcessor.(process.SmartContractResultProcessor), tpn.RewardsProcessor, + internalTxProducer, ) tpn.PreProcessorsContainer, _ = fact.Create() diff --git a/process/block/displayBlock.go b/process/block/displayBlock.go index f0d3329c7d6..e6e43654c69 100644 --- a/process/block/displayBlock.go +++ b/process/block/displayBlock.go @@ -176,7 +176,7 @@ func (txc *transactionCounter) displayTxBlockBody(lines []*display.LineData, bod for i := 0; i < len(body); i++ { miniBlock := body[i] - part := fmt.Sprintf("MiniBlock_%d_%d", miniBlock.SenderShardID, miniBlock.ReceiverShardID) + part := fmt.Sprintf("MiniBlock_%d->%d", miniBlock.SenderShardID, miniBlock.ReceiverShardID) if miniBlock.TxHashes == nil || len(miniBlock.TxHashes) == 0 { lines = append(lines, display.NewLineData(false, []string{ diff --git a/process/block/metablock.go b/process/block/metablock.go index 350ec578070..063da228db3 100644 --- a/process/block/metablock.go +++ b/process/block/metablock.go @@ -1251,7 +1251,7 @@ func displayShardInfo(lines []*display.LineData, header *block.MetaBlock) []*dis receiverShard := shardData.ShardMiniBlockHeaders[j].ReceiverShardId lines = append(lines, display.NewLineData(false, []string{ "", - fmt.Sprintf("%d ShardMiniBlockHeaderHash_%d_%d", j+1, senderShard, receiverShard), + fmt.Sprintf("%d ShardMiniBlockHeaderHash_%d->%d", j+1, senderShard, receiverShard), core.ToB64(shardData.ShardMiniBlockHeaders[j].Hash)})) } else if j == 1 { lines = append(lines, display.NewLineData(false, []string{ diff --git a/process/block/preprocess/rewardTxPreProcessor.go b/process/block/preprocess/rewardTxPreProcessor.go index 0866a02dd01..5f3b0a0bd07 100644 --- a/process/block/preprocess/rewardTxPreProcessor.go +++ b/process/block/preprocess/rewardTxPreProcessor.go @@ -24,6 +24,7 @@ type rewardTxPreprocessor struct { rewardTxPool dataRetriever.ShardedDataCacherNotifier storage dataRetriever.StorageService rewardsProcessor process.RewardTransactionProcessor + rewardsProducer process.InternalTransactionProducer accounts state.AccountsAdapter } @@ -34,6 +35,7 @@ func NewRewardTxPreprocessor( hasher hashing.Hasher, marshalizer marshal.Marshalizer, rewardProcessor process.RewardTransactionProcessor, + rewardProducer process.InternalTransactionProducer, shardCoordinator sharding.Coordinator, accounts state.AccountsAdapter, onRequestRewardTransaction func(shardID uint32, txHashes [][]byte), @@ -54,6 +56,9 @@ func NewRewardTxPreprocessor( if rewardProcessor == nil || rewardProcessor.IsInterfaceNil() { return nil, process.ErrNilTxProcessor } + if rewardProducer == nil || rewardProcessor.IsInterfaceNil() { + return nil, process.ErrNilInternalTransactionProducer + } if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { return nil, process.ErrNilShardCoordinator } @@ -76,6 +81,7 @@ func NewRewardTxPreprocessor( rewardTxPool: rewardTxDataPool, onRequestRewardTx: onRequestRewardTransaction, rewardsProcessor: rewardProcessor, + rewardsProducer: rewardProducer, accounts: accounts, } @@ -168,7 +174,14 @@ func (rtp *rewardTxPreprocessor) RestoreTxBlockIntoPools( } // ProcessBlockTransactions processes all the reward transactions from the block.Body, updates the state -func (rtp *rewardTxPreprocessor) ProcessBlockTransactions(body block.Body, round uint64, haveTime func() 
time.Duration) error { +func (rtp *rewardTxPreprocessor) ProcessBlockTransactions(body block.Body, round uint64, haveTime func() bool) error { + rewardMiniBlocksSlice := make(block.MiniBlockSlice, 0) + computedRewardsMbsMap := rtp.rewardsProducer.CreateAllInterMiniBlocks() + for _, mb := range computedRewardsMbsMap { + rewardMiniBlocksSlice = append(rewardMiniBlocksSlice, mb) + } + rtp.AddComputedRewardMiniBlocks(rewardMiniBlocksSlice) + // basic validation already done in interceptors for i := 0; i < len(body); i++ { miniBlock := body[i] @@ -177,7 +190,7 @@ func (rtp *rewardTxPreprocessor) ProcessBlockTransactions(body block.Body, round } for j := 0; j < len(miniBlock.TxHashes); j++ { - if haveTime() < 0 { + if !haveTime() { return process.ErrTimeIsOut } @@ -217,13 +230,13 @@ func (rtp *rewardTxPreprocessor) AddComputedRewardMiniBlocks(computedRewardMinib for _, txHash := range rewardMb.TxHashes { tx, ok := rtp.rewardTxPool.SearchFirstData(txHash) if !ok { - log.Error("reward transaction should be in pool but not found") + log.Error(process.ErrRewardTransactionNotFound.Error()) continue } rTx, ok := tx.(*rewardTx.RewardTx) if !ok { - log.Error("wrong type in reward transactions pool") + log.Error(process.ErrWrongTypeAssertion.Error()) } rtp.rewardTxsForBlock.txHashAndInfo[string(txHash)] = &txInfo{ @@ -416,6 +429,46 @@ func (rtp *rewardTxPreprocessor) CreateAndProcessMiniBlock(sndShardId, dstShardI return nil, nil } +// CreateAndProcessMiniBlocks creates miniblocks from storage and processes the reward transactions added into the miniblocks +// as long as it has time +func (rtp *rewardTxPreprocessor) CreateAndProcessMiniBlocks( + maxTxSpaceRemained uint32, + maxMbSpaceRemained uint32, + round uint64, + _ func() bool, +) (block.MiniBlockSlice, error){ + + // always have time for rewards + haveTime := func() bool { + return true + } + + rewardMiniBlocksSlice := make(block.MiniBlockSlice, 0) + computedRewardsMbsMap := rtp.rewardsProducer.CreateAllInterMiniBlocks() + for _, mb := range computedRewardsMbsMap { + rewardMiniBlocksSlice = append(rewardMiniBlocksSlice, mb) + } + rtp.AddComputedRewardMiniBlocks(rewardMiniBlocksSlice) + + snapshot := rtp.accounts.JournalLen() + + for _, mb := range rewardMiniBlocksSlice { + err := rtp.ProcessMiniBlock(mb, haveTime, round) + + if err != nil { + log.Error(err.Error()) + errAccountState := rtp.accounts.RevertToSnapshot(snapshot) + if errAccountState != nil { + // TODO: evaluate if reloading the trie from disk will might solve the problem + log.Error(errAccountState.Error()) + } + return nil, err + } + } + + return rewardMiniBlocksSlice, nil +} + // ProcessMiniBlock processes all the reward transactions from a miniblock and saves the processed reward transactions // in local cache func (rtp *rewardTxPreprocessor) ProcessMiniBlock(miniBlock *block.MiniBlock, haveTime func() bool, round uint64) error { diff --git a/process/block/preprocess/rewardsHandler.go b/process/block/preprocess/rewardsHandler.go index df27ec24f1e..c9d009ec763 100644 --- a/process/block/preprocess/rewardsHandler.go +++ b/process/block/preprocess/rewardsHandler.go @@ -294,7 +294,7 @@ func (rtxh *rewardsHandler) createLeaderTx() *rewardTx.RewardTx { currTx.Epoch = rtxh.address.Epoch() currTx.Round = rtxh.address.Round() - return currTx + return currTx } func (rtxh *rewardsHandler) createBurnTx() *rewardTx.RewardTx { @@ -318,7 +318,7 @@ func (rtxh *rewardsHandler) createCommunityTx() *rewardTx.RewardTx { currTx.Epoch = rtxh.address.Epoch() currTx.Round = rtxh.address.Round() - return 
currTx + return currTx } // createRewardFromFees creates the reward transactions from accumulated fees @@ -382,6 +382,10 @@ func (rtxh *rewardsHandler) verifyCreatedRewardsTxs() error { totalFeesFromBlock = totalFeesFromBlock.Add(totalFeesFromBlock, rTx.GetValue()) } + if len(calculatedRewardTxs) != len(rtxh.rewardTxsForBlock) { + return process.ErrRewardTxsMismatchCreatedReceived + } + totalCalculatedFees := big.NewInt(0) for _, value := range calculatedRewardTxs { totalCalculatedFees = totalCalculatedFees.Add(totalCalculatedFees, value.GetValue()) @@ -400,14 +404,9 @@ func (rtxh *rewardsHandler) verifyCreatedRewardsTxs() error { } } - if totalCalculatedFees.Cmp(totalFeesFromBlock) != 0 { - return process.ErrTotalTxsFeesDoNotMatch - } - return nil } - // GetAllCurrentFinishedTxs returns the cached finalized transactions for current round func (rtxh *rewardsHandler) GetAllCurrentFinishedTxs() map[string]data.TransactionHandler { rtxh.mut.Lock() diff --git a/process/block/preprocess/rewardsHandler_test.go b/process/block/preprocess/rewardsHandler_test.go index fbad34382fb..fbcc6fa0fac 100644 --- a/process/block/preprocess/rewardsHandler_test.go +++ b/process/block/preprocess/rewardsHandler_test.go @@ -288,6 +288,9 @@ func TestRewardTxHandler_VerifyCreatedRewardsTxsRewardTxNotFound(t *testing.T) { currTxFee := big.NewInt(50) th.ProcessTransactionFee(currTxFee) _ = th.CreateAllInterMiniBlocks() + _ = th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{Value: big.NewInt(5), RcvAddr: addr.LeaderAddress()}}) + _ = th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{Value: big.NewInt(5), RcvAddr: addr.ElrondCommunityAddress()}}) + _ = th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{Value: big.NewInt(5), RcvAddr: addr.BurnAddress()}}) err = th.verifyCreatedRewardsTxs() assert.Equal(t, process.ErrRewardTxNotFound, err) } @@ -324,7 +327,7 @@ func TestRewardTxHandler_VerifyCreatedRewardsTxsTotalTxsFeesDoNotMatch(t *testin _ = th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{Value: extraVal, RcvAddr: addr.BurnAddress()}}) _ = th.CreateAllInterMiniBlocks() err = th.verifyCreatedRewardsTxs() - assert.Equal(t, process.ErrTotalTxsFeesDoNotMatch, err) + assert.Equal(t, process.ErrRewardTxsMismatchCreatedReceived, err) } func TestRewardTxHandlerVerifyCreatedRewardsTxsOK(t *testing.T) { diff --git a/process/block/preprocess/smartContractResults.go b/process/block/preprocess/smartContractResults.go index 9cbfb7940b5..d87a01953ab 100644 --- a/process/block/preprocess/smartContractResults.go +++ b/process/block/preprocess/smartContractResults.go @@ -172,7 +172,7 @@ func (scr *smartContractResults) RestoreTxBlockIntoPools( } // ProcessBlockTransactions processes all the smartContractResult from the block.Body, updates the state -func (scr *smartContractResults) ProcessBlockTransactions(body block.Body, round uint64, haveTime func() time.Duration) error { +func (scr *smartContractResults) ProcessBlockTransactions(body block.Body, round uint64, haveTime func() bool) error { // basic validation already done in interceptors for i := 0; i < len(body); i++ { miniBlock := body[i] @@ -188,7 +188,7 @@ func (scr *smartContractResults) ProcessBlockTransactions(body block.Body, round } for j := 0; j < len(miniBlock.TxHashes); j++ { - if haveTime() < 0 { + if !haveTime() { return process.ErrTimeIsOut } @@ -395,6 +395,17 @@ func (scr *smartContractResults) CreateAndProcessMiniBlock(sndShardId, dstShardI return nil, nil } +// 
CreateAndProcessMiniBlocks creates miniblocks from storage and processes the reward transactions added into the miniblocks +// as long as it has time +func (scr *smartContractResults) CreateAndProcessMiniBlocks( + maxTxSpaceRemained uint32, + maxMbSpaceRemained uint32, + round uint64, + _ func() bool, +) (block.MiniBlockSlice, error) { + return nil, nil +} + // ProcessMiniBlock processes all the smartContractResults from a and saves the processed smartContractResults in local cache complete miniblock func (scr *smartContractResults) ProcessMiniBlock(miniBlock *block.MiniBlock, haveTime func() bool, round uint64) error { if miniBlock.Type != block.SmartContractResultBlock { diff --git a/process/block/preprocess/smartContractResults_test.go b/process/block/preprocess/smartContractResults_test.go index 59169d82c9d..2e67e19317c 100644 --- a/process/block/preprocess/smartContractResults_test.go +++ b/process/block/preprocess/smartContractResults_test.go @@ -635,7 +635,7 @@ func TestScrsPreprocessor_ProcessBlockTransactions(t *testing.T) { scr.scrForBlock.txHashAndInfo["txHash"] = &txInfo{&smartcr, &txshardInfo} - err := scr.ProcessBlockTransactions(body, 1, haveTime) + err := scr.ProcessBlockTransactions(body, 1, haveTimeTrue) assert.Nil(t, err) } diff --git a/process/block/preprocess/transactions.go b/process/block/preprocess/transactions.go index 059874a3b7b..1775704735b 100644 --- a/process/block/preprocess/transactions.go +++ b/process/block/preprocess/transactions.go @@ -180,7 +180,7 @@ func (txs *transactions) RestoreTxBlockIntoPools( } // ProcessBlockTransactions processes all the transaction from the block.Body, updates the state -func (txs *transactions) ProcessBlockTransactions(body block.Body, round uint64, haveTime func() time.Duration) error { +func (txs *transactions) ProcessBlockTransactions(body block.Body, round uint64, haveTime func() bool) error { // basic validation already done in interceptors for i := 0; i < len(body); i++ { miniBlock := body[i] @@ -189,7 +189,7 @@ func (txs *transactions) ProcessBlockTransactions(body block.Body, round uint64, } for j := 0; j < len(miniBlock.TxHashes); j++ { - if haveTime() < 0 { + if !haveTime() { return process.ErrTimeIsOut } @@ -400,6 +400,52 @@ func isSmartContractAddress(rcvAddress []byte) bool { return false } +// CreateAndProcessMiniBlocks creates miniblocks from storage and processes the transactions added into the miniblocks +// as long as it has time +func (txs *transactions) CreateAndProcessMiniBlocks( + maxTxSpaceRemained uint32, + maxMbSpaceRemained uint32, + round uint64, + haveTime func() bool, +) (block.MiniBlockSlice, error) { + + miniBlocks := make(block.MiniBlockSlice, 0) + newMBAdded := true + txSpaceRemained := int(maxTxSpaceRemained) + + for newMBAdded { + newMBAdded = false + for shardId := uint32(0); shardId < txs.shardCoordinator.NumberOfShards(); shardId++ { + if maxTxSpaceRemained <= 0 { + break + } + + mbSpaceRemained := int(maxMbSpaceRemained) - len(miniBlocks) + if mbSpaceRemained <= 0 { + break + } + + miniBlock, err := txs.CreateAndProcessMiniBlock( + txs.shardCoordinator.SelfId(), + shardId, + txSpaceRemained, + haveTime, + round) + if err != nil { + continue + } + + if len(miniBlock.TxHashes) > 0 { + txSpaceRemained -= len(miniBlock.TxHashes) + miniBlocks = append(miniBlocks, miniBlock) + newMBAdded = true + } + } + } + + return miniBlocks, nil +} + // CreateAndProcessMiniBlock creates the miniblock from storage and processes the transactions added into the miniblock func (txs *transactions) 
CreateAndProcessMiniBlock(sndShardId, dstShardId uint32, spaceRemained int, haveTime func() bool, round uint64) (*block.MiniBlock, error) { strCache := process.ShardCacherIdentifier(sndShardId, dstShardId) diff --git a/process/block/preprocess/transactions_test.go b/process/block/preprocess/transactions_test.go index df5b56769de..7a41ed0311b 100644 --- a/process/block/preprocess/transactions_test.go +++ b/process/block/preprocess/transactions_test.go @@ -4,7 +4,6 @@ import ( "bytes" "encoding/hex" "fmt" - "github.com/ElrondNetwork/elrond-go/data/rewardTx" "math/big" "math/rand" "reflect" @@ -14,6 +13,7 @@ import ( "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" "github.com/ElrondNetwork/elrond-go/data/smartContractResult" "github.com/ElrondNetwork/elrond-go/data/transaction" "github.com/ElrondNetwork/elrond-go/dataRetriever" diff --git a/process/block/shardblock_test.go b/process/block/shardblock_test.go index bb67ef3375f..57cca62a2a7 100644 --- a/process/block/shardblock_test.go +++ b/process/block/shardblock_test.go @@ -616,6 +616,7 @@ func TestShardProcessor_ProcessBlockWithInvalidTransactionShouldErr(t *testing.T &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) container, _ := factory.Create() @@ -801,6 +802,7 @@ func TestShardProcessor_ProcessBlockWithErrOnProcessBlockTransactionsCallShouldR &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) container, _ := factory.Create() @@ -2073,6 +2075,7 @@ func TestShardProcessor_CommitBlockNoTxInPoolShouldErr(t *testing.T) { &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) container, _ := factory.Create() @@ -2762,6 +2765,7 @@ func TestShardProcessor_MarshalizedDataToBroadcastShouldWork(t *testing.T) { &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) container, _ := factory.Create() @@ -2897,6 +2901,7 @@ func TestShardProcessor_MarshalizedDataMarshalWithoutSuccess(t *testing.T) { &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) container, _ := factory.Create() @@ -3380,6 +3385,7 @@ func TestShardProcessor_CreateMiniBlocksShouldWorkWithIntraShardTxs(t *testing.T &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) container, _ := factory.Create() @@ -3612,6 +3618,7 @@ func TestShardProcessor_RestoreBlockIntoPoolsShouldWork(t *testing.T) { &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) container, _ := factory.Create() @@ -4658,7 +4665,7 @@ func TestShardProcessor_GetHighestHdrForOwnShardFromMetachainNothingToProcess(t func TestShardProcessor_GetHighestHdrForOwnShardFromMetachaiMetaHdrsWithoutOwnHdr(t *testing.T) { t.Parallel() - dataPool := integrationTests.CreateTestShardDataPool(nil, 3) + dataPool := integrationTests.CreateTestShardDataPool(nil) store := initStore() hasher := &mock.HasherMock{} marshalizer := &mock.MarshalizerMock{} @@ -4724,7 +4731,7 @@ func 
TestShardProcessor_GetHighestHdrForOwnShardFromMetachaiMetaHdrsWithoutOwnHd func TestShardProcessor_GetHighestHdrForOwnShardFromMetachaiMetaHdrsWithOwnHdrButNotStored(t *testing.T) { t.Parallel() - dataPool := integrationTests.CreateTestShardDataPool(nil, 3) + dataPool := integrationTests.CreateTestShardDataPool(nil) store := initStore() hasher := &mock.HasherMock{} marshalizer := &mock.MarshalizerMock{} @@ -4789,7 +4796,7 @@ func TestShardProcessor_GetHighestHdrForOwnShardFromMetachaiMetaHdrsWithOwnHdrBu func TestShardProcessor_GetHighestHdrForOwnShardFromMetachaiMetaHdrsWithOwnHdrStored(t *testing.T) { t.Parallel() - dataPool := integrationTests.CreateTestShardDataPool(nil, 3) + dataPool := integrationTests.CreateTestShardDataPool(nil) store := initStore() hasher := &mock.HasherMock{} marshalizer := &mock.MarshalizerMock{} diff --git a/process/coordinator/process.go b/process/coordinator/process.go index fb2c460da86..4a176b39d35 100644 --- a/process/coordinator/process.go +++ b/process/coordinator/process.go @@ -1,7 +1,6 @@ package coordinator import ( - "fmt" "sort" "sync" "time" @@ -208,8 +207,7 @@ func (tc *transactionCoordinator) SaveBlockDataToStorage(body block.Body) error errMutex := sync.Mutex{} wg := sync.WaitGroup{} - // Length of body types + another go routine for the intermediate transactions - wg.Add(len(separatedBodies)) + wg.Add(len(separatedBodies) + len(tc.keysInterimProcs)) for key, value := range separatedBodies { go func(blockType block.Type, blockBody block.Body) { @@ -232,35 +230,28 @@ func (tc *transactionCoordinator) SaveBlockDataToStorage(body block.Body) error }(key, value) } - wg.Wait() - - intermediatePreprocSC := tc.getInterimProcessor(block.SmartContractResultBlock) - if intermediatePreprocSC == nil { - return errFound - } + for _, blockType := range tc.keysInterimProcs { + go func(blockType block.Type) { + intermediateProc := tc.getInterimProcessor(blockType) + if intermediateProc == nil { + wg.Done() + return + } - err := intermediatePreprocSC.SaveCurrentIntermediateTxToStorage() - if err != nil { - log.Debug(err.Error()) + err := intermediateProc.SaveCurrentIntermediateTxToStorage() + if err != nil { + log.Debug(err.Error()) - errMutex.Lock() - errFound = err - errMutex.Unlock() - } + errMutex.Lock() + errFound = err + errMutex.Unlock() + } - intermediatePreproc := tc.getInterimProcessor(block.RewardsBlock) - if intermediatePreproc == nil { - return errFound + wg.Done() + }(blockType) } - err = intermediatePreproc.SaveCurrentIntermediateTxToStorage() - if err != nil { - log.Debug(err.Error()) - - errMutex.Lock() - errFound = err - errMutex.Unlock() - } + wg.Wait() return errFound } @@ -353,10 +344,14 @@ func (tc *transactionCoordinator) RemoveBlockDataFromPool(body block.Body) error func (tc *transactionCoordinator) ProcessBlockTransaction( body block.Body, round uint64, - haveTime func() time.Duration, + timeRemaining func() time.Duration, ) error { - separatedBodies := tc.separateBodyByType(body) + haveTime := func() bool { + return timeRemaining() >= 0 + } + + separatedBodies := tc.separateBodyByType(body) // processing has to be done in order, as the order of different type of transactions over the same account is strict for _, blockType := range tc.keysTxPreProcs { if separatedBodies[blockType] == nil { @@ -377,21 +372,6 @@ func (tc *transactionCoordinator) ProcessBlockTransaction( } } - // create the reward txs and make them available for processing - mbRewards := tc.createRewardsMiniBlocks() - preproc := tc.getPreProcessor(block.RewardsBlock) - 
rewardsPreProc, ok := preproc.(process.RewardTransactionPreProcessor) - if !ok { - return process.ErrWrongTypeAssertion - } - - rewardsPreProc.AddComputedRewardMiniBlocks(mbRewards) - - err := preproc.ProcessBlockTransactions(separatedBodies[block.RewardsBlock], round, haveTime) - if err != nil { - return err - } - return nil } @@ -478,94 +458,46 @@ func (tc *transactionCoordinator) CreateMbsAndProcessTransactionsFromMe( haveTime func() bool, ) block.MiniBlockSlice { - txPreProc := tc.getPreProcessor(block.TxBlock) - if txPreProc == nil || txPreProc.IsInterfaceNil() { - return nil - } - miniBlocks := make(block.MiniBlockSlice, 0) - txSpaceRemained := int(maxTxSpaceRemained) - - newMBAdded := true - for newMBAdded { - newMBAdded = false + for _, blockType := range tc.keysTxPreProcs { - for shardId := uint32(0); shardId < tc.shardCoordinator.NumberOfShards(); shardId++ { - if txSpaceRemained <= 0 { - break - } + txPreProc := tc.getPreProcessor(blockType) + if txPreProc == nil || txPreProc.IsInterfaceNil() { + return nil + } - mbSpaceRemained := int(maxMbSpaceRemained) - len(miniBlocks) - if mbSpaceRemained <= 0 { - break - } + mbs, err := txPreProc.CreateAndProcessMiniBlocks( + maxTxSpaceRemained, + maxMbSpaceRemained, + round, + haveTime, + ) - miniBlock, err := txPreProc.CreateAndProcessMiniBlock( - tc.shardCoordinator.SelfId(), - shardId, - txSpaceRemained, - haveTime, - round) - if err != nil { - continue - } + if err != nil { + log.Error(err.Error()) + } - if len(miniBlock.TxHashes) > 0 { - txSpaceRemained -= len(miniBlock.TxHashes) - miniBlocks = append(miniBlocks, miniBlock) - newMBAdded = true - } + if len(mbs) > 0 { + miniBlocks = append(miniBlocks, mbs...) } } - interMBs := tc.processAddedInterimTransactions() + interMBs := tc.processAddedInterimTransactions(round, haveTime) if len(interMBs) > 0 { miniBlocks = append(miniBlocks, interMBs...) } - rewardMb := tc.createRewardsMiniBlocks() - if len(rewardMb) == 0 { - log.Error("could not create reward mini-blocks") - } - - rewardsPreProc := tc.getPreProcessor(block.RewardsBlock) - for _, mb := range rewardMb { - err := tc.processCompleteMiniBlock(rewardsPreProc, mb, round, haveTime) - if err != nil { - log.Error(fmt.Sprintf("could not process created reward miniblock: %s", err.Error())) - } - } - miniBlocks = append(miniBlocks, rewardMb...) 
- return miniBlocks } -func (tc *transactionCoordinator) createRewardsMiniBlocks() block.MiniBlockSlice { - // add rewards transactions to separate miniBlocks - interimProc := tc.getInterimProcessor(block.RewardsBlock) - if interimProc == nil { - return nil - } - - miniBlocks := make(block.MiniBlockSlice, 0) - rewardsMbs := interimProc.CreateAllInterMiniBlocks() - for _, mb := range rewardsMbs { - miniBlocks = append(miniBlocks, mb) - } - - return miniBlocks -} +func (tc *transactionCoordinator) processAddedInterimTransactions( + round uint64, + haveTime func() bool, +) block.MiniBlockSlice { -func (tc *transactionCoordinator) processAddedInterimTransactions() block.MiniBlockSlice { miniBlocks := make(block.MiniBlockSlice, 0) - // processing has to be done in order, as the order of different type of transactions over the same account is strict for _, blockType := range tc.keysInterimProcs { - if blockType == block.RewardsBlock { - // this has to be processed last - continue - } - interimProc := tc.getInterimProcessor(blockType) if interimProc == nil { // this will never be reached as keysInterimProcs are the actual keys from the interimMap @@ -573,6 +505,7 @@ func (tc *transactionCoordinator) processAddedInterimTransactions() block.MiniBl } currMbs := interimProc.CreateAllInterMiniBlocks() + for _, value := range currMbs { miniBlocks = append(miniBlocks, value) } diff --git a/process/coordinator/process_test.go b/process/coordinator/process_test.go index 6d41da82251..347c071f72b 100644 --- a/process/coordinator/process_test.go +++ b/process/coordinator/process_test.go @@ -368,6 +368,7 @@ func createPreProcessorContainer() process.PreProcessorsContainer { &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) container, _ := preFactory.Create() @@ -407,6 +408,7 @@ func createPreProcessorContainerWithDataPool(dataPool dataRetriever.PoolsHolder) &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) container, _ := preFactory.Create() @@ -677,6 +679,7 @@ func TestTransactionCoordinator_CreateMbsAndProcessCrossShardTransactions(t *tes &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) container, _ := preFactory.Create() @@ -766,6 +769,7 @@ func TestTransactionCoordinator_CreateMbsAndProcessTransactionsFromMeNothingToPr &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) container, _ := preFactory.Create() @@ -1129,6 +1133,7 @@ func TestTransactionCoordinator_receivedMiniBlockRequestTxs(t *testing.T) { &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) container, _ := preFactory.Create() @@ -1285,6 +1290,7 @@ func TestTransactionCoordinator_ProcessBlockTransactionProcessTxError(t *testing &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) container, _ := preFactory.Create() @@ -1404,6 +1410,7 @@ func TestTransactionCoordinator_RequestMiniblocks(t *testing.T) { &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) container, _ := 
preFactory.Create() @@ -1518,6 +1525,7 @@ func TestShardProcessor_ProcessMiniBlockCompleteWithOkTxsShouldExecuteThemAndNot &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) container, _ := preFactory.Create() @@ -1623,6 +1631,7 @@ func TestShardProcessor_ProcessMiniBlockCompleteWithErrorWhileProcessShouldCallR &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) container, _ := preFactory.Create() diff --git a/process/errors.go b/process/errors.go index 9f32c03795a..4f132a8ab35 100644 --- a/process/errors.go +++ b/process/errors.go @@ -262,6 +262,9 @@ var ErrNilResolverContainer = errors.New("nil resolver container") // ErrNilRequestHandler signals that a nil request handler interface was provided var ErrNilRequestHandler = errors.New("nil request handler") +// ErrNilInternalTransactionProducer signals that a nil system transactions producer was provided +var ErrNilInternalTransactionProducer = errors.New("nil internal transaction producere") + // ErrNilHaveTimeHandler signals that a nil have time handler func was provided var ErrNilHaveTimeHandler = errors.New("nil have time handler") @@ -370,6 +373,9 @@ var ErrNilSmartContractResult = errors.New("smart contract result is nil") // ErrNilRewardTransaction signals that the reward transaction is nil var ErrNilRewardTransaction = errors.New("reward transaction is nil") +// ErrRewardTransactionNotFound is raised when reward transaction should be present but was not found +var ErrRewardTransactionNotFound = errors.New("reward transaction not found") + // ErrInvalidDataInput signals that the data input is invalid for parsing var ErrInvalidDataInput = errors.New("data input is invalid to create key, value storage output") @@ -451,8 +457,8 @@ var ErrRewardTxsDoNotMatch = errors.New("calculated reward tx with block reward // ErrRewardTxNotFound signals that the reward transaction was not found var ErrRewardTxNotFound = errors.New("reward transaction not found") -// ErrTotalTxsFeesDoNotMatch signals that the total tx fee do not match -var ErrTotalTxsFeesDoNotMatch = errors.New("total tx fees do not match") +// ErrRewardTxsMismatchCreatedReceived signals a mismatch between the nb of created and received reward transactions +var ErrRewardTxsMismatchCreatedReceived = errors.New("mismatch between created and received reward transactions") // ErrNilTxTypeHandler signals that tx type handler is nil var ErrNilTxTypeHandler = errors.New("nil tx type handler") diff --git a/process/factory/shard/preProcessorsContainerFactory.go b/process/factory/shard/preProcessorsContainerFactory.go index 09bc0573f1b..280cf080667 100644 --- a/process/factory/shard/preProcessorsContainerFactory.go +++ b/process/factory/shard/preProcessorsContainerFactory.go @@ -25,6 +25,7 @@ type preProcessorsContainerFactory struct { rewardsTxProcessor process.RewardTransactionProcessor accounts state.AccountsAdapter requestHandler process.RequestHandler + rewardsProducer process.InternalTransactionProducer } // NewPreProcessorsContainerFactory is responsible for creating a new preProcessors factory object @@ -41,6 +42,7 @@ func NewPreProcessorsContainerFactory( scProcessor process.SmartContractProcessor, scResultProcessor process.SmartContractResultProcessor, rewardsTxProcessor process.RewardTransactionProcessor, + rewardsProducer process.InternalTransactionProducer, ) (*preProcessorsContainerFactory, 
error) { if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { @@ -79,6 +81,9 @@ func NewPreProcessorsContainerFactory( if requestHandler == nil || requestHandler.IsInterfaceNil() { return nil, process.ErrNilRequestHandler } + if rewardsProducer == nil || rewardsProducer.IsInterfaceNil() { + return nil, process.ErrNilInternalTransactionProducer + } return &preProcessorsContainerFactory{ shardCoordinator: shardCoordinator, @@ -93,6 +98,7 @@ func NewPreProcessorsContainerFactory( scResultProcessor: scResultProcessor, rewardsTxProcessor: rewardsTxProcessor, requestHandler: requestHandler, + rewardsProducer: rewardsProducer, }, nil } @@ -170,6 +176,7 @@ func (ppcm *preProcessorsContainerFactory) createRewardsTransactionPreProcessor( ppcm.hasher, ppcm.marshalizer, ppcm.rewardsTxProcessor, + ppcm.rewardsProducer, ppcm.shardCoordinator, ppcm.accounts, ppcm.requestHandler.RequestRewardTransactions, diff --git a/process/factory/shard/preProcessorsContainerFactory_test.go b/process/factory/shard/preProcessorsContainerFactory_test.go index be2df4e8b94..7d25e5015a6 100644 --- a/process/factory/shard/preProcessorsContainerFactory_test.go +++ b/process/factory/shard/preProcessorsContainerFactory_test.go @@ -24,6 +24,7 @@ func TestNewPreProcessorsContainerFactory_NilShardCoordinator(t *testing.T) { &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) assert.Equal(t, process.ErrNilShardCoordinator, err) @@ -46,6 +47,7 @@ func TestNewPreProcessorsContainerFactory_NilStore(t *testing.T) { &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) assert.Equal(t, process.ErrNilStore, err) @@ -68,6 +70,7 @@ func TestNewPreProcessorsContainerFactory_NilMarshalizer(t *testing.T) { &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) assert.Equal(t, process.ErrNilMarshalizer, err) @@ -90,6 +93,7 @@ func TestNewPreProcessorsContainerFactory_NilHasher(t *testing.T) { &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) assert.Equal(t, process.ErrNilHasher, err) @@ -112,6 +116,7 @@ func TestNewPreProcessorsContainerFactory_NilDataPool(t *testing.T) { &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) assert.Equal(t, process.ErrNilDataPoolHolder, err) @@ -134,6 +139,7 @@ func TestNewPreProcessorsContainerFactory_NilAddrConv(t *testing.T) { &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) assert.Equal(t, process.ErrNilAddressConverter, err) @@ -156,6 +162,7 @@ func TestNewPreProcessorsContainerFactory_NilAccounts(t *testing.T) { &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) assert.Equal(t, process.ErrNilAccountsAdapter, err) @@ -178,6 +185,7 @@ func TestNewPreProcessorsContainerFactory_NilTxProcessor(t *testing.T) { &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) assert.Equal(t, process.ErrNilTxProcessor, err) @@ -200,6 +208,7 @@ func 
TestNewPreProcessorsContainerFactory_NilSCProcessor(t *testing.T) { nil, &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) assert.Equal(t, process.ErrNilSmartContractProcessor, err) @@ -222,6 +231,7 @@ func TestNewPreProcessorsContainerFactory_NilSCR(t *testing.T) { &mock.SCProcessorMock{}, nil, &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) assert.Equal(t, process.ErrNilSmartContractResultProcessor, err) @@ -244,6 +254,7 @@ func TestNewPreProcessorsContainerFactory_NilRewardTxProcessor(t *testing.T) { &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, nil, + &mock.IntermediateTransactionHandlerMock{}, ) assert.Equal(t, process.ErrNilRewardsTxProcessor, err) @@ -266,6 +277,7 @@ func TestNewPreProcessorsContainerFactory_NilRequestHandler(t *testing.T) { &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) assert.Equal(t, process.ErrNilRequestHandler, err) @@ -288,6 +300,7 @@ func TestNewPreProcessorsContainerFactory(t *testing.T) { &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) assert.Nil(t, err) @@ -314,6 +327,7 @@ func TestPreProcessorsContainerFactory_CreateErrTxPreproc(t *testing.T) { &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) assert.Nil(t, err) @@ -350,6 +364,7 @@ func TestPreProcessorsContainerFactory_CreateErrScrPreproc(t *testing.T) { &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) assert.Nil(t, err) @@ -395,6 +410,7 @@ func TestPreProcessorsContainerFactory_Create(t *testing.T) { &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) assert.Nil(t, err) diff --git a/process/interface.go b/process/interface.go index 3f9a9d8e498..c31497302a8 100644 --- a/process/interface.go +++ b/process/interface.go @@ -103,6 +103,12 @@ type IntermediateTransactionHandler interface { IsInterfaceNil() bool } +// InternalTransactionProducer creates system transactions (e.g. 
rewards) +type InternalTransactionProducer interface { + CreateAllInterMiniBlocks() map[uint32]*block.MiniBlock + IsInterfaceNil() bool +} + // TransactionVerifier interface validates if the transaction is good and if it should be processed type TransactionVerifier interface { IsTransactionValid(tx data.TransactionHandler) error @@ -137,7 +143,7 @@ type PreProcessor interface { RestoreTxBlockIntoPools(body block.Body, miniBlockPool storage.Cacher) (int, map[int][]byte, error) SaveTxBlockToStorage(body block.Body) error - ProcessBlockTransactions(body block.Body, round uint64, haveTime func() time.Duration) error + ProcessBlockTransactions(body block.Body, round uint64, haveTime func() bool) error RequestBlockTransactions(body block.Body) int CreateMarshalizedData(txHashes [][]byte) ([][]byte, error) @@ -145,6 +151,7 @@ type PreProcessor interface { RequestTransactionsForMiniBlock(mb block.MiniBlock) int ProcessMiniBlock(miniBlock *block.MiniBlock, haveTime func() bool, round uint64) error CreateAndProcessMiniBlock(sndShardId, dstShardId uint32, spaceRemained int, haveTime func() bool, round uint64) (*block.MiniBlock, error) + CreateAndProcessMiniBlocks(maxTxSpaceRemained uint32, maxMbSpaceRemained uint32, round uint64, haveTime func() bool) (block.MiniBlockSlice, error) GetAllCurrentUsedTxs() map[string]data.TransactionHandler IsInterfaceNil() bool diff --git a/process/mock/preprocessorMock.go b/process/mock/preprocessorMock.go index c4e069a0bf9..e5a9b7a19d7 100644 --- a/process/mock/preprocessorMock.go +++ b/process/mock/preprocessorMock.go @@ -14,11 +14,12 @@ type PreProcessorMock struct { RemoveTxBlockFromPoolsCalled func(body block.Body, miniBlockPool storage.Cacher) error RestoreTxBlockIntoPoolsCalled func(body block.Body, miniBlockPool storage.Cacher) (int, map[int][]byte, error) SaveTxBlockToStorageCalled func(body block.Body) error - ProcessBlockTransactionsCalled func(body block.Body, round uint64, haveTime func() time.Duration) error + ProcessBlockTransactionsCalled func(body block.Body, round uint64, haveTime func() bool) error RequestBlockTransactionsCalled func(body block.Body) int CreateMarshalizedDataCalled func(txHashes [][]byte) ([][]byte, error) RequestTransactionsForMiniBlockCalled func(mb block.MiniBlock) int ProcessMiniBlockCalled func(miniBlock *block.MiniBlock, haveTime func() bool, round uint64) error + CreateAndProcessMiniBlocksCalled func(maxTxSpaceRemained uint32, maxMbSpaceRemained uint32, round uint64, haveTime func() bool) (block.MiniBlockSlice, error) CreateAndProcessMiniBlockCalled func(sndShardId, dstShardId uint32, spaceRemained int, haveTime func() bool, round uint64) (*block.MiniBlock, error) GetAllCurrentUsedTxsCalled func() map[string]data.TransactionHandler } @@ -58,7 +59,7 @@ func (ppm *PreProcessorMock) SaveTxBlockToStorage(body block.Body) error { return ppm.SaveTxBlockToStorageCalled(body) } -func (ppm *PreProcessorMock) ProcessBlockTransactions(body block.Body, round uint64, haveTime func() time.Duration) error { +func (ppm *PreProcessorMock) ProcessBlockTransactions(body block.Body, round uint64, haveTime func() bool) error { if ppm.ProcessBlockTransactionsCalled == nil { return nil } @@ -93,6 +94,20 @@ func (ppm *PreProcessorMock) ProcessMiniBlock(miniBlock *block.MiniBlock, haveTi return ppm.ProcessMiniBlockCalled(miniBlock, haveTime, round) } +// CreateAndProcessMiniBlocks creates miniblocks from storage and processes the reward transactions added into the miniblocks +// as long as it has time +func (ppm *PreProcessorMock) 
CreateAndProcessMiniBlocks( + maxTxSpaceRemained uint32, + maxMbSpaceRemained uint32, + round uint64, + haveTime func() bool, +) (block.MiniBlockSlice, error) { + if ppm.CreateAndProcessMiniBlocksCalled == nil { + return nil, nil + } + return ppm.CreateAndProcessMiniBlocksCalled(maxTxSpaceRemained, maxMbSpaceRemained, round, haveTime) +} + func (ppm *PreProcessorMock) CreateAndProcessMiniBlock(sndShardId, dstShardId uint32, spaceRemained int, haveTime func() bool, round uint64) (*block.MiniBlock, error) { if ppm.CreateAndProcessMiniBlockCalled == nil { return nil, nil diff --git a/process/rewardTransaction/interceptedRewardTransaction.go b/process/rewardTransaction/interceptedRewardTransaction.go index 8a97f316786..eb4cc1157df 100644 --- a/process/rewardTransaction/interceptedRewardTransaction.go +++ b/process/rewardTransaction/interceptedRewardTransaction.go @@ -37,16 +37,16 @@ func NewInterceptedRewardTransaction( if rewardTxBuff == nil { return nil, process.ErrNilBuffer } - if marshalizer == nil { + if marshalizer == nil || marshalizer.IsInterfaceNil() { return nil, process.ErrNilMarshalizer } - if hasher == nil { + if hasher == nil || hasher.IsInterfaceNil() { return nil, process.ErrNilHasher } - if addrConv == nil { + if addrConv == nil || addrConv.IsInterfaceNil() { return nil, process.ErrNilAddressConverter } - if coordinator == nil { + if coordinator == nil || coordinator.IsInterfaceNil() { return nil, process.ErrNilShardCoordinator } diff --git a/process/rewardTransaction/interceptor.go b/process/rewardTransaction/interceptor.go index a0b855818fb..2707c4ca34d 100644 --- a/process/rewardTransaction/interceptor.go +++ b/process/rewardTransaction/interceptor.go @@ -35,22 +35,22 @@ func NewRewardTxInterceptor( shardCoordinator sharding.Coordinator, ) (*RewardTxInterceptor, error) { - if marshalizer == nil { + if marshalizer == nil || marshalizer.IsInterfaceNil() { return nil, process.ErrNilMarshalizer } - if rewardTxPool == nil { + if rewardTxPool == nil || rewardTxPool.IsInterfaceNil() { return nil, process.ErrNilRewardTxDataPool } - if rewardTxStorer == nil { + if rewardTxStorer == nil || rewardTxStorer.IsInterfaceNil() { return nil, process.ErrNilRewardsTxStorage } - if addrConverter == nil { + if addrConverter == nil || addrConverter.IsInterfaceNil() { return nil, process.ErrNilAddressConverter } - if hasher == nil { + if hasher == nil || hasher.IsInterfaceNil() { return nil, process.ErrNilHasher } - if shardCoordinator == nil { + if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { return nil, process.ErrNilShardCoordinator } @@ -69,7 +69,7 @@ func NewRewardTxInterceptor( // ProcessReceivedMessage will be the callback func from the p2p.Messenger and will be called each time a new message was received // (for the topic this validator was registered to) func (rti *RewardTxInterceptor) ProcessReceivedMessage(message p2p.MessageP2P) error { - if message == nil { + if message == nil || message.IsInterfaceNil() { return process.ErrNilMessage } From d3301bd962d31795d9f151e4cba27d3c81d8d059 Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Mon, 16 Sep 2019 06:58:15 +0300 Subject: [PATCH 099/234] process: refactor rewards management in transaction coordinator --- .../block/preprocess/rewardTxPreProcessor.go | 2 -- process/block/shardblock.go | 3 +-- process/coordinator/process.go | 20 +++++++++---------- 3 files changed, 10 insertions(+), 15 deletions(-) diff --git a/process/block/preprocess/rewardTxPreProcessor.go b/process/block/preprocess/rewardTxPreProcessor.go index 
5f3b0a0bd07..769be213c9b 100644 --- a/process/block/preprocess/rewardTxPreProcessor.go +++ b/process/block/preprocess/rewardTxPreProcessor.go @@ -224,7 +224,6 @@ func (rtp *rewardTxPreprocessor) ProcessBlockTransactions(body block.Body, round // AddComputedRewardMiniBlocks adds to the local cache the reward transactions from the given miniblocks func (rtp *rewardTxPreprocessor) AddComputedRewardMiniBlocks(computedRewardMiniblocks block.MiniBlockSlice) { - for _, rewardMb := range computedRewardMiniblocks { txShardInfo := &txShardInfo{senderShardID: rewardMb.SenderShardID, receiverShardID: rewardMb.ReceiverShardID} for _, txHash := range rewardMb.TxHashes { @@ -448,7 +447,6 @@ func (rtp *rewardTxPreprocessor) CreateAndProcessMiniBlocks( for _, mb := range computedRewardsMbsMap { rewardMiniBlocksSlice = append(rewardMiniBlocksSlice, mb) } - rtp.AddComputedRewardMiniBlocks(rewardMiniBlocksSlice) snapshot := rtp.accounts.JournalLen() diff --git a/process/block/shardblock.go b/process/block/shardblock.go index 0400a1fb57d..8371ceb33d1 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -256,8 +256,7 @@ func (sp *shardProcessor) ProcessBlock( } if !sp.verifyStateRoot(header.GetRootHash()) { - err = process.ErrRootStateMissmatch - return err + return process.ErrRootStateMissmatch } err = sp.txCoordinator.VerifyCreatedBlockTransactions(body) diff --git a/process/coordinator/process.go b/process/coordinator/process.go index 4a176b39d35..037a8826919 100644 --- a/process/coordinator/process.go +++ b/process/coordinator/process.go @@ -357,9 +357,6 @@ func (tc *transactionCoordinator) ProcessBlockTransaction( if separatedBodies[blockType] == nil { continue } - if blockType == block.RewardsBlock { - continue - } preproc := tc.getPreProcessor(blockType) if preproc == nil || preproc.IsInterfaceNil() { @@ -482,7 +479,7 @@ func (tc *transactionCoordinator) CreateMbsAndProcessTransactionsFromMe( } } - interMBs := tc.processAddedInterimTransactions(round, haveTime) + interMBs := tc.processAddedInterimTransactions() if len(interMBs) > 0 { miniBlocks = append(miniBlocks, interMBs...) 
} @@ -490,14 +487,16 @@ func (tc *transactionCoordinator) CreateMbsAndProcessTransactionsFromMe( return miniBlocks } -func (tc *transactionCoordinator) processAddedInterimTransactions( - round uint64, - haveTime func() bool, -) block.MiniBlockSlice { - +func (tc *transactionCoordinator) processAddedInterimTransactions() block.MiniBlockSlice { miniBlocks := make(block.MiniBlockSlice, 0) + // processing has to be done in order, as the order of different type of transactions over the same account is strict for _, blockType := range tc.keysInterimProcs { + if blockType == block.RewardsBlock { + // this has to be processed last + continue + } + interimProc := tc.getInterimProcessor(blockType) if interimProc == nil { // this will never be reached as keysInterimProcs are the actual keys from the interimMap @@ -505,7 +504,6 @@ func (tc *transactionCoordinator) processAddedInterimTransactions( } currMbs := interimProc.CreateAllInterMiniBlocks() - for _, value := range currMbs { miniBlocks = append(miniBlocks, value) } @@ -698,7 +696,7 @@ func (tc *transactionCoordinator) processCompleteMiniBlock( snapshot := tc.accounts.JournalLen() err := preproc.ProcessMiniBlock(miniBlock, haveTime, round) if err != nil { - log.Debug(err.Error()) + log.Error(err.Error()) errAccountState := tc.accounts.RevertToSnapshot(snapshot) if errAccountState != nil { // TODO: evaluate if reloading the trie from disk will might solve the problem From 412b0384237a762825e3693500aac92ac74ab820 Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Mon, 16 Sep 2019 08:48:01 +0300 Subject: [PATCH 100/234] process: fix - revert state on root state missmatch --- process/block/shardblock.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/process/block/shardblock.go b/process/block/shardblock.go index 8371ceb33d1..0400a1fb57d 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -256,7 +256,8 @@ func (sp *shardProcessor) ProcessBlock( } if !sp.verifyStateRoot(header.GetRootHash()) { - return process.ErrRootStateMissmatch + err = process.ErrRootStateMissmatch + return err } err = sp.txCoordinator.VerifyCreatedBlockTransactions(body) From 51bcb85c61e6aecc17015e4879ddc10a9002e302 Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Mon, 16 Sep 2019 12:23:46 +0300 Subject: [PATCH 101/234] process: missing comments on exported methods --- data/address/specialAddresses.go | 2 ++ process/block/preprocess/rewardsHandler.go | 2 +- process/factory/shard/intermediateProcessorsContainerFactory.go | 2 +- 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/data/address/specialAddresses.go b/data/address/specialAddresses.go index 06ba4848e9f..dffc427681b 100644 --- a/data/address/specialAddresses.go +++ b/data/address/specialAddresses.go @@ -83,10 +83,12 @@ func (sp *specialAddresses) ConsensusRewardAddresses() []string { return sp.consensusRewardAddresses } +// Round returns the round for the current block func (sp *specialAddresses) Round() uint64 { return sp.round } +// Epoch returns the epoch for the current block func (sp *specialAddresses) Epoch() uint32 { return sp.epoch } diff --git a/process/block/preprocess/rewardsHandler.go b/process/block/preprocess/rewardsHandler.go index c9d009ec763..722d794d519 100644 --- a/process/block/preprocess/rewardsHandler.go +++ b/process/block/preprocess/rewardsHandler.go @@ -322,7 +322,7 @@ func (rtxh *rewardsHandler) createCommunityTx() *rewardTx.RewardTx { } // createRewardFromFees creates the reward transactions from accumulated fees -// According to economic paper, 
out of the block fees 50% are burned, 40% go to the +// According to economic paper, out of the block fees 40% are burned, 50% go to the // leader and 10% go to Elrond community fund. func (rtxh *rewardsHandler) createRewardFromFees() []data.TransactionHandler { rtxh.mut.Lock() diff --git a/process/factory/shard/intermediateProcessorsContainerFactory.go b/process/factory/shard/intermediateProcessorsContainerFactory.go index 5daadb6c230..fc70456c858 100644 --- a/process/factory/shard/intermediateProcessorsContainerFactory.go +++ b/process/factory/shard/intermediateProcessorsContainerFactory.go @@ -51,7 +51,7 @@ func NewIntermediateProcessorsContainerFactory( if store == nil || store.IsInterfaceNil() { return nil, process.ErrNilStorage } - if poolsHolder == nil { + if poolsHolder == nil || poolsHolder.IsInterfaceNil() { return nil, process.ErrNilPoolsHolder } From a78e8d61bad526a53ed7a32c688c8cc94d1d9329 Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Mon, 16 Sep 2019 16:24:09 +0300 Subject: [PATCH 102/234] process: add unit test for indexed reward transactions --- core/indexer/elasticsearch.go | 2 +- process/block/preprocess/rewardsHandler.go | 26 ------ .../block/preprocess/rewardsHandler_test.go | 85 +++++++++++++++++-- 3 files changed, 77 insertions(+), 36 deletions(-) diff --git a/core/indexer/elasticsearch.go b/core/indexer/elasticsearch.go index e11974d95ac..168280060a4 100644 --- a/core/indexer/elasticsearch.go +++ b/core/indexer/elasticsearch.go @@ -5,7 +5,6 @@ import ( "context" "encoding/hex" "fmt" - "github.com/ElrondNetwork/elrond-go/data/rewardTx" "io" "math/big" "net/http" @@ -17,6 +16,7 @@ import ( "github.com/ElrondNetwork/elrond-go/core/statistics" "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" "github.com/ElrondNetwork/elrond-go/data/smartContractResult" "github.com/ElrondNetwork/elrond-go/data/transaction" "github.com/ElrondNetwork/elrond-go/hashing" diff --git a/process/block/preprocess/rewardsHandler.go b/process/block/preprocess/rewardsHandler.go index 6aef64e1c1c..722d794d519 100644 --- a/process/block/preprocess/rewardsHandler.go +++ b/process/block/preprocess/rewardsHandler.go @@ -407,32 +407,6 @@ func (rtxh *rewardsHandler) verifyCreatedRewardsTxs() error { return nil } -// GetAllCurrentFinishedTxs returns the cached finalized transactions for current round -func (rtxh *rewardsHandler) GetAllCurrentFinishedTxs() map[string]data.TransactionHandler { - rtxh.mut.Lock() - - rewardTxPool := make(map[string]data.TransactionHandler) - for txHash, txInfo := range rtxh.rewardTxsForBlock { - - senderShard := txInfo.ShardId - receiverShard, err := rtxh.address.ShardIdForAddress(txInfo.RcvAddr) - if err != nil { - continue - } - if receiverShard != rtxh.shardCoordinator.SelfId() { - continue - } - if senderShard != rtxh.shardCoordinator.SelfId() { - continue - } - rewardTxPool[txHash] = txInfo - } - rtxh.mut.Unlock() - - return rewardTxPool -} - - // GetAllCurrentFinishedTxs returns the cached finalized transactions for current round func (rtxh *rewardsHandler) GetAllCurrentFinishedTxs() map[string]data.TransactionHandler { rtxh.mut.Lock() diff --git a/process/block/preprocess/rewardsHandler_test.go b/process/block/preprocess/rewardsHandler_test.go index fbcc6fa0fac..821e3f31152 100644 --- a/process/block/preprocess/rewardsHandler_test.go +++ b/process/block/preprocess/rewardsHandler_test.go @@ -2,6 +2,7 @@ package preprocess import ( "math/big" + "reflect" "testing" 
"github.com/ElrondNetwork/elrond-go/data" @@ -154,7 +155,7 @@ func TestNewRewardTxHandler_ValsOk(t *testing.T) { assert.NotNil(t, th) } -func TestRewardTxHandlerAddIntermediateTransactions(t *testing.T) { +func TestRewardsHandler_AddIntermediateTransactions(t *testing.T) { t.Parallel() tdp := initDataPool() @@ -175,7 +176,7 @@ func TestRewardTxHandlerAddIntermediateTransactions(t *testing.T) { assert.Nil(t, err) } -func TestRewardTxHandlerProcessTransactionFee(t *testing.T) { +func TestRewardsHandler_ProcessTransactionFee(t *testing.T) { t.Parallel() tdp := initDataPool() @@ -202,7 +203,7 @@ func TestRewardTxHandlerProcessTransactionFee(t *testing.T) { assert.Equal(t, big.NewInt(110), th.accumulatedFees) } -func TestRewardTxHandlerCleanProcessedUTxs(t *testing.T) { +func TestRewardsHandler_cleanCachedData(t *testing.T) { t.Parallel() tdp := initDataPool() @@ -229,7 +230,7 @@ func TestRewardTxHandlerCleanProcessedUTxs(t *testing.T) { assert.Equal(t, 0, len(th.rewardTxsForBlock)) } -func TestRewardTxHandlerCreateAllUTxs(t *testing.T) { +func TestRewardsHandler_CreateRewardsFromFees(t *testing.T) { t.Parallel() tdp := initDataPool() @@ -262,7 +263,7 @@ func TestRewardTxHandlerCreateAllUTxs(t *testing.T) { assert.Equal(t, currTxFee.Uint64(), totalSum) } -func TestRewardTxHandler_VerifyCreatedRewardsTxsRewardTxNotFound(t *testing.T) { +func TestRewardsHandler_VerifyCreatedRewardsTxsRewardTxNotFound(t *testing.T) { t.Parallel() tdp := initDataPool() @@ -295,7 +296,7 @@ func TestRewardTxHandler_VerifyCreatedRewardsTxsRewardTxNotFound(t *testing.T) { assert.Equal(t, process.ErrRewardTxNotFound, err) } -func TestRewardTxHandler_VerifyCreatedRewardsTxsTotalTxsFeesDoNotMatch(t *testing.T) { +func TestRewardsHandler_VerifyCreatedRewardsTxsTotalTxsFeesDoNotMatch(t *testing.T) { t.Parallel() tdp := initDataPool() @@ -330,7 +331,7 @@ func TestRewardTxHandler_VerifyCreatedRewardsTxsTotalTxsFeesDoNotMatch(t *testin assert.Equal(t, process.ErrRewardTxsMismatchCreatedReceived, err) } -func TestRewardTxHandlerVerifyCreatedRewardsTxsOK(t *testing.T) { +func TestRewardsHandler_VerifyCreatedRewardsTxsOK(t *testing.T) { t.Parallel() tdp := initDataPool() @@ -360,10 +361,10 @@ func TestRewardTxHandlerVerifyCreatedRewardsTxsOK(t *testing.T) { assert.Nil(t, err) } -func TestRewardTxHandlerCreateAllInterMiniBlocksOK(t *testing.T) { +func TestRewardsHandler_CreateAllInterMiniBlocksOK(t *testing.T) { t.Parallel() - shardCoordinator := mock.NewMultiShardsCoordinatorMock(3) + shardCoordinator := mock.NewMultiShardsCoordinatorMock(1) tdp := initDataPool() th, err := NewRewardTxHandler( &mock.SpecialAddressHandlerMock{ @@ -389,3 +390,69 @@ func TestRewardTxHandlerCreateAllInterMiniBlocksOK(t *testing.T) { mbs = th.CreateAllInterMiniBlocks() assert.Equal(t, 1, len(mbs)) } + +func TestRewardsHandler_GetAllCurrentFinishedTxs(t *testing.T) { + t.Parallel() + + shardCoordinator := mock.NewMultiShardsCoordinatorMock(1) + tdp := initDataPool() + specialAddress := &mock.SpecialAddressHandlerMock{ + AdrConv: &mock.AddressConverterMock{}, + ShardCoordinator: shardCoordinator, + } + + consensusAddresses := []string{ + "1000000000000000000000000000000000000000000000000000000000000000", + "2000000000000000000000000000000000000000000000000000000000000000", + } + + specialAddress.SetConsensusData(consensusAddresses, 0, 0) + + th, err := NewRewardTxHandler( + specialAddress, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + shardCoordinator, + &mock.AddressConverterMock{}, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), + ) + + 
assert.Nil(t, err) + assert.NotNil(t, th) + + txs := []data.TransactionHandler{ + &rewardTx.RewardTx{ + Round: 0, + Epoch: 0, + Value: big.NewInt(1), + RcvAddr: []byte(consensusAddresses[0]), + ShardId: 0, + }, + &rewardTx.RewardTx{ + Round: 0, + Epoch: 0, + Value: big.NewInt(1), + RcvAddr: []byte(consensusAddresses[1]), + ShardId: 0, + }, + } + + err = th.AddIntermediateTransactions(txs) + assert.Nil(t, err) + + finishedTxs := th.GetAllCurrentFinishedTxs() + assert.Equal(t, 2, len(txs)) + + for _, ftx := range finishedTxs { + found := false + for _, tx := range txs { + if reflect.DeepEqual(tx, ftx) { + found = true + break + } + } + + assert.True(t, found) + } +} From a7ced5c9db82eedd15290c91b64b9c89d84e2b83 Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Mon, 16 Sep 2019 19:37:58 +0300 Subject: [PATCH 103/234] integrationTests: protocol rewards and transaction fee rewards integration test --- .../mock/specialAddressHandlerMock.go | 3 + .../block/executingMiniblocks_test.go | 22 +- .../block/executingRewardMiniblocks_test.go | 223 ++++++++++++++++++ integrationTests/testInitializer.go | 88 ++++++- .../testProcessorNodeWithMultisigner.go | 101 +++++++- 5 files changed, 413 insertions(+), 24 deletions(-) create mode 100644 integrationTests/multiShard/block/executingRewardMiniblocks_test.go diff --git a/integrationTests/mock/specialAddressHandlerMock.go b/integrationTests/mock/specialAddressHandlerMock.go index a9af9a371df..6211e8d1d37 100644 --- a/integrationTests/mock/specialAddressHandlerMock.go +++ b/integrationTests/mock/specialAddressHandlerMock.go @@ -47,6 +47,9 @@ func (sh *SpecialAddressHandlerMock) ElrondCommunityAddress() []byte { func (sh *SpecialAddressHandlerMock) LeaderAddress() []byte { if sh.LeaderAddressCalled == nil { + if len(sh.addresses) > 0 { + return []byte(sh.addresses[0]) + } return []byte("leader0000000000000000000000000000") } diff --git a/integrationTests/multiShard/block/executingMiniblocks_test.go b/integrationTests/multiShard/block/executingMiniblocks_test.go index 030e7622e48..e8bb7e873b2 100644 --- a/integrationTests/multiShard/block/executingMiniblocks_test.go +++ b/integrationTests/multiShard/block/executingMiniblocks_test.go @@ -59,17 +59,17 @@ func TestShouldProcessBlocksInMultiShardArchitecture(t *testing.T) { //sender shard keys, receivers keys sendersPrivateKeys := make([]crypto.PrivateKey, 3) - receiversPrivateKeys := make(map[uint32][]crypto.PrivateKey) + receiversPublicKeys := make(map[uint32][]crypto.PublicKey) for i := 0; i < txToGenerateInEachMiniBlock; i++ { sendersPrivateKeys[i], _, _ = integrationTests.GenerateSkAndPkInShard(generateCoordinator, senderShard) //receivers in same shard with the sender - sk, _, _ := integrationTests.GenerateSkAndPkInShard(generateCoordinator, senderShard) - receiversPrivateKeys[senderShard] = append(receiversPrivateKeys[senderShard], sk) + _, pk, _ := integrationTests.GenerateSkAndPkInShard(generateCoordinator, senderShard) + receiversPublicKeys[senderShard] = append(receiversPublicKeys[senderShard], pk) //receivers in other shards for _, shardId := range recvShards { - sk, _, _ = integrationTests.GenerateSkAndPkInShard(generateCoordinator, shardId) - receiversPrivateKeys[shardId] = append(receiversPrivateKeys[shardId], sk) + _, pk, _ = integrationTests.GenerateSkAndPkInShard(generateCoordinator, shardId) + receiversPublicKeys[shardId] = append(receiversPublicKeys[shardId], pk) } } @@ -77,7 +77,7 @@ func TestShouldProcessBlocksInMultiShardArchitecture(t *testing.T) { integrationTests.GenerateAndDisseminateTxs( 
proposerNode, sendersPrivateKeys, - receiversPrivateKeys, + receiversPublicKeys, valToTransferPerTx, gasPricePerTx, gasLimitPerTx, @@ -108,13 +108,13 @@ func TestShouldProcessBlocksInMultiShardArchitecture(t *testing.T) { //test sender balances for _, sk := range sendersPrivateKeys { - valTransferred := big.NewInt(0).Mul(totalValuePerTx, big.NewInt(int64(len(receiversPrivateKeys)))) + valTransferred := big.NewInt(0).Mul(totalValuePerTx, big.NewInt(int64(len(receiversPublicKeys)))) valRemaining := big.NewInt(0).Sub(valMinting, valTransferred) integrationTests.TestPrivateKeyHasBalance(t, n, sk, valRemaining) } //test receiver balances from same shard - for _, sk := range receiversPrivateKeys[proposerNode.ShardCoordinator.SelfId()] { - integrationTests.TestPrivateKeyHasBalance(t, n, sk, valToTransferPerTx) + for _, pk := range receiversPublicKeys[proposerNode.ShardCoordinator.SelfId()] { + integrationTests.TestPublicKeyHasBalance(t, n, pk, valToTransferPerTx) } } @@ -132,8 +132,8 @@ func TestShouldProcessBlocksInMultiShardArchitecture(t *testing.T) { } //test receiver balances from same shard - for _, sk := range receiversPrivateKeys[n.ShardCoordinator.SelfId()] { - integrationTests.TestPrivateKeyHasBalance(t, n, sk, valToTransferPerTx) + for _, pk := range receiversPublicKeys[n.ShardCoordinator.SelfId()] { + integrationTests.TestPublicKeyHasBalance(t, n, pk, valToTransferPerTx) } } } diff --git a/integrationTests/multiShard/block/executingRewardMiniblocks_test.go b/integrationTests/multiShard/block/executingRewardMiniblocks_test.go new file mode 100644 index 00000000000..2c6d837796e --- /dev/null +++ b/integrationTests/multiShard/block/executingRewardMiniblocks_test.go @@ -0,0 +1,223 @@ +package block + +import ( + "context" + "errors" + "fmt" + "math/big" + "reflect" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/integrationTests" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/stretchr/testify/assert" +) + +func TestExecuteBlocksWithOnlyRewards(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + nodesPerShard := 4 + nbMetaNodes := 2 + nbShards := 2 + consensusGroupSize := 2 + + advertiser := integrationTests.CreateMessengerWithKadDht(context.Background(), "") + _ = advertiser.Bootstrap() + + seedAddress := integrationTests.GetConnectableAddress(advertiser) + + // create map of shard - testNodeProcessors for metachain and shard chain + nodesMap := integrationTests.CreateNodesWithNodesCoordinator( + nodesPerShard, + nbMetaNodes, + nbShards, + consensusGroupSize, + consensusGroupSize, + seedAddress, + ) + + for _, nodes := range nodesMap { + integrationTests.DisplayAndStartNodes(nodes) + } + + defer func() { + _ = advertiser.Close() + for _, nodes := range nodesMap { + for _, n := range nodes { + _ = n.Node.Stop() + } + } + }() + + randomness := generateInitialRandomness(uint32(nbShards)) + round := uint64(1) + nonce := uint64(1) + nbBlocksProduced := 7 + nbTxsPerShard := 100 + mintValue := big.NewInt(1000000) + + var headers map[uint32]data.HeaderHandler + var consensusNodes map[uint32][]*integrationTests.TestProcessorNode + mapRewardsForAddress := make(map[string]uint32) + nbTxsForLeaderAddress := make(map[string]uint32) + + gasPrice := uint64(10) + gasLimit := uint64(100) + feePerTxForLeader := 
gasPrice * gasLimit / 2 + valToTransfer := big.NewInt(100) + + sendersPrivateKeys := integrationTests.CreateAndSendIntraShardTransactions( + nodesMap, + nbTxsPerShard, + gasPrice, + gasLimit, + valToTransfer, + ) + + for shardId, nodes := range nodesMap { + if shardId == sharding.MetachainShardId { + continue + } + + fmt.Println("Minting sender addresses...") + integrationTests.CreateMintingForSenders( + nodes, + shardId, + sendersPrivateKeys[shardId], + mintValue, + ) + } + + for i := 0; i < nbBlocksProduced; i++ { + _, headers, consensusNodes, randomness = integrationTests.AllShardsProposeBlock(round, nonce, randomness, nodesMap) + + for shardId, consensusGroup := range consensusNodes { + addrRewards := consensusGroup[0].SpecialAddressHandler.ConsensusRewardAddresses() + updateExpectedRewards(mapRewardsForAddress, addrRewards) + nbTxs := transactionsFromHeaderInShard(t, headers, shardId) + + // without metachain nodes for now + if len(addrRewards) > 0 { + updateNbTransactionsProposed(t, nbTxsForLeaderAddress, addrRewards[0], nbTxs) + } + } + + indexesProposers := getBlockProposersIndexes(consensusNodes, nodesMap) + integrationTests.VerifyNodesHaveHeaders(t, headers, nodesMap) + integrationTests.SyncAllShardsWithRoundBlock(t, nodesMap, indexesProposers, round) + round++ + nonce++ + } + + time.Sleep(time.Second) + + for address, nbRewards := range mapRewardsForAddress { + addrContainer, _ := integrationTests.TestAddressConverter.CreateAddressFromPublicKeyBytes([]byte(address)) + shard := nodesMap[0][0].ShardCoordinator.ComputeId(addrContainer) + + for _, shardNode := range nodesMap[shard] { + acc, err := shardNode.AccntState.GetExistingAccount(addrContainer) + assert.Nil(t, err) + + nbProposedTxs := nbTxsForLeaderAddress[address] + expectedBalance := int64(nbRewards)*1000 + int64(nbProposedTxs)*int64(feePerTxForLeader) + fmt.Println(fmt.Sprintf("checking account %s has balance %d", core.ToB64(acc.AddressContainer().Bytes()), expectedBalance)) + assert.Equal(t, big.NewInt(expectedBalance), acc.(*state.Account).Balance) + } + } +} + +func getBlockProposersIndexes( + consensusMap map[uint32][]*integrationTests.TestProcessorNode, + nodesMap map[uint32][]*integrationTests.TestProcessorNode, +) map[uint32]int { + + indexProposer := make(map[uint32]int) + + for sh, testNodeList := range nodesMap { + for k, testNode := range testNodeList { + if reflect.DeepEqual(consensusMap[sh][0], testNode) { + indexProposer[sh] = k + } + } + } + + return indexProposer +} + +func generateInitialRandomness(nbShards uint32) map[uint32][]byte { + randomness := make(map[uint32][]byte) + + for i := uint32(0); i < nbShards; i++ { + randomness[i] = []byte("root hash") + } + + randomness[sharding.MetachainShardId] = []byte("root hash") + + return randomness +} + +func transactionsFromHeaderInShard(t *testing.T, headers map[uint32]data.HeaderHandler, shardId uint32) uint32 { + if shardId == sharding.MetachainShardId { + return 0 + } + + header, ok := headers[shardId] + if !ok { + return 0 + } + + hdr, ok := header.(*block.Header) + if !ok { + assert.Error(t, process.ErrWrongTypeAssertion) + } + + nbTxs := uint32(0) + for _, mb := range hdr.MiniBlockHeaders { + if mb.SenderShardID == shardId && mb.Type == block.TxBlock { + nbTxs += mb.TxCount + } + } + + return nbTxs +} + +func updateExpectedRewards(rewardsForAddress map[string]uint32, addresses []string) { + for i := 0; i < len(addresses); i++ { + if addresses[i] == "" { + continue + } + currentRewards, ok := rewardsForAddress[addresses[i]] + if !ok { + currentRewards = 
0 + } + + rewardsForAddress[addresses[i]] = currentRewards + 1 + } +} + +func updateNbTransactionsProposed( + t *testing.T, + transactionsForLeader map[string]uint32, + addressProposer string, + nbTransactions uint32, +) { + if addressProposer == "" { + assert.Error(t, errors.New("invalid address")) + } + + proposedTransactions, ok := transactionsForLeader[addressProposer] + if !ok { + proposedTransactions = 0 + } + + transactionsForLeader[addressProposer] = proposedTransactions + nbTransactions +} diff --git a/integrationTests/testInitializer.go b/integrationTests/testInitializer.go index a7370f5cbd7..f32c90a2b38 100644 --- a/integrationTests/testInitializer.go +++ b/integrationTests/testInitializer.go @@ -707,7 +707,7 @@ func DisplayAndStartNodes(nodes []*TestProcessorNode) { func GenerateAndDisseminateTxs( n *TestProcessorNode, senders []crypto.PrivateKey, - receiversPrivateKeys map[uint32][]crypto.PrivateKey, + receiversPublicKeysMap map[uint32][]crypto.PublicKey, valToTransfer *big.Int, gasPrice uint64, gasLimit uint64, @@ -716,9 +716,9 @@ func GenerateAndDisseminateTxs( for i := 0; i < len(senders); i++ { senderKey := senders[i] incrementalNonce := make([]uint64, len(senders)) - for _, recvPrivateKeys := range receiversPrivateKeys { - receiverKey := recvPrivateKeys[i] - tx := generateTransferTx(incrementalNonce[i], senderKey, receiverKey, valToTransfer, gasPrice, gasLimit) + for _, shardReceiversPublicKeys := range receiversPublicKeysMap { + receiverPubKey := shardReceiversPublicKeys[i] + tx := generateTransferTx(incrementalNonce[i], senderKey, receiverPubKey, valToTransfer, gasPrice, gasLimit) _, _ = n.SendTransaction(tx) incrementalNonce[i]++ } @@ -737,25 +737,26 @@ type txArgs struct { func generateTransferTx( nonce uint64, - sender crypto.PrivateKey, - receiver crypto.PrivateKey, + senderPrivateKey crypto.PrivateKey, + receiverPublicKey crypto.PublicKey, valToTransfer *big.Int, gasPrice uint64, gasLimit uint64, ) *transaction.Transaction { + receiverPubKeyBytes, _ := receiverPublicKey.ToByteArray() tx := transaction.Transaction{ Nonce: nonce, Value: valToTransfer, - RcvAddr: skToPk(receiver), - SndAddr: skToPk(sender), + RcvAddr: receiverPubKeyBytes, + SndAddr: skToPk(senderPrivateKey), Data: "", GasLimit: gasLimit, GasPrice: gasPrice, } txBuff, _ := TestMarshalizer.Marshal(&tx) signer := &singlesig.SchnorrSigner{} - tx.Signature, _ = signer.Sign(sender, txBuff) + tx.Signature, _ = signer.Sign(senderPrivateKey, txBuff) return &tx } @@ -785,6 +786,14 @@ func skToPk(sk crypto.PrivateKey) []byte { return pkBuff } +// TestPrivateKeyHasBalance checks if the account corresponding to the given public key has the expected balance +func TestPublicKeyHasBalance(t *testing.T, n *TestProcessorNode, pk crypto.PublicKey, expectedBalance *big.Int) { + pkBuff, _ := pk.ToByteArray() + addr, _ := TestAddressConverter.CreateAddressFromPublicKeyBytes(pkBuff) + account, _ := n.AccntState.GetExistingAccount(addr) + assert.Equal(t, expectedBalance, account.(*state.Account).Balance) +} + // TestPrivateKeyHasBalance checks if the private key has the expected balance func TestPrivateKeyHasBalance(t *testing.T, n *TestProcessorNode, sk crypto.PrivateKey, expectedBalance *big.Int) { pkBuff, _ := sk.GeneratePublic().ToByteArray() @@ -819,6 +828,11 @@ func GenerateSkAndPkInShard( keyGen := signing.NewKeyGenerator(suite) sk, pk := keyGen.GeneratePair() + if shardId == sharding.MetachainShardId { + // for metachain generate in shard 0 + shardId = 0 + } + for { pkBytes, _ := pk.ToByteArray() addr, _ := 
TestAddressConverter.CreateAddressFromPublicKeyBytes(pkBytes) @@ -831,6 +845,51 @@ func GenerateSkAndPkInShard( return sk, pk, keyGen } +// CreateAndSendIntraShardTransactions Creates and sends intra shard transactions, returning the senders private keys +func CreateAndSendIntraShardTransactions( + nodes map[uint32][]*TestProcessorNode, + nbTxsPerShard int, + gasPricePerTx uint64, + gasLimitPerTx uint64, + valueToTransfer *big.Int, +) map[uint32][]crypto.PrivateKey { + sendersPrivKeysMap := make(map[uint32][]crypto.PrivateKey) + receiversPubKeysMap := make(map[uint32][]crypto.PublicKey) + + for shardId, _ := range nodes { + if shardId == sharding.MetachainShardId { + continue + } + + nodeInShard := nodes[0][0] + shardId := nodeInShard.ShardCoordinator.SelfId() + receiversPublicKeys := make([]crypto.PublicKey, nbTxsPerShard) + sendersPrivateKeys := make([]crypto.PrivateKey, nbTxsPerShard) + + for i := 0; i < nbTxsPerShard; i++ { + sendersPrivateKeys[i], _, _ = GenerateSkAndPkInShard(nodeInShard.ShardCoordinator, shardId) + _, receiversPublicKeys[i], _ = GenerateSkAndPkInShard(nodeInShard.ShardCoordinator, shardId) + } + sendersPrivKeysMap[shardId] = sendersPrivateKeys + receiversPubKeysMap[shardId] = receiversPublicKeys + + fmt.Println("Generating transactions...") + GenerateAndDisseminateTxs( + nodeInShard, + sendersPrivateKeys, + receiversPubKeysMap, + valueToTransfer, + gasPricePerTx, + gasLimitPerTx, + ) + } + + fmt.Println("Delaying for disseminating transactions...") + time.Sleep(time.Second * 5) + + return sendersPrivKeysMap +} + // CreateMintingForSenders creates account with balances for every node in a given shard func CreateMintingForSenders( nodes []*TestProcessorNode, @@ -966,7 +1025,7 @@ func requestMissingRewardTxs(n *TestProcessorNode, shardResolver uint32, neededD } // CreateRequesterDataPool creates a datapool with a mock txPool -func CreateRequesterDataPool(t *testing.T, recvTxs map[int]map[string]struct{}, mutRecvTxs *sync.Mutex, nodeIndex int, ) dataRetriever.PoolsHolder { +func CreateRequesterDataPool(t *testing.T, recvTxs map[int]map[string]struct{}, mutRecvTxs *sync.Mutex, nodeIndex int) dataRetriever.PoolsHolder { //not allowed to request data from the same shard return CreateTestShardDataPool(&mock.ShardedDataStub{ @@ -1174,13 +1233,18 @@ func PubKeysMapFromKeysMap(keyPairMap map[uint32][]*TestKeyPair) map[uint32][]st } // GenValidatorsFromPubKeys generates a map of validators per shard out of public keys map -func GenValidatorsFromPubKeys(pubKeysMap map[uint32][]string) map[uint32][]sharding.Validator { +func GenValidatorsFromPubKeys(pubKeysMap map[uint32][]string, nbShards uint32) map[uint32][]sharding.Validator { validatorsMap := make(map[uint32][]sharding.Validator) for shardId, shardNodesPks := range pubKeysMap { shardValidators := make([]sharding.Validator, 0) + shardCoordinator, _ := sharding.NewMultiShardCoordinator(nbShards, shardId) for i := 0; i < len(shardNodesPks); i++ { - address := []byte(shardNodesPks[i][:32]) + _, pk, _ := GenerateSkAndPkInShard(shardCoordinator, shardId) + address, err := pk.ToByteArray() + if err != nil { + return nil + } v, _ := sharding.NewValidator(big.NewInt(0), 1, []byte(shardNodesPks[i]), address) shardValidators = append(shardValidators, v) } diff --git a/integrationTests/testProcessorNodeWithMultisigner.go b/integrationTests/testProcessorNodeWithMultisigner.go index d99b7f7bd07..1c32d70dd4a 100644 --- a/integrationTests/testProcessorNodeWithMultisigner.go +++ b/integrationTests/testProcessorNodeWithMultisigner.go @@ -4,6 
+4,8 @@ import ( "bytes" "context" "fmt" + "testing" + "time" "github.com/ElrondNetwork/elrond-go/cmd/node/factory" "github.com/ElrondNetwork/elrond-go/crypto" @@ -12,6 +14,7 @@ import ( "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/hashing/blake2b" "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/stretchr/testify/assert" ) // NewTestProcessorNodeWithCustomNodesCoordinator returns a new TestProcessorNode instance with custom NodesCoordinator @@ -73,7 +76,7 @@ func CreateNodesWithNodesCoordinator( ) map[uint32][]*TestProcessorNode { cp := CreateCryptoParams(nodesPerShard, nbMetaNodes, uint32(nbShards)) pubKeys := PubKeysMapFromKeysMap(cp.Keys) - validatorsMap := GenValidatorsFromPubKeys(pubKeys) + validatorsMap := GenValidatorsFromPubKeys(pubKeys, uint32(nbShards)) nodesMap := make(map[uint32][]*TestProcessorNode) for shardId, validatorList := range validatorsMap { nodesCoordinator, err := sharding.NewIndexHashedNodesCoordinator( @@ -194,3 +197,99 @@ func DoConsensusSigningOnBlock( return blockHeader } + +// AllShardsProposeBlock simulates each shard selecting a consensus group and proposing/broadcasting/committing a block +func AllShardsProposeBlock( + round uint64, + nonce uint64, + prevRandomness map[uint32][]byte, + nodesMap map[uint32][]*TestProcessorNode, +) ( + map[uint32]data.BodyHandler, + map[uint32]data.HeaderHandler, + map[uint32][]*TestProcessorNode, + map[uint32][]byte, +) { + + body := make(map[uint32]data.BodyHandler) + header := make(map[uint32]data.HeaderHandler) + consensusNodes := make(map[uint32][]*TestProcessorNode) + newRandomness := make(map[uint32][]byte) + + // propose blocks + for i, _ := range nodesMap { + body[i], header[i], _, consensusNodes[i] = ProposeBlockWithConsensusSignature(i, nodesMap, round, nonce, prevRandomness[i]) + newRandomness[i] = header[i].GetRandSeed() + } + + // propagate blocks + for i, _ := range nodesMap { + consensusNodes[i][0].BroadcastBlock(body[i], header[i]) + consensusNodes[i][0].CommitBlock(body[i], header[i]) + } + + time.Sleep(2 * time.Second) + + return body, header, consensusNodes, newRandomness +} + +// SyncAllShardsWithRoundBlock enforces all nodes in each shard synchronizing the block for the given round +func SyncAllShardsWithRoundBlock( + t *testing.T, + nodesMap map[uint32][]*TestProcessorNode, + indexProposers map[uint32]int, + round uint64, +) { + for shard, nodeList := range nodesMap { + SyncBlock(t, nodeList, []int{indexProposers[shard]}, round) + } + time.Sleep(2 * time.Second) +} + +// VerifyNodesHaveHeaders verifies that each node has the corresponding header +func VerifyNodesHaveHeaders( + t *testing.T, + headers map[uint32]data.HeaderHandler, + nodesMap map[uint32][]*TestProcessorNode, +) { + var v interface{} + var ok bool + + // all nodes in metachain have the block headers in pool as interceptor validates them + for shHeader, header := range headers { + headerBytes, _ := TestMarshalizer.Marshal(header) + headerHash := TestHasher.Compute(string(headerBytes)) + + for _, metaNode := range nodesMap[sharding.MetachainShardId] { + if shHeader == sharding.MetachainShardId { + v, ok = metaNode.MetaDataPool.MetaChainBlocks().Get(headerHash) + } else { + v, ok = metaNode.MetaDataPool.ShardHeaders().Get(headerHash) + } + + assert.True(t, ok) + assert.Equal(t, header, v) + } + + // all nodes in shards need to have their own shard headers and metachain headers + for sh, nodesList := range nodesMap { + if sh == sharding.MetachainShardId { + continue + } + + if sh != shHeader && 
shHeader != sharding.MetachainShardId { + continue + } + + for _, node := range nodesList { + if shHeader == sharding.MetachainShardId { + v, ok = node.ShardDataPool.MetaBlocks().Get(headerHash) + } else { + v, ok = node.ShardDataPool.Headers().Get(headerHash) + } + assert.True(t, ok) + assert.Equal(t, header, v) + } + } + } +} From 086dd373ecd1bfb0d213b0bcc5279bd66b972017 Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Tue, 17 Sep 2019 15:46:38 +0300 Subject: [PATCH 104/234] integrationTest: remove warnings --- .../multiShard/block/executingRewardMiniblocks_test.go | 3 ++- integrationTests/testInitializer.go | 4 ++-- integrationTests/testProcessorNodeWithMultisigner.go | 4 ++-- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/integrationTests/multiShard/block/executingRewardMiniblocks_test.go b/integrationTests/multiShard/block/executingRewardMiniblocks_test.go index 2c6d837796e..826eb92a3b8 100644 --- a/integrationTests/multiShard/block/executingRewardMiniblocks_test.go +++ b/integrationTests/multiShard/block/executingRewardMiniblocks_test.go @@ -69,6 +69,7 @@ func TestExecuteBlocksWithOnlyRewards(t *testing.T) { mapRewardsForAddress := make(map[string]uint32) nbTxsForLeaderAddress := make(map[string]uint32) + rewardValue := 1000 gasPrice := uint64(10) gasLimit := uint64(100) feePerTxForLeader := gasPrice * gasLimit / 2 @@ -128,7 +129,7 @@ func TestExecuteBlocksWithOnlyRewards(t *testing.T) { assert.Nil(t, err) nbProposedTxs := nbTxsForLeaderAddress[address] - expectedBalance := int64(nbRewards)*1000 + int64(nbProposedTxs)*int64(feePerTxForLeader) + expectedBalance := int64(nbRewards)*int64(rewardValue) + int64(nbProposedTxs)*int64(feePerTxForLeader) fmt.Println(fmt.Sprintf("checking account %s has balance %d", core.ToB64(acc.AddressContainer().Bytes()), expectedBalance)) assert.Equal(t, big.NewInt(expectedBalance), acc.(*state.Account).Balance) } diff --git a/integrationTests/testInitializer.go b/integrationTests/testInitializer.go index f32c90a2b38..f46d3287bc3 100644 --- a/integrationTests/testInitializer.go +++ b/integrationTests/testInitializer.go @@ -786,7 +786,7 @@ func skToPk(sk crypto.PrivateKey) []byte { return pkBuff } -// TestPrivateKeyHasBalance checks if the account corresponding to the given public key has the expected balance +// TestPublicKeyHasBalance checks if the account corresponding to the given public key has the expected balance func TestPublicKeyHasBalance(t *testing.T, n *TestProcessorNode, pk crypto.PublicKey, expectedBalance *big.Int) { pkBuff, _ := pk.ToByteArray() addr, _ := TestAddressConverter.CreateAddressFromPublicKeyBytes(pkBuff) @@ -856,7 +856,7 @@ func CreateAndSendIntraShardTransactions( sendersPrivKeysMap := make(map[uint32][]crypto.PrivateKey) receiversPubKeysMap := make(map[uint32][]crypto.PublicKey) - for shardId, _ := range nodes { + for shardId := range nodes { if shardId == sharding.MetachainShardId { continue } diff --git a/integrationTests/testProcessorNodeWithMultisigner.go b/integrationTests/testProcessorNodeWithMultisigner.go index 1c32d70dd4a..637800a962b 100644 --- a/integrationTests/testProcessorNodeWithMultisigner.go +++ b/integrationTests/testProcessorNodeWithMultisigner.go @@ -217,13 +217,13 @@ func AllShardsProposeBlock( newRandomness := make(map[uint32][]byte) // propose blocks - for i, _ := range nodesMap { + for i := range nodesMap { body[i], header[i], _, consensusNodes[i] = ProposeBlockWithConsensusSignature(i, nodesMap, round, nonce, prevRandomness[i]) newRandomness[i] = header[i].GetRandSeed() } // propagate 
blocks - for i, _ := range nodesMap { + for i := range nodesMap { consensusNodes[i][0].BroadcastBlock(body[i], header[i]) consensusNodes[i][0].CommitBlock(body[i], header[i]) } From c3207f365ccec8a577c69408dc10be0c9b9b33fa Mon Sep 17 00:00:00 2001 From: miiu96 Date: Tue, 17 Sep 2019 16:43:27 +0300 Subject: [PATCH 105/234] EN-3887 : modify tests --- cmd/node/factory/structs.go | 16 +++++ process/block/poolscleaner/nilpoolscleaner.go | 28 +++++++++ .../poolscleaner.go} | 11 ++-- .../poolscleaner_test.go} | 59 +++++++++++++++---- process/block/shardblock.go | 19 ++++-- 5 files changed, 110 insertions(+), 23 deletions(-) create mode 100644 process/block/poolscleaner/nilpoolscleaner.go rename process/block/{poolsclean.go => poolscleaner/poolscleaner.go} (92%) rename process/block/{poolsclean_test.go => poolscleaner/poolscleaner_test.go} (69%) diff --git a/cmd/node/factory/structs.go b/cmd/node/factory/structs.go index c60059f3218..8329bb0d366 100644 --- a/cmd/node/factory/structs.go +++ b/cmd/node/factory/structs.go @@ -53,6 +53,7 @@ import ( "github.com/ElrondNetwork/elrond-go/p2p/loadBalancer" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/block" + "github.com/ElrondNetwork/elrond-go/process/block/poolscleaner" "github.com/ElrondNetwork/elrond-go/process/coordinator" "github.com/ElrondNetwork/elrond-go/process/factory" "github.com/ElrondNetwork/elrond-go/process/factory/metachain" @@ -1527,6 +1528,21 @@ func newShardBlockProcessorAndTracker( return nil, nil, err } + poolsCleaner, err := poolscleaner.NewTxsPoolsCleaner( + state.AccountsAdapter, + shardCoordinator, + data.Datapool, + state.AddressConverter, + ) + if err != nil { + return nil, nil, err + } + + err = blockProcessor.SetPoolsCleaner(poolsCleaner) + if err != nil { + return nil, nil, err + } + return blockProcessor, blockTracker, nil } diff --git a/process/block/poolscleaner/nilpoolscleaner.go b/process/block/poolscleaner/nilpoolscleaner.go new file mode 100644 index 00000000000..a19c32ea7bb --- /dev/null +++ b/process/block/poolscleaner/nilpoolscleaner.go @@ -0,0 +1,28 @@ +package poolscleaner + +// NilPoolsCleaner will be used when an PoolsCleaner is required, but another one isn't necessary or available +type NilPoolsCleaner struct { +} + +// NewNilPoolsCleaner will return an instance of the struct +func NewNilPoolsCleaner() *NilPoolsCleaner { + return new(NilPoolsCleaner) +} + +// Clean method - won't do anything +func (nsh *NilPoolsCleaner) Clean(haveTime func() bool) error { + return nil +} + +// NumRemovedTxs - won't do anything +func (nsh *NilPoolsCleaner) NumRemovedTxs() uint64 { + return 0 +} + +// IsInterfaceNil returns true if there is no value under the interface +func (nsh *NilPoolsCleaner) IsInterfaceNil() bool { + if nsh == nil { + return true + } + return false +} diff --git a/process/block/poolsclean.go b/process/block/poolscleaner/poolscleaner.go similarity index 92% rename from process/block/poolsclean.go rename to process/block/poolscleaner/poolscleaner.go index ac6b20ff6c7..48bac8a4073 100644 --- a/process/block/poolsclean.go +++ b/process/block/poolscleaner/poolscleaner.go @@ -1,10 +1,9 @@ -package block +package poolscleaner import ( "sync" "github.com/ElrondNetwork/elrond-go/data/state" - "github.com/ElrondNetwork/elrond-go/data/state/addressConverters" "github.com/ElrondNetwork/elrond-go/data/transaction" "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/process" @@ -16,7 +15,7 @@ type TxPoolsCleaner struct { accounts 
state.AccountsAdapter shardCoordinator sharding.Coordinator dataPool dataRetriever.PoolsHolder - addrConverter *addressConverters.PlainAddressConverter + addrConverter state.AddressConverter numRemovedTxs uint64 mutNumRemovedTxs sync.RWMutex } @@ -26,6 +25,7 @@ func NewTxsPoolsCleaner( accounts state.AccountsAdapter, shardCoordinator sharding.Coordinator, dataPool dataRetriever.PoolsHolder, + addrConverter state.AddressConverter, ) (*TxPoolsCleaner, error) { if accounts == nil || accounts.IsInterfaceNil() { return nil, process.ErrNilAccountsAdapter @@ -40,9 +40,8 @@ func NewTxsPoolsCleaner( if transactionPool == nil { return nil, process.ErrNilTransactionPool } - addrConverter, err := addressConverters.NewPlainAddressConverter(32, "0x") - if err != nil { - return nil, err + if addrConverter == nil || addrConverter.IsInterfaceNil() { + return nil, process.ErrNilAddressConverter } return &TxPoolsCleaner{ diff --git a/process/block/poolsclean_test.go b/process/block/poolscleaner/poolscleaner_test.go similarity index 69% rename from process/block/poolsclean_test.go rename to process/block/poolscleaner/poolscleaner_test.go index efc18e8bc50..dd76a4813a6 100644 --- a/process/block/poolsclean_test.go +++ b/process/block/poolscleaner/poolscleaner_test.go @@ -1,16 +1,18 @@ -package block_test +package poolscleaner_test import ( "math/big" + "reflect" "testing" "time" "github.com/ElrondNetwork/elrond-go/data/smartContractResult" "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/data/state/addressConverters" "github.com/ElrondNetwork/elrond-go/data/transaction" "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/process/block" + "github.com/ElrondNetwork/elrond-go/process/block/poolscleaner" "github.com/ElrondNetwork/elrond-go/process/mock" "github.com/ElrondNetwork/elrond-go/storage" "github.com/stretchr/testify/assert" @@ -64,12 +66,39 @@ func initDataPoolTransactions() *mock.PoolsHolderStub { } } +func initDataPool(testHash []byte) *mock.PoolsHolderStub { + sdp := &mock.PoolsHolderStub{ + TransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{ + ShardDataStoreCalled: func(id string) (c storage.Cacher) { + return &mock.CacherStub{ + PeekCalled: func(key []byte) (value interface{}, ok bool) { + if reflect.DeepEqual(key, testHash) { + return &transaction.Transaction{Nonce: 10}, true + } + return nil, false + }, + KeysCalled: func() [][]byte { + return [][]byte{[]byte("key1"), []byte("key2")} + }, + LenCalled: func() int { + return 0 + }, + } + }, + } + }, + } + return sdp +} + func TestNewTxsPoolsCleaner_NilAccountsShouldErr(t *testing.T) { t.Parallel() shardCoordinator := mock.NewOneShardCoordinatorMock() tdp := initDataPool([]byte("test")) - txsPoolsCleaner, err := block.NewTxsPoolsCleaner(nil, shardCoordinator, tdp) + addrConverter, _ := addressConverters.NewPlainAddressConverter(32, "0x") + txsPoolsCleaner, err := poolscleaner.NewTxsPoolsCleaner(nil, shardCoordinator, tdp, addrConverter) assert.Nil(t, txsPoolsCleaner) assert.Equal(t, process.ErrNilAccountsAdapter, err) @@ -82,7 +111,8 @@ func TestNewTxsPoolsCleaner_NilShardCoordinatorShouldErr(t *testing.T) { balance := big.NewInt(1) accounts := getAccAdapter(nonce, balance) tdp := initDataPool([]byte("test")) - txsPoolsCleaner, err := block.NewTxsPoolsCleaner(accounts, nil, tdp) + addrConverter, _ := addressConverters.NewPlainAddressConverter(32, "0x") + txsPoolsCleaner, err := 
poolscleaner.NewTxsPoolsCleaner(accounts, nil, tdp, addrConverter) assert.Nil(t, txsPoolsCleaner) assert.Equal(t, process.ErrNilShardCoordinator, err) @@ -95,7 +125,8 @@ func TestNewTxsPoolsCleaner_NilDataPoolShouldErr(t *testing.T) { balance := big.NewInt(1) accounts := getAccAdapter(nonce, balance) shardCoordinator := mock.NewOneShardCoordinatorMock() - txsPoolsCleaner, err := block.NewTxsPoolsCleaner(accounts, shardCoordinator, nil) + addrConverter, _ := addressConverters.NewPlainAddressConverter(32, "0x") + txsPoolsCleaner, err := poolscleaner.NewTxsPoolsCleaner(accounts, shardCoordinator, nil, addrConverter) assert.Nil(t, txsPoolsCleaner) assert.Equal(t, process.ErrNilDataPoolHolder, err) @@ -113,7 +144,8 @@ func TestNewTxsPoolsCleaner_NilTransactionPoolShouldErr(t *testing.T) { return nil }, } - txsPoolsCleaner, err := block.NewTxsPoolsCleaner(accounts, shardCoordinator, tdp) + addrConverter, _ := addressConverters.NewPlainAddressConverter(32, "0x") + txsPoolsCleaner, err := poolscleaner.NewTxsPoolsCleaner(accounts, shardCoordinator, tdp, addrConverter) assert.Nil(t, txsPoolsCleaner) assert.Equal(t, process.ErrNilTransactionPool, err) @@ -127,7 +159,8 @@ func TestNewTxsPoolsCleaner_ShouldWork(t *testing.T) { accounts := getAccAdapter(nonce, balance) shardCoordinator := mock.NewOneShardCoordinatorMock() tdp := initDataPool([]byte("test")) - txsPoolsCleaner, err := block.NewTxsPoolsCleaner(accounts, shardCoordinator, tdp) + addrConverter, _ := addressConverters.NewPlainAddressConverter(32, "0x") + txsPoolsCleaner, err := poolscleaner.NewTxsPoolsCleaner(accounts, shardCoordinator, tdp, addrConverter) assert.NotNil(t, txsPoolsCleaner) assert.Nil(t, err) @@ -142,7 +175,8 @@ func TestTxPoolsCleaner_CleanNilSenderAddrShouldRemoveTx(t *testing.T) { accounts := getAccAdapter(nonce, balance) shardCoordinator := mock.NewOneShardCoordinatorMock() tdp := initDataPoolTransactions() - txsPoolsCleaner, _ := block.NewTxsPoolsCleaner(accounts, shardCoordinator, tdp) + addrConverter, _ := addressConverters.NewPlainAddressConverter(32, "0x") + txsPoolsCleaner, _ := poolscleaner.NewTxsPoolsCleaner(accounts, shardCoordinator, tdp, addrConverter) startTime := time.Now() haveTime := func() bool { @@ -167,7 +201,8 @@ func TestTxPoolsCleaner_CleanAccountNotExistsShouldRemoveTx(t *testing.T) { } shardCoordinator := mock.NewOneShardCoordinatorMock() tdp := initDataPoolTransactions() - txsPoolsCleaner, _ := block.NewTxsPoolsCleaner(accounts, shardCoordinator, tdp) + addrConverter, _ := addressConverters.NewPlainAddressConverter(32, "0x") + txsPoolsCleaner, _ := poolscleaner.NewTxsPoolsCleaner(accounts, shardCoordinator, tdp, addrConverter) startTime := time.Now() haveTime := func() bool { @@ -190,7 +225,8 @@ func TestTxPoolsCleaner_CleanLowerAccountNonceShouldRemoveTx(t *testing.T) { accounts := getAccAdapter(nonce, balance) shardCoordinator := mock.NewOneShardCoordinatorMock() tdp := initDataPoolTransactions() - txsPoolsCleaner, _ := block.NewTxsPoolsCleaner(accounts, shardCoordinator, tdp) + addrConverter, _ := addressConverters.NewPlainAddressConverter(32, "0x") + txsPoolsCleaner, _ := poolscleaner.NewTxsPoolsCleaner(accounts, shardCoordinator, tdp, addrConverter) startTime := time.Now() haveTime := func() bool { @@ -212,7 +248,8 @@ func TestTxPoolsCleaner_CleanNilHaveTimeShouldErr(t *testing.T) { accounts := getAccAdapter(nonce, balance) shardCoordinator := mock.NewOneShardCoordinatorMock() tdp := initDataPoolTransactions() - txsPoolsCleaner, _ := block.NewTxsPoolsCleaner(accounts, shardCoordinator, tdp) + 
addrConverter, _ := addressConverters.NewPlainAddressConverter(32, "0x") + txsPoolsCleaner, _ := poolscleaner.NewTxsPoolsCleaner(accounts, shardCoordinator, tdp, addrConverter) err := txsPoolsCleaner.Clean(nil) assert.Equal(t, process.ErrNilHaveTimeHandler, err) diff --git a/process/block/shardblock.go b/process/block/shardblock.go index 4a7f6e546c8..cd1a010d85d 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -1,6 +1,7 @@ package block import ( + "errors" "fmt" "sort" "sync" @@ -17,6 +18,7 @@ import ( "github.com/ElrondNetwork/elrond-go/hashing" "github.com/ElrondNetwork/elrond-go/marshal" "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/block/poolscleaner" "github.com/ElrondNetwork/elrond-go/process/throttle" "github.com/ElrondNetwork/elrond-go/sharding" "github.com/ElrondNetwork/elrond-go/statusHandler" @@ -111,11 +113,6 @@ func NewShardProcessor( return nil, err } - txsPoolsCleaner, err := NewTxsPoolsCleaner(accounts, shardCoordinator, dataPool) - if err != nil { - return nil, err - } - sp := shardProcessor{ core: core, baseProcessor: base, @@ -123,7 +120,7 @@ func NewShardProcessor( blocksTracker: blocksTracker, txCoordinator: txCoordinator, txCounter: NewTransactionCounter(), - txsPoolsCleaner: txsPoolsCleaner, + txsPoolsCleaner: poolscleaner.NewNilPoolsCleaner(), } sp.chRcvAllMetaHdrs = make(chan bool) @@ -148,6 +145,16 @@ func NewShardProcessor( return &sp, nil } +// SetPoolsCleaner will set pool cleaner +func (sp *shardProcessor) SetPoolsCleaner(poolsCleaner process.PoolsCleaner) error { + if poolsCleaner == nil { + return errors.New("nil pools cleaner") + } + sp.txsPoolsCleaner = poolsCleaner + + return nil +} + // ProcessBlock processes a block. It returns nil if all ok or the specific error func (sp *shardProcessor) ProcessBlock( chainHandler data.ChainHandler, From bbffbe11998a56a8693db68eda9e515a1e4faafc Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Wed, 18 Sep 2019 10:02:29 +0300 Subject: [PATCH 106/234] integrationTests: refactor rewards integration test --- .../block/executingMiniblocks_test.go | 6 +- .../block/executingRewardMiniblocks_test.go | 141 +++++++++++------- integrationTests/testInitializer.go | 46 +++--- .../testProcessorNodeWithMultisigner.go | 7 +- 4 files changed, 114 insertions(+), 86 deletions(-) diff --git a/integrationTests/multiShard/block/executingMiniblocks_test.go b/integrationTests/multiShard/block/executingMiniblocks_test.go index e8bb7e873b2..c9b7b998bee 100644 --- a/integrationTests/multiShard/block/executingMiniblocks_test.go +++ b/integrationTests/multiShard/block/executingMiniblocks_test.go @@ -73,6 +73,9 @@ func TestShouldProcessBlocksInMultiShardArchitecture(t *testing.T) { } } + fmt.Println("Minting sender addresses...") + integrationTests.CreateMintingForSenders(nodes, senderShard, sendersPrivateKeys, valMinting) + fmt.Println("Generating transactions...") integrationTests.GenerateAndDisseminateTxs( proposerNode, @@ -85,9 +88,6 @@ func TestShouldProcessBlocksInMultiShardArchitecture(t *testing.T) { fmt.Println("Delaying for disseminating transactions...") time.Sleep(time.Second * 5) - fmt.Println("Minting sender addresses...") - integrationTests.CreateMintingForSenders(nodes, senderShard, sendersPrivateKeys, valMinting) - round = integrationTests.IncrementAndPrintRound(round) nonce++ roundsToWait := 6 diff --git a/integrationTests/multiShard/block/executingRewardMiniblocks_test.go b/integrationTests/multiShard/block/executingRewardMiniblocks_test.go index 
826eb92a3b8..84ffd5669a9 100644 --- a/integrationTests/multiShard/block/executingRewardMiniblocks_test.go +++ b/integrationTests/multiShard/block/executingRewardMiniblocks_test.go @@ -5,11 +5,11 @@ import ( "errors" "fmt" "math/big" - "reflect" "testing" "time" "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/crypto" "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/data/state" @@ -19,7 +19,7 @@ import ( "github.com/stretchr/testify/assert" ) -func TestExecuteBlocksWithOnlyRewards(t *testing.T) { +func TestExecuteBlocksWithTransactionsAndCheckRewards(t *testing.T) { if testing.Short() { t.Skip("this is not a short test") } @@ -57,57 +57,35 @@ func TestExecuteBlocksWithOnlyRewards(t *testing.T) { } }() - randomness := generateInitialRandomness(uint32(nbShards)) + gasPrice := uint64(10) + gasLimit := uint64(100) + valToTransfer := big.NewInt(100) + nbTxsPerShard := uint32(100) + mintValue := big.NewInt(1000000) + + generateIntraShardTransactions(nodesMap, nbTxsPerShard, mintValue, valToTransfer, gasPrice, gasLimit) + round := uint64(1) nonce := uint64(1) nbBlocksProduced := 7 - nbTxsPerShard := 100 - mintValue := big.NewInt(1000000) + randomness := generateInitialRandomness(uint32(nbShards)) var headers map[uint32]data.HeaderHandler var consensusNodes map[uint32][]*integrationTests.TestProcessorNode mapRewardsForAddress := make(map[string]uint32) nbTxsForLeaderAddress := make(map[string]uint32) - rewardValue := 1000 - gasPrice := uint64(10) - gasLimit := uint64(100) - feePerTxForLeader := gasPrice * gasLimit / 2 - valToTransfer := big.NewInt(100) - - sendersPrivateKeys := integrationTests.CreateAndSendIntraShardTransactions( - nodesMap, - nbTxsPerShard, - gasPrice, - gasLimit, - valToTransfer, - ) - - for shardId, nodes := range nodesMap { - if shardId == sharding.MetachainShardId { - continue - } - - fmt.Println("Minting sender addresses...") - integrationTests.CreateMintingForSenders( - nodes, - shardId, - sendersPrivateKeys[shardId], - mintValue, - ) - } - for i := 0; i < nbBlocksProduced; i++ { _, headers, consensusNodes, randomness = integrationTests.AllShardsProposeBlock(round, nonce, randomness, nodesMap) for shardId, consensusGroup := range consensusNodes { addrRewards := consensusGroup[0].SpecialAddressHandler.ConsensusRewardAddresses() updateExpectedRewards(mapRewardsForAddress, addrRewards) - nbTxs := transactionsFromHeaderInShard(t, headers, shardId) + nbTxs := getTransactionsFromHeaderInShard(t, headers, shardId) // without metachain nodes for now if len(addrRewards) > 0 { - updateNbTransactionsProposed(t, nbTxsForLeaderAddress, addrRewards[0], nbTxs) + updateNumberTransactionsProposed(t, nbTxsForLeaderAddress, addrRewards[0], nbTxs) } } @@ -120,20 +98,47 @@ func TestExecuteBlocksWithOnlyRewards(t *testing.T) { time.Sleep(time.Second) - for address, nbRewards := range mapRewardsForAddress { - addrContainer, _ := integrationTests.TestAddressConverter.CreateAddressFromPublicKeyBytes([]byte(address)) - shard := nodesMap[0][0].ShardCoordinator.ComputeId(addrContainer) + verifyRewards(t, nodesMap, mapRewardsForAddress, nbTxsForLeaderAddress, gasPrice, gasLimit) +} - for _, shardNode := range nodesMap[shard] { - acc, err := shardNode.AccntState.GetExistingAccount(addrContainer) - assert.Nil(t, err) +func generateIntraShardTransactions( + nodesMap map[uint32][]*integrationTests.TestProcessorNode, + nbTxsPerShard uint32, + mintValue *big.Int, + valToTransfer *big.Int, + gasPrice 
uint64, + gasLimit uint64, +) { + sendersPrivateKeys := make(map[uint32][]crypto.PrivateKey) + receiversPublicKeys := make(map[uint32][]crypto.PublicKey) - nbProposedTxs := nbTxsForLeaderAddress[address] - expectedBalance := int64(nbRewards)*int64(rewardValue) + int64(nbProposedTxs)*int64(feePerTxForLeader) - fmt.Println(fmt.Sprintf("checking account %s has balance %d", core.ToB64(acc.AddressContainer().Bytes()), expectedBalance)) - assert.Equal(t, big.NewInt(expectedBalance), acc.(*state.Account).Balance) + for shardId, nodes := range nodesMap { + if shardId == sharding.MetachainShardId { + continue } + + sendersPrivateKeys[shardId], receiversPublicKeys[shardId] = integrationTests.CreateSendersAndReceiversInShard( + nodes[0], + nbTxsPerShard, + ) + + fmt.Println("Minting sender addresses...") + integrationTests.CreateMintingForSenders( + nodes, + shardId, + sendersPrivateKeys[shardId], + mintValue, + ) } + + integrationTests.CreateAndSendTransactions( + nodesMap, + sendersPrivateKeys, + receiversPublicKeys, + gasPrice, + gasLimit, + valToTransfer, + ) } func getBlockProposersIndexes( @@ -145,7 +150,7 @@ func getBlockProposersIndexes( for sh, testNodeList := range nodesMap { for k, testNode := range testNodeList { - if reflect.DeepEqual(consensusMap[sh][0], testNode) { + if consensusMap[sh][0] == testNode { indexProposer[sh] = k } } @@ -166,7 +171,7 @@ func generateInitialRandomness(nbShards uint32) map[uint32][]byte { return randomness } -func transactionsFromHeaderInShard(t *testing.T, headers map[uint32]data.HeaderHandler, shardId uint32) uint32 { +func getTransactionsFromHeaderInShard(t *testing.T, headers map[uint32]data.HeaderHandler, shardId uint32) uint32 { if shardId == sharding.MetachainShardId { return 0 } @@ -196,16 +201,12 @@ func updateExpectedRewards(rewardsForAddress map[string]uint32, addresses []stri if addresses[i] == "" { continue } - currentRewards, ok := rewardsForAddress[addresses[i]] - if !ok { - currentRewards = 0 - } - rewardsForAddress[addresses[i]] = currentRewards + 1 + rewardsForAddress[addresses[i]]++ } } -func updateNbTransactionsProposed( +func updateNumberTransactionsProposed( t *testing.T, transactionsForLeader map[string]uint32, addressProposer string, @@ -215,10 +216,34 @@ func updateNbTransactionsProposed( assert.Error(t, errors.New("invalid address")) } - proposedTransactions, ok := transactionsForLeader[addressProposer] - if !ok { - proposedTransactions = 0 - } + transactionsForLeader[addressProposer] += nbTransactions +} + +func verifyRewards( + t *testing.T, + nodesMap map[uint32][]*integrationTests.TestProcessorNode, + mapRewardsForAddress map[string]uint32, + nbTxsForLeaderAddress map[string]uint32, + gasPrice uint64, + gasLimit uint64, +) { - transactionsForLeader[addressProposer] = proposedTransactions + nbTransactions + // TODO: rewards and fee percentage should be read from protocol config + rewardValue := 1000 + feePerTxForLeader := gasPrice * gasLimit / 2 + + for address, nbRewards := range mapRewardsForAddress { + addrContainer, _ := integrationTests.TestAddressConverter.CreateAddressFromPublicKeyBytes([]byte(address)) + shard := nodesMap[0][0].ShardCoordinator.ComputeId(addrContainer) + + for _, shardNode := range nodesMap[shard] { + acc, err := shardNode.AccntState.GetExistingAccount(addrContainer) + assert.Nil(t, err) + + nbProposedTxs := nbTxsForLeaderAddress[address] + expectedBalance := int64(nbRewards)*int64(rewardValue) + int64(nbProposedTxs)*int64(feePerTxForLeader) + fmt.Println(fmt.Sprintf("checking account %s has balance %d", 
core.ToB64(acc.AddressContainer().Bytes()), expectedBalance)) + assert.Equal(t, big.NewInt(expectedBalance), acc.(*state.Account).Balance) + } + } } diff --git a/integrationTests/testInitializer.go b/integrationTests/testInitializer.go index f46d3287bc3..6e91d7d882c 100644 --- a/integrationTests/testInitializer.go +++ b/integrationTests/testInitializer.go @@ -845,38 +845,44 @@ func GenerateSkAndPkInShard( return sk, pk, keyGen } -// CreateAndSendIntraShardTransactions Creates and sends intra shard transactions, returning the senders private keys -func CreateAndSendIntraShardTransactions( +// CreateSendersAndReceiversInShard creates given number of sender private key and receiver public key pairs, +// with account in same shard as given node +func CreateSendersAndReceiversInShard( + nodeInShard *TestProcessorNode, + nbSenderReceiverPairs uint32, +) ([]crypto.PrivateKey, []crypto.PublicKey) { + shardId := nodeInShard.ShardCoordinator.SelfId() + receiversPublicKeys := make([]crypto.PublicKey, nbSenderReceiverPairs) + sendersPrivateKeys := make([]crypto.PrivateKey, nbSenderReceiverPairs) + + for i := uint32(0); i < nbSenderReceiverPairs; i++ { + sendersPrivateKeys[i], _, _ = GenerateSkAndPkInShard(nodeInShard.ShardCoordinator, shardId) + _, receiversPublicKeys[i], _ = GenerateSkAndPkInShard(nodeInShard.ShardCoordinator, shardId) + } + + return sendersPrivateKeys, receiversPublicKeys +} + +// CreateAndSendTransactions creates and sends transactions between given senders and receivers. +func CreateAndSendTransactions( nodes map[uint32][]*TestProcessorNode, - nbTxsPerShard int, + sendersPrivKeysMap map[uint32][]crypto.PrivateKey, + receiversPubKeysMap map[uint32][]crypto.PublicKey, gasPricePerTx uint64, gasLimitPerTx uint64, valueToTransfer *big.Int, -) map[uint32][]crypto.PrivateKey { - sendersPrivKeysMap := make(map[uint32][]crypto.PrivateKey) - receiversPubKeysMap := make(map[uint32][]crypto.PublicKey) - +) { for shardId := range nodes { if shardId == sharding.MetachainShardId { continue } - nodeInShard := nodes[0][0] - shardId := nodeInShard.ShardCoordinator.SelfId() - receiversPublicKeys := make([]crypto.PublicKey, nbTxsPerShard) - sendersPrivateKeys := make([]crypto.PrivateKey, nbTxsPerShard) - - for i := 0; i < nbTxsPerShard; i++ { - sendersPrivateKeys[i], _, _ = GenerateSkAndPkInShard(nodeInShard.ShardCoordinator, shardId) - _, receiversPublicKeys[i], _ = GenerateSkAndPkInShard(nodeInShard.ShardCoordinator, shardId) - } - sendersPrivKeysMap[shardId] = sendersPrivateKeys - receiversPubKeysMap[shardId] = receiversPublicKeys + nodeInShard := nodes[shardId][0] fmt.Println("Generating transactions...") GenerateAndDisseminateTxs( nodeInShard, - sendersPrivateKeys, + sendersPrivKeysMap[shardId], receiversPubKeysMap, valueToTransfer, gasPricePerTx, @@ -886,8 +892,6 @@ func CreateAndSendIntraShardTransactions( fmt.Println("Delaying for disseminating transactions...") time.Sleep(time.Second * 5) - - return sendersPrivKeysMap } // CreateMintingForSenders creates account with balances for every node in a given shard diff --git a/integrationTests/testProcessorNodeWithMultisigner.go b/integrationTests/testProcessorNodeWithMultisigner.go index 637800a962b..8d33125961d 100644 --- a/integrationTests/testProcessorNodeWithMultisigner.go +++ b/integrationTests/testProcessorNodeWithMultisigner.go @@ -8,6 +8,7 @@ import ( "time" "github.com/ElrondNetwork/elrond-go/cmd/node/factory" + "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/crypto" kmultisig 
"github.com/ElrondNetwork/elrond-go/crypto/signing/kyber/multisig" "github.com/ElrondNetwork/elrond-go/crypto/signing/multisig" @@ -178,8 +179,7 @@ func DoConsensusSigningOnBlock( // clear signature, as we need to compute it below blockHeader.SetSignature(nil) blockHeader.SetPubKeysBitmap(nil) - blockHeaderBytes, _ := TestMarshalizer.Marshal(blockHeader) - blockHeaderHash := TestHasher.Compute(string(blockHeaderBytes)) + blockHeaderHash, _ := core.CalculateHash(TestMarshalizer, TestHasher, blockHeader) var msig crypto.MultiSigner msigProposer, _ := consensusNodes[0].MultiSigner.Create(pubKeys, 0) @@ -257,8 +257,7 @@ func VerifyNodesHaveHeaders( // all nodes in metachain have the block headers in pool as interceptor validates them for shHeader, header := range headers { - headerBytes, _ := TestMarshalizer.Marshal(header) - headerHash := TestHasher.Compute(string(headerBytes)) + headerHash, _ := core.CalculateHash(TestMarshalizer, TestHasher, header) for _, metaNode := range nodesMap[sharding.MetachainShardId] { if shHeader == sharding.MetachainShardId { From 6761654394b8e15d3f16a5102cc6b095fa53970e Mon Sep 17 00:00:00 2001 From: miiu96 Date: Wed, 18 Sep 2019 10:24:34 +0300 Subject: [PATCH 107/234] EN-3887 : fix a part of findings --- process/block/poolscleaner/poolscleaner.go | 24 ++++------ .../block/poolscleaner/poolscleaner_test.go | 44 +++++++++++++------ process/block/shardblock.go | 2 +- 3 files changed, 40 insertions(+), 30 deletions(-) diff --git a/process/block/poolscleaner/poolscleaner.go b/process/block/poolscleaner/poolscleaner.go index 48bac8a4073..a5d794ac18d 100644 --- a/process/block/poolscleaner/poolscleaner.go +++ b/process/block/poolscleaner/poolscleaner.go @@ -1,7 +1,7 @@ package poolscleaner import ( - "sync" + "sync/atomic" "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/data/transaction" @@ -17,7 +17,6 @@ type TxPoolsCleaner struct { dataPool dataRetriever.PoolsHolder addrConverter state.AddressConverter numRemovedTxs uint64 - mutNumRemovedTxs sync.RWMutex } // NewTxsPoolsCleaner will return a new transaction pools cleaner @@ -33,7 +32,7 @@ func NewTxsPoolsCleaner( if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { return nil, process.ErrNilShardCoordinator } - if dataPool == nil { + if dataPool == nil || dataPool.IsInterfaceNil() { return nil, process.ErrNilDataPoolHolder } transactionPool := dataPool.Transactions() @@ -80,6 +79,8 @@ func (tpc *TxPoolsCleaner) Clean(haveTime func() bool) error { tx, ok := obj.(*transaction.Transaction) if !ok { + atomic.AddUint64(&tpc.numRemovedTxs, 1) + txsPool.Remove(key) continue } @@ -87,14 +88,14 @@ func (tpc *TxPoolsCleaner) Clean(haveTime func() bool) error { addr, err := tpc.addrConverter.CreateAddressFromPublicKeyBytes(sndAddr) if err != nil { txsPool.Remove(key) - tpc.incrementNumRemovedTxs() + atomic.AddUint64(&tpc.numRemovedTxs, 1) continue } accountHandler, err := tpc.accounts.GetExistingAccount(addr) if err != nil { txsPool.Remove(key) - tpc.incrementNumRemovedTxs() + atomic.AddUint64(&tpc.numRemovedTxs, 1) continue } @@ -103,7 +104,7 @@ func (tpc *TxPoolsCleaner) Clean(haveTime func() bool) error { lowerNonceInTx := txNonce < accountNonce if lowerNonceInTx { txsPool.Remove(key) - tpc.incrementNumRemovedTxs() + atomic.AddUint64(&tpc.numRemovedTxs, 1) } } } @@ -111,16 +112,7 @@ func (tpc *TxPoolsCleaner) Clean(haveTime func() bool) error { return nil } -func (tpc *TxPoolsCleaner) incrementNumRemovedTxs() { - tpc.mutNumRemovedTxs.Lock() - tpc.numRemovedTxs++ - 
tpc.mutNumRemovedTxs.Unlock() -} - // NumRemovedTxs will return the number of removed txs from pools func (tpc *TxPoolsCleaner) NumRemovedTxs() uint64 { - tpc.mutNumRemovedTxs.Lock() - defer tpc.mutNumRemovedTxs.Unlock() - - return tpc.numRemovedTxs + return atomic.LoadUint64(&tpc.numRemovedTxs) } diff --git a/process/block/poolscleaner/poolscleaner_test.go b/process/block/poolscleaner/poolscleaner_test.go index dd76a4813a6..11fec97d053 100644 --- a/process/block/poolscleaner/poolscleaner_test.go +++ b/process/block/poolscleaner/poolscleaner_test.go @@ -1,8 +1,8 @@ package poolscleaner_test import ( + "bytes" "math/big" - "reflect" "testing" "time" @@ -27,7 +27,11 @@ func getAccAdapter(nonce uint64, balance *big.Int) *mock.AccountsStub { return accDB } -func initDataPoolTransactions() *mock.PoolsHolderStub { +func initDataPoolWithFourTransactions() *mock.PoolsHolderStub { + delayedFetchingKey := "key1" + validTxKey := "key2" + invalidTxKey := "key3" + return &mock.PoolsHolderStub{ TransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { return &mock.ShardedDataStub{ @@ -36,22 +40,22 @@ func initDataPoolTransactions() *mock.PoolsHolderStub { return &mock.CacherStub{ PeekCalled: func(key []byte) (value interface{}, ok bool) { switch string(key) { - case "key1": + case delayedFetchingKey: time.Sleep(time.Second) return &transaction.Transaction{Nonce: 10}, true - case "key2": + case validTxKey: return &transaction.Transaction{ Nonce: 10, SndAddr: []byte("address_address_address_address_"), }, true - case "key3": + case invalidTxKey: return &smartContractResult.SmartContractResult{}, true default: return nil, false } }, KeysCalled: func() [][]byte { - return [][]byte{[]byte("key1"), []byte("key2"), []byte("key3"), []byte("key4")} + return [][]byte{[]byte(delayedFetchingKey), []byte(validTxKey), []byte(invalidTxKey), []byte("key4")} }, LenCalled: func() int { return 0 @@ -73,7 +77,7 @@ func initDataPool(testHash []byte) *mock.PoolsHolderStub { ShardDataStoreCalled: func(id string) (c storage.Cacher) { return &mock.CacherStub{ PeekCalled: func(key []byte) (value interface{}, ok bool) { - if reflect.DeepEqual(key, testHash) { + if bytes.Equal(key, testHash) { return &transaction.Transaction{Nonce: 10}, true } return nil, false @@ -151,6 +155,20 @@ func TestNewTxsPoolsCleaner_NilTransactionPoolShouldErr(t *testing.T) { assert.Equal(t, process.ErrNilTransactionPool, err) } +func TestNewTxsPoolsCleaner_NilAddressConverterShouldErr(t *testing.T) { + t.Parallel() + + nonce := uint64(1) + balance := big.NewInt(1) + accounts := getAccAdapter(nonce, balance) + shardCoordinator := mock.NewOneShardCoordinatorMock() + tdp := initDataPool([]byte("test")) + txsPoolsCleaner, err := poolscleaner.NewTxsPoolsCleaner(accounts, shardCoordinator, tdp, nil) + + assert.Nil(t, txsPoolsCleaner) + assert.Equal(t, process.ErrNilAddressConverter, err) +} + func TestNewTxsPoolsCleaner_ShouldWork(t *testing.T) { t.Parallel() @@ -169,18 +187,18 @@ func TestNewTxsPoolsCleaner_ShouldWork(t *testing.T) { func TestTxPoolsCleaner_CleanNilSenderAddrShouldRemoveTx(t *testing.T) { t.Parallel() - cleanDurationSeconds := 1.0 + cleaningTimeNumSeconds := 1.0 nonce := uint64(1) balance := big.NewInt(1) accounts := getAccAdapter(nonce, balance) shardCoordinator := mock.NewOneShardCoordinatorMock() - tdp := initDataPoolTransactions() + tdp := initDataPoolWithFourTransactions() addrConverter, _ := addressConverters.NewPlainAddressConverter(32, "0x") txsPoolsCleaner, _ := poolscleaner.NewTxsPoolsCleaner(accounts, shardCoordinator, 
tdp, addrConverter) startTime := time.Now() haveTime := func() bool { - return time.Now().Sub(startTime).Seconds() < cleanDurationSeconds + return time.Now().Sub(startTime).Seconds() < cleaningTimeNumSeconds } err := txsPoolsCleaner.Clean(haveTime) @@ -200,7 +218,7 @@ func TestTxPoolsCleaner_CleanAccountNotExistsShouldRemoveTx(t *testing.T) { }, } shardCoordinator := mock.NewOneShardCoordinatorMock() - tdp := initDataPoolTransactions() + tdp := initDataPoolWithFourTransactions() addrConverter, _ := addressConverters.NewPlainAddressConverter(32, "0x") txsPoolsCleaner, _ := poolscleaner.NewTxsPoolsCleaner(accounts, shardCoordinator, tdp, addrConverter) @@ -224,7 +242,7 @@ func TestTxPoolsCleaner_CleanLowerAccountNonceShouldRemoveTx(t *testing.T) { balance := big.NewInt(1) accounts := getAccAdapter(nonce, balance) shardCoordinator := mock.NewOneShardCoordinatorMock() - tdp := initDataPoolTransactions() + tdp := initDataPoolWithFourTransactions() addrConverter, _ := addressConverters.NewPlainAddressConverter(32, "0x") txsPoolsCleaner, _ := poolscleaner.NewTxsPoolsCleaner(accounts, shardCoordinator, tdp, addrConverter) @@ -247,7 +265,7 @@ func TestTxPoolsCleaner_CleanNilHaveTimeShouldErr(t *testing.T) { balance := big.NewInt(1) accounts := getAccAdapter(nonce, balance) shardCoordinator := mock.NewOneShardCoordinatorMock() - tdp := initDataPoolTransactions() + tdp := initDataPoolWithFourTransactions() addrConverter, _ := addressConverters.NewPlainAddressConverter(32, "0x") txsPoolsCleaner, _ := poolscleaner.NewTxsPoolsCleaner(accounts, shardCoordinator, tdp, addrConverter) diff --git a/process/block/shardblock.go b/process/block/shardblock.go index cd1a010d85d..a2ca574205f 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -24,7 +24,7 @@ import ( "github.com/ElrondNetwork/elrond-go/statusHandler" ) -const cleaningTime = 1.0 +const cleaningTime = 1 // shardProcessor implements shardProcessor interface and actually it tries to execute block type shardProcessor struct { From 7ef908f347d2f592a56f8752f62e3f0a3e40d406 Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Wed, 18 Sep 2019 11:39:11 +0300 Subject: [PATCH 108/234] integrationTests: add test for only protocol rewards --- .../block/executingRewardMiniblocks_test.go | 69 +++++++++++++++++++ 1 file changed, 69 insertions(+) diff --git a/integrationTests/multiShard/block/executingRewardMiniblocks_test.go b/integrationTests/multiShard/block/executingRewardMiniblocks_test.go index 84ffd5669a9..38a1eb19af9 100644 --- a/integrationTests/multiShard/block/executingRewardMiniblocks_test.go +++ b/integrationTests/multiShard/block/executingRewardMiniblocks_test.go @@ -101,6 +101,75 @@ func TestExecuteBlocksWithTransactionsAndCheckRewards(t *testing.T) { verifyRewards(t, nodesMap, mapRewardsForAddress, nbTxsForLeaderAddress, gasPrice, gasLimit) } +func TestExecuteBlocksWithoutTransactionsAndCheckRewards(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + nodesPerShard := 4 + nbMetaNodes := 2 + nbShards := 2 + consensusGroupSize := 2 + + advertiser := integrationTests.CreateMessengerWithKadDht(context.Background(), "") + _ = advertiser.Bootstrap() + + seedAddress := integrationTests.GetConnectableAddress(advertiser) + + // create map of shard - testNodeProcessors for metachain and shard chain + nodesMap := integrationTests.CreateNodesWithNodesCoordinator( + nodesPerShard, + nbMetaNodes, + nbShards, + consensusGroupSize, + consensusGroupSize, + seedAddress, + ) + + for _, nodes := range nodesMap { + 
integrationTests.DisplayAndStartNodes(nodes) + } + + defer func() { + _ = advertiser.Close() + for _, nodes := range nodesMap { + for _, n := range nodes { + _ = n.Node.Stop() + } + } + }() + + round := uint64(1) + nonce := uint64(1) + nbBlocksProduced := 7 + + randomness := generateInitialRandomness(uint32(nbShards)) + var headers map[uint32]data.HeaderHandler + var consensusNodes map[uint32][]*integrationTests.TestProcessorNode + mapRewardsForAddress := make(map[string]uint32) + nbTxsForLeaderAddress := make(map[string]uint32) + + for i := 0; i < nbBlocksProduced; i++ { + _, headers, consensusNodes, randomness = integrationTests.AllShardsProposeBlock(round, nonce, randomness, nodesMap) + + for _, consensusGroup := range consensusNodes { + addrRewards := consensusGroup[0].SpecialAddressHandler.ConsensusRewardAddresses() + updateExpectedRewards(mapRewardsForAddress, addrRewards) + } + + indexesProposers := getBlockProposersIndexes(consensusNodes, nodesMap) + integrationTests.VerifyNodesHaveHeaders(t, headers, nodesMap) + integrationTests.SyncAllShardsWithRoundBlock(t, nodesMap, indexesProposers, round) + round++ + nonce++ + } + + time.Sleep(time.Second) + + verifyRewards(t, nodesMap, mapRewardsForAddress, nbTxsForLeaderAddress, 0, 0) +} + + func generateIntraShardTransactions( nodesMap map[uint32][]*integrationTests.TestProcessorNode, nbTxsPerShard uint32, From a97faf45c9493b4e7f99fc43d716388bf41ff8c5 Mon Sep 17 00:00:00 2001 From: iulianpascalau Date: Wed, 18 Sep 2019 18:35:38 +0300 Subject: [PATCH 109/234] added integration test for empty block double signing --- integrationTests/sync/basicSync_test.go | 97 +++++++++++++++++++++++++ integrationTests/testInitializer.go | 2 +- 2 files changed, 98 insertions(+), 1 deletion(-) diff --git a/integrationTests/sync/basicSync_test.go b/integrationTests/sync/basicSync_test.go index 0f028e49429..0f0e64da7d6 100644 --- a/integrationTests/sync/basicSync_test.go +++ b/integrationTests/sync/basicSync_test.go @@ -6,6 +6,8 @@ import ( "testing" "time" + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/integrationTests" "github.com/ElrondNetwork/elrond-go/sharding" "github.com/stretchr/testify/assert" @@ -86,6 +88,88 @@ func TestSyncWorksInShard_EmptyBlocksNoForks(t *testing.T) { testAllNodesHaveTheSameBlockHeightInBlockchain(t, nodes) } +func TestSyncWorksInShard_EmptyBlocksDoubleSign(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + maxShards := uint32(1) + shardId := uint32(0) + numNodesPerShard := 6 + + advertiser := integrationTests.CreateMessengerWithKadDht(context.Background(), "") + _ = advertiser.Bootstrap() + advertiserAddr := integrationTests.GetConnectableAddress(advertiser) + + nodes := make([]*integrationTests.TestProcessorNode, numNodesPerShard) + for i := 0; i < numNodesPerShard; i++ { + nodes[i] = integrationTests.NewTestSyncNode( + maxShards, + shardId, + shardId, + advertiserAddr, + ) + } + + idxProposerShard0 := 0 + idxProposers := []int{idxProposerShard0} + + defer func() { + _ = advertiser.Close() + for _, n := range nodes { + _ = n.Messenger.Close() + } + }() + + for _, n := range nodes { + _ = n.Messenger.Bootstrap() + _ = n.StartSync() + } + + fmt.Println("Delaying for nodes p2p bootstrap...") + time.Sleep(delayP2pBootstrap) + + round := uint64(0) + nonce := uint64(0) + round = integrationTests.IncrementAndPrintRound(round) + updateRound(nodes, round) + nonce++ + + numRoundsToTest := 2 + for i := 0; i < numRoundsToTest; i++ { 
+ integrationTests.ProposeBlock(nodes, idxProposers, round, nonce) + + time.Sleep(stepDelay) + + round = integrationTests.IncrementAndPrintRound(round) + updateRound(nodes, round) + nonce++ + } + + pubKeysVariant1 := []byte("1") + pubKeysVariant2 := []byte("2") + + proposeBlockWithPubKeyBitmap(nodes[idxProposerShard0], round, nonce, pubKeysVariant1) + proposeBlockWithPubKeyBitmap(nodes[1], round, nonce, pubKeysVariant2) + + time.Sleep(stepDelay) + + round = integrationTests.IncrementAndPrintRound(round) + updateRound(nodes, round) + + time.Sleep(3 * stepDelay) + + testAllNodesHaveTheSameBlockHeightInBlockchain(t, nodes) + testAllNodesHaveSameLastBlock(t, nodes) +} + +func proposeBlockWithPubKeyBitmap(n *integrationTests.TestProcessorNode, round uint64, nonce uint64, pubKeys []byte) { + body, header, _ := n.ProposeBlock(round, nonce) + header.SetPubKeysBitmap(pubKeys) + n.BroadcastBlock(body, header) + n.CommitBlock(body, header) +} + func testAllNodesHaveTheSameBlockHeightInBlockchain(t *testing.T, nodes []*integrationTests.TestProcessorNode) { expectedNonce := nodes[0].BlockChain.GetCurrentBlockHeader().GetNonce() for i := 1; i < len(nodes); i++ { @@ -97,6 +181,19 @@ func testAllNodesHaveTheSameBlockHeightInBlockchain(t *testing.T, nodes []*integ } } +func testAllNodesHaveSameLastBlock(t *testing.T, nodes []*integrationTests.TestProcessorNode) { + mapBlocksByHash := make(map[string]data.HeaderHandler) + + for _, n := range nodes { + hdr := n.BlockChain.GetCurrentBlockHeader() + buff, _ := core.CalculateHash(integrationTests.TestMarshalizer, integrationTests.TestHasher, hdr) + + mapBlocksByHash[string(buff)] = hdr + } + + assert.Equal(t, 1, len(mapBlocksByHash)) +} + func updateRound(nodes []*integrationTests.TestProcessorNode, round uint64) { for _, n := range nodes { n.Rounder.IndexField = int64(round) diff --git a/integrationTests/testInitializer.go b/integrationTests/testInitializer.go index 4b0b0597439..f2cc989a9ce 100644 --- a/integrationTests/testInitializer.go +++ b/integrationTests/testInitializer.go @@ -493,7 +493,7 @@ func IncrementAndPrintRound(round uint64) uint64 { return round } -// ProposeBlock proposes a block with SC txs for every shard +// ProposeBlock proposes a block for every shard func ProposeBlock(nodes []*TestProcessorNode, idxProposers []int, round uint64, nonce uint64) { fmt.Println("All shards propose blocks...") for idx, n := range nodes { From baec288e9588692d06ac2f1ea745bae924b73359 Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Wed, 18 Sep 2019 20:33:57 +0300 Subject: [PATCH 110/234] data, process, integrationTests: add rewards for metachain --- data/address/specialAddresses.go | 80 ++++-- data/{CapnpHelper.go => capnpHelper.go} | 0 data/consensusRewardData.go | 8 + data/feeTx/capnp/schema.capnp | 19 -- data/feeTx/capnp/schema.capnp.go | 229 ------------------ data/feeTx/feeTx.go | 116 --------- data/feeTx/feeTx_test.go | 67 ----- .../mock/specialAddressHandlerMock.go | 40 ++- process/block/export_test.go | 2 +- process/block/preprocess/rewardsHandler.go | 64 ++++- process/block/shardblock.go | 81 +++++-- process/errors.go | 3 + process/interface.go | 9 +- process/mock/specialAddressHandlerMock.go | 59 ++++- 14 files changed, 265 insertions(+), 512 deletions(-) rename data/{CapnpHelper.go => capnpHelper.go} (100%) create mode 100644 data/consensusRewardData.go delete mode 100644 data/feeTx/capnp/schema.capnp delete mode 100644 data/feeTx/capnp/schema.capnp.go delete mode 100644 data/feeTx/feeTx.go delete mode 100644 data/feeTx/feeTx_test.go diff --git 
a/data/address/specialAddresses.go b/data/address/specialAddresses.go index dffc427681b..fd7a8cce71a 100644 --- a/data/address/specialAddresses.go +++ b/data/address/specialAddresses.go @@ -7,24 +7,23 @@ import ( ) type specialAddresses struct { - elrond []byte - consensusRewardAddresses []string - burnAddress []byte + elrondAddress []byte + shardConsensusData *data.ConsensusRewardData + metaConsensusData []*data.ConsensusRewardData + burnAddress []byte - epoch uint32 - round uint64 adrConv state.AddressConverter shardCoordinator sharding.Coordinator } // NewSpecialAddressHolder creates a special address holder func NewSpecialAddressHolder( - elrond []byte, + elrondAddress []byte, burnAddress []byte, adrConv state.AddressConverter, shardCoordinator sharding.Coordinator, ) (*specialAddresses, error) { - if elrond == nil { + if elrondAddress == nil { return nil, data.ErrNilElrondAddress } if burnAddress == nil { @@ -38,10 +37,11 @@ func NewSpecialAddressHolder( } sp := &specialAddresses{ - elrond: elrond, - burnAddress: burnAddress, - adrConv: adrConv, - shardCoordinator: shardCoordinator, + elrondAddress: elrondAddress, + burnAddress: burnAddress, + adrConv: adrConv, + shardCoordinator: shardCoordinator, + metaConsensusData: make([]*data.ConsensusRewardData, 0), } return sp, nil @@ -49,12 +49,12 @@ func NewSpecialAddressHolder( // SetElrondCommunityAddress sets elrond address func (sp *specialAddresses) SetElrondCommunityAddress(elrond []byte) { - sp.elrond = elrond + sp.elrondAddress = elrond } // ElrondCommunityAddress provides elrond address func (sp *specialAddresses) ElrondCommunityAddress() []byte { - return sp.elrond + return sp.elrondAddress } // BurnAddress provides burn address @@ -63,34 +63,62 @@ func (sp *specialAddresses) BurnAddress() []byte { } // SetConsensusData sets the consensus rewards addresses for the round -func (sp *specialAddresses) SetConsensusData(consensusRewardAddresses []string, round uint64, epoch uint32) { - sp.consensusRewardAddresses = consensusRewardAddresses - sp.round = round - sp.epoch = epoch +func (sp *specialAddresses) SetConsensusData(rewardAddresses []string, round uint64, epoch uint32) { + sp.shardConsensusData = &data.ConsensusRewardData{ + Round: round, + Epoch: epoch, + Addresses: rewardAddresses, + } +} + +// ConsensusShardRewardAddresses provides the consensus reward addresses +func (sp *specialAddresses) ConsensusShardRewardData() *data.ConsensusRewardData { + return sp.shardConsensusData +} + +// SetMetaConsensusData sets the rewards addresses for the metachain nodes +func (sp *specialAddresses) SetMetaConsensusData(rewardAddresses []string, round uint64, epoch uint32) { + sp.metaConsensusData = append(sp.metaConsensusData, &data.ConsensusRewardData{ + Round: round, + Epoch: epoch, + Addresses: rewardAddresses, + }) +} + +// ClearMetaConsensusData clears the previously set addresses for rewarding metachain nodes +func (sp *specialAddresses) ClearMetaConsensusData() { + sp.metaConsensusData = make([]*data.ConsensusRewardData, 0) +} + +func (sp *specialAddresses) ConsensusMetaRewardData() []*data.ConsensusRewardData { + return sp.metaConsensusData } // LeaderAddress provides leader address func (sp *specialAddresses) LeaderAddress() []byte { - if len(sp.consensusRewardAddresses) == 0 { + if sp.shardConsensusData == nil || len(sp.shardConsensusData.Addresses) == 0 { return nil } - return []byte(sp.consensusRewardAddresses[0]) -} - -// ConsensusRewardAddresses provides the consensus reward addresses -func (sp *specialAddresses) 
ConsensusRewardAddresses() []string { - return sp.consensusRewardAddresses + return []byte(sp.shardConsensusData.Addresses[0]) } // Round returns the round for the current block func (sp *specialAddresses) Round() uint64 { - return sp.round + if sp.shardConsensusData == nil { + return 0 + } + + return sp.shardConsensusData.Round } // Epoch returns the epoch for the current block func (sp *specialAddresses) Epoch() uint32 { - return sp.epoch + if sp.shardConsensusData == nil { + return 0 + } + + return sp.shardConsensusData.Epoch } // ShardIdForAddress calculates shard id for address diff --git a/data/CapnpHelper.go b/data/capnpHelper.go similarity index 100% rename from data/CapnpHelper.go rename to data/capnpHelper.go diff --git a/data/consensusRewardData.go b/data/consensusRewardData.go new file mode 100644 index 00000000000..731838d5322 --- /dev/null +++ b/data/consensusRewardData.go @@ -0,0 +1,8 @@ +package data + +// ConsensusRewardData holds the required data for rewarding validators in a specific round and epoch +type ConsensusRewardData struct { + Round uint64 + Epoch uint32 + Addresses []string +} diff --git a/data/feeTx/capnp/schema.capnp b/data/feeTx/capnp/schema.capnp deleted file mode 100644 index 6282c429d29..00000000000 --- a/data/feeTx/capnp/schema.capnp +++ /dev/null @@ -1,19 +0,0 @@ -@0xff99b03cb6309633; -using Go = import "/go.capnp"; -$Go.package("capnp"); -$Go.import("_"); - - -struct FeeTxCapn { - nonce @0: UInt64; - value @1: Data; - rcvAddr @2: Data; - shardId @3: UInt32; -} - -##compile with: - -## -## -## capnpc -I$GOPATH/src/github.com/glycerine/go-capnproto -ogo $GOPATH/src/github.com/ElrondNetwork/elrond-go/data/feeTx/capnp/schema.capnp - diff --git a/data/feeTx/capnp/schema.capnp.go b/data/feeTx/capnp/schema.capnp.go deleted file mode 100644 index 185949c0f7c..00000000000 --- a/data/feeTx/capnp/schema.capnp.go +++ /dev/null @@ -1,229 +0,0 @@ -package capnp - -// AUTO GENERATED - DO NOT EDIT - -import ( - "bufio" - "bytes" - "encoding/json" - C "github.com/glycerine/go-capnproto" - "io" -) - -type FeeTxCapn C.Struct - -func NewFeeTxCapn(s *C.Segment) FeeTxCapn { return FeeTxCapn(s.NewStruct(16, 2)) } -func NewRootFeeTxCapn(s *C.Segment) FeeTxCapn { return FeeTxCapn(s.NewRootStruct(16, 2)) } -func AutoNewFeeTxCapn(s *C.Segment) FeeTxCapn { return FeeTxCapn(s.NewStructAR(16, 2)) } -func ReadRootFeeTxCapn(s *C.Segment) FeeTxCapn { return FeeTxCapn(s.Root(0).ToStruct()) } -func (s FeeTxCapn) Nonce() uint64 { return C.Struct(s).Get64(0) } -func (s FeeTxCapn) SetNonce(v uint64) { C.Struct(s).Set64(0, v) } -func (s FeeTxCapn) Value() []byte { return C.Struct(s).GetObject(0).ToData() } -func (s FeeTxCapn) SetValue(v []byte) { C.Struct(s).SetObject(0, s.Segment.NewData(v)) } -func (s FeeTxCapn) RcvAddr() []byte { return C.Struct(s).GetObject(1).ToData() } -func (s FeeTxCapn) SetRcvAddr(v []byte) { C.Struct(s).SetObject(1, s.Segment.NewData(v)) } -func (s FeeTxCapn) ShardId() uint32 { return C.Struct(s).Get32(8) } -func (s FeeTxCapn) SetShardId(v uint32) { C.Struct(s).Set32(8, v) } -func (s FeeTxCapn) WriteJSON(w io.Writer) error { - b := bufio.NewWriter(w) - var err error - var buf []byte - _ = buf - err = b.WriteByte('{') - if err != nil { - return err - } - _, err = b.WriteString("\"nonce\":") - if err != nil { - return err - } - { - s := s.Nonce() - buf, err = json.Marshal(s) - if err != nil { - return err - } - _, err = b.Write(buf) - if err != nil { - return err - } - } - err = b.WriteByte(',') - if err != nil { - return err - } - _, err = 
b.WriteString("\"value\":") - if err != nil { - return err - } - { - s := s.Value() - buf, err = json.Marshal(s) - if err != nil { - return err - } - _, err = b.Write(buf) - if err != nil { - return err - } - } - err = b.WriteByte(',') - if err != nil { - return err - } - _, err = b.WriteString("\"rcvAddr\":") - if err != nil { - return err - } - { - s := s.RcvAddr() - buf, err = json.Marshal(s) - if err != nil { - return err - } - _, err = b.Write(buf) - if err != nil { - return err - } - } - err = b.WriteByte(',') - if err != nil { - return err - } - _, err = b.WriteString("\"shardId\":") - if err != nil { - return err - } - { - s := s.ShardId() - buf, err = json.Marshal(s) - if err != nil { - return err - } - _, err = b.Write(buf) - if err != nil { - return err - } - } - err = b.WriteByte('}') - if err != nil { - return err - } - err = b.Flush() - return err -} -func (s FeeTxCapn) MarshalJSON() ([]byte, error) { - b := bytes.Buffer{} - err := s.WriteJSON(&b) - return b.Bytes(), err -} -func (s FeeTxCapn) WriteCapLit(w io.Writer) error { - b := bufio.NewWriter(w) - var err error - var buf []byte - _ = buf - err = b.WriteByte('(') - if err != nil { - return err - } - _, err = b.WriteString("nonce = ") - if err != nil { - return err - } - { - s := s.Nonce() - buf, err = json.Marshal(s) - if err != nil { - return err - } - _, err = b.Write(buf) - if err != nil { - return err - } - } - _, err = b.WriteString(", ") - if err != nil { - return err - } - _, err = b.WriteString("value = ") - if err != nil { - return err - } - { - s := s.Value() - buf, err = json.Marshal(s) - if err != nil { - return err - } - _, err = b.Write(buf) - if err != nil { - return err - } - } - _, err = b.WriteString(", ") - if err != nil { - return err - } - _, err = b.WriteString("rcvAddr = ") - if err != nil { - return err - } - { - s := s.RcvAddr() - buf, err = json.Marshal(s) - if err != nil { - return err - } - _, err = b.Write(buf) - if err != nil { - return err - } - } - _, err = b.WriteString(", ") - if err != nil { - return err - } - _, err = b.WriteString("shardId = ") - if err != nil { - return err - } - { - s := s.ShardId() - buf, err = json.Marshal(s) - if err != nil { - return err - } - _, err = b.Write(buf) - if err != nil { - return err - } - } - err = b.WriteByte(')') - if err != nil { - return err - } - err = b.Flush() - return err -} -func (s FeeTxCapn) MarshalCapLit() ([]byte, error) { - b := bytes.Buffer{} - err := s.WriteCapLit(&b) - return b.Bytes(), err -} - -type FeeTxCapn_List C.PointerList - -func NewFeeTxCapnList(s *C.Segment, sz int) FeeTxCapn_List { - return FeeTxCapn_List(s.NewCompositeList(16, 2, sz)) -} -func (s FeeTxCapn_List) Len() int { return C.PointerList(s).Len() } -func (s FeeTxCapn_List) At(i int) FeeTxCapn { return FeeTxCapn(C.PointerList(s).At(i).ToStruct()) } -func (s FeeTxCapn_List) ToArray() []FeeTxCapn { - n := s.Len() - a := make([]FeeTxCapn, n) - for i := 0; i < n; i++ { - a[i] = s.At(i) - } - return a -} -func (s FeeTxCapn_List) Set(i int, item FeeTxCapn) { C.PointerList(s).Set(i, C.Object(item)) } diff --git a/data/feeTx/feeTx.go b/data/feeTx/feeTx.go deleted file mode 100644 index 4560447e4c2..00000000000 --- a/data/feeTx/feeTx.go +++ /dev/null @@ -1,116 +0,0 @@ -package feeTx - -import ( - "io" - "math/big" - - "github.com/ElrondNetwork/elrond-go/data/feeTx/capnp" - "github.com/glycerine/go-capnproto" -) - -// FeeTx holds all the data needed for a value transfer -type FeeTx struct { - Nonce uint64 `capid:"0" json:"nonce"` - Value *big.Int `capid:"1" json:"value"` - 
RcvAddr []byte `capid:"2" json:"receiver"` - ShardId uint32 `capid:"3" json:"shardId"` -} - -// Save saves the serialized data of a FeeTx into a stream through Capnp protocol -func (scr *FeeTx) Save(w io.Writer) error { - seg := capn.NewBuffer(nil) - FeeTxGoToCapn(seg, scr) - _, err := seg.WriteTo(w) - return err -} - -// Load loads the data from the stream into a FeeTx object through Capnp protocol -func (scr *FeeTx) Load(r io.Reader) error { - capMsg, err := capn.ReadFromStream(r, nil) - if err != nil { - return err - } - - z := capnp.ReadRootFeeTxCapn(capMsg) - FeeTxCapnToGo(z, scr) - return nil -} - -// FeeTxCapnToGo is a helper function to copy fields from a FeeTxCapn object to a FeeTx object -func FeeTxCapnToGo(src capnp.FeeTxCapn, dest *FeeTx) *FeeTx { - if dest == nil { - dest = &FeeTx{} - } - - if dest.Value == nil { - dest.Value = big.NewInt(0) - } - - dest.Nonce = src.Nonce() - err := dest.Value.GobDecode(src.Value()) - - if err != nil { - return nil - } - - dest.RcvAddr = src.RcvAddr() - dest.ShardId = src.ShardId() - - return dest -} - -// FeeTxGoToCapn is a helper function to copy fields from a FeeTx object to a FeeTxCapn object -func FeeTxGoToCapn(seg *capn.Segment, src *FeeTx) capnp.FeeTxCapn { - dest := capnp.AutoNewFeeTxCapn(seg) - - value, _ := src.Value.GobEncode() - dest.SetNonce(src.Nonce) - dest.SetValue(value) - dest.SetRcvAddr(src.RcvAddr) - dest.SetShardId(src.ShardId) - - return dest -} - -// IsInterfaceNil verifies if underlying object is nil -func (scr *FeeTx) IsInterfaceNil() bool { - return scr == nil -} - -// GetValue returns the value of the fee transaction -func (scr *FeeTx) GetValue() *big.Int { - return scr.Value -} - -// GetData returns the data of the fee transaction -func (scr *FeeTx) GetData() string { - return "" -} - -// GetRecvAddress returns the receiver address from the fee transaction -func (scr *FeeTx) GetRecvAddress() []byte { - return scr.RcvAddr -} - -// GetSndAddress returns the sender address from the fee transaction -func (scr *FeeTx) GetSndAddress() []byte { - return nil -} - -// SetValue sets the value of the fee transaction -func (scr *FeeTx) SetValue(value *big.Int) { - scr.Value = value -} - -// SetData sets the data of the fee transaction -func (scr *FeeTx) SetData(data string) { -} - -// SetRecvAddress sets the receiver address of the fee transaction -func (scr *FeeTx) SetRecvAddress(addr []byte) { - scr.RcvAddr = addr -} - -// SetSndAddress sets the sender address of the fee transaction -func (scr *FeeTx) SetSndAddress(addr []byte) { -} diff --git a/data/feeTx/feeTx_test.go b/data/feeTx/feeTx_test.go deleted file mode 100644 index 25e6a2d6010..00000000000 --- a/data/feeTx/feeTx_test.go +++ /dev/null @@ -1,67 +0,0 @@ -package feeTx_test - -import ( - "bytes" - "math/big" - "testing" - - "github.com/ElrondNetwork/elrond-go/data/feeTx" - "github.com/stretchr/testify/assert" -) - -func TestFeeTx_SaveLoad(t *testing.T) { - smrS := feeTx.FeeTx{ - Nonce: uint64(1), - Value: big.NewInt(1), - RcvAddr: []byte("receiver_address"), - ShardId: 10, - } - - var b bytes.Buffer - err := smrS.Save(&b) - assert.Nil(t, err) - - loadSMR := feeTx.FeeTx{} - err = loadSMR.Load(&b) - assert.Nil(t, err) - - assert.Equal(t, smrS, loadSMR) -} - -func TestFeeTx_GetRecvAddr(t *testing.T) { - t.Parallel() - - data := []byte("data") - scr := &feeTx.FeeTx{RcvAddr: data} - - assert.Equal(t, data, scr.RcvAddr) -} - -func TestFeeTx_GetValue(t *testing.T) { - t.Parallel() - - value := big.NewInt(10) - scr := &feeTx.FeeTx{Value: value} - - assert.Equal(t, value, 
scr.Value) -} - -func TestFeeTx_SetRecvAddr(t *testing.T) { - t.Parallel() - - data := []byte("data") - scr := &feeTx.FeeTx{} - scr.SetRecvAddress(data) - - assert.Equal(t, data, scr.RcvAddr) -} - -func TestFeeTx_SetValue(t *testing.T) { - t.Parallel() - - value := big.NewInt(10) - scr := &feeTx.FeeTx{} - scr.SetValue(value) - - assert.Equal(t, value, scr.Value) -} diff --git a/integrationTests/mock/specialAddressHandlerMock.go b/integrationTests/mock/specialAddressHandlerMock.go index a9af9a371df..9456bbf8032 100644 --- a/integrationTests/mock/specialAddressHandlerMock.go +++ b/integrationTests/mock/specialAddressHandlerMock.go @@ -1,6 +1,7 @@ package mock import ( + "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/sharding" ) @@ -13,20 +14,43 @@ type SpecialAddressHandlerMock struct { AdrConv state.AddressConverter ShardCoordinator sharding.Coordinator - addresses []string - epoch uint32 - round uint64 + shardConsensusData *data.ConsensusRewardData + metaConsensusData []*data.ConsensusRewardData } func (sh *SpecialAddressHandlerMock) SetElrondCommunityAddress(elrond []byte) { } func (sh *SpecialAddressHandlerMock) SetConsensusData(consensusRewardAddresses []string, round uint64, epoch uint32) { - sh.addresses = consensusRewardAddresses + sh.shardConsensusData = &data.ConsensusRewardData{ + Round: round, + Epoch: epoch, + Addresses: consensusRewardAddresses, + } +} + +func (sh *SpecialAddressHandlerMock) ConsensusShardRewardData() *data.ConsensusRewardData { + return sh.shardConsensusData +} + +func (sh *SpecialAddressHandlerMock) SetMetaConsensusData(rewardAddresses []string, round uint64, epoch uint32) { + if sh.metaConsensusData == nil { + sh.metaConsensusData = make([]*data.ConsensusRewardData, 0) + } + + sh.metaConsensusData = append(sh.metaConsensusData, &data.ConsensusRewardData{ + Round: round, + Epoch: epoch, + Addresses: rewardAddresses, + }) +} + +func (sh *SpecialAddressHandlerMock) ClearMetaConsensusData() { + sh.metaConsensusData = make([]*data.ConsensusRewardData, 0) } -func (sh *SpecialAddressHandlerMock) ConsensusRewardAddresses() []string { - return sh.addresses +func (sh *SpecialAddressHandlerMock) ConsensusMetaRewardData() []*data.ConsensusRewardData { + return sh.metaConsensusData } func (sh *SpecialAddressHandlerMock) BurnAddress() []byte { @@ -54,11 +78,11 @@ func (sh *SpecialAddressHandlerMock) LeaderAddress() []byte { } func (sh *SpecialAddressHandlerMock) Round() uint64 { - return sh.round + return sh.shardConsensusData.Round } func (sh *SpecialAddressHandlerMock) Epoch() uint32 { - return sh.epoch + return sh.shardConsensusData.Epoch } func (sh *SpecialAddressHandlerMock) ShardIdForAddress(addr []byte) (uint32, error) { diff --git a/process/block/export_test.go b/process/block/export_test.go index 3fcb1d9b503..bb9bed53ee5 100644 --- a/process/block/export_test.go +++ b/process/block/export_test.go @@ -45,7 +45,7 @@ func (sp *shardProcessor) CreateMiniBlocks(noShards uint32, maxItemsInBlock uint } func (sp *shardProcessor) GetProcessedMetaBlocksFromPool(body block.Body, header *block.Header) ([]data.HeaderHandler, error) { - return sp.getProcessedMetaBlocksFromPool(body, header) + return sp.getProcessedMetaBlocksFromPool(body, header.MetaBlockHashes) } func (sp *shardProcessor) RemoveProcessedMetablocksFromPool(processedMetaHdrs []data.HeaderHandler) error { diff --git a/process/block/preprocess/rewardsHandler.go b/process/block/preprocess/rewardsHandler.go index 722d794d519..dc19eda13d6 
100644 --- a/process/block/preprocess/rewardsHandler.go +++ b/process/block/preprocess/rewardsHandler.go @@ -32,9 +32,10 @@ type rewardsHandler struct { store dataRetriever.StorageService rewardTxPool dataRetriever.ShardedDataCacherNotifier - mutGenRewardTxs sync.RWMutex - protocolRewards []data.TransactionHandler - feeRewards []data.TransactionHandler + mutGenRewardTxs sync.RWMutex + protocolRewards []data.TransactionHandler + protocolRewardsMeta []data.TransactionHandler + feeRewards []data.TransactionHandler mut sync.Mutex accumulatedFees *big.Int @@ -148,11 +149,21 @@ func (rtxh *rewardsHandler) getShardIdsFromAddress(addr []byte) (uint32, error) // CreateAllInterMiniBlocks creates miniblocks from process transactions func (rtxh *rewardsHandler) CreateAllInterMiniBlocks() map[uint32]*block.MiniBlock { rtxh.mutGenRewardTxs.Lock() - calculatedRewardTxs := make([]data.TransactionHandler, 0) + rtxh.feeRewards = rtxh.createRewardFromFees() rtxh.addTransactionsToPool(rtxh.feeRewards) + + rtxh.protocolRewards = rtxh.createProtocolRewards() + rtxh.addTransactionsToPool(rtxh.protocolRewards) + + rtxh.protocolRewardsMeta = rtxh.createProtocolRewardsForMeta() + rtxh.addTransactionsToPool(rtxh.protocolRewardsMeta) + + calculatedRewardTxs := make([]data.TransactionHandler, 0) calculatedRewardTxs = append(calculatedRewardTxs, rtxh.protocolRewards...) + calculatedRewardTxs = append(calculatedRewardTxs, rtxh.protocolRewardsMeta...) calculatedRewardTxs = append(calculatedRewardTxs, rtxh.feeRewards...) + rtxh.mutGenRewardTxs.Unlock() miniBlocks := rtxh.miniblocksFromRewardTxs(calculatedRewardTxs) @@ -222,8 +233,6 @@ func (rtxh *rewardsHandler) VerifyInterMiniBlocks(body block.Body) error { // CreateBlockStarted does the cleanup before creating a new block func (rtxh *rewardsHandler) CreateBlockStarted() { rtxh.cleanCachedData() - rewardTxs := rtxh.createProtocolRewards() - rtxh.addTransactionsToPool(rewardTxs) } // CreateMarshalizedData creates the marshalized data for broadcasting purposes @@ -270,6 +279,7 @@ func (rtxh *rewardsHandler) cleanCachedData() { rtxh.mutGenRewardTxs.Lock() rtxh.feeRewards = make([]data.TransactionHandler, 0) rtxh.protocolRewards = make([]data.TransactionHandler, 0) + rtxh.protocolRewardsMeta = make([]data.TransactionHandler, 0) rtxh.mutGenRewardTxs.Unlock() } @@ -345,23 +355,50 @@ func (rtxh *rewardsHandler) createRewardFromFees() []data.TransactionHandler { // createProtocolRewards creates the protocol reward transactions func (rtxh *rewardsHandler) createProtocolRewards() []data.TransactionHandler { - consensusRewardAddresses := rtxh.address.ConsensusRewardAddresses() + consensusRewardData := rtxh.address.ConsensusShardRewardData() consensusRewardTxs := make([]data.TransactionHandler, 0) - for _, address := range consensusRewardAddresses { + for _, address := range consensusRewardData.Addresses { rTx := &rewardTx.RewardTx{} rTx.Value = rewardValue rTx.RcvAddr = []byte(address) rTx.ShardId = rtxh.shardCoordinator.SelfId() - rTx.Epoch = rtxh.address.Epoch() - rTx.Round = rtxh.address.Round() + rTx.Epoch = consensusRewardData.Epoch + rTx.Round = consensusRewardData.Round consensusRewardTxs = append(consensusRewardTxs, rTx) } - rtxh.mutGenRewardTxs.Lock() - rtxh.protocolRewards = consensusRewardTxs - rtxh.mutGenRewardTxs.Unlock() + return consensusRewardTxs +} + +// createProtocolRewardsForMeta creates the protocol reward transactions +func (rtxh *rewardsHandler) createProtocolRewardsForMeta() []data.TransactionHandler { + metaRewardsData := 
rtxh.address.ConsensusMetaRewardData() + consensusRewardTxs := make([]data.TransactionHandler, 0) + + for _, metaConsensusSet := range metaRewardsData { + for _, address := range metaConsensusSet.Addresses { + addr, err := rtxh.adrConv.CreateAddressFromPublicKeyBytes([]byte(address)) + if err != nil { + log.Error(err.Error()) + continue + } + + if rtxh.shardCoordinator.ComputeId(addr) != rtxh.shardCoordinator.SelfId() { + continue + } + + rTx := &rewardTx.RewardTx{} + rTx.Value = rewardValue + rTx.RcvAddr = []byte(address) + rTx.ShardId = rtxh.shardCoordinator.SelfId() + rTx.Epoch = metaConsensusSet.Epoch + rTx.Round = metaConsensusSet.Round + + consensusRewardTxs = append(consensusRewardTxs, rTx) + } + } return consensusRewardTxs } @@ -371,6 +408,7 @@ func (rtxh *rewardsHandler) verifyCreatedRewardsTxs() error { calculatedRewardTxs := make([]data.TransactionHandler, 0) rtxh.mutGenRewardTxs.RLock() calculatedRewardTxs = append(calculatedRewardTxs, rtxh.protocolRewards...) + calculatedRewardTxs = append(calculatedRewardTxs, rtxh.protocolRewardsMeta...) calculatedRewardTxs = append(calculatedRewardTxs, rtxh.feeRewards...) rtxh.mutGenRewardTxs.RUnlock() diff --git a/process/block/shardblock.go b/process/block/shardblock.go index 0400a1fb57d..ad6ed897d99 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -189,17 +189,11 @@ func (sp *shardProcessor) ProcessBlock( log.Info(fmt.Sprintf("Total txs in pool: %d\n", numTxWithDst)) - // give transaction coordinator the consensus group validators addresses where to send the rewards. - consensusAddresses, err := sp.nodesCoordinator.GetValidatorsRewardsAddresses( - headerHandler.GetPrevRandSeed(), - headerHandler.GetRound(), - sp.shardCoordinator.SelfId(), - ) + err = sp.setShardConsensusData(headerHandler) if err != nil { return err } - sp.SetConsensusData(consensusAddresses, headerHandler.GetRound()) sp.txCoordinator.CreateBlockStarted() sp.txCoordinator.RequestBlockTransactions(body) requestedMetaHdrs, requestedFinalMetaHdrs := sp.requestMetaHeaders(header) @@ -250,6 +244,13 @@ func (sp *shardProcessor) ProcessBlock( } }() + processedMetaHdrs, err := sp.getProcessedMetaBlocksFromPool(body, header.MetaBlockHashes) + + err = sp.setMetaConsensusData(processedMetaHdrs) + if err != nil { + return err + } + err = sp.txCoordinator.ProcessBlockTransaction(body, header.Round, haveTime) if err != nil { return err @@ -268,6 +269,44 @@ func (sp *shardProcessor) ProcessBlock( return nil } +func (sp *shardProcessor) setShardConsensusData(headerHandler data.HeaderHandler) error { + // give transaction coordinator the consensus group validators addresses where to send the rewards. 
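+	// the consensus group is recomputed deterministically from the header's previous random seed, round and shard id, so every node processing this block derives the same reward addresses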
+ consensusAddresses, err := sp.nodesCoordinator.GetValidatorsRewardsAddresses( + headerHandler.GetPrevRandSeed(), + headerHandler.GetRound(), + sp.shardCoordinator.SelfId(), + ) + if err != nil { + return err + } + + sp.SetConsensusData(consensusAddresses, headerHandler.GetRound()) + + return nil +} + +func (sp *shardProcessor) setMetaConsensusData(finalizedMetaBlocks []data.HeaderHandler) error { + sp.specialAddressHandler.ClearMetaConsensusData() + + // for every finalized metablock header, reward the metachain consensus group members with accounts in shard + for _, metaBlock := range finalizedMetaBlocks { + round := metaBlock.GetRound() + epoch := metaBlock.GetEpoch() + consensusAddresses, err := sp.nodesCoordinator.GetValidatorsRewardsAddresses( + metaBlock.GetPrevRandSeed(), + round, + metaBlock.GetShardID(), + ) + if err != nil { + return err + } + + sp.specialAddressHandler.SetMetaConsensusData(consensusAddresses, round, epoch) + } + + return nil +} + // SetConsensusData - sets the reward addresses for the current consensus group func (sp *shardProcessor) SetConsensusData(consensusRewardAddresses []string, round uint64) { sp.specialAddressHandler.SetConsensusData(consensusRewardAddresses, round, 0) @@ -661,7 +700,7 @@ func (sp *shardProcessor) CommitBlock( log.LogIfError(errNotCritical) } - processedMetaHdrs, errNotCritical := sp.getProcessedMetaBlocksFromPool(body, header) + processedMetaHdrs, errNotCritical := sp.getProcessedMetaBlocksFromPool(body, header.MetaBlockHashes) if errNotCritical != nil { log.Debug(errNotCritical.Error()) } @@ -817,12 +856,15 @@ func (sp *shardProcessor) getHighestHdrForShardFromMetachain(shardId uint32, hdr } // getProcessedMetaBlocksFromPool returns all the meta blocks fully processed -func (sp *shardProcessor) getProcessedMetaBlocksFromPool(body block.Body, header *block.Header) ([]data.HeaderHandler, error) { +func (sp *shardProcessor) getProcessedMetaBlocksFromPool( + body block.Body, + usedMetaBlockHashes [][]byte, +) ([]data.HeaderHandler, error) { if body == nil { return nil, process.ErrNilTxBlockBody } - if header == nil { - return nil, process.ErrNilBlockHeader + if usedMetaBlockHashes == nil { + return nil, process.ErrNilMetaBlockHashes } miniBlockHashes := make(map[int][]byte, 0) @@ -844,7 +886,7 @@ func (sp *shardProcessor) getProcessedMetaBlocksFromPool(body block.Body, header log.Debug(fmt.Sprintf("cross mini blocks in body: %d\n", len(miniBlockHashes))) processedMetaHdrs := make([]data.HeaderHandler, 0) - for _, metaBlockKey := range header.MetaBlockHashes { + for _, metaBlockKey := range usedMetaBlockHashes { metaBlock, _ := sp.dataPool.MetaBlocks().Peek(metaBlockKey) if metaBlock == nil { log.Debug(process.ErrNilMetaBlockHeader.Error()) @@ -1394,17 +1436,22 @@ func (sp *shardProcessor) createMiniBlocks( log.Info(err.Error()) } + processedMetaHdrs, errNotCritical := sp.getProcessedMetaBlocksFromPool(block.Body(destMeMiniBlocks), usedMetaHdrsHashes) + if errNotCritical != nil { + log.Debug(errNotCritical.Error()) + } + + err = sp.setMetaConsensusData(processedMetaHdrs) + if err != nil { + return nil, err + } + log.Debug(fmt.Sprintf("processed %d miniblocks and %d txs with destination in self shard\n", len(destMeMiniBlocks), txs)) if len(destMeMiniBlocks) > 0 { miniBlocks = append(miniBlocks, destMeMiniBlocks...) 
} - if !haveTime() { - log.Info(fmt.Sprintf("time is up added %d transactions\n", txs)) - return miniBlocks, nil - } - maxTxSpaceRemained := int32(maxItemsInBlock) - int32(txs) maxMbSpaceRemained := int32(maxItemsInBlock) - int32(len(destMeMiniBlocks)) - int32(len(usedMetaHdrsHashes)) diff --git a/process/errors.go b/process/errors.go index 4f132a8ab35..62b7f9f9e74 100644 --- a/process/errors.go +++ b/process/errors.go @@ -46,6 +46,9 @@ var ErrNilBlockChain = errors.New("nil block chain") // ErrNilMetaBlockHeader signals that an operation has been attempted to or with a nil metablock var ErrNilMetaBlockHeader = errors.New("nil metablock header") +// ErrNilMetaBlockHashes signals that a nil array of metablock header hashes was provided +var ErrNilMetaBlockHashes = errors.New("nil metablock header hashes") + // ErrNilTxBlockBody signals that an operation has been attempted to or with a nil tx block body var ErrNilTxBlockBody = errors.New("nil tx block body") diff --git a/process/interface.go b/process/interface.go index c31497302a8..69d38a7acd7 100644 --- a/process/interface.go +++ b/process/interface.go @@ -114,7 +114,7 @@ type TransactionVerifier interface { IsTransactionValid(tx data.TransactionHandler) error } -// UnsignedTxHandler creates and verifies unsigned transactions for current round +// TransactionFeeHandler processes the transaction fee type TransactionFeeHandler interface { ProcessTransactionFee(cost *big.Int) IsInterfaceNil() bool @@ -125,12 +125,15 @@ type SpecialAddressHandler interface { SetElrondCommunityAddress(elrond []byte) ElrondCommunityAddress() []byte SetConsensusData(consensusRewardAddresses []string, round uint64, epoch uint32) - ConsensusRewardAddresses() []string + ConsensusShardRewardData() *data.ConsensusRewardData + SetMetaConsensusData(rewardAddresses []string, round uint64, epoch uint32) + ClearMetaConsensusData() + ConsensusMetaRewardData() []*data.ConsensusRewardData LeaderAddress() []byte BurnAddress() []byte ShardIdForAddress([]byte) (uint32, error) - Round() uint64 Epoch() uint32 + Round() uint64 IsInterfaceNil() bool } diff --git a/process/mock/specialAddressHandlerMock.go b/process/mock/specialAddressHandlerMock.go index 6dc47db5627..8af0bd4c074 100644 --- a/process/mock/specialAddressHandlerMock.go +++ b/process/mock/specialAddressHandlerMock.go @@ -1,6 +1,7 @@ package mock import ( + "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/sharding" ) @@ -13,9 +14,8 @@ type SpecialAddressHandlerMock struct { AdrConv state.AddressConverter ShardCoordinator sharding.Coordinator - addresses []string - epoch uint32 - round uint64 + shardConsensusData *data.ConsensusRewardData + metaConsensusData []*data.ConsensusRewardData } func NewSpecialAddressHandlerMock( @@ -29,9 +29,12 @@ func NewSpecialAddressHandlerMock( ShardIdForAddressCalled: nil, AdrConv: addrConv, ShardCoordinator: shardCoordinator, - addresses: nil, - epoch: 0, - round: 0, + shardConsensusData: &data.ConsensusRewardData{ + Round: 0, + Epoch: 0, + Addresses: nil, + }, + metaConsensusData: make([]*data.ConsensusRewardData, 0), } } @@ -39,13 +42,35 @@ func (sh *SpecialAddressHandlerMock) SetElrondCommunityAddress(elrond []byte) { } func (sh *SpecialAddressHandlerMock) SetConsensusData(consensusRewardAddresses []string, round uint64, epoch uint32) { - sh.addresses = consensusRewardAddresses - sh.epoch = epoch - sh.round = round + sh.shardConsensusData = &data.ConsensusRewardData{ + Round: round, + Epoch: epoch, + Addresses: 
consensusRewardAddresses, + } +} + +func (sh *SpecialAddressHandlerMock) ConsensusShardRewardData() *data.ConsensusRewardData { + return sh.shardConsensusData +} + +func (sh *SpecialAddressHandlerMock) SetMetaConsensusData(rewardAddresses []string, round uint64, epoch uint32) { + if sh.metaConsensusData == nil { + sh.metaConsensusData = make([]*data.ConsensusRewardData, 0) + } + + sh.metaConsensusData = append(sh.metaConsensusData, &data.ConsensusRewardData{ + Round: round, + Epoch: epoch, + Addresses: rewardAddresses, + }) } -func (sh *SpecialAddressHandlerMock) ConsensusRewardAddresses() []string { - return sh.addresses +func (sh *SpecialAddressHandlerMock) ClearMetaConsensusData() { + sh.metaConsensusData = make([]*data.ConsensusRewardData, 0) +} + +func (sh *SpecialAddressHandlerMock) ConsensusMetaRewardData() []*data.ConsensusRewardData { + return sh.metaConsensusData } func (sh *SpecialAddressHandlerMock) BurnAddress() []byte { @@ -65,11 +90,19 @@ func (sh *SpecialAddressHandlerMock) ElrondCommunityAddress() []byte { } func (sh *SpecialAddressHandlerMock) Round() uint64 { - return sh.round + if sh.shardConsensusData == nil { + return 0 + } + + return sh.shardConsensusData.Round } func (sh *SpecialAddressHandlerMock) Epoch() uint32 { - return sh.epoch + if sh.shardConsensusData == nil { + return 0 + } + + return sh.shardConsensusData.Epoch } func (sh *SpecialAddressHandlerMock) LeaderAddress() []byte { From 1dc422466ddb066ae0d3d02dd80a203dae1739cb Mon Sep 17 00:00:00 2001 From: miiu96 Date: Thu, 19 Sep 2019 15:25:34 +0300 Subject: [PATCH 111/234] EN-3887 : fix after review --- cmd/node/factory/structs.go | 26 ++++---- integrationTests/mock/poolscleanerMock.go | 27 ++++++++ .../smartContract/testInitilalizer.go | 3 +- integrationTests/testProcessorNode.go | 7 +- integrationTests/testSyncNode.go | 7 +- process/block/argProcessor.go | 7 +- process/block/baseProcess_test.go | 7 +- process/block/export_test.go | 7 +- process/block/poolscleaner/nilpoolscleaner.go | 28 -------- process/block/poolscleaner/poolscleaner.go | 40 ++++++++++-- .../block/poolscleaner/poolscleaner_test.go | 64 +++++++++++-------- process/block/shardblock.go | 34 +++++----- process/errors.go | 6 ++ process/interface.go | 3 +- process/mock/poolscleanerMock.go | 27 ++++++++ 15 files changed, 183 insertions(+), 110 deletions(-) create mode 100644 integrationTests/mock/poolscleanerMock.go delete mode 100644 process/block/poolscleaner/nilpoolscleaner.go create mode 100644 process/mock/poolscleanerMock.go diff --git a/cmd/node/factory/structs.go b/cmd/node/factory/structs.go index 1af49f7b127..7ddbef11214 100644 --- a/cmd/node/factory/structs.go +++ b/cmd/node/factory/structs.go @@ -1504,6 +1504,16 @@ func newShardBlockProcessorAndTracker( return nil, nil, err } + poolsCleaner, err := poolscleaner.NewTxsPoolsCleaner( + state.AccountsAdapter, + shardCoordinator, + data.Datapool, + state.AddressConverter, + ) + if err != nil { + return nil, nil, err + } + argumentsBaseProcessor := block.ArgBaseProcessor{ Accounts: state.AccountsAdapter, ForkDetector: forkDetector, @@ -1521,6 +1531,7 @@ func newShardBlockProcessorAndTracker( DataPool: data.Datapool, BlocksTracker: blockTracker, TxCoordinator: txCoordinator, + TxsPoolsCleaner: poolsCleaner, } blockProcessor, err := block.NewShardProcessor(arguments) @@ -1533,21 +1544,6 @@ func newShardBlockProcessorAndTracker( return nil, nil, err } - poolsCleaner, err := poolscleaner.NewTxsPoolsCleaner( - state.AccountsAdapter, - shardCoordinator, - data.Datapool, - state.AddressConverter, - 
) - if err != nil { - return nil, nil, err - } - - err = blockProcessor.SetPoolsCleaner(poolsCleaner) - if err != nil { - return nil, nil, err - } - return blockProcessor, blockTracker, nil } diff --git a/integrationTests/mock/poolscleanerMock.go b/integrationTests/mock/poolscleanerMock.go new file mode 100644 index 00000000000..c5b32a5e6c7 --- /dev/null +++ b/integrationTests/mock/poolscleanerMock.go @@ -0,0 +1,27 @@ +package mock + +import "time" + +type TxPoolsCleanerMock struct { + CleanCalled func(duration time.Duration) (bool, error) + NumRemovedTxsCalled func() uint64 +} + +// Clean will check if in pools exits transactions with nonce low that transaction sender account nonce +// and if tx have low nonce will be removed from pools +func (tpc *TxPoolsCleanerMock) Clean(duration time.Duration) (bool, error) { + return false, nil +} + +// NumRemovedTxs will return the number of removed txs from pools +func (tpc *TxPoolsCleanerMock) NumRemovedTxs() uint64 { + return 0 +} + +// IsInterfaceNil returns true if there is no value under the interface +func (tpc *TxPoolsCleanerMock) IsInterfaceNil() bool { + if tpc == nil { + return true + } + return false +} diff --git a/integrationTests/multiShard/smartContract/testInitilalizer.go b/integrationTests/multiShard/smartContract/testInitilalizer.go index 1bade519c08..62242f48e97 100644 --- a/integrationTests/multiShard/smartContract/testInitilalizer.go +++ b/integrationTests/multiShard/smartContract/testInitilalizer.go @@ -357,7 +357,8 @@ func createNetNode( return make([]data.HeaderHandler, 0) }, }, - TxCoordinator: tc, + TxCoordinator: tc, + TxsPoolsCleaner: &mock.TxPoolsCleanerMock{}, } blockProcessor, _ := block.NewShardProcessor(arguments) diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index 48cd25c5c6c..6d3f37c33b8 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -402,9 +402,10 @@ func (tpn *TestProcessorNode) initBlockProcessor() { RequestHandler: tpn.RequestHandler, Core: nil, }, - DataPool: tpn.ShardDataPool, - BlocksTracker: tpn.BlockTracker, - TxCoordinator: tpn.TxCoordinator, + DataPool: tpn.ShardDataPool, + BlocksTracker: tpn.BlockTracker, + TxCoordinator: tpn.TxCoordinator, + TxsPoolsCleaner: &mock.TxPoolsCleanerMock{}, } tpn.BlockProcessor, err = block.NewShardProcessor(arguments) diff --git a/integrationTests/testSyncNode.go b/integrationTests/testSyncNode.go index 8730e682d4b..59bd147ae1c 100644 --- a/integrationTests/testSyncNode.go +++ b/integrationTests/testSyncNode.go @@ -106,9 +106,10 @@ func (tpn *TestProcessorNode) initBlockProcessorWithSync() { RequestHandler: tpn.RequestHandler, Core: nil, }, - DataPool: tpn.ShardDataPool, - BlocksTracker: tpn.BlockTracker, - TxCoordinator: tpn.TxCoordinator, + DataPool: tpn.ShardDataPool, + BlocksTracker: tpn.BlockTracker, + TxCoordinator: tpn.TxCoordinator, + TxsPoolsCleaner: &mock.TxPoolsCleanerMock{}, } tpn.BlockProcessor, err = block.NewShardProcessor(arguments) diff --git a/process/block/argProcessor.go b/process/block/argProcessor.go index 1271618735f..8318711d641 100644 --- a/process/block/argProcessor.go +++ b/process/block/argProcessor.go @@ -31,7 +31,8 @@ type ArgBaseProcessor struct { // new instances of shard processor type ArgShardProcessor struct { *ArgBaseProcessor - DataPool dataRetriever.PoolsHolder - BlocksTracker process.BlocksTracker - TxCoordinator process.TransactionCoordinator + DataPool dataRetriever.PoolsHolder + BlocksTracker process.BlocksTracker + TxCoordinator 
process.TransactionCoordinator + TxsPoolsCleaner process.PoolsCleaner } diff --git a/process/block/baseProcess_test.go b/process/block/baseProcess_test.go index 92e22f25076..898ec279889 100644 --- a/process/block/baseProcess_test.go +++ b/process/block/baseProcess_test.go @@ -328,9 +328,10 @@ func CreateMockArguments() blproc.ArgShardProcessor { RequestHandler: &mock.RequestHandlerMock{}, Core: &mock.ServiceContainerMock{}, }, - DataPool: initDataPool([]byte("")), - BlocksTracker: &mock.BlocksTrackerMock{}, - TxCoordinator: &mock.TransactionCoordinatorMock{}, + DataPool: initDataPool([]byte("")), + BlocksTracker: &mock.BlocksTrackerMock{}, + TxCoordinator: &mock.TransactionCoordinatorMock{}, + TxsPoolsCleaner: &mock.TxPoolsCleanerMock{}, } return arguments diff --git a/process/block/export_test.go b/process/block/export_test.go index 3428d2f96b1..c6df63b24c3 100644 --- a/process/block/export_test.go +++ b/process/block/export_test.go @@ -67,9 +67,10 @@ func NewShardProcessorEmptyWith3shards(tdp dataRetriever.PoolsHolder, genesisBlo RequestHandler: &mock.RequestHandlerMock{}, Core: &mock.ServiceContainerMock{}, }, - DataPool: tdp, - BlocksTracker: &mock.BlocksTrackerMock{}, - TxCoordinator: &mock.TransactionCoordinatorMock{}, + DataPool: tdp, + BlocksTracker: &mock.BlocksTrackerMock{}, + TxCoordinator: &mock.TransactionCoordinatorMock{}, + TxsPoolsCleaner: &mock.TxPoolsCleanerMock{}, } shardProcessor, err := NewShardProcessor(arguments) return shardProcessor, err diff --git a/process/block/poolscleaner/nilpoolscleaner.go b/process/block/poolscleaner/nilpoolscleaner.go deleted file mode 100644 index a19c32ea7bb..00000000000 --- a/process/block/poolscleaner/nilpoolscleaner.go +++ /dev/null @@ -1,28 +0,0 @@ -package poolscleaner - -// NilPoolsCleaner will be used when an PoolsCleaner is required, but another one isn't necessary or available -type NilPoolsCleaner struct { -} - -// NewNilPoolsCleaner will return an instance of the struct -func NewNilPoolsCleaner() *NilPoolsCleaner { - return new(NilPoolsCleaner) -} - -// Clean method - won't do anything -func (nsh *NilPoolsCleaner) Clean(haveTime func() bool) error { - return nil -} - -// NumRemovedTxs - won't do anything -func (nsh *NilPoolsCleaner) NumRemovedTxs() uint64 { - return 0 -} - -// IsInterfaceNil returns true if there is no value under the interface -func (nsh *NilPoolsCleaner) IsInterfaceNil() bool { - if nsh == nil { - return true - } - return false -} diff --git a/process/block/poolscleaner/poolscleaner.go b/process/block/poolscleaner/poolscleaner.go index a5d794ac18d..3cb78b1b4e4 100644 --- a/process/block/poolscleaner/poolscleaner.go +++ b/process/block/poolscleaner/poolscleaner.go @@ -2,6 +2,7 @@ package poolscleaner import ( "sync/atomic" + "time" "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/data/transaction" @@ -17,6 +18,7 @@ type TxPoolsCleaner struct { dataPool dataRetriever.PoolsHolder addrConverter state.AddressConverter numRemovedTxs uint64 + canDoClean chan struct{} } // NewTxsPoolsCleaner will return a new transaction pools cleaner @@ -43,22 +45,42 @@ func NewTxsPoolsCleaner( return nil, process.ErrNilAddressConverter } + canDoClean := make(chan struct{}, 1) + return &TxPoolsCleaner{ accounts: accounts, shardCoordinator: shardCoordinator, dataPool: dataPool, addrConverter: addrConverter, numRemovedTxs: 0, + canDoClean: canDoClean, }, nil } // Clean will check if in pools exits transactions with nonce low that transaction sender account nonce // and if tx have low nonce will be 
removed from pools -func (tpc *TxPoolsCleaner) Clean(haveTime func() bool) error { - if haveTime == nil { - return process.ErrNilHaveTimeHandler +func (tpc *TxPoolsCleaner) Clean(duration time.Duration) (bool, error) { + if duration == 0 { + return false, process.ErrZeroCleaningTime } + select { + case tpc.canDoClean <- struct{}{}: + startTime := time.Now() + haveTime := func() bool { + return time.Now().Sub(startTime) < duration + } + + tpc.cleanPools(haveTime) + <-tpc.canDoClean + + return true, nil + default: + return false, nil + } +} + +func (tpc *TxPoolsCleaner) cleanPools(haveTime func() bool) { shardId := tpc.shardCoordinator.SelfId() transactions := tpc.dataPool.Transactions() numOfShards := tpc.shardCoordinator.NumberOfShards() @@ -69,7 +91,7 @@ func (tpc *TxPoolsCleaner) Clean(haveTime func() bool) error { for _, key := range txsPool.Keys() { if !haveTime() { - return nil + return } obj, ok := txsPool.Peek(key) @@ -108,11 +130,17 @@ func (tpc *TxPoolsCleaner) Clean(haveTime func() bool) error { } } } - - return nil } // NumRemovedTxs will return the number of removed txs from pools func (tpc *TxPoolsCleaner) NumRemovedTxs() uint64 { return atomic.LoadUint64(&tpc.numRemovedTxs) } + +// IsInterfaceNil returns true if there is no value under the interface +func (tpc *TxPoolsCleaner) IsInterfaceNil() bool { + if tpc == nil { + return true + } + return false +} diff --git a/process/block/poolscleaner/poolscleaner_test.go b/process/block/poolscleaner/poolscleaner_test.go index 11fec97d053..6132e6a1e5a 100644 --- a/process/block/poolscleaner/poolscleaner_test.go +++ b/process/block/poolscleaner/poolscleaner_test.go @@ -187,7 +187,7 @@ func TestNewTxsPoolsCleaner_ShouldWork(t *testing.T) { func TestTxPoolsCleaner_CleanNilSenderAddrShouldRemoveTx(t *testing.T) { t.Parallel() - cleaningTimeNumSeconds := 1.0 + cleaningTime := time.Second nonce := uint64(1) balance := big.NewInt(1) accounts := getAccAdapter(nonce, balance) @@ -196,13 +196,9 @@ func TestTxPoolsCleaner_CleanNilSenderAddrShouldRemoveTx(t *testing.T) { addrConverter, _ := addressConverters.NewPlainAddressConverter(32, "0x") txsPoolsCleaner, _ := poolscleaner.NewTxsPoolsCleaner(accounts, shardCoordinator, tdp, addrConverter) - startTime := time.Now() - haveTime := func() bool { - return time.Now().Sub(startTime).Seconds() < cleaningTimeNumSeconds - } - - err := txsPoolsCleaner.Clean(haveTime) + itRan, err := txsPoolsCleaner.Clean(cleaningTime) assert.Nil(t, err) + assert.Equal(t, true, itRan) numRemovedTxs := txsPoolsCleaner.NumRemovedTxs() assert.Equal(t, uint64(1), numRemovedTxs) @@ -211,7 +207,8 @@ func TestTxPoolsCleaner_CleanNilSenderAddrShouldRemoveTx(t *testing.T) { func TestTxPoolsCleaner_CleanAccountNotExistsShouldRemoveTx(t *testing.T) { t.Parallel() - cleanDurationSeconds := 2.0 + numRemovedTxsExpected := uint64(3) + cleanDuration := 2 * time.Second accounts := &mock.AccountsStub{ GetExistingAccountCalled: func(addressContainer state.AddressContainer) (handler state.AccountHandler, e error) { return nil, state.ErrAccNotFound @@ -222,22 +219,19 @@ func TestTxPoolsCleaner_CleanAccountNotExistsShouldRemoveTx(t *testing.T) { addrConverter, _ := addressConverters.NewPlainAddressConverter(32, "0x") txsPoolsCleaner, _ := poolscleaner.NewTxsPoolsCleaner(accounts, shardCoordinator, tdp, addrConverter) - startTime := time.Now() - haveTime := func() bool { - return time.Now().Sub(startTime).Seconds() < cleanDurationSeconds - } - - err := txsPoolsCleaner.Clean(haveTime) + itRan, err := txsPoolsCleaner.Clean(cleanDuration) 
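For reference, the Clean rewrite above combines a deadline closure with a one-slot buffered channel used as a non-blocking try-lock, so a second concurrent call returns immediately with false instead of queuing. The following is a minimal standalone sketch of that pattern, with illustrative names only (it is not code from this repository), using nothing beyond the Go standard library:

package main

import (
	"errors"
	"fmt"
	"time"
)

// cleaner illustrates the guard: the one-slot channel acts as a try-lock,
// so only one clean runs at a time and concurrent callers bail out at once.
type cleaner struct {
	busy chan struct{}
}

func newCleaner() *cleaner {
	return &cleaner{busy: make(chan struct{}, 1)}
}

// clean does bounded work for at most maxDuration and reports whether it ran.
func (c *cleaner) clean(maxDuration time.Duration) (bool, error) {
	if maxDuration == 0 {
		return false, errors.New("zero clean time")
	}

	select {
	case c.busy <- struct{}{}: // acquired the slot: we are the only cleaner
		start := time.Now()
		haveTime := func() bool { return time.Since(start) < maxDuration }

		for haveTime() {
			// per-item cleaning work would go here
			time.Sleep(5 * time.Millisecond)
		}

		<-c.busy // release the slot
		return true, nil
	default: // another clean is already in progress
		return false, nil
	}
}

func main() {
	c := newCleaner()

	go func() { _, _ = c.clean(50 * time.Millisecond) }() // first caller does the work
	time.Sleep(time.Millisecond)

	ran, err := c.clean(50 * time.Millisecond) // concurrent caller is rejected
	fmt.Println(ran, err)                      // expected: false <nil>
}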
assert.Nil(t, err) + assert.Equal(t, true, itRan) numRemovedTxs := txsPoolsCleaner.NumRemovedTxs() - assert.Equal(t, uint64(2), numRemovedTxs) + assert.Equal(t, numRemovedTxsExpected, numRemovedTxs) } func TestTxPoolsCleaner_CleanLowerAccountNonceShouldRemoveTx(t *testing.T) { t.Parallel() - cleanDurationSeconds := 2.0 + numRemovedTxsExpected := uint64(3) + cleanDuration := 2 * time.Second nonce := uint64(11) balance := big.NewInt(1) accounts := getAccAdapter(nonce, balance) @@ -246,16 +240,12 @@ func TestTxPoolsCleaner_CleanLowerAccountNonceShouldRemoveTx(t *testing.T) { addrConverter, _ := addressConverters.NewPlainAddressConverter(32, "0x") txsPoolsCleaner, _ := poolscleaner.NewTxsPoolsCleaner(accounts, shardCoordinator, tdp, addrConverter) - startTime := time.Now() - haveTime := func() bool { - return time.Now().Sub(startTime).Seconds() < cleanDurationSeconds - } - - err := txsPoolsCleaner.Clean(haveTime) + itRan, err := txsPoolsCleaner.Clean(cleanDuration) assert.Nil(t, err) + assert.Equal(t, true, itRan) numRemovedTxs := txsPoolsCleaner.NumRemovedTxs() - assert.Equal(t, uint64(2), numRemovedTxs) + assert.Equal(t, numRemovedTxsExpected, numRemovedTxs) } func TestTxPoolsCleaner_CleanNilHaveTimeShouldErr(t *testing.T) { @@ -269,6 +259,30 @@ func TestTxPoolsCleaner_CleanNilHaveTimeShouldErr(t *testing.T) { addrConverter, _ := addressConverters.NewPlainAddressConverter(32, "0x") txsPoolsCleaner, _ := poolscleaner.NewTxsPoolsCleaner(accounts, shardCoordinator, tdp, addrConverter) - err := txsPoolsCleaner.Clean(nil) - assert.Equal(t, process.ErrNilHaveTimeHandler, err) + itRan, err := txsPoolsCleaner.Clean(0) + assert.Equal(t, process.ErrZeroCleaningTime, err) + assert.Equal(t, false, itRan) +} + +func TestTxPoolsCleaner_CleanWillDoNothingIfIsCalledMultipleTime(t *testing.T) { + t.Parallel() + + nonce := uint64(1) + balance := big.NewInt(1) + accounts := getAccAdapter(nonce, balance) + shardCoordinator := mock.NewOneShardCoordinatorMock() + tdp := initDataPoolWithFourTransactions() + addrConverter, _ := addressConverters.NewPlainAddressConverter(32, "0x") + txsPoolsCleaner, _ := poolscleaner.NewTxsPoolsCleaner(accounts, shardCoordinator, tdp, addrConverter) + + go func() { + _, _ = txsPoolsCleaner.Clean(time.Second) + }() + time.Sleep(time.Millisecond) + go func() { + itRan, _ := txsPoolsCleaner.Clean(time.Second) + assert.Equal(t, false, itRan) + }() + + time.Sleep(2 * time.Second) } diff --git a/process/block/shardblock.go b/process/block/shardblock.go index a724abce1e4..a0369e0f8eb 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -14,13 +14,12 @@ import ( "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool" "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/process/block/poolscleaner" "github.com/ElrondNetwork/elrond-go/process/throttle" "github.com/ElrondNetwork/elrond-go/sharding" "github.com/ElrondNetwork/elrond-go/statusHandler" ) -const cleaningTime = 1 +const cleaningTime = time.Second // shardProcessor implements shardProcessor interface and actually it tries to execute block type shardProcessor struct { @@ -95,14 +94,18 @@ func NewShardProcessor(arguments ArgShardProcessor) (*shardProcessor, error) { return nil, err } + if arguments.TxsPoolsCleaner == nil || arguments.TxsPoolsCleaner.IsInterfaceNil() { + return nil, process.ErrNilTxsPoolsCleaner + } + sp := shardProcessor{ - core: arguments.Core, - baseProcessor: base, - dataPool: arguments.DataPool, - 
blocksTracker: arguments.BlocksTracker, - txCoordinator: arguments.TxCoordinator, - txCounter: NewTransactionCounter(), - txsPoolsCleaner: poolscleaner.NewNilPoolsCleaner(), + core: arguments.Core, + baseProcessor: base, + dataPool: arguments.DataPool, + blocksTracker: arguments.BlocksTracker, + txCoordinator: arguments.TxCoordinator, + txCounter: NewTransactionCounter(), + txsPoolsCleaner: arguments.TxsPoolsCleaner, } sp.chRcvAllMetaHdrs = make(chan bool) @@ -716,16 +719,9 @@ func (sp *shardProcessor) CommitBlock( func (sp *shardProcessor) cleanTxsPools() { go func() { - startTime := time.Now() - haveTime := func() bool { - return time.Now().Sub(startTime).Seconds() < cleaningTime - } - - errW := sp.txsPoolsCleaner.Clean(haveTime) - log.LogIfError(errW) - - numRemovedTxs := sp.txsPoolsCleaner.NumRemovedTxs() - log.Info(fmt.Sprintf("Total txs removed from pools with clean mechanism %d", numRemovedTxs)) + _, err := sp.txsPoolsCleaner.Clean(cleaningTime) + log.LogIfError(err) + log.Info(fmt.Sprintf("Total txs removed from pools cleaner %d", sp.txsPoolsCleaner.NumRemovedTxs())) }() } diff --git a/process/errors.go b/process/errors.go index c3ed64a48d2..2b80aa62bd3 100644 --- a/process/errors.go +++ b/process/errors.go @@ -426,3 +426,9 @@ var ErrVMTypeLengthInvalid = errors.New("vm type length is too long") // ErrOverallBalanceChangeFromSC signals that all sumed balance changes are not zero var ErrOverallBalanceChangeFromSC = errors.New("SC output balance updates are wrong") + +// ErrNilTxsPoolsCleaner signals that a nil transactions pools cleaner has been provided +var ErrNilTxsPoolsCleaner = errors.New("nil transactions pools cleaner") + +// ErrZeroCleaningTime signals that the cleaning time for the pools is less than or equal to zero +var ErrZeroCleaningTime = errors.New("cleaning time is equal or less than zero") diff --git a/process/interface.go b/process/interface.go index c252c013d41..cbcd56556e2 100644 --- a/process/interface.go +++ b/process/interface.go @@ -361,6 +361,7 @@ type TxValidatorHandler interface { // PoolsCleaner define the functionality that is needed for a pools cleaner type PoolsCleaner interface { - Clean(haveTime func() bool) error + Clean(duration time.Duration) (bool, error) NumRemovedTxs() uint64 + IsInterfaceNil() bool } diff --git a/process/mock/poolscleanerMock.go b/process/mock/poolscleanerMock.go new file mode 100644 index 00000000000..c5b32a5e6c7 --- /dev/null +++ b/process/mock/poolscleanerMock.go @@ -0,0 +1,27 @@ +package mock + +import "time" + +type TxPoolsCleanerMock struct { + CleanCalled func(duration time.Duration) (bool, error) + NumRemovedTxsCalled func() uint64 +} + +// Clean checks whether the pools contain transactions with a nonce lower than the sender account nonce +// and removes any such transactions from the pools +func (tpc *TxPoolsCleanerMock) Clean(duration time.Duration) (bool, error) { + return false, nil +} + +// NumRemovedTxs will return the number of removed txs from pools +func (tpc *TxPoolsCleanerMock) NumRemovedTxs() uint64 { + return 0 +} + +// IsInterfaceNil returns true if there is no value under the interface +func (tpc *TxPoolsCleanerMock) IsInterfaceNil() bool { + if tpc == nil { + return true + } + return false +} From 7b35f462dd2eb2f0e493dba159ce4a637b891f32 Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Thu, 19 Sep 2019 19:18:12 +0300 Subject: [PATCH 112/234] process, integrationTests: fix metachain rewards --- .../mock/specialAddressHandlerMock.go | 26 +++++++++++++++++++ .../smartContract/testInitilalizer.go | 24 
++++++++--------- integrationTests/testInitializer.go | 6 +++++ integrationTests/testProcessorNode.go | 10 +++---- integrationTests/testSyncNode.go | 5 +++- process/block/preprocess/rewardsHandler.go | 14 ++-------- .../block/preprocess/rewardsHandler_test.go | 7 ++--- process/block/shardblock.go | 3 ++- 8 files changed, 61 insertions(+), 34 deletions(-) diff --git a/integrationTests/mock/specialAddressHandlerMock.go b/integrationTests/mock/specialAddressHandlerMock.go index 6c59dd4e92d..7df8bd7dced 100644 --- a/integrationTests/mock/specialAddressHandlerMock.go +++ b/integrationTests/mock/specialAddressHandlerMock.go @@ -18,6 +18,26 @@ type SpecialAddressHandlerMock struct { metaConsensusData []*data.ConsensusRewardData } +func NewSpecialAddressHandlerMock( + addrConv state.AddressConverter, + shardCoordinator sharding.Coordinator, +) *SpecialAddressHandlerMock { + return &SpecialAddressHandlerMock{ + ElrondCommunityAddressCalled: nil, + LeaderAddressCalled: nil, + BurnAddressCalled: nil, + ShardIdForAddressCalled: nil, + AdrConv: addrConv, + ShardCoordinator: shardCoordinator, + shardConsensusData: &data.ConsensusRewardData{ + Round: 0, + Epoch: 0, + Addresses: nil, + }, + metaConsensusData: make([]*data.ConsensusRewardData, 0), + } +} + func (sh *SpecialAddressHandlerMock) SetElrondCommunityAddress(elrond []byte) { } @@ -81,10 +101,16 @@ func (sh *SpecialAddressHandlerMock) LeaderAddress() []byte { } func (sh *SpecialAddressHandlerMock) Round() uint64 { + if sh.shardConsensusData == nil { + return 0 + } return sh.shardConsensusData.Round } func (sh *SpecialAddressHandlerMock) Epoch() uint32 { + if sh.shardConsensusData == nil { + return 0 + } return sh.shardConsensusData.Epoch } diff --git a/integrationTests/multiShard/smartContract/testInitilalizer.go b/integrationTests/multiShard/smartContract/testInitilalizer.go index e9434db265a..bcff4b5da1b 100644 --- a/integrationTests/multiShard/smartContract/testInitilalizer.go +++ b/integrationTests/multiShard/smartContract/testInitilalizer.go @@ -339,10 +339,10 @@ func createNetNode( testMarshalizer, testHasher, testAddressConverter, - &mock.SpecialAddressHandlerMock{ - ShardCoordinator: shardCoordinator, - AdrConv: testAddressConverter, - }, + mock.NewSpecialAddressHandlerMock( + testAddressConverter, + shardCoordinator, + ), store, dPool, ) @@ -425,10 +425,10 @@ func createNetNode( accntAdapter, shardCoordinator, nodesCoordinator, - &mock.SpecialAddressHandlerMock{ - ShardCoordinator: shardCoordinator, - AdrConv: testAddressConverter, - }, + mock.NewSpecialAddressHandlerMock( + testAddressConverter, + shardCoordinator, + ), &mock.ForkDetectorMock{ AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeader data.HeaderHandler, finalHeaderHash []byte) error { return nil @@ -768,10 +768,10 @@ func createMetaNetNode( }, shardCoordinator, nodesCoordinator, - &mock.SpecialAddressHandlerMock{ - ShardCoordinator: shardCoordinator, - AdrConv: testAddressConverter, - }, + mock.NewSpecialAddressHandlerMock( + testAddressConverter, + shardCoordinator, + ), testHasher, testMarshalizer, store, diff --git a/integrationTests/testInitializer.go b/integrationTests/testInitializer.go index 6e91d7d882c..6c1674ef73d 100644 --- a/integrationTests/testInitializer.go +++ b/integrationTests/testInitializer.go @@ -502,7 +502,13 @@ func IncrementAndPrintRound(round uint64) uint64 { // ProposeBlock proposes a block with SC txs for every shard func ProposeBlock(nodes []*TestProcessorNode, idxProposers []int, round uint64, 
nonce uint64) { fmt.Println("All shards propose blocks...") + for idx, n := range nodes { + consensusRewardsData := n.SpecialAddressHandler.ConsensusShardRewardData() + // set the consensus reward addresses as rewards processor expects at least valid round + // otherwise the produced rewards will not be valid on verification + n.BlockProcessor.SetConsensusData(consensusRewardsData.Addresses, round) + if !IsIntInSlice(idx, idxProposers) { continue } diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index bc8de186bc1..57d827a429a 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -190,10 +190,10 @@ func NewTestProcessorNodeWithCustomDataPool(maxShards uint32, nodeShardId uint32 } func (tpn *TestProcessorNode) initTestNode() { - tpn.SpecialAddressHandler = &mock.SpecialAddressHandlerMock{ - ShardCoordinator: tpn.ShardCoordinator, - AdrConv: TestAddressConverter, - } + tpn.SpecialAddressHandler = mock.NewSpecialAddressHandlerMock( + TestAddressConverter, + tpn.ShardCoordinator, + ) tpn.initStorage() tpn.AccntState, _, _ = CreateAccountsDB(0) tpn.initChainHandler() @@ -342,7 +342,7 @@ func (tpn *TestProcessorNode) initInnerProcessors() { tpn.ScrForwarder, _ = tpn.InterimProcContainer.Get(dataBlock.SmartContractResultBlock) rewardsInter, _ := tpn.InterimProcContainer.Get(dataBlock.RewardsBlock) rewardsHandler, _ := rewardsInter.(process.TransactionFeeHandler) - internalTxProducer,_:= rewardsInter.(process.InternalTransactionProducer) + internalTxProducer, _ := rewardsInter.(process.InternalTransactionProducer) tpn.RewardsProcessor, _ = rewardTransaction.NewRewardTxProcessor( tpn.AccntState, diff --git a/integrationTests/testSyncNode.go b/integrationTests/testSyncNode.go index 24486b2e848..8ccc85e15c8 100644 --- a/integrationTests/testSyncNode.go +++ b/integrationTests/testSyncNode.go @@ -55,7 +55,10 @@ func (tpn *TestProcessorNode) initTestNodeWithSync() { tpn.AccntState, _, _ = CreateAccountsDB(0) tpn.initChainHandler() tpn.GenesisBlocks = CreateGenesisBlocks(tpn.ShardCoordinator) - tpn.SpecialAddressHandler = &mock.SpecialAddressHandlerMock{} + tpn.SpecialAddressHandler = mock.NewSpecialAddressHandlerMock( + TestAddressConverter, + tpn.ShardCoordinator, + ) tpn.initInterceptors() tpn.initResolvers() tpn.initInnerProcessors() diff --git a/process/block/preprocess/rewardsHandler.go b/process/block/preprocess/rewardsHandler.go index dc19eda13d6..6f5a4daabc9 100644 --- a/process/block/preprocess/rewardsHandler.go +++ b/process/block/preprocess/rewardsHandler.go @@ -136,16 +136,6 @@ func (rtxh *rewardsHandler) AddIntermediateTransactions(txs []data.TransactionHa return nil } -func (rtxh *rewardsHandler) getShardIdsFromAddress(addr []byte) (uint32, error) { - address, err := rtxh.adrConv.CreateAddressFromPublicKeyBytes(addr) - if err != nil { - return rtxh.shardCoordinator.NumberOfShards(), err - } - shardId := rtxh.shardCoordinator.ComputeId(address) - - return shardId, nil -} - // CreateAllInterMiniBlocks creates miniblocks from process transactions func (rtxh *rewardsHandler) CreateAllInterMiniBlocks() map[uint32]*block.MiniBlock { rtxh.mutGenRewardTxs.Lock() @@ -379,13 +369,13 @@ func (rtxh *rewardsHandler) createProtocolRewardsForMeta() []data.TransactionHan for _, metaConsensusSet := range metaRewardsData { for _, address := range metaConsensusSet.Addresses { - addr, err := rtxh.adrConv.CreateAddressFromPublicKeyBytes([]byte(address)) + shardId, err := rtxh.address.ShardIdForAddress([]byte(address)) if err 
!= nil { log.Error(err.Error()) continue } - if rtxh.shardCoordinator.ComputeId(addr) != rtxh.shardCoordinator.SelfId() { + if shardId != rtxh.shardCoordinator.SelfId() { continue } diff --git a/process/block/preprocess/rewardsHandler_test.go b/process/block/preprocess/rewardsHandler_test.go index 821e3f31152..3275273227f 100644 --- a/process/block/preprocess/rewardsHandler_test.go +++ b/process/block/preprocess/rewardsHandler_test.go @@ -367,9 +367,10 @@ func TestRewardsHandler_CreateAllInterMiniBlocksOK(t *testing.T) { shardCoordinator := mock.NewMultiShardsCoordinatorMock(1) tdp := initDataPool() th, err := NewRewardTxHandler( - &mock.SpecialAddressHandlerMock{ - AdrConv: &mock.AddressConverterMock{}, - ShardCoordinator: shardCoordinator}, + mock.NewSpecialAddressHandlerMock( + &mock.AddressConverterMock{}, + shardCoordinator, + ), &mock.HasherMock{}, &mock.MarshalizerMock{}, shardCoordinator, diff --git a/process/block/shardblock.go b/process/block/shardblock.go index ad6ed897d99..93f2cd35b24 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -864,7 +864,8 @@ func (sp *shardProcessor) getProcessedMetaBlocksFromPool( return nil, process.ErrNilTxBlockBody } if usedMetaBlockHashes == nil { - return nil, process.ErrNilMetaBlockHashes + // not an error, it can happen that no metablock header is used. + return make([]data.HeaderHandler, 0), nil } miniBlockHashes := make(map[int][]byte, 0) From b292a9ed0dab6bb0566f6792a5f72c6e7e21c312 Mon Sep 17 00:00:00 2001 From: iulianpascalau Date: Fri, 20 Sep 2019 09:18:32 +0300 Subject: [PATCH 113/234] added variable for last step delay in which the nodes should resolve their fork --- integrationTests/sync/basicSync_test.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/integrationTests/sync/basicSync_test.go b/integrationTests/sync/basicSync_test.go index 0f0e64da7d6..9c9b8c98100 100644 --- a/integrationTests/sync/basicSync_test.go +++ b/integrationTests/sync/basicSync_test.go @@ -157,7 +157,8 @@ func TestSyncWorksInShard_EmptyBlocksDoubleSign(t *testing.T) { round = integrationTests.IncrementAndPrintRound(round) updateRound(nodes, round) - time.Sleep(3 * stepDelay) + stepDelayForkResolving := 3 * stepDelay + time.Sleep(stepDelayForkResolving) testAllNodesHaveTheSameBlockHeightInBlockchain(t, nodes) testAllNodesHaveSameLastBlock(t, nodes) From 4f2460281e314eae3d47105a86a8bc204004bd87 Mon Sep 17 00:00:00 2001 From: iulianpascalau Date: Fri, 20 Sep 2019 11:54:04 +0300 Subject: [PATCH 114/234] updated sync tests to wait a little bit more for other peers to sync the new proposed blocks --- integrationTests/sync/basicSync_test.go | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/integrationTests/sync/basicSync_test.go b/integrationTests/sync/basicSync_test.go index 9c9b8c98100..8ed852a0439 100644 --- a/integrationTests/sync/basicSync_test.go +++ b/integrationTests/sync/basicSync_test.go @@ -15,6 +15,7 @@ import ( var stepDelay = time.Second var delayP2pBootstrap = time.Second * 2 +var stepSync = time.Second * 2 func TestSyncWorksInShard_EmptyBlocksNoForks(t *testing.T) { if testing.Short() { @@ -76,14 +77,14 @@ func TestSyncWorksInShard_EmptyBlocksNoForks(t *testing.T) { for i := 0; i < numRoundsToTest; i++ { integrationTests.ProposeBlock(nodes, idxProposers, round, nonce) - time.Sleep(stepDelay) + time.Sleep(stepSync) round = integrationTests.IncrementAndPrintRound(round) updateRound(nodes, round) nonce++ } - time.Sleep(stepDelay) + time.Sleep(stepSync) 
testAllNodesHaveTheSameBlockHeightInBlockchain(t, nodes) } @@ -139,13 +140,15 @@ func TestSyncWorksInShard_EmptyBlocksDoubleSign(t *testing.T) { for i := 0; i < numRoundsToTest; i++ { integrationTests.ProposeBlock(nodes, idxProposers, round, nonce) - time.Sleep(stepDelay) + time.Sleep(stepSync) round = integrationTests.IncrementAndPrintRound(round) updateRound(nodes, round) nonce++ } + time.Sleep(stepSync) + pubKeysVariant1 := []byte("1") pubKeysVariant2 := []byte("2") @@ -157,7 +160,7 @@ func TestSyncWorksInShard_EmptyBlocksDoubleSign(t *testing.T) { round = integrationTests.IncrementAndPrintRound(round) updateRound(nodes, round) - stepDelayForkResolving := 3 * stepDelay + stepDelayForkResolving := 4 * stepDelay time.Sleep(stepDelayForkResolving) testAllNodesHaveTheSameBlockHeightInBlockchain(t, nodes) From e6d29845160f4e853685dace1af5669e35e04dfa Mon Sep 17 00:00:00 2001 From: miiu96 Date: Fri, 20 Sep 2019 12:53:55 +0300 Subject: [PATCH 115/234] EN-3887 : fix after review --- cmd/node/factory/structs.go | 6 ++-- .../txPoolsCleaner.go} | 4 +-- .../txPoolsCleaner_test.go} | 32 +++++++++---------- process/block/shardblock.go | 23 +++---------- process/errors.go | 4 +-- 5 files changed, 28 insertions(+), 41 deletions(-) rename process/block/{poolscleaner/poolscleaner.go => poolsCleaner/txPoolsCleaner.go} (98%) rename process/block/{poolscleaner/poolscleaner_test.go => poolsCleaner/txPoolsCleaner_test.go} (90%) diff --git a/cmd/node/factory/structs.go b/cmd/node/factory/structs.go index 7ddbef11214..e171cfe0d61 100644 --- a/cmd/node/factory/structs.go +++ b/cmd/node/factory/structs.go @@ -53,7 +53,7 @@ import ( "github.com/ElrondNetwork/elrond-go/p2p/loadBalancer" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/block" - "github.com/ElrondNetwork/elrond-go/process/block/poolscleaner" + "github.com/ElrondNetwork/elrond-go/process/block/poolsCleaner" "github.com/ElrondNetwork/elrond-go/process/coordinator" "github.com/ElrondNetwork/elrond-go/process/factory" "github.com/ElrondNetwork/elrond-go/process/factory/metachain" @@ -1504,7 +1504,7 @@ func newShardBlockProcessorAndTracker( return nil, nil, err } - poolsCleaner, err := poolscleaner.NewTxsPoolsCleaner( + txPoolsCleaner, err := poolsCleaner.NewTxsPoolsCleaner( state.AccountsAdapter, shardCoordinator, data.Datapool, @@ -1531,7 +1531,7 @@ func newShardBlockProcessorAndTracker( DataPool: data.Datapool, BlocksTracker: blockTracker, TxCoordinator: txCoordinator, - TxsPoolsCleaner: poolsCleaner, + TxsPoolsCleaner: txPoolsCleaner, } blockProcessor, err := block.NewShardProcessor(arguments) diff --git a/process/block/poolscleaner/poolscleaner.go b/process/block/poolsCleaner/txPoolsCleaner.go similarity index 98% rename from process/block/poolscleaner/poolscleaner.go rename to process/block/poolsCleaner/txPoolsCleaner.go index 3cb78b1b4e4..61da3f3fbc0 100644 --- a/process/block/poolscleaner/poolscleaner.go +++ b/process/block/poolsCleaner/txPoolsCleaner.go @@ -1,4 +1,4 @@ -package poolscleaner +package poolsCleaner import ( "sync/atomic" @@ -61,7 +61,7 @@ func NewTxsPoolsCleaner( // and if tx have low nonce will be removed from pools func (tpc *TxPoolsCleaner) Clean(duration time.Duration) (bool, error) { if duration == 0 { - return false, process.ErrZeroCleaningTime + return false, process.ErrZeroMaxCleanTime } select { diff --git a/process/block/poolscleaner/poolscleaner_test.go b/process/block/poolsCleaner/txPoolsCleaner_test.go similarity index 90% rename from 
process/block/poolscleaner/poolscleaner_test.go rename to process/block/poolsCleaner/txPoolsCleaner_test.go index 6132e6a1e5a..863f9d0ce1b 100644 --- a/process/block/poolscleaner/poolscleaner_test.go +++ b/process/block/poolsCleaner/txPoolsCleaner_test.go @@ -1,4 +1,4 @@ -package poolscleaner_test +package poolsCleaner_test import ( "bytes" @@ -12,7 +12,7 @@ import ( "github.com/ElrondNetwork/elrond-go/data/transaction" "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/process/block/poolscleaner" + "github.com/ElrondNetwork/elrond-go/process/block/poolsCleaner" "github.com/ElrondNetwork/elrond-go/process/mock" "github.com/ElrondNetwork/elrond-go/storage" "github.com/stretchr/testify/assert" @@ -102,7 +102,7 @@ func TestNewTxsPoolsCleaner_NilAccountsShouldErr(t *testing.T) { shardCoordinator := mock.NewOneShardCoordinatorMock() tdp := initDataPool([]byte("test")) addrConverter, _ := addressConverters.NewPlainAddressConverter(32, "0x") - txsPoolsCleaner, err := poolscleaner.NewTxsPoolsCleaner(nil, shardCoordinator, tdp, addrConverter) + txsPoolsCleaner, err := poolsCleaner.NewTxsPoolsCleaner(nil, shardCoordinator, tdp, addrConverter) assert.Nil(t, txsPoolsCleaner) assert.Equal(t, process.ErrNilAccountsAdapter, err) @@ -116,7 +116,7 @@ func TestNewTxsPoolsCleaner_NilShardCoordinatorShouldErr(t *testing.T) { accounts := getAccAdapter(nonce, balance) tdp := initDataPool([]byte("test")) addrConverter, _ := addressConverters.NewPlainAddressConverter(32, "0x") - txsPoolsCleaner, err := poolscleaner.NewTxsPoolsCleaner(accounts, nil, tdp, addrConverter) + txsPoolsCleaner, err := poolsCleaner.NewTxsPoolsCleaner(accounts, nil, tdp, addrConverter) assert.Nil(t, txsPoolsCleaner) assert.Equal(t, process.ErrNilShardCoordinator, err) @@ -130,7 +130,7 @@ func TestNewTxsPoolsCleaner_NilDataPoolShouldErr(t *testing.T) { accounts := getAccAdapter(nonce, balance) shardCoordinator := mock.NewOneShardCoordinatorMock() addrConverter, _ := addressConverters.NewPlainAddressConverter(32, "0x") - txsPoolsCleaner, err := poolscleaner.NewTxsPoolsCleaner(accounts, shardCoordinator, nil, addrConverter) + txsPoolsCleaner, err := poolsCleaner.NewTxsPoolsCleaner(accounts, shardCoordinator, nil, addrConverter) assert.Nil(t, txsPoolsCleaner) assert.Equal(t, process.ErrNilDataPoolHolder, err) @@ -149,7 +149,7 @@ func TestNewTxsPoolsCleaner_NilTransactionPoolShouldErr(t *testing.T) { }, } addrConverter, _ := addressConverters.NewPlainAddressConverter(32, "0x") - txsPoolsCleaner, err := poolscleaner.NewTxsPoolsCleaner(accounts, shardCoordinator, tdp, addrConverter) + txsPoolsCleaner, err := poolsCleaner.NewTxsPoolsCleaner(accounts, shardCoordinator, tdp, addrConverter) assert.Nil(t, txsPoolsCleaner) assert.Equal(t, process.ErrNilTransactionPool, err) @@ -163,7 +163,7 @@ func TestNewTxsPoolsCleaner_NilAddressConverterShouldErr(t *testing.T) { accounts := getAccAdapter(nonce, balance) shardCoordinator := mock.NewOneShardCoordinatorMock() tdp := initDataPool([]byte("test")) - txsPoolsCleaner, err := poolscleaner.NewTxsPoolsCleaner(accounts, shardCoordinator, tdp, nil) + txsPoolsCleaner, err := poolsCleaner.NewTxsPoolsCleaner(accounts, shardCoordinator, tdp, nil) assert.Nil(t, txsPoolsCleaner) assert.Equal(t, process.ErrNilAddressConverter, err) @@ -178,7 +178,7 @@ func TestNewTxsPoolsCleaner_ShouldWork(t *testing.T) { shardCoordinator := mock.NewOneShardCoordinatorMock() tdp := initDataPool([]byte("test")) addrConverter, _ := 
addressConverters.NewPlainAddressConverter(32, "0x") - txsPoolsCleaner, err := poolscleaner.NewTxsPoolsCleaner(accounts, shardCoordinator, tdp, addrConverter) + txsPoolsCleaner, err := poolsCleaner.NewTxsPoolsCleaner(accounts, shardCoordinator, tdp, addrConverter) assert.NotNil(t, txsPoolsCleaner) assert.Nil(t, err) @@ -187,16 +187,16 @@ func TestNewTxsPoolsCleaner_ShouldWork(t *testing.T) { func TestTxPoolsCleaner_CleanNilSenderAddrShouldRemoveTx(t *testing.T) { t.Parallel() - cleaningTime := time.Second + maxCleanTime := time.Second nonce := uint64(1) balance := big.NewInt(1) accounts := getAccAdapter(nonce, balance) shardCoordinator := mock.NewOneShardCoordinatorMock() tdp := initDataPoolWithFourTransactions() addrConverter, _ := addressConverters.NewPlainAddressConverter(32, "0x") - txsPoolsCleaner, _ := poolscleaner.NewTxsPoolsCleaner(accounts, shardCoordinator, tdp, addrConverter) + txsPoolsCleaner, _ := poolsCleaner.NewTxsPoolsCleaner(accounts, shardCoordinator, tdp, addrConverter) - itRan, err := txsPoolsCleaner.Clean(cleaningTime) + itRan, err := txsPoolsCleaner.Clean(maxCleanTime) assert.Nil(t, err) assert.Equal(t, true, itRan) @@ -217,7 +217,7 @@ func TestTxPoolsCleaner_CleanAccountNotExistsShouldRemoveTx(t *testing.T) { shardCoordinator := mock.NewOneShardCoordinatorMock() tdp := initDataPoolWithFourTransactions() addrConverter, _ := addressConverters.NewPlainAddressConverter(32, "0x") - txsPoolsCleaner, _ := poolscleaner.NewTxsPoolsCleaner(accounts, shardCoordinator, tdp, addrConverter) + txsPoolsCleaner, _ := poolsCleaner.NewTxsPoolsCleaner(accounts, shardCoordinator, tdp, addrConverter) itRan, err := txsPoolsCleaner.Clean(cleanDuration) assert.Nil(t, err) @@ -238,7 +238,7 @@ func TestTxPoolsCleaner_CleanLowerAccountNonceShouldRemoveTx(t *testing.T) { shardCoordinator := mock.NewOneShardCoordinatorMock() tdp := initDataPoolWithFourTransactions() addrConverter, _ := addressConverters.NewPlainAddressConverter(32, "0x") - txsPoolsCleaner, _ := poolscleaner.NewTxsPoolsCleaner(accounts, shardCoordinator, tdp, addrConverter) + txsPoolsCleaner, _ := poolsCleaner.NewTxsPoolsCleaner(accounts, shardCoordinator, tdp, addrConverter) itRan, err := txsPoolsCleaner.Clean(cleanDuration) assert.Nil(t, err) @@ -257,10 +257,10 @@ func TestTxPoolsCleaner_CleanNilHaveTimeShouldErr(t *testing.T) { shardCoordinator := mock.NewOneShardCoordinatorMock() tdp := initDataPoolWithFourTransactions() addrConverter, _ := addressConverters.NewPlainAddressConverter(32, "0x") - txsPoolsCleaner, _ := poolscleaner.NewTxsPoolsCleaner(accounts, shardCoordinator, tdp, addrConverter) + txsPoolsCleaner, _ := poolsCleaner.NewTxsPoolsCleaner(accounts, shardCoordinator, tdp, addrConverter) itRan, err := txsPoolsCleaner.Clean(0) - assert.Equal(t, process.ErrZeroCleaningTime, err) + assert.Equal(t, process.ErrZeroMaxCleanTime, err) assert.Equal(t, false, itRan) } @@ -273,7 +273,7 @@ func TestTxPoolsCleaner_CleanWillDoNothingIfIsCalledMultipleTime(t *testing.T) { shardCoordinator := mock.NewOneShardCoordinatorMock() tdp := initDataPoolWithFourTransactions() addrConverter, _ := addressConverters.NewPlainAddressConverter(32, "0x") - txsPoolsCleaner, _ := poolscleaner.NewTxsPoolsCleaner(accounts, shardCoordinator, tdp, addrConverter) + txsPoolsCleaner, _ := poolsCleaner.NewTxsPoolsCleaner(accounts, shardCoordinator, tdp, addrConverter) go func() { _, _ = txsPoolsCleaner.Clean(time.Second) diff --git a/process/block/shardblock.go b/process/block/shardblock.go index a0369e0f8eb..862aa9bb9fa 100644 --- 
a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -1,7 +1,6 @@ package block import ( - "errors" "fmt" "sort" "sync" @@ -19,7 +18,7 @@ import ( "github.com/ElrondNetwork/elrond-go/statusHandler" ) -const cleaningTime = time.Second +const maxCleanTime = time.Second // shardProcessor implements shardProcessor interface and actually it tries to execute block type shardProcessor struct { @@ -130,16 +129,6 @@ func NewShardProcessor(arguments ArgShardProcessor) (*shardProcessor, error) { return &sp, nil } -// SetPoolsCleaner will set pool cleaner -func (sp *shardProcessor) SetPoolsCleaner(poolsCleaner process.PoolsCleaner) error { - if poolsCleaner == nil { - return errors.New("nil pools cleaner") - } - sp.txsPoolsCleaner = poolsCleaner - - return nil -} - // ProcessBlock processes a block. It returns nil if all ok or the specific error func (sp *shardProcessor) ProcessBlock( chainHandler data.ChainHandler, @@ -700,7 +689,7 @@ func (sp *shardProcessor) CommitBlock( chainHandler.SetCurrentBlockHeaderHash(headerHash) sp.indexBlockIfNeeded(bodyHandler, headerHandler) - sp.cleanTxsPools() + go sp.cleanTxsPools() // write data to log go sp.txCounter.displayLogInfo( @@ -718,11 +707,9 @@ func (sp *shardProcessor) CommitBlock( } func (sp *shardProcessor) cleanTxsPools() { - go func() { - _, err := sp.txsPoolsCleaner.Clean(cleaningTime) - log.LogIfError(err) - log.Info(fmt.Sprintf("Total txs removed from pools cleaner %d", sp.txsPoolsCleaner.NumRemovedTxs())) - }() + _, err := sp.txsPoolsCleaner.Clean(maxCleanTime) + log.LogIfError(err) + log.Info(fmt.Sprintf("Total txs removed from pools cleaner %d", sp.txsPoolsCleaner.NumRemovedTxs())) } // getHighestHdrForOwnShardFromMetachain calculates the highest shard header notarized by metachain diff --git a/process/errors.go b/process/errors.go index 2b80aa62bd3..e111176f202 100644 --- a/process/errors.go +++ b/process/errors.go @@ -430,5 +430,5 @@ var ErrOverallBalanceChangeFromSC = errors.New("SC output balance updates are wr // ErrNilTxsPoolsCleaner signals that a nil transactions pools cleaner has been provided var ErrNilTxsPoolsCleaner = errors.New("nil transactions pools cleaner") -// ErrZeroCleaningTime signal that cleaning time for pools its less or equals with 0 -var ErrZeroCleaningTime = errors.New("cleaning time is equal or less than zero") +// ErrZeroMaxCleanTime signal that cleaning time for pools its less or equals with 0 +var ErrZeroMaxCleanTime = errors.New("cleaning time is equal or less than zero") From 0bdea34840a23fc8b2e1955ea64c2968adc67d7d Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Fri, 20 Sep 2019 17:52:12 +0300 Subject: [PATCH 116/234] integrationTests: remove warnings --- .../multiShard/block/executingRewardMiniblocks_test.go | 4 +++- process/block/preprocess/rewardsHandler.go | 8 ++++---- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/integrationTests/multiShard/block/executingRewardMiniblocks_test.go b/integrationTests/multiShard/block/executingRewardMiniblocks_test.go index 04d56d0a626..1f98ad91bba 100644 --- a/integrationTests/multiShard/block/executingRewardMiniblocks_test.go +++ b/integrationTests/multiShard/block/executingRewardMiniblocks_test.go @@ -4,7 +4,6 @@ import ( "context" "errors" "fmt" - "github.com/opentracing/opentracing-go/log" "math/big" "testing" "time" @@ -17,6 +16,7 @@ import ( "github.com/ElrondNetwork/elrond-go/integrationTests" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/opentracing/opentracing-go/log" 
"github.com/stretchr/testify/assert" ) @@ -162,7 +162,9 @@ func TestExecuteBlocksWithoutTransactionsAndCheckRewards(t *testing.T) { shardRewardsData := consensusGroup[0].SpecialAddressHandler.ConsensusShardRewardData() if shardRewardsData == nil { log.Error(errors.New("nil shard rewards data")) + shardRewardsData = &data.ConsensusRewardData{} } + addrRewards := shardRewardsData.Addresses updateExpectedRewards(mapRewardsForAddress, addrRewards) } diff --git a/process/block/preprocess/rewardsHandler.go b/process/block/preprocess/rewardsHandler.go index 6f5a4daabc9..099a8008e46 100644 --- a/process/block/preprocess/rewardsHandler.go +++ b/process/block/preprocess/rewardsHandler.go @@ -440,10 +440,10 @@ func (rtxh *rewardsHandler) GetAllCurrentFinishedTxs() map[string]data.Transacti rtxh.mut.Lock() rewardTxPool := make(map[string]data.TransactionHandler) - for txHash, txInfo := range rtxh.rewardTxsForBlock { + for txHash, info := range rtxh.rewardTxsForBlock { - senderShard := txInfo.ShardId - receiverShard, err := rtxh.address.ShardIdForAddress(txInfo.RcvAddr) + senderShard := info.ShardId + receiverShard, err := rtxh.address.ShardIdForAddress(info.RcvAddr) if err != nil { continue } @@ -453,7 +453,7 @@ func (rtxh *rewardsHandler) GetAllCurrentFinishedTxs() map[string]data.Transacti if senderShard != rtxh.shardCoordinator.SelfId() { continue } - rewardTxPool[txHash] = txInfo + rewardTxPool[txHash] = info } rtxh.mut.Unlock() From 3eb7a0c6f201913763f90129741ba5da5b5d9aeb Mon Sep 17 00:00:00 2001 From: bogdan-rosianu Date: Fri, 20 Sep 2019 18:24:04 +0300 Subject: [PATCH 117/234] EN-4110: unit tests --- .../block/preprocess/rewardTxPreProcessor.go | 4 +- .../preprocess/rewardTxPreProcessor_test.go | 742 ++++++++++++++++++ 2 files changed, 744 insertions(+), 2 deletions(-) create mode 100644 process/block/preprocess/rewardTxPreProcessor_test.go diff --git a/process/block/preprocess/rewardTxPreProcessor.go b/process/block/preprocess/rewardTxPreProcessor.go index 769be213c9b..c91573e8fcf 100644 --- a/process/block/preprocess/rewardTxPreProcessor.go +++ b/process/block/preprocess/rewardTxPreProcessor.go @@ -54,7 +54,7 @@ func NewRewardTxPreprocessor( return nil, process.ErrNilStorage } if rewardProcessor == nil || rewardProcessor.IsInterfaceNil() { - return nil, process.ErrNilTxProcessor + return nil, process.ErrNilRewardsTxProcessor } if rewardProducer == nil || rewardProcessor.IsInterfaceNil() { return nil, process.ErrNilInternalTransactionProducer @@ -435,7 +435,7 @@ func (rtp *rewardTxPreprocessor) CreateAndProcessMiniBlocks( maxMbSpaceRemained uint32, round uint64, _ func() bool, -) (block.MiniBlockSlice, error){ +) (block.MiniBlockSlice, error) { // always have time for rewards haveTime := func() bool { diff --git a/process/block/preprocess/rewardTxPreProcessor_test.go b/process/block/preprocess/rewardTxPreProcessor_test.go new file mode 100644 index 00000000000..e508dde1e8a --- /dev/null +++ b/process/block/preprocess/rewardTxPreProcessor_test.go @@ -0,0 +1,742 @@ +package preprocess + +import ( + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/mock" + "github.com/stretchr/testify/assert" +) + +func TestNewRewardTxPreprocessor_NilRewardTxDataPoolShouldErr(t *testing.T) { + t.Parallel() + + rtp, err := NewRewardTxPreprocessor( + nil, + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + 
&mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + func(shardID uint32, txHashes [][]byte) {}, + ) + + assert.Nil(t, rtp) + assert.Equal(t, process.ErrNilRewardTxDataPool, err) +} + +func TestNewRewardTxPreprocessor_NilStoreShouldErr(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + rtp, err := NewRewardTxPreprocessor( + tdp.Transactions(), + nil, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + func(shardID uint32, txHashes [][]byte) {}, + ) + + assert.Nil(t, rtp) + assert.Equal(t, process.ErrNilStorage, err) +} + +func TestNewRewardTxPreprocessor_NilHasherShouldErr(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + rtp, err := NewRewardTxPreprocessor( + tdp.Transactions(), + &mock.ChainStorerMock{}, + nil, + &mock.MarshalizerMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + func(shardID uint32, txHashes [][]byte) {}, + ) + + assert.Nil(t, rtp) + assert.Equal(t, process.ErrNilHasher, err) +} + +func TestNewRewardTxPreprocessor_NilMarshalizerShouldErr(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + rtp, err := NewRewardTxPreprocessor( + tdp.RewardTransactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + nil, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + func(shardID uint32, txHashes [][]byte) {}, + ) + + assert.Nil(t, rtp) + assert.Equal(t, process.ErrNilMarshalizer, err) +} + +func TestNewRewardTxPreprocessor_NilRewardTxProcessorShouldErr(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + rtp, err := NewRewardTxPreprocessor( + tdp.RewardTransactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + nil, + &mock.IntermediateTransactionHandlerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + func(shardID uint32, txHashes [][]byte) {}, + ) + + assert.Nil(t, rtp) + assert.Equal(t, process.ErrNilRewardsTxProcessor, err) +} + +func TestNewRewardTxPreprocessor_NilRewardProducerShouldErr(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + rtp, err := NewRewardTxPreprocessor( + tdp.RewardTransactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.RewardTxProcessorMock{}, + nil, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + func(shardID uint32, txHashes [][]byte) {}, + ) + + assert.Nil(t, rtp) + assert.Equal(t, process.ErrNilInternalTransactionProducer, err) +} + +func TestNewRewardTxPreprocessor_NilShardCoordinatorShouldErr(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + rtp, err := NewRewardTxPreprocessor( + tdp.RewardTransactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + nil, + &mock.AccountsStub{}, + func(shardID uint32, txHashes [][]byte) {}, + ) + + assert.Nil(t, rtp) + assert.Equal(t, process.ErrNilShardCoordinator, err) +} + +func TestNewRewardTxPreprocessor_NilAccountsAdapterShouldErr(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + rtp, err := NewRewardTxPreprocessor( + tdp.RewardTransactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + 
&mock.MarshalizerMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + nil, + func(shardID uint32, txHashes [][]byte) {}, + ) + + assert.Nil(t, rtp) + assert.Equal(t, process.ErrNilAccountsAdapter, err) +} + +func TestNewRewardTxPreprocessor_NilRequestHandlerShouldErr(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + rtp, err := NewRewardTxPreprocessor( + tdp.RewardTransactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + nil, + ) + + assert.Nil(t, rtp) + assert.Equal(t, process.ErrNilRequestHandler, err) +} + +func TestNewRewardTxPreprocessor_OkValsShouldWork(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + rtp, err := NewRewardTxPreprocessor( + tdp.RewardTransactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + func(shardID uint32, txHashes [][]byte) {}, + ) + assert.Nil(t, err) + assert.NotNil(t, rtp) +} + +func TestRewardTxPreprocessor_AddComputedRewardMiniBlocksShouldAddMiniBlock(t *testing.T) { + t.Parallel() + + txHash := "tx1_hash" + + tdp := initDataPool() + + rtp, _ := NewRewardTxPreprocessor( + tdp.RewardTransactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + func(shardID uint32, txHashes [][]byte) {}, + ) + + assert.NotNil(t, rtp) + + txHashes := [][]byte{[]byte(txHash)} + + var rewardMiniBlocks block.MiniBlockSlice + mb1 := block.MiniBlock{ + TxHashes: txHashes, + ReceiverShardID: 1, + SenderShardID: 0, + Type: 0, + } + rewardMiniBlocks = append(rewardMiniBlocks, &mb1) + + rtp.AddComputedRewardMiniBlocks(rewardMiniBlocks) + + res := rtp.GetAllCurrentUsedTxs() + + if _, ok := res[txHash]; !ok { + assert.Fail(t, "miniblock was not added") + } +} + +func TestRewardTxPreprocessor_CreateMarshalizedDataShouldWork(t *testing.T) { + t.Parallel() + + txHash := "tx1_hash" + + tdp := initDataPool() + + rtp, _ := NewRewardTxPreprocessor( + tdp.RewardTransactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + func(shardID uint32, txHashes [][]byte) {}, + ) + + assert.NotNil(t, rtp) + + txHashes := [][]byte{[]byte(txHash)} + + var rewardMiniBlocks block.MiniBlockSlice + mb1 := block.MiniBlock{ + TxHashes: txHashes, + ReceiverShardID: 1, + SenderShardID: 0, + Type: block.RewardsBlock, + } + + rewardMiniBlocks = append(rewardMiniBlocks, &mb1) + rtp.AddComputedRewardMiniBlocks(rewardMiniBlocks) + + res, err := rtp.CreateMarshalizedData(txHashes) + assert.Nil(t, err) + assert.Equal(t, 1, len(res)) +} + +func TestRewardTxPreprocessor_ProcessMiniBlockInvalidMiniBlockTypeShouldErr(t *testing.T) { + t.Parallel() + + txHash := "tx1_hash" + + tdp := initDataPool() + + rtp, _ := NewRewardTxPreprocessor( + tdp.RewardTransactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + 
mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + func(shardID uint32, txHashes [][]byte) {}, + ) + + assert.NotNil(t, rtp) + + txHashes := [][]byte{[]byte(txHash)} + mb1 := block.MiniBlock{ + TxHashes: txHashes, + ReceiverShardID: 1, + SenderShardID: 0, + Type: 0, + } + + err := rtp.ProcessMiniBlock(&mb1, haveTimeTrue, 0) + assert.Equal(t, process.ErrWrongTypeInMiniBlock, err) +} + +func TestRewardTxPreprocessor_ProcessMiniBlockShouldWork(t *testing.T) { + t.Parallel() + + txHash := "tx1_hash" + + tdp := initDataPool() + + rtp, _ := NewRewardTxPreprocessor( + tdp.RewardTransactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + func(shardID uint32, txHashes [][]byte) {}, + ) + + assert.NotNil(t, rtp) + + txHashes := [][]byte{[]byte(txHash)} + mb1 := block.MiniBlock{ + TxHashes: txHashes, + ReceiverShardID: 1, + SenderShardID: 0, + Type: block.RewardsBlock, + } + + err := rtp.ProcessMiniBlock(&mb1, haveTimeTrue, 0) + + assert.Nil(t, err) + + txsMap := rtp.GetAllCurrentUsedTxs() + if _, ok := txsMap[txHash]; !ok { + assert.Fail(t, "miniblock was not added") + } +} + +func TestRewardTxPreprocessor_SaveTxBlockToStorageShouldWork(t *testing.T) { + t.Parallel() + + txHash := "tx1_hash" + + tdp := initDataPool() + + rtp, _ := NewRewardTxPreprocessor( + tdp.RewardTransactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + func(shardID uint32, txHashes [][]byte) {}, + ) + + assert.NotNil(t, rtp) + + txHashes := [][]byte{[]byte(txHash)} + + mb1 := block.MiniBlock{ + TxHashes: txHashes, + ReceiverShardID: 1, + SenderShardID: 0, + Type: block.RewardsBlock, + } + mb2 := block.MiniBlock{ + TxHashes: txHashes, + ReceiverShardID: 0, + SenderShardID: 1, + Type: block.RewardsBlock, + } + + var rewardMiniBlocks block.MiniBlockSlice + rewardMiniBlocks = append(rewardMiniBlocks, &mb1) + + rtp.AddComputedRewardMiniBlocks(rewardMiniBlocks) + + var blockBody block.Body + blockBody = append(blockBody, &mb1, &mb2) + err := rtp.SaveTxBlockToStorage(blockBody) + + assert.Nil(t, err) +} + +func TestRewardTxPreprocessor_RequestBlockTransactionsNoMissingTxsShouldWork(t *testing.T) { + t.Parallel() + + txHash := "tx1_hash" + + tdp := initDataPool() + + rtp, _ := NewRewardTxPreprocessor( + tdp.RewardTransactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + func(shardID uint32, txHashes [][]byte) {}, + ) + + assert.NotNil(t, rtp) + + txHashes := [][]byte{[]byte(txHash)} + + mb1 := block.MiniBlock{ + TxHashes: txHashes, + ReceiverShardID: 1, + SenderShardID: 0, + Type: block.RewardsBlock, + } + mb2 := block.MiniBlock{ + TxHashes: txHashes, + ReceiverShardID: 0, + SenderShardID: 1, + Type: block.RewardsBlock, + } + + var rewardMiniBlocks block.MiniBlockSlice + rewardMiniBlocks = append(rewardMiniBlocks, &mb1) + + rtp.AddComputedRewardMiniBlocks(rewardMiniBlocks) + + var blockBody block.Body + blockBody = append(blockBody, &mb1, &mb2) + + _ = rtp.SaveTxBlockToStorage(blockBody) + + res := rtp.RequestBlockTransactions(blockBody) + assert.Equal(t, 0, res) +} + +func 
TestRewardTxPreprocessor_RequestTransactionsForMiniBlockShouldWork(t *testing.T) { + t.Parallel() + + txHash := "tx1_hash" + + tdp := initDataPool() + + rtp, _ := NewRewardTxPreprocessor( + tdp.RewardTransactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + func(shardID uint32, txHashes [][]byte) {}, + ) + + assert.NotNil(t, rtp) + + txHashes := [][]byte{[]byte(txHash)} + + mb1 := block.MiniBlock{ + TxHashes: txHashes, + ReceiverShardID: 1, + SenderShardID: 0, + Type: block.RewardsBlock, + } + + res := rtp.RequestTransactionsForMiniBlock(mb1) + assert.Equal(t, 0, res) +} + +func TestRewardTxPreprocessor_ProcessBlockTransactions(t *testing.T) { + t.Parallel() + + txHash := "tx1_hash" + + tdp := initDataPool() + + rtp, _ := NewRewardTxPreprocessor( + tdp.RewardTransactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + func(shardID uint32, txHashes [][]byte) {}, + ) + + assert.NotNil(t, rtp) + + txHashes := [][]byte{[]byte(txHash)} + + mb1 := block.MiniBlock{ + TxHashes: txHashes, + ReceiverShardID: 1, + SenderShardID: 0, + Type: block.RewardsBlock, + } + mb2 := block.MiniBlock{ + TxHashes: txHashes, + ReceiverShardID: 0, + SenderShardID: 1, + Type: block.RewardsBlock, + } + + var rewardMiniBlocks block.MiniBlockSlice + rewardMiniBlocks = append(rewardMiniBlocks, &mb1) + + rtp.AddComputedRewardMiniBlocks(rewardMiniBlocks) + + var blockBody block.Body + blockBody = append(blockBody, &mb1, &mb2) + + err := rtp.ProcessBlockTransactions(blockBody, 0, haveTimeTrue) + assert.Nil(t, err) +} + +func TestRewardTxPreprocessor_IsDataPreparedShouldErr(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + + rtp, _ := NewRewardTxPreprocessor( + tdp.RewardTransactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + func(shardID uint32, txHashes [][]byte) {}, + ) + + assert.NotNil(t, rtp) + + err := rtp.IsDataPrepared(1, haveTime) + + assert.Equal(t, process.ErrTimeIsOut, err) +} + +func TestRewardTxPreprocessor_IsDataPrepared(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + + rtp, _ := NewRewardTxPreprocessor( + tdp.RewardTransactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + func(shardID uint32, txHashes [][]byte) {}, + ) + + assert.NotNil(t, rtp) + + go func() { + time.Sleep(50 * time.Millisecond) + rtp.chReceivedAllRewardTxs <- true + }() + + err := rtp.IsDataPrepared(1, haveTime) + + assert.Nil(t, err) +} + +func TestRewardTxPreprocessor_RestoreTxBlockIntoPools(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + + storer := mock.ChainStorerMock{ + GetAllCalled: func(unitType dataRetriever.UnitType, keys [][]byte) (map[string][]byte, error) { + retMap := map[string][]byte{ + "tx_hash1": []byte(`{"Round": 0}`), + } + + return retMap, nil + }, + } + rtp, _ := NewRewardTxPreprocessor( + tdp.RewardTransactions(), + &storer, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + 
&mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + func(shardID uint32, txHashes [][]byte) {}, + ) + + assert.NotNil(t, rtp) + + txHashes := [][]byte{[]byte("tx_hash1")} + + mb1 := block.MiniBlock{ + TxHashes: txHashes, + ReceiverShardID: 1, + SenderShardID: 0, + Type: block.RewardsBlock, + } + + var blockBody block.Body + blockBody = append(blockBody, &mb1) + miniBlockPool := mock.NewCacherMock() + + numRestoredTxs, resMap, err := rtp.RestoreTxBlockIntoPools(blockBody, miniBlockPool) + assert.Equal(t, 1, numRestoredTxs) + assert.NotNil(t, resMap) + assert.Nil(t, err) +} + +func TestRewardTxPreprocessor_CreateAndProcessMiniBlocksTxForMiniBlockNotFoundShouldErr(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + + rtp, _ := NewRewardTxPreprocessor( + tdp.RewardTransactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{ + CreateAllInterMiniBlocksCalled: func() map[uint32]*block.MiniBlock { + txHashes := [][]byte{[]byte("hash_unavailable")} + mb1 := block.MiniBlock{ + TxHashes: txHashes, + ReceiverShardID: 1, + SenderShardID: 0, + Type: block.RewardsBlock, + } + + return map[uint32]*block.MiniBlock{ + 0: &mb1, + } + }, + }, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + func(shardID uint32, txHashes [][]byte) {}, + ) + + assert.NotNil(t, rtp) + + mBlocksSlice, err := rtp.CreateAndProcessMiniBlocks(1, 1, 0, haveTimeTrue) + assert.Nil(t, mBlocksSlice) + assert.Equal(t, process.ErrNilRewardTransaction, err) +} + +func TestRewardTxPreprocessor_CreateAndProcessMiniBlocksShouldWork(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + + rtp, _ := NewRewardTxPreprocessor( + tdp.RewardTransactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{ + CreateAllInterMiniBlocksCalled: func() map[uint32]*block.MiniBlock { + txHashes := [][]byte{[]byte("tx1_hash")} + mb1 := block.MiniBlock{ + TxHashes: txHashes, + ReceiverShardID: 1, + SenderShardID: 0, + Type: block.RewardsBlock, + } + + return map[uint32]*block.MiniBlock{ + 0: &mb1, + } + }, + }, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + func(shardID uint32, txHashes [][]byte) {}, + ) + + assert.NotNil(t, rtp) + + mBlocksSlice, err := rtp.CreateAndProcessMiniBlocks(1, 1, 0, haveTimeTrue) + assert.NotNil(t, mBlocksSlice) + assert.Nil(t, err) +} + +func TestRewardTxPreprocessor_CreateBlockStartedShouldCleanMap(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + + rtp, _ := NewRewardTxPreprocessor( + tdp.RewardTransactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + func(shardID uint32, txHashes [][]byte) {}, + ) + + assert.NotNil(t, rtp) + + rtp.CreateBlockStarted() + + assert.Equal(t, 0, len(rtp.rewardTxsForBlock.txHashAndInfo)) +} From 9f41a2d0f035e06bbd388ccb14fe762b4f50af07 Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Sat, 21 Sep 2019 11:28:43 +0300 Subject: [PATCH 118/234] process, dataRetriever, integrationTests: Fixes after merge --- .../requestHandlers/requestHandler_test.go | 2 +- integrationTests/testSyncNode.go | 2 +- process/block/argProcessor.go | 22 ++++++----- 
.../block/preprocess/rewardTxPreProcessor.go | 38 +++++++++++++------ process/block/shardblock.go | 8 ++-- process/block/shardblock_test.go | 10 +++-- .../interceptorsContainerFactory_test.go | 3 +- process/smartContract/process_test.go | 9 ++--- 8 files changed, 58 insertions(+), 36 deletions(-) diff --git a/dataRetriever/requestHandlers/requestHandler_test.go b/dataRetriever/requestHandlers/requestHandler_test.go index 6ca4c33ed6a..bb840b0cd9f 100644 --- a/dataRetriever/requestHandlers/requestHandler_test.go +++ b/dataRetriever/requestHandlers/requestHandler_test.go @@ -122,7 +122,7 @@ func TestNewShardResolverRequestHandlerMBTopicEmpty(t *testing.T) { func TestNewShardResolverRequestHandlerShardHdrTopicEmpty(t *testing.T) { t.Parallel() - rrh, err := NewShardResolverRequestHandler(&mock.ResolversFinderStub{}, "topic", "topic", "topic", "", "topic", 1) + rrh, err := NewShardResolverRequestHandler(&mock.ResolversFinderStub{}, "topic", "topic", "topic", "topic", "", "topic", 1) assert.Nil(t, rrh) assert.Equal(t, dataRetriever.ErrEmptyShardHeaderRequestTopic, err) diff --git a/integrationTests/testSyncNode.go b/integrationTests/testSyncNode.go index e0b55811774..1a4197fd0e1 100644 --- a/integrationTests/testSyncNode.go +++ b/integrationTests/testSyncNode.go @@ -116,7 +116,7 @@ func (tpn *TestProcessorNode) initBlockProcessorWithSync() { Store: tpn.Storage, ShardCoordinator: tpn.ShardCoordinator, NodesCoordinator: tpn.NodesCoordinator, - NodesCoordinator: tpn.SpecialAddressHandler, + SpecialAddressHandler: tpn.SpecialAddressHandler, Uint64Converter: TestUint64Converter, StartHeaders: tpn.GenesisBlocks, RequestHandler: tpn.RequestHandler, diff --git a/process/block/argProcessor.go b/process/block/argProcessor.go index 1271618735f..eab096a4cb4 100644 --- a/process/block/argProcessor.go +++ b/process/block/argProcessor.go @@ -15,16 +15,18 @@ import ( // ArgBaseProcessor holds all dependencies required by the process data factory in order to create // new instances type ArgBaseProcessor struct { - Accounts state.AccountsAdapter - ForkDetector process.ForkDetector - Hasher hashing.Hasher - Marshalizer marshal.Marshalizer - Store dataRetriever.StorageService - ShardCoordinator sharding.Coordinator - Uint64Converter typeConverters.Uint64ByteSliceConverter - StartHeaders map[uint32]data.HeaderHandler - RequestHandler process.RequestHandler - Core serviceContainer.Core + Accounts state.AccountsAdapter + ForkDetector process.ForkDetector + Hasher hashing.Hasher + Marshalizer marshal.Marshalizer + Store dataRetriever.StorageService + ShardCoordinator sharding.Coordinator + NodesCoordinator sharding.NodesCoordinator + SpecialAddressHandler process.SpecialAddressHandler + Uint64Converter typeConverters.Uint64ByteSliceConverter + StartHeaders map[uint32]data.HeaderHandler + RequestHandler process.RequestHandler + Core serviceContainer.Core } // ArgShardProcessor holds all dependencies required by the process data factory in order to create diff --git a/process/block/preprocess/rewardTxPreProcessor.go b/process/block/preprocess/rewardTxPreProcessor.go index 769be213c9b..92adb0a732a 100644 --- a/process/block/preprocess/rewardTxPreProcessor.go +++ b/process/block/preprocess/rewardTxPreProcessor.go @@ -2,6 +2,7 @@ package preprocess import ( "fmt" + "github.com/ElrondNetwork/elrond-go/core" "time" "github.com/ElrondNetwork/elrond-go/data" @@ -161,7 +162,14 @@ func (rtp *rewardTxPreprocessor) RestoreTxBlockIntoPools( rtp.rewardTxPool.AddData([]byte(txHash), &tx, strCache) } - restoredHash, err := 
rtp.restoreMiniBlock(miniBlock, miniBlockPool) + miniBlockHash, err := core.CalculateHash(rtp.marshalizer, rtp.hasher, miniBlock) + if err != nil { + return rewardTxsRestored, miniBlockHashes, err + } + + restoredHash := rtp.restoreMiniBlock(miniBlock, miniBlockHash, miniBlockPool) + + err = rtp.storage.GetStorer(dataRetriever.MiniBlockUnit).Remove(miniBlockHash) if err != nil { return rewardTxsRestored, miniBlockHashes, err } @@ -291,25 +299,33 @@ func (rtp *rewardTxPreprocessor) RequestBlockTransactions(body block.Body) int { missingRewardTxsForShards := rtp.computeMissingAndExistingRewardTxsForShards(body) rtp.rewardTxsForBlock.mutTxsForBlock.Lock() - for senderShardID, rewardTxHashesInfo := range missingRewardTxsForShards { - txShardInfo := &txShardInfo{senderShardID: senderShardID, receiverShardID: rewardTxHashesInfo.receiverShardID} - for _, txHash := range rewardTxHashesInfo.txHashes { - rtp.rewardTxsForBlock.txHashAndInfo[string(txHash)] = &txInfo{tx: nil, txShardInfo: txShardInfo} + for senderShardID, rewardTxHashes := range missingRewardTxsForShards { + for _, txHash := range rewardTxHashes { + rtp.setMissingTxsForShard(senderShardID, txHash) } } rtp.rewardTxsForBlock.mutTxsForBlock.Unlock() - for senderShardID, scrHashesInfo := range missingRewardTxsForShards { - requestedRewardTxs += len(scrHashesInfo.txHashes) - rtp.onRequestRewardTx(senderShardID, scrHashesInfo.txHashes) + for senderShardID, mbsRewardTxHashes := range missingRewardTxsForShards { + for _, mbRewardTxHashes := range mbsRewardTxHashes { + requestedRewardTxs += len(mbRewardTxHashes.txHashes) + rtp.onRequestRewardTx(senderShardID, mbRewardTxHashes.txHashes) + } } return requestedRewardTxs } +func (rtp *rewardTxPreprocessor) setMissingTxsForShard(senderShardID uint32, mbTxHashes *txsHashesInfo) { + txShardInfo := &txShardInfo{senderShardID: senderShardID, receiverShardID: mbTxHashes.receiverShardID} + for _, txHash := range mbTxHashes.txHashes { + rtp.rewardTxsForBlock.txHashAndInfo[string(txHash)] = &txInfo{tx: nil, txShardInfo: txShardInfo} + } +} + // computeMissingAndExistingRewardTxsForShards calculates what reward transactions are available and what are missing // from block.Body -func (rtp *rewardTxPreprocessor) computeMissingAndExistingRewardTxsForShards(body block.Body) map[uint32]*txsHashesInfo { +func (rtp *rewardTxPreprocessor) computeMissingAndExistingRewardTxsForShards(body block.Body) map[uint32][]*txsHashesInfo { rewardTxs := block.Body{} for _, mb := range body { if mb.Type != block.RewardsBlock { @@ -322,7 +338,7 @@ func (rtp *rewardTxPreprocessor) computeMissingAndExistingRewardTxsForShards(bod rewardTxs = append(rewardTxs, mb) } - missingTxsForShard := rtp.computeExistingAndMissing( + missingTxsForShards := rtp.computeExistingAndMissing( rewardTxs, &rtp.rewardTxsForBlock, rtp.chReceivedAllRewardTxs, @@ -330,7 +346,7 @@ func (rtp *rewardTxPreprocessor) computeMissingAndExistingRewardTxsForShards(bod rtp.rewardTxPool, ) - return missingTxsForShard + return missingTxsForShards } // processRewardTransaction processes a reward transaction, if the transactions has an error it removes it from pool diff --git a/process/block/shardblock.go b/process/block/shardblock.go index bf24363318f..a3fce3c0a30 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -49,8 +49,8 @@ func NewShardProcessor(arguments ArgShardProcessor) (*shardProcessor, error) { arguments.Marshalizer, arguments.Store, arguments.ShardCoordinator, - arguments.nodesCoordinator, - arguments.specialAddressHandler, + 
arguments.NodesCoordinator, + arguments.SpecialAddressHandler, arguments.Uint64Converter) if err != nil { return nil, err @@ -82,8 +82,8 @@ func NewShardProcessor(arguments ArgShardProcessor) (*shardProcessor, error) { marshalizer: arguments.Marshalizer, store: arguments.Store, shardCoordinator: arguments.ShardCoordinator, - nodesCoordinator: arguments.nodesCoordinator, - specialAddressHandler: arguments.specialAddressHandler, + nodesCoordinator: arguments.NodesCoordinator, + specialAddressHandler: arguments.SpecialAddressHandler, uint64Converter: arguments.Uint64Converter, onRequestHeaderHandlerByNonce: arguments.RequestHandler.RequestHeaderByNonce, appStatusHandler: statusHandler.NewNilStatusHandler(), diff --git a/process/block/shardblock_test.go b/process/block/shardblock_test.go index 1ef3c1d018c..518160e08ef 100644 --- a/process/block/shardblock_test.go +++ b/process/block/shardblock_test.go @@ -520,6 +520,8 @@ func TestShardProcessor_ProcessWithHeaderNotCorrectPrevHashShouldErr(t *testing. return 0 }, } + + randSeed := []byte("rand seed") sp, _ := blproc.NewShardProcessor(arguments) hdr := &block.Header{ Nonce: 1, @@ -663,6 +665,7 @@ func TestShardProcessor_ProcessBlockWithErrOnProcessBlockTransactionsCallShouldR func TestShardProcessor_ProcessBlockWithErrOnVerifyStateRootCallShouldRevertState(t *testing.T) { t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) randSeed := []byte("rand seed") txHash := []byte("tx_hash1") blkc := &blockchain.BlockChain{ @@ -702,7 +705,7 @@ func TestShardProcessor_ProcessBlockWithErrOnVerifyStateRootCallShouldRevertStat Signature: []byte("signature"), PubKeysBitmap: []byte("00110"), ShardId: 0, - RootHash: []byte("rootHashX"), + RootHash: []byte("rootHash"), MiniBlockHeaders: mbHdrs, } @@ -743,6 +746,7 @@ func TestShardProcessor_ProcessBlockWithErrOnVerifyStateRootCallShouldRevertStat func TestShardProcessor_ProcessBlockOnlyIntraShardShouldPass(t *testing.T) { t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) randSeed := []byte("rand seed") txHash := []byte("tx_hash1") blkc := &blockchain.BlockChain{ @@ -1355,7 +1359,7 @@ func TestShardProcessor_IsMetaHeaderFinalShouldPass(t *testing.T) { func TestShardProcessor_RequestFinalMissingHeaders(t *testing.T) { t.Parallel() - tdp := mock.NewPoolsHolderFake() + tdp := mock.NewPoolsHolderMock() arguments := CreateMockArgumentsMultiShard() arguments.DataPool = tdp sp, _ := blproc.NewShardProcessor(arguments) @@ -4183,7 +4187,7 @@ func TestShardProcessor_RestoreMetaBlockIntoPoolVerifyMiniblocks(t *testing.T) { shardC := mock.NewMultiShardsCoordinatorMock(3) arguments := CreateMockArgumentsMultiShard() - arguments.DataPool = poolFake + arguments.DataPool = poolMock arguments.Store = storer arguments.ShardCoordinator = shardC arguments.StartHeaders = createGenesisBlocks(shardC) diff --git a/process/factory/shard/interceptorsContainerFactory_test.go b/process/factory/shard/interceptorsContainerFactory_test.go index 26a36037ebd..173402887fc 100644 --- a/process/factory/shard/interceptorsContainerFactory_test.go +++ b/process/factory/shard/interceptorsContainerFactory_test.go @@ -86,6 +86,7 @@ func TestNewInterceptorsContainerFactory_NilAccountsAdapter(t *testing.T) { icf, err := shard.NewInterceptorsContainerFactory( nil, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), &mock.TopicHandlerStub{}, createStore(), &mock.MarshalizerMock{}, @@ -95,7 +96,6 @@ func TestNewInterceptorsContainerFactory_NilAccountsAdapter(t *testing.T) { mock.NewMultiSigner(), createDataPools(), 
&mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, ) assert.Nil(t, icf) @@ -128,6 +128,7 @@ func TestNewInterceptorsContainerFactory_NilNodesCoordinatorShouldErr(t *testing t.Parallel() icf, err := shard.NewInterceptorsContainerFactory( + &mock.AccountsStub{}, mock.NewOneShardCoordinatorMock(), nil, &mock.TopicHandlerStub{}, diff --git a/process/smartContract/process_test.go b/process/smartContract/process_test.go index 5ce4d24e55b..652a3bda2a2 100644 --- a/process/smartContract/process_test.go +++ b/process/smartContract/process_test.go @@ -247,7 +247,6 @@ func TestNewSmartContractProcessor(t *testing.T) { &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), &mock.IntermediateTransactionHandlerMock{}, - &mock.IntermediateTransactionHandlerMock{}, &mock.UnsignedTxHandlerMock{}, ) @@ -268,7 +267,6 @@ func TestScProcessor_ComputeTransactionTypeNil(t *testing.T) { &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), &mock.IntermediateTransactionHandlerMock{}, - &mock.IntermediateTransactionHandlerMock{}, &mock.UnsignedTxHandlerMock{}, ) @@ -292,7 +290,6 @@ func TestScProcessor_ComputeTransactionTypeNilTx(t *testing.T) { &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), &mock.IntermediateTransactionHandlerMock{}, - &mock.IntermediateTransactionHandlerMock{}, &mock.UnsignedTxHandlerMock{}, ) @@ -322,7 +319,9 @@ func TestScProcessor_ComputeTransactionTypeErrWrongTransaction(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1885,7 +1884,7 @@ func TestScProcessor_processSCOutputAccounts(t *testing.T) { tx := &transaction.Transaction{Value: big.NewInt(0)} outputAccounts := make([]*vmcommon.OutputAccount, 0) - _, err = sc.ProcessSCOutputAccounts(outputAccounts, tx) + err = sc.ProcessSCOutputAccounts(outputAccounts, tx) assert.Nil(t, err) outaddress := []byte("newsmartcontract") From afbfc45876dbe307ff94edb8534da40b1c8740c2 Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Sat, 21 Sep 2019 15:44:20 +0300 Subject: [PATCH 119/234] process, integrationTests: fixes after merge --- .../smartContract/testInitilalizer.go | 2 +- process/block/export_test.go | 1 + process/block/shardblock.go | 79 ++++++++++++++++++- 3 files changed, 77 insertions(+), 5 deletions(-) diff --git a/integrationTests/multiShard/smartContract/testInitilalizer.go b/integrationTests/multiShard/smartContract/testInitilalizer.go index 74916310f16..617cf34bab0 100644 --- a/integrationTests/multiShard/smartContract/testInitilalizer.go +++ b/integrationTests/multiShard/smartContract/testInitilalizer.go @@ -438,7 +438,7 @@ func createNetNode( Store: store, ShardCoordinator: shardCoordinator, NodesCoordinator: nodesCoordinator, - mock.NewSpecialAddressHandlerMock( + SpecialAddressHandler: mock.NewSpecialAddressHandlerMock( testAddressConverter, shardCoordinator, ), diff --git a/process/block/export_test.go b/process/block/export_test.go index 1723c93d8e8..033286f5fdf 100644 --- a/process/block/export_test.go +++ b/process/block/export_test.go @@ -45,6 +45,7 @@ func (sp *shardProcessor) CreateMiniBlocks(noShards uint32, maxItemsInBlock uint } func (sp *shardProcessor) GetProcessedMetaBlocksFromHeader(header *block.Header) ([]data.HeaderHandler, error) { + return sp.getProcessedMetaBlocksFromHeader(header) } diff --git 
a/process/block/shardblock.go b/process/block/shardblock.go index 49fd3ba1ac5..a06f83c8930 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -226,7 +226,7 @@ func (sp *shardProcessor) ProcessBlock( } }() - processedMetaHdrs, err := sp.getProcessedMetaBlocksFromHeader(header.MetaBlockHashes) + processedMetaHdrs, err := sp.getProcessedMetaBlocks(body, header.MetaBlockHashes) if err != nil { return err } @@ -844,16 +844,87 @@ func (sp *shardProcessor) getHighestHdrForShardFromMetachain(shardId uint32, hdr return ownShIdHdr, nil } +// getProcessedMetaBlocksFromHeader returns all the meta blocks fully processed +func (sp *shardProcessor) getProcessedMetaBlocksFromHeader(header *block.Header) ([]data.HeaderHandler, error) { + if header == nil { + return nil, process.ErrNilBlockHeader + } + + miniBlockHashes := make(map[int][]byte, 0) + for i := 0; i < len(header.MiniBlockHeaders); i++ { + miniBlockHashes[i] = header.MiniBlockHeaders[i].Hash + } + + log.Debug(fmt.Sprintf("cross mini blocks in body: %d\n", len(miniBlockHashes))) + + processedMetaHdrs := make([]data.HeaderHandler, 0) + for _, metaBlockKey := range header.MetaBlockHashes { + obj, _ := sp.dataPool.MetaBlocks().Peek(metaBlockKey) + if obj == nil { + return nil, process.ErrNilMetaBlockHeader + } + + metaBlock, ok := obj.(*block.MetaBlock) + if !ok { + return nil, process.ErrWrongTypeAssertion + } + + log.Debug(fmt.Sprintf("meta header nonce: %d\n", metaBlock.Nonce)) + + crossMiniBlockHashes := metaBlock.GetMiniBlockHeadersWithDst(sp.shardCoordinator.SelfId()) + for key := range miniBlockHashes { + _, ok := crossMiniBlockHashes[string(miniBlockHashes[key])] + if !ok { + continue + } + + metaBlock.SetMiniBlockProcessed(miniBlockHashes[key], true) + delete(miniBlockHashes, key) + } + + log.Debug(fmt.Sprintf("cross mini blocks in meta header: %d\n", len(crossMiniBlockHashes))) + + processedAll := true + for key := range crossMiniBlockHashes { + if !metaBlock.GetMiniBlockProcessed([]byte(key)) { + processedAll = false + break + } + } + + if processedAll { + processedMetaHdrs = append(processedMetaHdrs, metaBlock) + } + } + + return processedMetaHdrs, nil +} + // getProcessedMetaBlocks returns all the meta blocks fully processed func (sp *shardProcessor) getProcessedMetaBlocks( - miniBlockHashes [][]byte, + usedMiniBlocks []*block.MiniBlock, usedMetaBlockHashes [][]byte, ) ([]data.HeaderHandler, error) { - if miniBlockHashes == nil || if usedMetaBlockHashes == nil { + if usedMiniBlocks == nil || usedMetaBlockHashes == nil { // not an error, it can happen that no metablock header or no miniblock is used. 
return make([]data.HeaderHandler, 0), nil } + miniBlockHashes := make(map[int][]byte, 0) + for i := 0; i < len(usedMiniBlocks); i++ { + miniBlock := usedMiniBlocks[i] + if miniBlock.SenderShardID == sp.shardCoordinator.SelfId() { + continue + } + + mbHash, err := core.CalculateHash(sp.marshalizer, sp.hasher, miniBlock) + if err != nil { + log.Debug(err.Error()) + continue + } + miniBlockHashes[i] = mbHash + } + log.Debug(fmt.Sprintf("cross mini blocks in body: %d\n", len(miniBlockHashes))) processedMetaHdrs := make([]data.HeaderHandler, 0) @@ -1404,7 +1475,7 @@ func (sp *shardProcessor) createMiniBlocks( log.Info(err.Error()) } - processedMetaHdrs, errNotCritical := sp.getProcessedMetaBlocksFromPool(block.Body(destMeMiniBlocks), usedMetaHdrsHashes) + processedMetaHdrs, errNotCritical := sp.getProcessedMetaBlocks(destMeMiniBlocks, usedMetaHdrsHashes) if errNotCritical != nil { log.Debug(errNotCritical.Error()) } From ba8a00bccc26f7f7e5e01818218af31898e8b31b Mon Sep 17 00:00:00 2001 From: bogdan-rosianu Date: Sat, 21 Sep 2019 19:30:56 +0300 Subject: [PATCH 120/234] EN-4110: fixes after review --- .../preprocess/rewardTxPreProcessor_test.go | 54 ------------------- 1 file changed, 54 deletions(-) diff --git a/process/block/preprocess/rewardTxPreProcessor_test.go b/process/block/preprocess/rewardTxPreProcessor_test.go index e508dde1e8a..965a092dfe6 100644 --- a/process/block/preprocess/rewardTxPreProcessor_test.go +++ b/process/block/preprocess/rewardTxPreProcessor_test.go @@ -254,9 +254,7 @@ func TestRewardTxPreprocessor_CreateMarshalizedDataShouldWork(t *testing.T) { t.Parallel() txHash := "tx1_hash" - tdp := initDataPool() - rtp, _ := NewRewardTxPreprocessor( tdp.RewardTransactions(), &mock.ChainStorerMock{}, @@ -269,10 +267,7 @@ func TestRewardTxPreprocessor_CreateMarshalizedDataShouldWork(t *testing.T) { func(shardID uint32, txHashes [][]byte) {}, ) - assert.NotNil(t, rtp) - txHashes := [][]byte{[]byte(txHash)} - var rewardMiniBlocks block.MiniBlockSlice mb1 := block.MiniBlock{ TxHashes: txHashes, @@ -293,9 +288,7 @@ func TestRewardTxPreprocessor_ProcessMiniBlockInvalidMiniBlockTypeShouldErr(t *t t.Parallel() txHash := "tx1_hash" - tdp := initDataPool() - rtp, _ := NewRewardTxPreprocessor( tdp.RewardTransactions(), &mock.ChainStorerMock{}, @@ -308,8 +301,6 @@ func TestRewardTxPreprocessor_ProcessMiniBlockInvalidMiniBlockTypeShouldErr(t *t func(shardID uint32, txHashes [][]byte) {}, ) - assert.NotNil(t, rtp) - txHashes := [][]byte{[]byte(txHash)} mb1 := block.MiniBlock{ TxHashes: txHashes, @@ -326,9 +317,7 @@ func TestRewardTxPreprocessor_ProcessMiniBlockShouldWork(t *testing.T) { t.Parallel() txHash := "tx1_hash" - tdp := initDataPool() - rtp, _ := NewRewardTxPreprocessor( tdp.RewardTransactions(), &mock.ChainStorerMock{}, @@ -341,8 +330,6 @@ func TestRewardTxPreprocessor_ProcessMiniBlockShouldWork(t *testing.T) { func(shardID uint32, txHashes [][]byte) {}, ) - assert.NotNil(t, rtp) - txHashes := [][]byte{[]byte(txHash)} mb1 := block.MiniBlock{ TxHashes: txHashes, @@ -352,7 +339,6 @@ func TestRewardTxPreprocessor_ProcessMiniBlockShouldWork(t *testing.T) { } err := rtp.ProcessMiniBlock(&mb1, haveTimeTrue, 0) - assert.Nil(t, err) txsMap := rtp.GetAllCurrentUsedTxs() @@ -365,9 +351,7 @@ func TestRewardTxPreprocessor_SaveTxBlockToStorageShouldWork(t *testing.T) { t.Parallel() txHash := "tx1_hash" - tdp := initDataPool() - rtp, _ := NewRewardTxPreprocessor( tdp.RewardTransactions(), &mock.ChainStorerMock{}, @@ -380,10 +364,7 @@ func TestRewardTxPreprocessor_SaveTxBlockToStorageShouldWork(t 
*testing.T) { func(shardID uint32, txHashes [][]byte) {}, ) - assert.NotNil(t, rtp) - txHashes := [][]byte{[]byte(txHash)} - mb1 := block.MiniBlock{ TxHashes: txHashes, ReceiverShardID: 1, @@ -413,9 +394,7 @@ func TestRewardTxPreprocessor_RequestBlockTransactionsNoMissingTxsShouldWork(t * t.Parallel() txHash := "tx1_hash" - tdp := initDataPool() - rtp, _ := NewRewardTxPreprocessor( tdp.RewardTransactions(), &mock.ChainStorerMock{}, @@ -428,10 +407,7 @@ func TestRewardTxPreprocessor_RequestBlockTransactionsNoMissingTxsShouldWork(t * func(shardID uint32, txHashes [][]byte) {}, ) - assert.NotNil(t, rtp) - txHashes := [][]byte{[]byte(txHash)} - mb1 := block.MiniBlock{ TxHashes: txHashes, ReceiverShardID: 1, @@ -463,9 +439,7 @@ func TestRewardTxPreprocessor_RequestTransactionsForMiniBlockShouldWork(t *testi t.Parallel() txHash := "tx1_hash" - tdp := initDataPool() - rtp, _ := NewRewardTxPreprocessor( tdp.RewardTransactions(), &mock.ChainStorerMock{}, @@ -478,10 +452,7 @@ func TestRewardTxPreprocessor_RequestTransactionsForMiniBlockShouldWork(t *testi func(shardID uint32, txHashes [][]byte) {}, ) - assert.NotNil(t, rtp) - txHashes := [][]byte{[]byte(txHash)} - mb1 := block.MiniBlock{ TxHashes: txHashes, ReceiverShardID: 1, @@ -497,9 +468,7 @@ func TestRewardTxPreprocessor_ProcessBlockTransactions(t *testing.T) { t.Parallel() txHash := "tx1_hash" - tdp := initDataPool() - rtp, _ := NewRewardTxPreprocessor( tdp.RewardTransactions(), &mock.ChainStorerMock{}, @@ -512,10 +481,7 @@ func TestRewardTxPreprocessor_ProcessBlockTransactions(t *testing.T) { func(shardID uint32, txHashes [][]byte) {}, ) - assert.NotNil(t, rtp) - txHashes := [][]byte{[]byte(txHash)} - mb1 := block.MiniBlock{ TxHashes: txHashes, ReceiverShardID: 1, @@ -545,7 +511,6 @@ func TestRewardTxPreprocessor_IsDataPreparedShouldErr(t *testing.T) { t.Parallel() tdp := initDataPool() - rtp, _ := NewRewardTxPreprocessor( tdp.RewardTransactions(), &mock.ChainStorerMock{}, @@ -558,8 +523,6 @@ func TestRewardTxPreprocessor_IsDataPreparedShouldErr(t *testing.T) { func(shardID uint32, txHashes [][]byte) {}, ) - assert.NotNil(t, rtp) - err := rtp.IsDataPrepared(1, haveTime) assert.Equal(t, process.ErrTimeIsOut, err) @@ -569,7 +532,6 @@ func TestRewardTxPreprocessor_IsDataPrepared(t *testing.T) { t.Parallel() tdp := initDataPool() - rtp, _ := NewRewardTxPreprocessor( tdp.RewardTransactions(), &mock.ChainStorerMock{}, @@ -582,8 +544,6 @@ func TestRewardTxPreprocessor_IsDataPrepared(t *testing.T) { func(shardID uint32, txHashes [][]byte) {}, ) - assert.NotNil(t, rtp) - go func() { time.Sleep(50 * time.Millisecond) rtp.chReceivedAllRewardTxs <- true @@ -598,7 +558,6 @@ func TestRewardTxPreprocessor_RestoreTxBlockIntoPools(t *testing.T) { t.Parallel() tdp := initDataPool() - storer := mock.ChainStorerMock{ GetAllCalled: func(unitType dataRetriever.UnitType, keys [][]byte) (map[string][]byte, error) { retMap := map[string][]byte{ @@ -620,10 +579,7 @@ func TestRewardTxPreprocessor_RestoreTxBlockIntoPools(t *testing.T) { func(shardID uint32, txHashes [][]byte) {}, ) - assert.NotNil(t, rtp) - txHashes := [][]byte{[]byte("tx_hash1")} - mb1 := block.MiniBlock{ TxHashes: txHashes, ReceiverShardID: 1, @@ -645,7 +601,6 @@ func TestRewardTxPreprocessor_CreateAndProcessMiniBlocksTxForMiniBlockNotFoundSh t.Parallel() tdp := initDataPool() - rtp, _ := NewRewardTxPreprocessor( tdp.RewardTransactions(), &mock.ChainStorerMock{}, @@ -672,8 +627,6 @@ func TestRewardTxPreprocessor_CreateAndProcessMiniBlocksTxForMiniBlockNotFoundSh func(shardID uint32, txHashes [][]byte) 
{}, ) - assert.NotNil(t, rtp) - mBlocksSlice, err := rtp.CreateAndProcessMiniBlocks(1, 1, 0, haveTimeTrue) assert.Nil(t, mBlocksSlice) assert.Equal(t, process.ErrNilRewardTransaction, err) @@ -683,7 +636,6 @@ func TestRewardTxPreprocessor_CreateAndProcessMiniBlocksShouldWork(t *testing.T) t.Parallel() tdp := initDataPool() - rtp, _ := NewRewardTxPreprocessor( tdp.RewardTransactions(), &mock.ChainStorerMock{}, @@ -710,8 +662,6 @@ func TestRewardTxPreprocessor_CreateAndProcessMiniBlocksShouldWork(t *testing.T) func(shardID uint32, txHashes [][]byte) {}, ) - assert.NotNil(t, rtp) - mBlocksSlice, err := rtp.CreateAndProcessMiniBlocks(1, 1, 0, haveTimeTrue) assert.NotNil(t, mBlocksSlice) assert.Nil(t, err) @@ -721,7 +671,6 @@ func TestRewardTxPreprocessor_CreateBlockStartedShouldCleanMap(t *testing.T) { t.Parallel() tdp := initDataPool() - rtp, _ := NewRewardTxPreprocessor( tdp.RewardTransactions(), &mock.ChainStorerMock{}, @@ -734,9 +683,6 @@ func TestRewardTxPreprocessor_CreateBlockStartedShouldCleanMap(t *testing.T) { func(shardID uint32, txHashes [][]byte) {}, ) - assert.NotNil(t, rtp) - rtp.CreateBlockStarted() - assert.Equal(t, 0, len(rtp.rewardTxsForBlock.txHashAndInfo)) } From 22ba1d8e5ee38240f4f7356a3b168599737b1ae6 Mon Sep 17 00:00:00 2001 From: iulianpascalau Date: Sun, 22 Sep 2019 19:38:02 +0300 Subject: [PATCH 121/234] fixed redirect std err on darwin os --- core/logger/redirectStderrDarwin.go | 18 ++++++++++++++++++ core/logger/redirectStderrLinux.go | 2 +- 2 files changed, 19 insertions(+), 1 deletion(-) create mode 100644 core/logger/redirectStderrDarwin.go diff --git a/core/logger/redirectStderrDarwin.go b/core/logger/redirectStderrDarwin.go new file mode 100644 index 00000000000..e7fba6f64a4 --- /dev/null +++ b/core/logger/redirectStderrDarwin.go @@ -0,0 +1,18 @@ +//+build darwin + +package logger + +import ( + "os" + "syscall" +) + +// redirectStderr redirects the output of the stderr to the file passed in +func redirectStderr(f *os.File) error { + err := syscall.Dup2(int(f.Fd()), int(os.Stderr.Fd())) + if err != nil { + return err + } + + return nil +} diff --git a/core/logger/redirectStderrLinux.go b/core/logger/redirectStderrLinux.go index 7aed514fd69..7316f8c3827 100644 --- a/core/logger/redirectStderrLinux.go +++ b/core/logger/redirectStderrLinux.go @@ -1,4 +1,4 @@ -//+build linux darwin +//+build linux package logger From 496a26400ac2f3d0e83f48143d323758c6d4d91e Mon Sep 17 00:00:00 2001 From: bogdan-rosianu Date: Mon, 23 Sep 2019 15:02:21 +0300 Subject: [PATCH 122/234] EN-4120: unit tests for rewards --- .../block/preprocess/rewardsHandler_test.go | 118 ++++++++ .../preprocess/smartContractResults_test.go | 6 +- .../interceptedRewardTransaction_test.go | 148 +++++++++ process/rewardTransaction/interceptor_test.go | 277 +++++++++++++++++ process/rewardTransaction/process.go | 4 +- process/rewardTransaction/process_test.go | 286 ++++++++++++++++++ 6 files changed, 835 insertions(+), 4 deletions(-) create mode 100644 process/rewardTransaction/interceptedRewardTransaction_test.go create mode 100644 process/rewardTransaction/interceptor_test.go create mode 100644 process/rewardTransaction/process_test.go diff --git a/process/block/preprocess/rewardsHandler_test.go b/process/block/preprocess/rewardsHandler_test.go index 821e3f31152..35e0407476c 100644 --- a/process/block/preprocess/rewardsHandler_test.go +++ b/process/block/preprocess/rewardsHandler_test.go @@ -1,12 +1,15 @@ package preprocess import ( + "bytes" "math/big" "reflect" "testing" + 
"github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/rewardTx" + "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/mock" "github.com/stretchr/testify/assert" @@ -456,3 +459,118 @@ func TestRewardsHandler_GetAllCurrentFinishedTxs(t *testing.T) { assert.True(t, found) } } + +func TestRewardsHandler_CreateMarshalizedDataShouldWork(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + th, _ := NewRewardTxHandler( + &mock.SpecialAddressHandlerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AddressConverterMock{}, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), + ) + + txs := []data.TransactionHandler{ + &rewardTx.RewardTx{ + Round: 0, + Epoch: 0, + Value: big.NewInt(1), + RcvAddr: []byte("rcvr1"), + ShardId: 0, + }, + &rewardTx.RewardTx{ + Round: 0, + Epoch: 0, + Value: big.NewInt(1), + RcvAddr: []byte("rcvr2"), + ShardId: 0, + }, + } + + err := th.AddIntermediateTransactions(txs) + assert.Nil(t, err) + + var expectedMarshalizedTxs [][]byte + marshTx1, _ := th.marshalizer.Marshal(txs[0]) + marshTx2, _ := th.marshalizer.Marshal(txs[1]) + expectedMarshalizedTxs = append(expectedMarshalizedTxs, marshTx1, marshTx2) + + var txsHashes [][]byte + tx1Hash, _ := core.CalculateHash(th.marshalizer, th.hasher, txs[0]) + tx2Hash, _ := core.CalculateHash(th.marshalizer, th.hasher, txs[1]) + txsHashes = append(txsHashes, tx1Hash, tx2Hash) + + res, err := th.CreateMarshalizedData(txsHashes) + assert.Nil(t, err) + assert.Equal(t, len(txs), len(res)) + assert.True(t, bytes.Equal(expectedMarshalizedTxs[0], res[0])) + assert.True(t, bytes.Equal(expectedMarshalizedTxs[1], res[1])) +} + +func TestRewardsHandler_CreateBlockStartedShouldCreateProtocolReward(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + th, _ := NewRewardTxHandler( + &mock.SpecialAddressHandlerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AddressConverterMock{}, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), + ) + + assert.Nil(t, th.protocolRewards) + th.CreateBlockStarted() + assert.NotNil(t, th.protocolRewards) +} + +func TestRewardsHandler_SaveCurrentIntermediateTxToStorageShouldWork(t *testing.T) { + t.Parallel() + + putWasCalled := false + tdp := initDataPool() + th, _ := NewRewardTxHandler( + &mock.SpecialAddressHandlerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AddressConverterMock{}, + &mock.ChainStorerMock{ + PutCalled: func(unitType dataRetriever.UnitType, key []byte, value []byte) error { + putWasCalled = true + return nil + }, + }, + tdp.RewardTransactions(), + ) + + txs := []data.TransactionHandler{ + &rewardTx.RewardTx{ + Round: 0, + Epoch: 0, + Value: big.NewInt(1), + RcvAddr: []byte("rcvr1"), + ShardId: 0, + }, + &rewardTx.RewardTx{ + Round: 0, + Epoch: 0, + Value: big.NewInt(1), + RcvAddr: []byte("rcvr2"), + ShardId: 0, + }, + } + + err := th.AddIntermediateTransactions(txs) + assert.Nil(t, err) + + err = th.SaveCurrentIntermediateTxToStorage() + assert.Nil(t, err) + assert.True(t, putWasCalled) +} diff --git a/process/block/preprocess/smartContractResults_test.go b/process/block/preprocess/smartContractResults_test.go index 2e67e19317c..c20aa1d5fc2 100644 --- a/process/block/preprocess/smartContractResults_test.go +++ 
b/process/block/preprocess/smartContractResults_test.go @@ -16,7 +16,7 @@ import ( ) func haveTime() time.Duration { - return time.Duration(2000 * time.Millisecond) + return 2000 * time.Millisecond } func haveTimeTrue() bool { @@ -619,7 +619,7 @@ func TestScrsPreprocessor_ProcessBlockTransactions(t *testing.T) { miniblock := block.MiniBlock{ ReceiverShardID: 0, - SenderShardID: 0, + SenderShardID: 1, TxHashes: txHashes, Type: block.SmartContractResultBlock, } @@ -789,7 +789,7 @@ func TestScrsPreprocessor_RestoreTxBlockIntoPools(t *testing.T) { miniblockPool := mock.NewCacherMock() scrRestored, miniBlockHashes, err := scr.RestoreTxBlockIntoPools(body, miniblockPool) - assert.Equal(t, miniBlockHashes[0], []uint8([]byte(nil))) + assert.Equal(t, miniBlockHashes[0], []byte(nil)) assert.Equal(t, scrRestored, 1) assert.Nil(t, err) } diff --git a/process/rewardTransaction/interceptedRewardTransaction_test.go b/process/rewardTransaction/interceptedRewardTransaction_test.go new file mode 100644 index 00000000000..a418a73f930 --- /dev/null +++ b/process/rewardTransaction/interceptedRewardTransaction_test.go @@ -0,0 +1,148 @@ +package rewardTransaction + +import ( + "math/big" + "testing" + + "github.com/ElrondNetwork/elrond-go/data/rewardTx" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/mock" + "github.com/stretchr/testify/assert" +) + +func TestNewInterceptedRewardTransaction_NilTxBuffShouldErr(t *testing.T) { + t.Parallel() + + irt, err := NewInterceptedRewardTransaction( + nil, + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + &mock.AddressConverterMock{}, + mock.NewMultiShardsCoordinatorMock(3)) + + assert.Nil(t, irt) + assert.Equal(t, process.ErrNilBuffer, err) +} + +func TestNewInterceptedRewardTransaction_NilMarshalizerShouldErr(t *testing.T) { + t.Parallel() + + txBuff := []byte("tx") + irt, err := NewInterceptedRewardTransaction( + txBuff, + nil, + &mock.HasherMock{}, + &mock.AddressConverterMock{}, + mock.NewMultiShardsCoordinatorMock(3)) + + assert.Nil(t, irt) + assert.Equal(t, process.ErrNilMarshalizer, err) +} + +func TestNewInterceptedRewardTransaction_NilHasherShouldErr(t *testing.T) { + t.Parallel() + + txBuff := []byte("tx") + irt, err := NewInterceptedRewardTransaction( + txBuff, + &mock.MarshalizerMock{}, + nil, + &mock.AddressConverterMock{}, + mock.NewMultiShardsCoordinatorMock(3)) + + assert.Nil(t, irt) + assert.Equal(t, process.ErrNilHasher, err) +} + +func TestNewInterceptedRewardTransaction_NilAddressConverterShouldErr(t *testing.T) { + t.Parallel() + + txBuff := []byte("tx") + irt, err := NewInterceptedRewardTransaction( + txBuff, + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + nil, + mock.NewMultiShardsCoordinatorMock(3)) + + assert.Nil(t, irt) + assert.Equal(t, process.ErrNilAddressConverter, err) +} + +func TestNewInterceptedRewardTransaction_NilShardCoordinatorShouldErr(t *testing.T) { + t.Parallel() + + txBuff := []byte("tx") + irt, err := NewInterceptedRewardTransaction( + txBuff, + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + &mock.AddressConverterMock{}, + nil) + + assert.Nil(t, irt) + assert.Equal(t, process.ErrNilShardCoordinator, err) +} + +func TestNewInterceptedRewardTransaction_OkValsShouldWork(t *testing.T) { + t.Parallel() + + rewTx := rewardTx.RewardTx{ + Round: 0, + Epoch: 0, + Value: new(big.Int).SetInt64(100), + RcvAddr: []byte("receiver"), + ShardId: 0, + } + + marshalizer := &mock.MarshalizerMock{} + txBuff, _ := marshalizer.Marshal(rewTx) + irt, err 
:= NewInterceptedRewardTransaction( + txBuff, + marshalizer, + &mock.HasherMock{}, + &mock.AddressConverterMock{}, + mock.NewMultiShardsCoordinatorMock(3)) + + assert.NotNil(t, irt) + assert.Nil(t, err) +} + +func TestNewInterceptedRewardTransaction_TestGetters(t *testing.T) { + t.Parallel() + + shardId := uint32(0) + rewTx := rewardTx.RewardTx{ + Round: 0, + Epoch: 0, + Value: new(big.Int).SetInt64(100), + RcvAddr: []byte("receiver"), + ShardId: shardId, + } + + marshalizer := &mock.MarshalizerMock{} + shardCoord := mock.NewMultiShardsCoordinatorMock(3) + shardCoord.ComputeIdCalled = func(address state.AddressContainer) uint32 { + return shardId + } + + txBuff, _ := marshalizer.Marshal(rewTx) + irt, err := NewInterceptedRewardTransaction( + txBuff, + marshalizer, + &mock.HasherMock{}, + &mock.AddressConverterMock{}, + shardCoord) + + assert.NotNil(t, irt) + assert.Nil(t, err) + + assert.Equal(t, shardId, irt.RcvShard()) + assert.Equal(t, shardId, irt.SndShard()) + assert.Equal(t, &rewTx, irt.RewardTransaction()) + assert.False(t, irt.IsAddressedToOtherShards()) + + txHash := irt.hasher.Compute(string(txBuff)) + assert.Equal(t, txHash, irt.Hash()) +} diff --git a/process/rewardTransaction/interceptor_test.go b/process/rewardTransaction/interceptor_test.go new file mode 100644 index 00000000000..826148397aa --- /dev/null +++ b/process/rewardTransaction/interceptor_test.go @@ -0,0 +1,277 @@ +package rewardTransaction + +import ( + "encoding/json" + "math/big" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go/data/rewardTx" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/mock" + "github.com/stretchr/testify/assert" +) + +func TestNewRewardTxInterceptor_NilMarshalizerShouldErr(t *testing.T) { + t.Parallel() + + rti, err := NewRewardTxInterceptor( + nil, + &mock.ShardedDataStub{}, + &mock.StorerStub{}, + &mock.AddressConverterMock{}, + &mock.HasherMock{}, + mock.NewMultiShardsCoordinatorMock(3)) + + assert.Nil(t, rti) + assert.Equal(t, process.ErrNilMarshalizer, err) +} + +func TestNewRewardTxInterceptor_NilRewardTxPoolShouldErr(t *testing.T) { + t.Parallel() + + rti, err := NewRewardTxInterceptor( + &mock.MarshalizerMock{}, + nil, + &mock.StorerStub{}, + &mock.AddressConverterMock{}, + &mock.HasherMock{}, + mock.NewMultiShardsCoordinatorMock(3)) + + assert.Nil(t, rti) + assert.Equal(t, process.ErrNilRewardTxDataPool, err) +} + +func TestNewRewardTxInterceptor_NilRewardTxStorerShouldErr(t *testing.T) { + t.Parallel() + + rti, err := NewRewardTxInterceptor( + &mock.MarshalizerMock{}, + &mock.ShardedDataStub{}, + nil, + &mock.AddressConverterMock{}, + &mock.HasherMock{}, + mock.NewMultiShardsCoordinatorMock(3)) + + assert.Nil(t, rti) + assert.Equal(t, process.ErrNilRewardsTxStorage, err) +} + +func TestNewRewardTxInterceptor_NilAddressConverterShouldErr(t *testing.T) { + t.Parallel() + + rti, err := NewRewardTxInterceptor( + &mock.MarshalizerMock{}, + &mock.ShardedDataStub{}, + &mock.StorerStub{}, + nil, + &mock.HasherMock{}, + mock.NewMultiShardsCoordinatorMock(3)) + + assert.Nil(t, rti) + assert.Equal(t, process.ErrNilAddressConverter, err) +} + +func TestNewRewardTxInterceptor_NilHasherShouldErr(t *testing.T) { + t.Parallel() + + rti, err := NewRewardTxInterceptor( + &mock.MarshalizerMock{}, + &mock.ShardedDataStub{}, + &mock.StorerStub{}, + &mock.AddressConverterMock{}, + nil, + mock.NewMultiShardsCoordinatorMock(3)) + + assert.Nil(t, rti) + assert.Equal(t, process.ErrNilHasher, err) +} + 
+func TestNewRewardTxInterceptor_NilShardCoordinatorShouldErr(t *testing.T) { + t.Parallel() + + rti, err := NewRewardTxInterceptor( + &mock.MarshalizerMock{}, + &mock.ShardedDataStub{}, + &mock.StorerStub{}, + &mock.AddressConverterMock{}, + &mock.HasherMock{}, + nil) + + assert.Nil(t, rti) + assert.Equal(t, process.ErrNilShardCoordinator, err) +} + +func TestNewRewardTxInterceptor_OkValsShouldWork(t *testing.T) { + t.Parallel() + + rti, err := NewRewardTxInterceptor( + &mock.MarshalizerMock{}, + &mock.ShardedDataStub{}, + &mock.StorerStub{}, + &mock.AddressConverterMock{}, + &mock.HasherMock{}, + mock.NewMultiShardsCoordinatorMock(3)) + + assert.NotNil(t, rti) + assert.Nil(t, err) + assert.False(t, rti.IsInterfaceNil()) +} + +func TestRewardTxInterceptor_ProcessReceivedMessageNilMessageShouldErr(t *testing.T) { + t.Parallel() + + rti, _ := NewRewardTxInterceptor( + &mock.MarshalizerMock{}, + &mock.ShardedDataStub{}, + &mock.StorerStub{}, + &mock.AddressConverterMock{}, + &mock.HasherMock{}, + mock.NewMultiShardsCoordinatorMock(3)) + + err := rti.ProcessReceivedMessage(nil) + assert.Equal(t, process.ErrNilMessage, err) +} + +func TestRewardTxInterceptor_ProcessReceivedMessageNilDataShouldErr(t *testing.T) { + t.Parallel() + + rti, _ := NewRewardTxInterceptor( + &mock.MarshalizerMock{}, + &mock.ShardedDataStub{}, + &mock.StorerStub{}, + &mock.AddressConverterMock{}, + &mock.HasherMock{}, + mock.NewMultiShardsCoordinatorMock(3)) + + message := &mock.P2PMessageMock{ + DataField: nil, + } + + err := rti.ProcessReceivedMessage(message) + assert.Equal(t, process.ErrNilDataToProcess, err) +} + +func TestRewardTxInterceptor_ProcessReceivedMessageIntraShardShouldWork(t *testing.T) { + t.Parallel() + + wasCalled := false + rti, _ := NewRewardTxInterceptor( + &mock.MarshalizerMock{}, + &mock.ShardedDataStub{ + AddDataCalled: func(key []byte, data interface{}, cacheId string) { + wasCalled = true + }, + }, + &mock.StorerStub{}, + &mock.AddressConverterMock{}, + &mock.HasherMock{}, + mock.NewMultiShardsCoordinatorMock(3)) + + rewardTx1 := rewardTx.RewardTx{ + Round: 1, + Epoch: 0, + Value: new(big.Int).SetInt64(157), + RcvAddr: []byte("rcvr1"), + ShardId: 0, + } + rewardTxBytes1, _ := rti.marshalizer.Marshal(rewardTx1) + + rewardTx2 := rewardTx.RewardTx{ + Round: 0, + Epoch: 1, + Value: new(big.Int).SetInt64(157), + RcvAddr: []byte("rcvr2"), + ShardId: 0, + } + rewardTxBytes2, _ := rti.marshalizer.Marshal(rewardTx2) + + var rewardTxsSlice [][]byte + rewardTxsSlice = append(rewardTxsSlice, rewardTxBytes1, rewardTxBytes2) + rewardTxsBuff, _ := json.Marshal(rewardTxsSlice) + + message := &mock.P2PMessageMock{ + DataField: rewardTxsBuff, + } + + err := rti.ProcessReceivedMessage(message) + time.Sleep(20 * time.Millisecond) + assert.Nil(t, err) + assert.True(t, wasCalled) +} + +func TestRewardTxInterceptor_ProcessReceivedMessageCrossShardShouldNotAdd(t *testing.T) { + t.Parallel() + + wasCalled := false + shardCoord := mock.NewMultiShardsCoordinatorMock(3) + shardCoord.ComputeIdCalled = func(address state.AddressContainer) uint32 { + return uint32(1) + } + rti, _ := NewRewardTxInterceptor( + &mock.MarshalizerMock{}, + &mock.ShardedDataStub{ + AddDataCalled: func(key []byte, data interface{}, cacheId string) { + wasCalled = true + }, + }, + &mock.StorerStub{}, + &mock.AddressConverterMock{}, + &mock.HasherMock{}, + shardCoord) + + rewardTx1 := rewardTx.RewardTx{ + Round: 1, + Epoch: 0, + Value: new(big.Int).SetInt64(157), + RcvAddr: []byte("rcvr1"), + ShardId: 1, + } + rewardTxBytes1, _ := 
rti.marshalizer.Marshal(rewardTx1) + + rewardTx2 := rewardTx.RewardTx{ + Round: 0, + Epoch: 1, + Value: new(big.Int).SetInt64(157), + RcvAddr: []byte("rcvr2"), + ShardId: 1, + } + rewardTxBytes2, _ := rti.marshalizer.Marshal(rewardTx2) + + var rewardTxsSlice [][]byte + rewardTxsSlice = append(rewardTxsSlice, rewardTxBytes1, rewardTxBytes2) + rewardTxsBuff, _ := json.Marshal(rewardTxsSlice) + + message := &mock.P2PMessageMock{ + DataField: rewardTxsBuff, + } + + err := rti.ProcessReceivedMessage(message) + time.Sleep(20 * time.Millisecond) + assert.Nil(t, err) + // check that AddData was not called, as tx is cross shard + assert.False(t, wasCalled) +} + +func TestRewardTxInterceptor_SetBroadcastCallback(t *testing.T) { + t.Parallel() + + rti, _ := NewRewardTxInterceptor( + &mock.MarshalizerMock{}, + &mock.ShardedDataStub{}, + &mock.StorerStub{}, + &mock.AddressConverterMock{}, + &mock.HasherMock{}, + mock.NewMultiShardsCoordinatorMock(3)) + + bytesToSend := []byte("test") + var bytesToReceive []byte + rti.SetBroadcastCallback(func(buffToSend []byte) { + bytesToReceive = buffToSend + return + }) + + rti.broadcastCallbackHandler(bytesToSend) + assert.Equal(t, bytesToSend, bytesToReceive) +} diff --git a/process/rewardTransaction/process.go b/process/rewardTransaction/process.go index 950b7fc27f6..9f0002826ff 100644 --- a/process/rewardTransaction/process.go +++ b/process/rewardTransaction/process.go @@ -21,7 +21,6 @@ type rewardTxProcessor struct { } // NewRewardTxProcessor creates a rewardTxProcessor instance -// TODO: add unit tests func NewRewardTxProcessor( accountsDB state.AccountsAdapter, adrConv state.AddressConverter, @@ -37,6 +36,9 @@ func NewRewardTxProcessor( if coordinator == nil { return nil, process.ErrNilShardCoordinator } + if rewardTxForwarder == nil { + return nil, process.ErrNilIntermediateTransactionHandler + } return &rewardTxProcessor{ accounts: accountsDB, diff --git a/process/rewardTransaction/process_test.go b/process/rewardTransaction/process_test.go new file mode 100644 index 00000000000..c75949ceab8 --- /dev/null +++ b/process/rewardTransaction/process_test.go @@ -0,0 +1,286 @@ +package rewardTransaction + +import ( + "errors" + "math/big" + "testing" + + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/mock" + "github.com/stretchr/testify/assert" +) + +func TestNewRewardTxProcessorNilAccountsDbShouldErr(t *testing.T) { + t.Parallel() + + rtp, err := NewRewardTxProcessor( + nil, + &mock.AddressConverterMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.IntermediateTransactionHandlerMock{}) + + assert.Nil(t, rtp) + assert.Equal(t, process.ErrNilAccountsAdapter, err) +} + +func TestNewRewardTxProcessorNilAddressConverterShouldErr(t *testing.T) { + t.Parallel() + + rtp, err := NewRewardTxProcessor( + &mock.AccountsStub{}, + nil, + mock.NewMultiShardsCoordinatorMock(3), + &mock.IntermediateTransactionHandlerMock{}) + + assert.Nil(t, rtp) + assert.Equal(t, process.ErrNilAddressConverter, err) +} + +func TestNewRewardTxProcessorNilShardCoordinatorShouldErr(t *testing.T) { + t.Parallel() + + rtp, err := NewRewardTxProcessor( + &mock.AccountsStub{}, + &mock.AddressConverterMock{}, + nil, + &mock.IntermediateTransactionHandlerMock{}) + + assert.Nil(t, rtp) + assert.Equal(t, process.ErrNilShardCoordinator, err) +} + +func TestNewRewardTxProcessorNilRewardTxForwarderShouldErr(t 
*testing.T) { + t.Parallel() + + rtp, err := NewRewardTxProcessor( + &mock.AccountsStub{}, + &mock.AddressConverterMock{}, + mock.NewMultiShardsCoordinatorMock(3), + nil) + + assert.Nil(t, rtp) + assert.Equal(t, process.ErrNilIntermediateTransactionHandler, err) +} + +func TestNewRewardTxProcessorOkValsShouldWork(t *testing.T) { + t.Parallel() + + rtp, err := NewRewardTxProcessor( + &mock.AccountsStub{}, + &mock.AddressConverterMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.IntermediateTransactionHandlerMock{}) + + assert.NotNil(t, rtp) + assert.Nil(t, err) + assert.False(t, rtp.IsInterfaceNil()) +} + +func TestRewardTxProcessor_ProcessRewardTransactionNilTxShouldErr(t *testing.T) { + t.Parallel() + + rtp, _ := NewRewardTxProcessor( + &mock.AccountsStub{}, + &mock.AddressConverterMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.IntermediateTransactionHandlerMock{}) + + err := rtp.ProcessRewardTransaction(nil) + assert.Equal(t, process.ErrNilRewardTransaction, err) +} + +func TestRewardTxProcessor_ProcessRewardTransactionNilTxValueShouldErr(t *testing.T) { + t.Parallel() + + rtp, _ := NewRewardTxProcessor( + &mock.AccountsStub{}, + &mock.AddressConverterMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.IntermediateTransactionHandlerMock{}) + + rwdTx := rewardTx.RewardTx{Value: nil} + err := rtp.ProcessRewardTransaction(&rwdTx) + assert.Equal(t, process.ErrNilValueFromRewardTransaction, err) +} + +func TestRewardTxProcessor_ProcessRewardTransactionCannotCreateAddressShouldErr(t *testing.T) { + t.Parallel() + + expectedErr := errors.New("cannot create address") + rtp, _ := NewRewardTxProcessor( + &mock.AccountsStub{}, + &mock.AddressConverterStub{ + CreateAddressFromPublicKeyBytesCalled: func(pubKey []byte) (state.AddressContainer, error) { + return nil, expectedErr + }, + }, + mock.NewMultiShardsCoordinatorMock(3), + &mock.IntermediateTransactionHandlerMock{}) + + rwdTx := rewardTx.RewardTx{ + Round: 0, + Epoch: 0, + Value: new(big.Int).SetInt64(100), + RcvAddr: []byte("rcvr"), + ShardId: 0, + } + + err := rtp.ProcessRewardTransaction(&rwdTx) + assert.Equal(t, expectedErr, err) +} + +func TestRewardTxProcessor_ProcessRewardTransactionAddressNotInNodesShardShouldNotExecute(t *testing.T) { + t.Parallel() + + getAccountWithJournalWasCalled := false + shardCoord := mock.NewMultiShardsCoordinatorMock(3) + shardCoord.ComputeIdCalled = func(address state.AddressContainer) uint32 { + return uint32(5) + } + rtp, _ := NewRewardTxProcessor( + &mock.AccountsStub{ + GetAccountWithJournalCalled: func(addressContainer state.AddressContainer) (state.AccountHandler, error) { + getAccountWithJournalWasCalled = true + return nil, nil + }, + }, + &mock.AddressConverterMock{}, + shardCoord, + &mock.IntermediateTransactionHandlerMock{}) + + rwdTx := rewardTx.RewardTx{ + Round: 0, + Epoch: 0, + Value: new(big.Int).SetInt64(100), + RcvAddr: []byte("rcvr"), + ShardId: 0, + } + + err := rtp.ProcessRewardTransaction(&rwdTx) + assert.Nil(t, err) + // account should not be requested as the address is not in node's shard + assert.False(t, getAccountWithJournalWasCalled) +} + +func TestRewardTxProcessor_ProcessRewardTransactionCannotGetAccountShouldErr(t *testing.T) { + t.Parallel() + + expectedErr := errors.New("cannot get account") + rtp, _ := NewRewardTxProcessor( + &mock.AccountsStub{ + GetAccountWithJournalCalled: func(addressContainer state.AddressContainer) (state.AccountHandler, error) { + return nil, expectedErr + }, + }, + &mock.AddressConverterMock{}, + 
mock.NewMultiShardsCoordinatorMock(3), + &mock.IntermediateTransactionHandlerMock{}) + + rwdTx := rewardTx.RewardTx{ + Round: 0, + Epoch: 0, + Value: new(big.Int).SetInt64(100), + RcvAddr: []byte("rcvr"), + ShardId: 0, + } + + err := rtp.ProcessRewardTransaction(&rwdTx) + assert.Equal(t, expectedErr, err) +} + +func TestRewardTxProcessor_ProcessRewardTransactionCannotAddIntermediateTxsShouldErr(t *testing.T) { + t.Parallel() + + expectedErr := errors.New("cannot add intermediate transactions") + rtp, _ := NewRewardTxProcessor( + &mock.AccountsStub{}, + &mock.AddressConverterMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.IntermediateTransactionHandlerMock{ + AddIntermediateTransactionsCalled: func(txs []data.TransactionHandler) error { + return expectedErr + }, + }) + + rwdTx := rewardTx.RewardTx{ + Round: 0, + Epoch: 0, + Value: new(big.Int).SetInt64(100), + RcvAddr: []byte("rcvr"), + ShardId: 0, + } + + err := rtp.ProcessRewardTransaction(&rwdTx) + assert.Equal(t, expectedErr, err) +} + +func TestRewardTxProcessor_ProcessRewardTransactionWrongTypeAssertionAccountHolderShouldErr(t *testing.T) { + t.Parallel() + + accountsDb := &mock.AccountsStub{ + GetAccountWithJournalCalled: func(addressContainer state.AddressContainer) (state.AccountHandler, error) { + return mock.NewAccountWrapMock(addressContainer, &mock.AccountTrackerStub{}), nil + }, + } + + rtp, _ := NewRewardTxProcessor( + accountsDb, + &mock.AddressConverterMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.IntermediateTransactionHandlerMock{}) + + rwdTx := rewardTx.RewardTx{ + Round: 0, + Epoch: 0, + Value: new(big.Int).SetInt64(100), + RcvAddr: []byte("rcvr"), + ShardId: 0, + } + + err := rtp.ProcessRewardTransaction(&rwdTx) + assert.Equal(t, process.ErrWrongTypeAssertion, err) +} + +func TestRewardTxProcessor_ProcessRewardTransactionShouldWork(t *testing.T) { + t.Parallel() + + journalizeWasCalled := false + saveAccountWasCalled := false + + accountsDb := &mock.AccountsStub{ + GetAccountWithJournalCalled: func(addressContainer state.AddressContainer) (state.AccountHandler, error) { + ats := &mock.AccountTrackerStub{ + JournalizeCalled: func(entry state.JournalEntry) { + journalizeWasCalled = true + }, + SaveAccountCalled: func(accountHandler state.AccountHandler) error { + saveAccountWasCalled = true + return nil + }, + } + return state.NewAccount(addressContainer, ats) + }, + } + + rtp, _ := NewRewardTxProcessor( + accountsDb, + &mock.AddressConverterMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.IntermediateTransactionHandlerMock{}) + + rwdTx := rewardTx.RewardTx{ + Round: 0, + Epoch: 0, + Value: new(big.Int).SetInt64(100), + RcvAddr: []byte("rcvr"), + ShardId: 0, + } + + err := rtp.ProcessRewardTransaction(&rwdTx) + assert.Nil(t, err) + assert.True(t, journalizeWasCalled) + assert.True(t, saveAccountWasCalled) +} From 7dbf9efb5af11ddc2370e17ba72cd9837e978eac Mon Sep 17 00:00:00 2001 From: bogdan-rosianu Date: Mon, 23 Sep 2019 15:19:20 +0300 Subject: [PATCH 123/234] EN-4110: fix test after merge --- process/block/preprocess/rewardTxPreProcessor.go | 2 +- process/block/preprocess/rewardTxPreProcessor_test.go | 8 ++++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/process/block/preprocess/rewardTxPreProcessor.go b/process/block/preprocess/rewardTxPreProcessor.go index 1f9ff0937eb..5855d33094b 100644 --- a/process/block/preprocess/rewardTxPreProcessor.go +++ b/process/block/preprocess/rewardTxPreProcessor.go @@ -2,9 +2,9 @@ package preprocess import ( "fmt" - 
"github.com/ElrondNetwork/elrond-go/core" "time" + "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/data/rewardTx" diff --git a/process/block/preprocess/rewardTxPreProcessor_test.go b/process/block/preprocess/rewardTxPreProcessor_test.go index 965a092dfe6..d245c3eab50 100644 --- a/process/block/preprocess/rewardTxPreProcessor_test.go +++ b/process/block/preprocess/rewardTxPreProcessor_test.go @@ -8,6 +8,7 @@ import ( "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/mock" + "github.com/ElrondNetwork/elrond-go/storage" "github.com/stretchr/testify/assert" ) @@ -566,6 +567,13 @@ func TestRewardTxPreprocessor_RestoreTxBlockIntoPools(t *testing.T) { return retMap, nil }, + GetStorerCalled: func(unitType dataRetriever.UnitType) storage.Storer { + return &mock.StorerStub{ + RemoveCalled: func(key []byte) error { + return nil + }, + } + }, } rtp, _ := NewRewardTxPreprocessor( tdp.RewardTransactions(), From eb0e4fce1e48d0c79c396b901a6e725bf2bd9f9d Mon Sep 17 00:00:00 2001 From: miiu96 Date: Mon, 23 Sep 2019 16:09:22 +0300 Subject: [PATCH 124/234] EN-3981 - add consensus information in elastic search --- cmd/node/main.go | 10 +++ consensus/spos/bls/blsSubroundsFactory.go | 9 ++ consensus/spos/bn/bnSubroundsFactory.go | 9 ++ .../spos/commonSubround/subroundStartRound.go | 31 +++++++ consensus/spos/sposFactory/sposFactory.go | 6 ++ core/indexer/data.go | 6 ++ core/indexer/elasticsearch.go | 83 ++++++++++++++++++- core/indexer/interface.go | 2 + node/defineOptions.go | 10 +++ node/node.go | 5 +- process/block/metablock.go | 15 ++++ sharding/indexHashedNodesCoordinator.go | 13 +++ sharding/interface.go | 1 + 13 files changed, 198 insertions(+), 2 deletions(-) diff --git a/cmd/node/main.go b/cmd/node/main.go index ff83112c554..464ef6da959 100644 --- a/cmd/node/main.go +++ b/cmd/node/main.go @@ -640,6 +640,13 @@ func startNode(ctx *cli.Context, log *logger.Logger, version string) error { return err } + var elasticIndexer indexer.Indexer + if coreServiceContainer == nil || coreServiceContainer.IsInterfaceNil() { + elasticIndexer = nil + } else { + elasticIndexer = coreServiceContainer.Indexer() + } + currentNode, err := createNode( generalConfig, nodesConfig, @@ -657,6 +664,7 @@ func startNode(ctx *cli.Context, log *logger.Logger, version string) error { networkComponents, uint64(ctx.GlobalUint(bootstrapRoundIndex.Name)), version, + elasticIndexer, ) if err != nil { return err @@ -1144,6 +1152,7 @@ func createNode( network *factory.Network, bootstrapRoundIndex uint64, version string, + indexer indexer.Indexer, ) (*node.Node, error) { consensusGroupSize, err := getConsensusGroupSize(nodesConfig, shardCoordinator) if err != nil { @@ -1184,6 +1193,7 @@ func createNode( node.WithTxStorageSize(config.TxStorage.Cache.Size), node.WithBootstrapRoundIndex(bootstrapRoundIndex), node.WithAppStatusHandler(core.StatusHandler), + node.WithIndexer(indexer), ) if err != nil { return nil, errors.New("error creating node: " + err.Error()) diff --git a/consensus/spos/bls/blsSubroundsFactory.go b/consensus/spos/bls/blsSubroundsFactory.go index c6d455c0449..b53d3d9ebd1 100644 --- a/consensus/spos/bls/blsSubroundsFactory.go +++ b/consensus/spos/bls/blsSubroundsFactory.go @@ -6,6 +6,7 @@ import ( "github.com/ElrondNetwork/elrond-go/consensus/spos" "github.com/ElrondNetwork/elrond-go/consensus/spos/commonSubround" 
"github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/core/indexer" "github.com/ElrondNetwork/elrond-go/statusHandler" ) @@ -17,6 +18,7 @@ type factory struct { worker spos.WorkerHandler appStatusHandler core.AppStatusHandler + indexer indexer.Indexer } // NewSubroundsFactory creates a new consensusState object @@ -73,6 +75,11 @@ func (fct *factory) SetAppStatusHandler(ash core.AppStatusHandler) error { return nil } +// SetIndexer method set indexer +func (fct *factory) SetIndexer(indexer indexer.Indexer) { + fct.indexer = indexer +} + // GenerateSubrounds will generate the subrounds used in BLS Cns func (fct *factory) GenerateSubrounds() error { fct.initConsensusThreshold() @@ -140,6 +147,8 @@ func (fct *factory) generateStartRoundSubround() error { return err } + subroundStartRound.SetIndexer(fct.indexer) + fct.consensusCore.Chronology().AddSubround(subroundStartRound) return nil diff --git a/consensus/spos/bn/bnSubroundsFactory.go b/consensus/spos/bn/bnSubroundsFactory.go index 18d2c4b3fdc..81928fe94fa 100644 --- a/consensus/spos/bn/bnSubroundsFactory.go +++ b/consensus/spos/bn/bnSubroundsFactory.go @@ -6,6 +6,7 @@ import ( "github.com/ElrondNetwork/elrond-go/consensus/spos" "github.com/ElrondNetwork/elrond-go/consensus/spos/commonSubround" "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/core/indexer" "github.com/ElrondNetwork/elrond-go/statusHandler" ) @@ -17,6 +18,7 @@ type factory struct { worker spos.WorkerHandler appStatusHandler core.AppStatusHandler + indexer indexer.Indexer } // NewSubroundsFactory creates a new factory for BN subrounds @@ -77,6 +79,11 @@ func (fct *factory) SetAppStatusHandler(ash core.AppStatusHandler) error { return nil } +// SetIndexer method set indexer +func (fct *factory) SetIndexer(indexer indexer.Indexer) { + fct.indexer = indexer +} + // GenerateSubrounds will generate the subrounds used in Belare & Naveen Cns func (fct *factory) GenerateSubrounds() error { fct.initConsensusThreshold() @@ -161,6 +168,8 @@ func (fct *factory) generateStartRoundSubround() error { return err } + subroundStartRound.SetIndexer(fct.indexer) + fct.consensusCore.Chronology().AddSubround(subroundStartRound) return nil diff --git a/consensus/spos/commonSubround/subroundStartRound.go b/consensus/spos/commonSubround/subroundStartRound.go index 4dc9a782458..361df5d4c6b 100644 --- a/consensus/spos/commonSubround/subroundStartRound.go +++ b/consensus/spos/commonSubround/subroundStartRound.go @@ -1,12 +1,14 @@ package commonSubround import ( + "bytes" "encoding/hex" "fmt" "time" "github.com/ElrondNetwork/elrond-go/consensus/spos" "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/core/indexer" "github.com/ElrondNetwork/elrond-go/core/logger" "github.com/ElrondNetwork/elrond-go/statusHandler" ) @@ -22,6 +24,7 @@ type SubroundStartRound struct { broadcastUnnotarisedBlocks func() appStatusHandler core.AppStatusHandler + indexer indexer.Indexer } // NewSubroundStartRound creates a SubroundStartRound object @@ -48,6 +51,7 @@ func NewSubroundStartRound( executeStoredMessages, broadcastUnnotarisedBlocks, statusHandler.NewNilStatusHandler(), + nil, } srStartRound.Job = srStartRound.doStartRoundJob srStartRound.Check = srStartRound.doStartRoundConsensusCheck @@ -85,6 +89,11 @@ func (sr *SubroundStartRound) SetAppStatusHandler(ash core.AppStatusHandler) err return nil } +// SetIndexer method set indexer +func (sr *SubroundStartRound) SetIndexer(indexer indexer.Indexer) { + sr.indexer = indexer +} + // 
doStartRoundJob method does the job of the subround StartRound func (sr *SubroundStartRound) doStartRoundJob() bool { sr.ResetConsensusState() @@ -147,6 +156,8 @@ func (sr *SubroundStartRound) initCurrentRound() bool { pubKeys := sr.ConsensusGroup() + sr.indexRoundIfNeeded(pubKeys) + selfIndex, err := sr.SelfConsensusGroupIndex() if err != nil { log.Info(fmt.Sprintf("%scanceled round %d in subround %s, not in the consensus group\n", @@ -195,6 +206,26 @@ func (sr *SubroundStartRound) initCurrentRound() bool { return true } +func (sr *SubroundStartRound) indexRoundIfNeeded(pubKeys []string) { + if sr.indexer == nil || sr.IsInterfaceNil() { + return + } + + validatorsPubKeys := sr.NodesCoordinator().GetValidatorsPubKeys() + shardId := sr.ShardCoordinator().SelfId() + signersIndexes := make([]uint64, 0) + + for _, pubKey := range pubKeys { + for index, value := range validatorsPubKeys[shardId] { + if bytes.Equal([]byte(pubKey), value) { + signersIndexes = append(signersIndexes, uint64(index)) + } + } + } + + sr.indexer.SaveRoundInfo(sr.Rounder().Index(), signersIndexes) +} + func (sr *SubroundStartRound) generateNextConsensusGroup(roundIndex int64) error { currentHeader := sr.Blockchain().GetCurrentBlockHeader() if currentHeader == nil { diff --git a/consensus/spos/sposFactory/sposFactory.go b/consensus/spos/sposFactory/sposFactory.go index ce192c09899..776c0478d29 100644 --- a/consensus/spos/sposFactory/sposFactory.go +++ b/consensus/spos/sposFactory/sposFactory.go @@ -7,6 +7,7 @@ import ( "github.com/ElrondNetwork/elrond-go/consensus/spos/bls" "github.com/ElrondNetwork/elrond-go/consensus/spos/bn" "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/core/indexer" "github.com/ElrondNetwork/elrond-go/crypto" "github.com/ElrondNetwork/elrond-go/marshal" "github.com/ElrondNetwork/elrond-go/sharding" @@ -19,6 +20,7 @@ func GetSubroundsFactory( worker spos.WorkerHandler, consensusType string, appStatusHandler core.AppStatusHandler, + indexer indexer.Indexer, ) (spos.SubroundsFactory, error) { switch consensusType { @@ -29,6 +31,8 @@ func GetSubroundsFactory( } err = subRoundFactoryBls.SetAppStatusHandler(appStatusHandler) + + subRoundFactoryBls.SetIndexer(indexer) if err != nil { return nil, err } @@ -45,6 +49,8 @@ func GetSubroundsFactory( return nil, err } + subRoundFactoryBn.SetIndexer(indexer) + return subRoundFactoryBn, nil } diff --git a/core/indexer/data.go b/core/indexer/data.go index a3ac6d48bbe..3c7c07ba767 100644 --- a/core/indexer/data.go +++ b/core/indexer/data.go @@ -32,6 +32,7 @@ type Transaction struct { // plus some extra information for ease of search and filter type Block struct { Nonce uint64 `json:"nonce"` + Round uint64 `json:"round"` ShardID uint32 `json:"shardId"` Hash string `json:"hash"` Proposer string `json:"proposer"` @@ -44,6 +45,11 @@ type Block struct { PrevHash string `json:"prevHash"` } +// SignersIndexes is a structure containing block signers +type SignersIndexes struct { + SignersIndexes []uint64 `json:"signersIndexes"` +} + // TPS is a structure containing all the fields that need to // be saved for a shard statistic in the database type TPS struct { diff --git a/core/indexer/elasticsearch.go b/core/indexer/elasticsearch.go index 168280060a4..2278fe3b2c5 100644 --- a/core/indexer/elasticsearch.go +++ b/core/indexer/elasticsearch.go @@ -8,6 +8,7 @@ import ( "io" "math/big" "net/http" + "strconv" "strings" "time" @@ -31,6 +32,8 @@ const txBulkSize = 1000 const txIndex = "transactions" const blockIndex = "blocks" const tpsIndex = "tps" 
+const validatorsIndex = "validators" +const roundIndex = "rounds" const metachainTpsDocID = "meta" const shardTpsDocIDPrefix = "shard" @@ -110,6 +113,16 @@ func NewElasticIndexer( return nil, err } + err = indexer.checkAndCreateIndex(validatorsIndex, nil) + if err != nil { + return nil, err + } + + err = indexer.checkAndCreateIndex(roundIndex, nil) + if err != nil { + return nil, err + } + return indexer, nil } @@ -198,7 +211,8 @@ func (ei *elasticIndexer) createIndex(index string, body io.Reader) error { func (ei *elasticIndexer) SaveBlock( bodyHandler data.BodyHandler, headerhandler data.HeaderHandler, - txPool map[string]data.TransactionHandler) { + txPool map[string]data.TransactionHandler, +) { if headerhandler == nil || headerhandler.IsInterfaceNil() { ei.logger.Warn(ErrNoHeader.Error()) @@ -223,6 +237,72 @@ func (ei *elasticIndexer) SaveBlock( } } +// SaveRoundInfo will save on elastic search information about round +func (ei *elasticIndexer) SaveRoundInfo(round int64, signersIndexes []uint64) { + var buff bytes.Buffer + + serializedSignersIndexes, err := ei.marshalizer.Marshal(SignersIndexes{SignersIndexes: signersIndexes}) + if err != nil { + ei.logger.Warn("could not marshal signers indexes") + return + } + + buff.Grow(len(serializedSignersIndexes)) + buff.Write(serializedSignersIndexes) + + req := esapi.IndexRequest{ + Index: roundIndex, + DocumentID: strconv.Itoa(int(round)), + Body: bytes.NewReader(buff.Bytes()), + Refresh: "true", + } + + res, err := req.Do(context.Background(), ei.db) + if err != nil { + ei.logger.Warn(fmt.Sprintf("Could not index round informations: %s", err)) + return + } + + defer closeESResponseBody(res) + + if res.IsError() { + ei.logger.Warn(res.String()) + } +} + +//SaveValidatorsPubKeys will sent all validators public keys to elastic search +func (ei *elasticIndexer) SaveValidatorsPubKeys(validatorsPubKeys map[uint32][][]byte) { + var buff bytes.Buffer + + serializedValidatorPubKeys, err := ei.marshalizer.Marshal(validatorsPubKeys) + if err != nil { + ei.logger.Warn("could not marshal validators public keys") + return + } + + buff.Grow(len(serializedValidatorPubKeys)) + buff.Write(serializedValidatorPubKeys) + + req := esapi.IndexRequest{ + Index: validatorsIndex, + DocumentID: "validators_list", + Body: bytes.NewReader(buff.Bytes()), + Refresh: "true", + } + + res, err := req.Do(context.Background(), ei.db) + if err != nil { + ei.logger.Warn(fmt.Sprintf("Could not index validators public keys: %s", err)) + return + } + + defer closeESResponseBody(res) + + if res.IsError() { + ei.logger.Warn(res.String()) + } +} + func (ei *elasticIndexer) getSerializedElasticBlockAndHeaderHash(header data.HeaderHandler) ([]byte, []byte) { h, err := ei.marshalizer.Marshal(header) if err != nil { @@ -233,6 +313,7 @@ func (ei *elasticIndexer) getSerializedElasticBlockAndHeaderHash(header data.Hea headerHash := ei.hasher.Compute(string(h)) elasticBlock := Block{ Nonce: header.GetNonce(), + Round: header.GetRound(), ShardID: header.GetShardID(), Hash: hex.EncodeToString(headerHash), // TODO: We should add functionality for proposer and validators diff --git a/core/indexer/interface.go b/core/indexer/interface.go index af9bb2afde2..e1f3da8646b 100644 --- a/core/indexer/interface.go +++ b/core/indexer/interface.go @@ -9,6 +9,8 @@ import ( // This could be an elasticsearch index, a MySql database or any other external services. 
type Indexer interface { SaveBlock(body data.BodyHandler, header data.HeaderHandler, txPool map[string]data.TransactionHandler) + SaveRoundInfo(round int64, signersIndexes []uint64) UpdateTPS(tpsBenchmark statistics.TPSBenchmark) + SaveValidatorsPubKeys(validatorsPubKeys map[uint32][][]byte) IsInterfaceNil() bool } diff --git a/node/defineOptions.go b/node/defineOptions.go index ed1a5b41eb3..decc3ccdffb 100644 --- a/node/defineOptions.go +++ b/node/defineOptions.go @@ -6,6 +6,7 @@ import ( "github.com/ElrondNetwork/elrond-go/consensus" "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/core/indexer" "github.com/ElrondNetwork/elrond-go/crypto" "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/state" @@ -400,3 +401,12 @@ func WithAppStatusHandler(aph core.AppStatusHandler) Option { return nil } } + +// WithIndexer sets up a indexer for the Node +func WithIndexer(indexer indexer.Indexer) Option { + return func(n *Node) error { + n.indexer = indexer + + return nil + } +} diff --git a/node/node.go b/node/node.go index e7c178d61fe..33ee41ab779 100644 --- a/node/node.go +++ b/node/node.go @@ -16,6 +16,7 @@ import ( "github.com/ElrondNetwork/elrond-go/consensus/spos" "github.com/ElrondNetwork/elrond-go/consensus/spos/sposFactory" "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/core/indexer" "github.com/ElrondNetwork/elrond-go/core/logger" "github.com/ElrondNetwork/elrond-go/core/partitioning" "github.com/ElrondNetwork/elrond-go/crypto" @@ -100,6 +101,8 @@ type Node struct { txStorageSize uint32 currentSendingGoRoutines int32 bootstrapRoundIndex uint64 + + indexer indexer.Indexer } // ApplyOptions can set up different configurable options of a Node instance @@ -308,7 +311,7 @@ func (n *Node) StartConsensus() error { return err } - fct, err := sposFactory.GetSubroundsFactory(consensusDataContainer, consensusState, worker, n.consensusType, n.appStatusHandler) + fct, err := sposFactory.GetSubroundsFactory(consensusDataContainer, consensusState, worker, n.consensusType, n.appStatusHandler, n.indexer) if err != nil { return err } diff --git a/process/block/metablock.go b/process/block/metablock.go index 063da228db3..981644f3b18 100644 --- a/process/block/metablock.go +++ b/process/block/metablock.go @@ -132,6 +132,9 @@ func NewMetaProcessor( mp.shardsHeadersNonce = &sync.Map{} + // TODO move this call from here in another place it's not ok to call this on constructor method + mp.indexValidatorsListIfNeeded() + return &mp, nil } @@ -267,6 +270,18 @@ func (mp *metaProcessor) indexBlock(metaBlock *block.MetaBlock, headerPool map[s //TODO: maybe index metablocks also? 
} +func (mp *metaProcessor) indexValidatorsListIfNeeded() { + if mp.core == nil || mp.core.Indexer() == nil { + return + } + + validatorsPubKeys := mp.nodesCoordinator.GetValidatorsPubKeys() + + if validatorsPubKeys != nil { + go mp.core.Indexer().SaveValidatorsPubKeys(validatorsPubKeys) + } +} + // removeBlockInfoFromPool removes the block info from associated pools func (mp *metaProcessor) removeBlockInfoFromPool(header *block.MetaBlock) error { if header == nil || header.IsInterfaceNil() { diff --git a/sharding/indexHashedNodesCoordinator.go b/sharding/indexHashedNodesCoordinator.go index 68c0a5ce50a..82b82d37835 100644 --- a/sharding/indexHashedNodesCoordinator.go +++ b/sharding/indexHashedNodesCoordinator.go @@ -226,6 +226,19 @@ func (ihgs *indexHashedNodesCoordinator) GetSelectedPublicKeys(selection []byte, return publicKeys, nil } +// GetValidatorsPubKeys will return all validator public keys for all shards +func (ihgs *indexHashedNodesCoordinator) GetValidatorsPubKeys() map[uint32][][]byte { + validatorsPubKeys := make(map[uint32][][]byte) + + for shardId, shardEligible := range ihgs.nodesMap { + for i := 0; i < len(shardEligible); i++ { + validatorsPubKeys[shardId] = append(validatorsPubKeys[shardId], ihgs.nodesMap[shardId][i].PubKey()) + } + } + + return validatorsPubKeys +} + func (ihgs *indexHashedNodesCoordinator) expandEligibleList(shardId uint32) []Validator { //TODO implement an expand eligible list variant return ihgs.nodesMap[shardId] diff --git a/sharding/interface.go b/sharding/interface.go index fe4a1753597..4b295327ef8 100644 --- a/sharding/interface.go +++ b/sharding/interface.go @@ -39,6 +39,7 @@ type NodesCoordinator interface { // PublicKeysSelector allows retrieval of eligible validators public keys type PublicKeysSelector interface { + GetValidatorsPubKeys() map[uint32][][]byte GetSelectedPublicKeys(selection []byte, shardId uint32) (publicKeys []string, err error) GetValidatorsPublicKeys(randomness []byte, round uint64, shardId uint32) ([]string, error) GetValidatorsRewardsAddresses(randomness []byte, round uint64, shardId uint32) ([]string, error) From de9c9cd9d2c3b93f8e35a49c85121522b8791924 Mon Sep 17 00:00:00 2001 From: miiu96 Date: Mon, 23 Sep 2019 16:12:56 +0300 Subject: [PATCH 125/234] EN-3987 - fix after review --- process/block/poolsCleaner/txPoolsCleaner.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/process/block/poolsCleaner/txPoolsCleaner.go b/process/block/poolsCleaner/txPoolsCleaner.go index 61da3f3fbc0..6e5f5aa4ea4 100644 --- a/process/block/poolsCleaner/txPoolsCleaner.go +++ b/process/block/poolsCleaner/txPoolsCleaner.go @@ -38,7 +38,7 @@ func NewTxsPoolsCleaner( return nil, process.ErrNilDataPoolHolder } transactionPool := dataPool.Transactions() - if transactionPool == nil { + if transactionPool == nil || transactionPool.IsInterfaceNil() { return nil, process.ErrNilTransactionPool } if addrConverter == nil || addrConverter.IsInterfaceNil() { From 32e6214d703d38d8c59612de8411192bd01a9a99 Mon Sep 17 00:00:00 2001 From: bogdan-rosianu Date: Mon, 23 Sep 2019 16:52:55 +0300 Subject: [PATCH 126/234] EN-4120: fixes after review --- process/rewardTransaction/export_test.go | 21 +++++++++++ .../interceptedRewardTransaction_test.go | 19 +++++----- process/rewardTransaction/interceptor_test.go | 37 ++++++++++--------- process/rewardTransaction/process_test.go | 29 ++++++++------- 4 files changed, 65 insertions(+), 41 deletions(-) create mode 100644 process/rewardTransaction/export_test.go diff --git 
a/process/rewardTransaction/export_test.go b/process/rewardTransaction/export_test.go new file mode 100644 index 00000000000..301fd02f5f8 --- /dev/null +++ b/process/rewardTransaction/export_test.go @@ -0,0 +1,21 @@ +package rewardTransaction + +import ( + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" +) + +// Hasher will return the hasher of InterceptedRewardTransaction for using in test files +func (irt *InterceptedRewardTransaction) Hasher() hashing.Hasher { + return irt.hasher +} + +// Marshalizer will return the marshalizer of RewardTxInterceptor for using in test files +func (rti *RewardTxInterceptor) Marshalizer() marshal.Marshalizer { + return rti.marshalizer +} + +// BroadcastCallbackHandler will call the broadcast callback handler of RewardTxInterceptor for using in test files +func (rti *RewardTxInterceptor) BroadcastCallbackHandler(buffToSend []byte) { + rti.broadcastCallbackHandler(buffToSend) +} diff --git a/process/rewardTransaction/interceptedRewardTransaction_test.go b/process/rewardTransaction/interceptedRewardTransaction_test.go index a418a73f930..ea1858af2c5 100644 --- a/process/rewardTransaction/interceptedRewardTransaction_test.go +++ b/process/rewardTransaction/interceptedRewardTransaction_test.go @@ -1,4 +1,4 @@ -package rewardTransaction +package rewardTransaction_test import ( "math/big" @@ -8,13 +8,14 @@ import ( "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/mock" + "github.com/ElrondNetwork/elrond-go/process/rewardTransaction" "github.com/stretchr/testify/assert" ) func TestNewInterceptedRewardTransaction_NilTxBuffShouldErr(t *testing.T) { t.Parallel() - irt, err := NewInterceptedRewardTransaction( + irt, err := rewardTransaction.NewInterceptedRewardTransaction( nil, &mock.MarshalizerMock{}, &mock.HasherMock{}, @@ -29,7 +30,7 @@ func TestNewInterceptedRewardTransaction_NilMarshalizerShouldErr(t *testing.T) { t.Parallel() txBuff := []byte("tx") - irt, err := NewInterceptedRewardTransaction( + irt, err := rewardTransaction.NewInterceptedRewardTransaction( txBuff, nil, &mock.HasherMock{}, @@ -44,7 +45,7 @@ func TestNewInterceptedRewardTransaction_NilHasherShouldErr(t *testing.T) { t.Parallel() txBuff := []byte("tx") - irt, err := NewInterceptedRewardTransaction( + irt, err := rewardTransaction.NewInterceptedRewardTransaction( txBuff, &mock.MarshalizerMock{}, nil, @@ -59,7 +60,7 @@ func TestNewInterceptedRewardTransaction_NilAddressConverterShouldErr(t *testing t.Parallel() txBuff := []byte("tx") - irt, err := NewInterceptedRewardTransaction( + irt, err := rewardTransaction.NewInterceptedRewardTransaction( txBuff, &mock.MarshalizerMock{}, &mock.HasherMock{}, @@ -74,7 +75,7 @@ func TestNewInterceptedRewardTransaction_NilShardCoordinatorShouldErr(t *testing t.Parallel() txBuff := []byte("tx") - irt, err := NewInterceptedRewardTransaction( + irt, err := rewardTransaction.NewInterceptedRewardTransaction( txBuff, &mock.MarshalizerMock{}, &mock.HasherMock{}, @@ -98,7 +99,7 @@ func TestNewInterceptedRewardTransaction_OkValsShouldWork(t *testing.T) { marshalizer := &mock.MarshalizerMock{} txBuff, _ := marshalizer.Marshal(rewTx) - irt, err := NewInterceptedRewardTransaction( + irt, err := rewardTransaction.NewInterceptedRewardTransaction( txBuff, marshalizer, &mock.HasherMock{}, @@ -128,7 +129,7 @@ func TestNewInterceptedRewardTransaction_TestGetters(t *testing.T) { } txBuff, _ := marshalizer.Marshal(rewTx) - irt, err := 
NewInterceptedRewardTransaction( + irt, err := rewardTransaction.NewInterceptedRewardTransaction( txBuff, marshalizer, &mock.HasherMock{}, @@ -143,6 +144,6 @@ func TestNewInterceptedRewardTransaction_TestGetters(t *testing.T) { assert.Equal(t, &rewTx, irt.RewardTransaction()) assert.False(t, irt.IsAddressedToOtherShards()) - txHash := irt.hasher.Compute(string(txBuff)) + txHash := irt.Hasher().Compute(string(txBuff)) assert.Equal(t, txHash, irt.Hash()) } diff --git a/process/rewardTransaction/interceptor_test.go b/process/rewardTransaction/interceptor_test.go index 826148397aa..e2921faaaaa 100644 --- a/process/rewardTransaction/interceptor_test.go +++ b/process/rewardTransaction/interceptor_test.go @@ -1,4 +1,4 @@ -package rewardTransaction +package rewardTransaction_test import ( "encoding/json" @@ -10,13 +10,14 @@ import ( "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/mock" + "github.com/ElrondNetwork/elrond-go/process/rewardTransaction" "github.com/stretchr/testify/assert" ) func TestNewRewardTxInterceptor_NilMarshalizerShouldErr(t *testing.T) { t.Parallel() - rti, err := NewRewardTxInterceptor( + rti, err := rewardTransaction.NewRewardTxInterceptor( nil, &mock.ShardedDataStub{}, &mock.StorerStub{}, @@ -31,7 +32,7 @@ func TestNewRewardTxInterceptor_NilMarshalizerShouldErr(t *testing.T) { func TestNewRewardTxInterceptor_NilRewardTxPoolShouldErr(t *testing.T) { t.Parallel() - rti, err := NewRewardTxInterceptor( + rti, err := rewardTransaction.NewRewardTxInterceptor( &mock.MarshalizerMock{}, nil, &mock.StorerStub{}, @@ -46,7 +47,7 @@ func TestNewRewardTxInterceptor_NilRewardTxPoolShouldErr(t *testing.T) { func TestNewRewardTxInterceptor_NilRewardTxStorerShouldErr(t *testing.T) { t.Parallel() - rti, err := NewRewardTxInterceptor( + rti, err := rewardTransaction.NewRewardTxInterceptor( &mock.MarshalizerMock{}, &mock.ShardedDataStub{}, nil, @@ -61,7 +62,7 @@ func TestNewRewardTxInterceptor_NilRewardTxStorerShouldErr(t *testing.T) { func TestNewRewardTxInterceptor_NilAddressConverterShouldErr(t *testing.T) { t.Parallel() - rti, err := NewRewardTxInterceptor( + rti, err := rewardTransaction.NewRewardTxInterceptor( &mock.MarshalizerMock{}, &mock.ShardedDataStub{}, &mock.StorerStub{}, @@ -76,7 +77,7 @@ func TestNewRewardTxInterceptor_NilAddressConverterShouldErr(t *testing.T) { func TestNewRewardTxInterceptor_NilHasherShouldErr(t *testing.T) { t.Parallel() - rti, err := NewRewardTxInterceptor( + rti, err := rewardTransaction.NewRewardTxInterceptor( &mock.MarshalizerMock{}, &mock.ShardedDataStub{}, &mock.StorerStub{}, @@ -91,7 +92,7 @@ func TestNewRewardTxInterceptor_NilHasherShouldErr(t *testing.T) { func TestNewRewardTxInterceptor_NilShardCoordinatorShouldErr(t *testing.T) { t.Parallel() - rti, err := NewRewardTxInterceptor( + rti, err := rewardTransaction.NewRewardTxInterceptor( &mock.MarshalizerMock{}, &mock.ShardedDataStub{}, &mock.StorerStub{}, @@ -106,7 +107,7 @@ func TestNewRewardTxInterceptor_NilShardCoordinatorShouldErr(t *testing.T) { func TestNewRewardTxInterceptor_OkValsShouldWork(t *testing.T) { t.Parallel() - rti, err := NewRewardTxInterceptor( + rti, err := rewardTransaction.NewRewardTxInterceptor( &mock.MarshalizerMock{}, &mock.ShardedDataStub{}, &mock.StorerStub{}, @@ -122,7 +123,7 @@ func TestNewRewardTxInterceptor_OkValsShouldWork(t *testing.T) { func TestRewardTxInterceptor_ProcessReceivedMessageNilMessageShouldErr(t *testing.T) { t.Parallel() - rti, _ := NewRewardTxInterceptor( + rti, _ 
:= rewardTransaction.NewRewardTxInterceptor( &mock.MarshalizerMock{}, &mock.ShardedDataStub{}, &mock.StorerStub{}, @@ -137,7 +138,7 @@ func TestRewardTxInterceptor_ProcessReceivedMessageNilMessageShouldErr(t *testin func TestRewardTxInterceptor_ProcessReceivedMessageNilDataShouldErr(t *testing.T) { t.Parallel() - rti, _ := NewRewardTxInterceptor( + rti, _ := rewardTransaction.NewRewardTxInterceptor( &mock.MarshalizerMock{}, &mock.ShardedDataStub{}, &mock.StorerStub{}, @@ -157,7 +158,7 @@ func TestRewardTxInterceptor_ProcessReceivedMessageIntraShardShouldWork(t *testi t.Parallel() wasCalled := false - rti, _ := NewRewardTxInterceptor( + rti, _ := rewardTransaction.NewRewardTxInterceptor( &mock.MarshalizerMock{}, &mock.ShardedDataStub{ AddDataCalled: func(key []byte, data interface{}, cacheId string) { @@ -176,7 +177,7 @@ func TestRewardTxInterceptor_ProcessReceivedMessageIntraShardShouldWork(t *testi RcvAddr: []byte("rcvr1"), ShardId: 0, } - rewardTxBytes1, _ := rti.marshalizer.Marshal(rewardTx1) + rewardTxBytes1, _ := rti.Marshalizer().Marshal(rewardTx1) rewardTx2 := rewardTx.RewardTx{ Round: 0, @@ -185,7 +186,7 @@ func TestRewardTxInterceptor_ProcessReceivedMessageIntraShardShouldWork(t *testi RcvAddr: []byte("rcvr2"), ShardId: 0, } - rewardTxBytes2, _ := rti.marshalizer.Marshal(rewardTx2) + rewardTxBytes2, _ := rti.Marshalizer().Marshal(rewardTx2) var rewardTxsSlice [][]byte rewardTxsSlice = append(rewardTxsSlice, rewardTxBytes1, rewardTxBytes2) @@ -209,7 +210,7 @@ func TestRewardTxInterceptor_ProcessReceivedMessageCrossShardShouldNotAdd(t *tes shardCoord.ComputeIdCalled = func(address state.AddressContainer) uint32 { return uint32(1) } - rti, _ := NewRewardTxInterceptor( + rti, _ := rewardTransaction.NewRewardTxInterceptor( &mock.MarshalizerMock{}, &mock.ShardedDataStub{ AddDataCalled: func(key []byte, data interface{}, cacheId string) { @@ -228,7 +229,7 @@ func TestRewardTxInterceptor_ProcessReceivedMessageCrossShardShouldNotAdd(t *tes RcvAddr: []byte("rcvr1"), ShardId: 1, } - rewardTxBytes1, _ := rti.marshalizer.Marshal(rewardTx1) + rewardTxBytes1, _ := rti.Marshalizer().Marshal(rewardTx1) rewardTx2 := rewardTx.RewardTx{ Round: 0, @@ -237,7 +238,7 @@ func TestRewardTxInterceptor_ProcessReceivedMessageCrossShardShouldNotAdd(t *tes RcvAddr: []byte("rcvr2"), ShardId: 1, } - rewardTxBytes2, _ := rti.marshalizer.Marshal(rewardTx2) + rewardTxBytes2, _ := rti.Marshalizer().Marshal(rewardTx2) var rewardTxsSlice [][]byte rewardTxsSlice = append(rewardTxsSlice, rewardTxBytes1, rewardTxBytes2) @@ -257,7 +258,7 @@ func TestRewardTxInterceptor_ProcessReceivedMessageCrossShardShouldNotAdd(t *tes func TestRewardTxInterceptor_SetBroadcastCallback(t *testing.T) { t.Parallel() - rti, _ := NewRewardTxInterceptor( + rti, _ := rewardTransaction.NewRewardTxInterceptor( &mock.MarshalizerMock{}, &mock.ShardedDataStub{}, &mock.StorerStub{}, @@ -272,6 +273,6 @@ func TestRewardTxInterceptor_SetBroadcastCallback(t *testing.T) { return }) - rti.broadcastCallbackHandler(bytesToSend) + rti.BroadcastCallbackHandler(bytesToSend) assert.Equal(t, bytesToSend, bytesToReceive) } diff --git a/process/rewardTransaction/process_test.go b/process/rewardTransaction/process_test.go index c75949ceab8..758706145c1 100644 --- a/process/rewardTransaction/process_test.go +++ b/process/rewardTransaction/process_test.go @@ -1,4 +1,4 @@ -package rewardTransaction +package rewardTransaction_test import ( "errors" @@ -10,13 +10,14 @@ import ( "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/process" 
"github.com/ElrondNetwork/elrond-go/process/mock" + "github.com/ElrondNetwork/elrond-go/process/rewardTransaction" "github.com/stretchr/testify/assert" ) func TestNewRewardTxProcessorNilAccountsDbShouldErr(t *testing.T) { t.Parallel() - rtp, err := NewRewardTxProcessor( + rtp, err := rewardTransaction.NewRewardTxProcessor( nil, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(3), @@ -29,7 +30,7 @@ func TestNewRewardTxProcessorNilAccountsDbShouldErr(t *testing.T) { func TestNewRewardTxProcessorNilAddressConverterShouldErr(t *testing.T) { t.Parallel() - rtp, err := NewRewardTxProcessor( + rtp, err := rewardTransaction.NewRewardTxProcessor( &mock.AccountsStub{}, nil, mock.NewMultiShardsCoordinatorMock(3), @@ -42,7 +43,7 @@ func TestNewRewardTxProcessorNilAddressConverterShouldErr(t *testing.T) { func TestNewRewardTxProcessorNilShardCoordinatorShouldErr(t *testing.T) { t.Parallel() - rtp, err := NewRewardTxProcessor( + rtp, err := rewardTransaction.NewRewardTxProcessor( &mock.AccountsStub{}, &mock.AddressConverterMock{}, nil, @@ -55,7 +56,7 @@ func TestNewRewardTxProcessorNilShardCoordinatorShouldErr(t *testing.T) { func TestNewRewardTxProcessorNilRewardTxForwarderShouldErr(t *testing.T) { t.Parallel() - rtp, err := NewRewardTxProcessor( + rtp, err := rewardTransaction.NewRewardTxProcessor( &mock.AccountsStub{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(3), @@ -68,7 +69,7 @@ func TestNewRewardTxProcessorNilRewardTxForwarderShouldErr(t *testing.T) { func TestNewRewardTxProcessorOkValsShouldWork(t *testing.T) { t.Parallel() - rtp, err := NewRewardTxProcessor( + rtp, err := rewardTransaction.NewRewardTxProcessor( &mock.AccountsStub{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(3), @@ -82,7 +83,7 @@ func TestNewRewardTxProcessorOkValsShouldWork(t *testing.T) { func TestRewardTxProcessor_ProcessRewardTransactionNilTxShouldErr(t *testing.T) { t.Parallel() - rtp, _ := NewRewardTxProcessor( + rtp, _ := rewardTransaction.NewRewardTxProcessor( &mock.AccountsStub{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(3), @@ -95,7 +96,7 @@ func TestRewardTxProcessor_ProcessRewardTransactionNilTxShouldErr(t *testing.T) func TestRewardTxProcessor_ProcessRewardTransactionNilTxValueShouldErr(t *testing.T) { t.Parallel() - rtp, _ := NewRewardTxProcessor( + rtp, _ := rewardTransaction.NewRewardTxProcessor( &mock.AccountsStub{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(3), @@ -110,7 +111,7 @@ func TestRewardTxProcessor_ProcessRewardTransactionCannotCreateAddressShouldErr( t.Parallel() expectedErr := errors.New("cannot create address") - rtp, _ := NewRewardTxProcessor( + rtp, _ := rewardTransaction.NewRewardTxProcessor( &mock.AccountsStub{}, &mock.AddressConverterStub{ CreateAddressFromPublicKeyBytesCalled: func(pubKey []byte) (state.AddressContainer, error) { @@ -140,7 +141,7 @@ func TestRewardTxProcessor_ProcessRewardTransactionAddressNotInNodesShardShouldN shardCoord.ComputeIdCalled = func(address state.AddressContainer) uint32 { return uint32(5) } - rtp, _ := NewRewardTxProcessor( + rtp, _ := rewardTransaction.NewRewardTxProcessor( &mock.AccountsStub{ GetAccountWithJournalCalled: func(addressContainer state.AddressContainer) (state.AccountHandler, error) { getAccountWithJournalWasCalled = true @@ -169,7 +170,7 @@ func TestRewardTxProcessor_ProcessRewardTransactionCannotGetAccountShouldErr(t * t.Parallel() expectedErr := errors.New("cannot get account") - rtp, _ := NewRewardTxProcessor( + rtp, _ := 
rewardTransaction.NewRewardTxProcessor( &mock.AccountsStub{ GetAccountWithJournalCalled: func(addressContainer state.AddressContainer) (state.AccountHandler, error) { return nil, expectedErr @@ -195,7 +196,7 @@ func TestRewardTxProcessor_ProcessRewardTransactionCannotAddIntermediateTxsShoul t.Parallel() expectedErr := errors.New("cannot add intermediate transactions") - rtp, _ := NewRewardTxProcessor( + rtp, _ := rewardTransaction.NewRewardTxProcessor( &mock.AccountsStub{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(3), @@ -226,7 +227,7 @@ func TestRewardTxProcessor_ProcessRewardTransactionWrongTypeAssertionAccountHold }, } - rtp, _ := NewRewardTxProcessor( + rtp, _ := rewardTransaction.NewRewardTxProcessor( accountsDb, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(3), @@ -265,7 +266,7 @@ func TestRewardTxProcessor_ProcessRewardTransactionShouldWork(t *testing.T) { }, } - rtp, _ := NewRewardTxProcessor( + rtp, _ := rewardTransaction.NewRewardTxProcessor( accountsDb, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(3), From debc7f4b2cddffd0bfcdc39064e90c15bd8a7d58 Mon Sep 17 00:00:00 2001 From: miiu96 Date: Mon, 23 Sep 2019 17:23:52 +0300 Subject: [PATCH 127/234] EN-3981 - fix failing tests, do refactor --- cmd/node/main.go | 16 +++++++ consensus/mock/nodesCoordinatorMock.go | 4 ++ .../spos/commonSubround/subroundStartRound.go | 2 +- core/indexer/elasticsearch_test.go | 1 + core/mock/indexerMock.go | 8 ++++ integrationTests/mock/nodesCoordinatorMock.go | 4 ++ node/mock/nodesCoordinatorMock.go | 4 ++ process/block/metablock.go | 15 ------- process/mock/indexerMock.go | 8 ++++ process/mock/nodesCoordinatorMock.go | 4 ++ sharding/indexHashedNodesCoordinator.go | 4 +- sharding/indexHashedNodesCoordinator_test.go | 45 +++++++++++++++++++ sharding/interface.go | 2 +- 13 files changed, 98 insertions(+), 19 deletions(-) diff --git a/cmd/node/main.go b/cmd/node/main.go index 464ef6da959..3a2abd069af 100644 --- a/cmd/node/main.go +++ b/cmd/node/main.go @@ -670,6 +670,10 @@ func startNode(ctx *cli.Context, log *logger.Logger, version string) error { return err } + if shardCoordinator.SelfId() == sharding.MetachainShardId { + indexValidatorsListIfNeeded(elasticIndexer, nodesCoordinator) + } + vmAccountsDB, err := hooks.NewVMAccountsDB(stateComponents.AccountsAdapter, stateComponents.AddressConverter) if err != nil { return err @@ -741,6 +745,18 @@ func startNode(ctx *cli.Context, log *logger.Logger, version string) error { return nil } +func indexValidatorsListIfNeeded(elasticIndexer indexer.Indexer, coordinator sharding.NodesCoordinator) { + if elasticIndexer == nil || elasticIndexer.IsInterfaceNil() { + return + } + + validatorsPubKeys := coordinator.GetAllValidatorsPublicKeys() + + if validatorsPubKeys != nil { + go elasticIndexer.SaveValidatorsPubKeys(validatorsPubKeys) + } +} + func initMetrics( appStatusHandler core.AppStatusHandler, pubKey crypto.PublicKey, diff --git a/consensus/mock/nodesCoordinatorMock.go b/consensus/mock/nodesCoordinatorMock.go index 56d1b14bb22..be134a61934 100644 --- a/consensus/mock/nodesCoordinatorMock.go +++ b/consensus/mock/nodesCoordinatorMock.go @@ -37,6 +37,10 @@ func (ncm *NodesCoordinatorMock) ComputeValidatorsGroup( return list, nil } +func (ncm *NodesCoordinatorMock) GetAllValidatorsPublicKeys() map[uint32][][]byte { + return nil +} + func (ncm *NodesCoordinatorMock) GetValidatorsPublicKeys(randomness []byte, round uint64, shardId uint32) ([]string, error) { if ncm.GetValidatorsPublicKeysCalled != nil { 
return ncm.GetValidatorsPublicKeysCalled(randomness, round, shardId) diff --git a/consensus/spos/commonSubround/subroundStartRound.go b/consensus/spos/commonSubround/subroundStartRound.go index 361df5d4c6b..6ad76d77530 100644 --- a/consensus/spos/commonSubround/subroundStartRound.go +++ b/consensus/spos/commonSubround/subroundStartRound.go @@ -211,7 +211,7 @@ func (sr *SubroundStartRound) indexRoundIfNeeded(pubKeys []string) { return } - validatorsPubKeys := sr.NodesCoordinator().GetValidatorsPubKeys() + validatorsPubKeys := sr.NodesCoordinator().GetAllValidatorsPublicKeys() shardId := sr.ShardCoordinator().SelfId() signersIndexes := make([]uint64, 0) diff --git a/core/indexer/elasticsearch_test.go b/core/indexer/elasticsearch_test.go index 7e78c0f64c4..58cc8872c2f 100644 --- a/core/indexer/elasticsearch_test.go +++ b/core/indexer/elasticsearch_test.go @@ -276,6 +276,7 @@ func TestElasticIndexer_getSerializedElasticBlockAndHeaderHash(t *testing.T) { elasticBlock := indexer.Block{ Nonce: header.Nonce, + Round: header.Round, ShardID: header.ShardId, Hash: hex.EncodeToString(headerHash), Proposer: hex.EncodeToString([]byte("mock proposer")), diff --git a/core/mock/indexerMock.go b/core/mock/indexerMock.go index 94951d07fa4..573953ca880 100644 --- a/core/mock/indexerMock.go +++ b/core/mock/indexerMock.go @@ -19,6 +19,14 @@ func (im *IndexerMock) UpdateTPS(tpsBenchmark statistics.TPSBenchmark) { panic("implement me") } +func (im *IndexerMock) SaveRoundInfo(round int64, signersIndexes []uint64) { + panic("implement me") +} + +func (im *IndexerMock) SaveValidatorsPubKeys(validatorsPubKeys map[uint32][][]byte) { + panic("implement me") +} + // IsInterfaceNil returns true if there is no value under the interface func (im *IndexerMock) IsInterfaceNil() bool { if im == nil { diff --git a/integrationTests/mock/nodesCoordinatorMock.go b/integrationTests/mock/nodesCoordinatorMock.go index 560288d4016..bd27c139d30 100644 --- a/integrationTests/mock/nodesCoordinatorMock.go +++ b/integrationTests/mock/nodesCoordinatorMock.go @@ -10,6 +10,10 @@ type NodesCoordinatorMock struct { GetValidatorsRewardsAddressesCalled func(randomness []byte, round uint64, shardId uint32) ([]string, error) } +func (ncm *NodesCoordinatorMock) GetAllValidatorsPublicKeys() map[uint32][][]byte { + return nil +} + func (ncm *NodesCoordinatorMock) ComputeValidatorsGroup( randomness []byte, round uint64, diff --git a/node/mock/nodesCoordinatorMock.go b/node/mock/nodesCoordinatorMock.go index 2acd856559a..74c7a73bb12 100644 --- a/node/mock/nodesCoordinatorMock.go +++ b/node/mock/nodesCoordinatorMock.go @@ -12,6 +12,10 @@ type NodesCoordinatorMock struct { GetValidatorsRewardsAddressesCalled func(randomness []byte, round uint64, shardId uint32) ([]string, error) } +func (ncm *NodesCoordinatorMock) GetAllValidatorsPublicKeys() map[uint32][][]byte { + return nil +} + func (ncm *NodesCoordinatorMock) ComputeValidatorsGroup( randomness []byte, round uint64, diff --git a/process/block/metablock.go b/process/block/metablock.go index 981644f3b18..063da228db3 100644 --- a/process/block/metablock.go +++ b/process/block/metablock.go @@ -132,9 +132,6 @@ func NewMetaProcessor( mp.shardsHeadersNonce = &sync.Map{} - // TODO move this call from here in another place it's not ok to call this on constructor method - mp.indexValidatorsListIfNeeded() - return &mp, nil } @@ -270,18 +267,6 @@ func (mp *metaProcessor) indexBlock(metaBlock *block.MetaBlock, headerPool map[s //TODO: maybe index metablocks also? 
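The indexRoundIfNeeded change above resolves the consensus members to their positions inside the full eligible list returned by GetAllValidatorsPublicKeys before handing the result to the indexer. A minimal standalone sketch of that lookup, assuming the per-shard validator list is ordered as the coordinator returns it (names are illustrative, not the repository's API):

    package main

    import (
        "bytes"
        "fmt"
    )

    // signersIndexes maps each consensus public key to its position in the full
    // eligible-validators list of the shard; keys that are not found are skipped.
    func signersIndexes(consensusPubKeys []string, shardValidators [][]byte) []uint64 {
        indexes := make([]uint64, 0, len(consensusPubKeys))
        for _, pubKey := range consensusPubKeys {
            for idx, validatorKey := range shardValidators {
                if bytes.Equal([]byte(pubKey), validatorKey) {
                    indexes = append(indexes, uint64(idx))
                    break
                }
            }
        }
        return indexes
    }

    func main() {
        shardValidators := [][]byte{[]byte("pk0"), []byte("pk1"), []byte("pk2")}
        consensus := []string{"pk2", "pk0"}
        fmt.Println(signersIndexes(consensus, shardValidators)) // [2 0]
    }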
} -func (mp *metaProcessor) indexValidatorsListIfNeeded() { - if mp.core == nil || mp.core.Indexer() == nil { - return - } - - validatorsPubKeys := mp.nodesCoordinator.GetValidatorsPubKeys() - - if validatorsPubKeys != nil { - go mp.core.Indexer().SaveValidatorsPubKeys(validatorsPubKeys) - } -} - // removeBlockInfoFromPool removes the block info from associated pools func (mp *metaProcessor) removeBlockInfoFromPool(header *block.MetaBlock) error { if header == nil || header.IsInterfaceNil() { diff --git a/process/mock/indexerMock.go b/process/mock/indexerMock.go index 378e0f44f85..687a191c981 100644 --- a/process/mock/indexerMock.go +++ b/process/mock/indexerMock.go @@ -20,6 +20,14 @@ func (im *IndexerMock) UpdateTPS(tpsBenchmark statistics.TPSBenchmark) { panic("implement me") } +func (im *IndexerMock) SaveRoundInfo(round int64, signersIndexes []uint64) { + panic("implement me") +} + +func (im *IndexerMock) SaveValidatorsPubKeys(validatorsPubKeys map[uint32][][]byte) { + panic("implement me") +} + // IsInterfaceNil returns true if there is no value under the interface func (im *IndexerMock) IsInterfaceNil() bool { if im == nil { diff --git a/process/mock/nodesCoordinatorMock.go b/process/mock/nodesCoordinatorMock.go index c9bc56f6276..05f8ebc1083 100644 --- a/process/mock/nodesCoordinatorMock.go +++ b/process/mock/nodesCoordinatorMock.go @@ -50,6 +50,10 @@ func NewNodesCoordinatorMock() *NodesCoordinatorMock { } } +func (ncm *NodesCoordinatorMock) GetAllValidatorsPublicKeys() map[uint32][][]byte { + return nil +} + func (ncm *NodesCoordinatorMock) GetSelectedPublicKeys(selection []byte, shardId uint32) (publicKeys []string, err error) { if ncm.GetSelectedPublicKeysCalled != nil { return ncm.GetSelectedPublicKeysCalled(selection, shardId) diff --git a/sharding/indexHashedNodesCoordinator.go b/sharding/indexHashedNodesCoordinator.go index 82b82d37835..8c0f567655c 100644 --- a/sharding/indexHashedNodesCoordinator.go +++ b/sharding/indexHashedNodesCoordinator.go @@ -226,8 +226,8 @@ func (ihgs *indexHashedNodesCoordinator) GetSelectedPublicKeys(selection []byte, return publicKeys, nil } -// GetValidatorsPubKeys will return all validator public keys for all shards -func (ihgs *indexHashedNodesCoordinator) GetValidatorsPubKeys() map[uint32][][]byte { +// GetAllValidatorsPublicKeys will return all validators public keys for all shards +func (ihgs *indexHashedNodesCoordinator) GetAllValidatorsPublicKeys() map[uint32][][]byte { validatorsPubKeys := make(map[uint32][][]byte) for shardId, shardEligible := range ihgs.nodesMap { diff --git a/sharding/indexHashedNodesCoordinator_test.go b/sharding/indexHashedNodesCoordinator_test.go index 44cc4f0f587..2d676d81a08 100644 --- a/sharding/indexHashedNodesCoordinator_test.go +++ b/sharding/indexHashedNodesCoordinator_test.go @@ -571,3 +571,48 @@ func TestIndexHashedGroupSelector_GetValidatorWithPublicKeyShouldWork(t *testing assert.Equal(t, uint32(1), shardId) assert.Equal(t, []byte("addr2_shard1"), validator.Address()) } + +func TestIndexHashedGroupSelector_GetAllValidatorsPublicKeys(t *testing.T) { + t.Parallel() + + shardZeroId := uint32(0) + shardOneId := uint32(1) + expectedValidatorsPubKeys := map[uint32][][]byte{ + shardZeroId: {[]byte("pk0_shard0"), []byte("pk1_shard0"), []byte("pk2_shard0")}, + shardOneId: {[]byte("pk0_shard1"), []byte("pk1_shard1"), []byte("pk2_shard1")}, + sharding.MetachainShardId: {[]byte("pk0_meta"), []byte("pk1_meta"), []byte("pk2_meta")}, + } + + listMeta := []sharding.Validator{ + mock.NewValidatorMock(big.NewInt(1), 2, 
expectedValidatorsPubKeys[sharding.MetachainShardId][0], []byte("addr0_meta")), + mock.NewValidatorMock(big.NewInt(1), 2, expectedValidatorsPubKeys[sharding.MetachainShardId][1], []byte("addr1_meta")), + mock.NewValidatorMock(big.NewInt(1), 2, expectedValidatorsPubKeys[sharding.MetachainShardId][2], []byte("addr2_meta")), + } + listShard0 := []sharding.Validator{ + mock.NewValidatorMock(big.NewInt(1), 2, expectedValidatorsPubKeys[shardZeroId][0], []byte("addr0_shard0")), + mock.NewValidatorMock(big.NewInt(1), 2, expectedValidatorsPubKeys[shardZeroId][1], []byte("addr1_shard0")), + mock.NewValidatorMock(big.NewInt(1), 2, expectedValidatorsPubKeys[shardZeroId][2], []byte("addr2_shard0")), + } + listShard1 := []sharding.Validator{ + mock.NewValidatorMock(big.NewInt(1), 2, expectedValidatorsPubKeys[shardOneId][0], []byte("addr0_shard1")), + mock.NewValidatorMock(big.NewInt(1), 2, expectedValidatorsPubKeys[shardOneId][1], []byte("addr1_shard1")), + mock.NewValidatorMock(big.NewInt(1), 2, expectedValidatorsPubKeys[shardOneId][2], []byte("addr2_shard1")), + } + + nodesMap := make(map[uint32][]sharding.Validator) + nodesMap[sharding.MetachainShardId] = listMeta + nodesMap[shardZeroId] = listShard0 + nodesMap[shardOneId] = listShard1 + + ihgs, _ := sharding.NewIndexHashedNodesCoordinator( + 1, + 1, + &mock.HasherMock{}, + shardZeroId, + 2, + nodesMap, + ) + + allValidatorsPublicKeys := ihgs.GetAllValidatorsPublicKeys() + assert.Equal(t, expectedValidatorsPubKeys, allValidatorsPublicKeys) +} diff --git a/sharding/interface.go b/sharding/interface.go index 4b295327ef8..84487325e99 100644 --- a/sharding/interface.go +++ b/sharding/interface.go @@ -39,7 +39,7 @@ type NodesCoordinator interface { // PublicKeysSelector allows retrieval of eligible validators public keys type PublicKeysSelector interface { - GetValidatorsPubKeys() map[uint32][][]byte + GetAllValidatorsPublicKeys() map[uint32][][]byte GetSelectedPublicKeys(selection []byte, shardId uint32) (publicKeys []string, err error) GetValidatorsPublicKeys(randomness []byte, round uint64, shardId uint32) ([]string, error) GetValidatorsRewardsAddresses(randomness []byte, round uint64, shardId uint32) ([]string, error) From f45c84468b5fae87d8b5436b050be3eb79a17a3c Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Mon, 23 Sep 2019 20:10:39 +0300 Subject: [PATCH 128/234] process: fix do not flag used miniblocks before commit --- process/block/baseProcess.go | 2 ++ process/block/shardblock.go | 22 +++++++++++++++------- 2 files changed, 17 insertions(+), 7 deletions(-) diff --git a/process/block/baseProcess.go b/process/block/baseProcess.go index 2e9268b6984..de2c745f639 100644 --- a/process/block/baseProcess.go +++ b/process/block/baseProcess.go @@ -216,6 +216,8 @@ func (bp *baseProcessor) isHdrConstructionValid(currHdr, prevHdr data.HeaderHand } if currHdr.GetNonce() != prevHdr.GetNonce()+1 { + // todo remove after test + log.Info(fmt.Sprintf("local nonce is %d, node received block with nonce %d from other chain", prevHdr.GetNonce(), currHdr.GetNonce())) return process.ErrWrongNonceInBlock } diff --git a/process/block/shardblock.go b/process/block/shardblock.go index a06f83c8930..446d8c36146 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -868,8 +868,8 @@ func (sp *shardProcessor) getProcessedMetaBlocksFromHeader(header *block.Header) if !ok { return nil, process.ErrWrongTypeAssertion } - - log.Debug(fmt.Sprintf("meta header nonce: %d\n", metaBlock.Nonce)) + // todo: change to debug after test + log.Info(fmt.Sprintf("meta 
header nonce: %d\n", metaBlock.Nonce)) crossMiniBlockHashes := metaBlock.GetMiniBlockHeadersWithDst(sp.shardCoordinator.SelfId()) for key := range miniBlockHashes { @@ -929,6 +929,8 @@ func (sp *shardProcessor) getProcessedMetaBlocks( processedMetaHdrs := make([]data.HeaderHandler, 0) for _, metaBlockKey := range usedMetaBlockHashes { + processedMBs := make(map[string]bool) + obj, _ := sp.dataPool.MetaBlocks().Peek(metaBlockKey) if obj == nil { return nil, process.ErrNilMetaBlockHeader @@ -938,17 +940,22 @@ func (sp *shardProcessor) getProcessedMetaBlocks( if !ok { return nil, process.ErrWrongTypeAssertion } - - log.Debug(fmt.Sprintf("meta header nonce: %d\n", metaBlock.Nonce)) + // todo: change to debug after test + log.Info(fmt.Sprintf("meta header nonce: %d\n", metaBlock.Nonce)) crossMiniBlockHashes := metaBlock.GetMiniBlockHeadersWithDst(sp.shardCoordinator.SelfId()) for key := range miniBlockHashes { + + hash := miniBlockHashes[key] + processedMBs[string(hash)] = metaBlock.GetMiniBlockProcessed(hash) + _, ok = crossMiniBlockHashes[string(miniBlockHashes[key])] if !ok { continue } - metaBlock.SetMiniBlockProcessed(miniBlockHashes[key], true) + processedMBs[string(hash)] = true + delete(miniBlockHashes, key) } @@ -956,7 +963,7 @@ func (sp *shardProcessor) getProcessedMetaBlocks( processedAll := true for key := range crossMiniBlockHashes { - if !metaBlock.GetMiniBlockProcessed([]byte(key)) { + if !processedMBs[key] { processedAll = false break } @@ -1014,7 +1021,8 @@ func (sp *shardProcessor) removeProcessedMetablocksFromPool(processedMetaHdrs [] sp.dataPool.MetaBlocks().Remove(headerHash) sp.dataPool.HeadersNonces().Remove(hdr.GetNonce(), sharding.MetachainShardId) - log.Debug(fmt.Sprintf("metaBlock with round %d nonce %d and hash %s has been processed completely and removed from pool\n", + // todo: change to debug after test + log.Info(fmt.Sprintf("metaBlock with round %d nonce %d and hash %s has been processed completely and removed from pool\n", hdr.GetRound(), hdr.GetNonce(), core.ToB64(headerHash))) From d6d74a7c10b3937e6ef1cb72492cc6e8053febf2 Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Mon, 23 Sep 2019 20:36:22 +0300 Subject: [PATCH 129/234] process: iterate over all miniblocks from metablock --- process/block/shardblock.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/process/block/shardblock.go b/process/block/shardblock.go index 446d8c36146..33bb32469db 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -944,17 +944,17 @@ func (sp *shardProcessor) getProcessedMetaBlocks( log.Info(fmt.Sprintf("meta header nonce: %d\n", metaBlock.Nonce)) crossMiniBlockHashes := metaBlock.GetMiniBlockHeadersWithDst(sp.shardCoordinator.SelfId()) - for key := range miniBlockHashes { - - hash := miniBlockHashes[key] - processedMBs[string(hash)] = metaBlock.GetMiniBlockProcessed(hash) + for hash := range crossMiniBlockHashes { + processedMBs[hash] = metaBlock.GetMiniBlockProcessed([]byte(hash)) + } - _, ok = crossMiniBlockHashes[string(miniBlockHashes[key])] + for key := range miniBlockHashes { + _, ok = crossMiniBlockHashes[string(miniBlockHashes[key])] if !ok { continue } - processedMBs[string(hash)] = true + processedMBs[string(miniBlockHashes[key])] = true delete(miniBlockHashes, key) } From 15cc083b670844df3afb6a29d027df04a89ce78f Mon Sep 17 00:00:00 2001 From: bogdan-rosianu Date: Tue, 24 Sep 2019 17:44:13 +0300 Subject: [PATCH 130/234] EN-3754: added tests for metachain rewards --- .../block/executingRewardMiniblocks_test.go | 54 
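The two fixes above stop flagging miniblocks as processed on the cached metablock before commit: the flags are first seeded from every cross miniblock of the metablock, the ones referenced by the current shard header are then marked in a local map, and only fully covered metablocks are reported. A rough standalone sketch of that bookkeeping, with plain maps standing in for the metablock and header (illustrative names only):

    package main

    import "fmt"

    // metaBlockFullyProcessed seeds a local map with the flags already stored in
    // the metablock, marks the miniblocks referenced by the current shard header,
    // and reports whether every cross miniblock destined to this shard is covered.
    func metaBlockFullyProcessed(
        crossMiniBlockHashes map[string]bool, // hash -> processed flag taken from the metablock
        headerMiniBlockHashes map[string]struct{}, // miniblock hashes used by the current header
    ) bool {
        processedMBs := make(map[string]bool)
        for hash, alreadyProcessed := range crossMiniBlockHashes {
            processedMBs[hash] = alreadyProcessed
        }
        for hash := range headerMiniBlockHashes {
            if _, isCross := crossMiniBlockHashes[hash]; isCross {
                processedMBs[hash] = true
            }
        }
        for hash := range crossMiniBlockHashes {
            if !processedMBs[hash] {
                return false
            }
        }
        return true
    }

    func main() {
        cross := map[string]bool{"mb1": false, "mb2": true}
        used := map[string]struct{}{"mb1": {}}
        fmt.Println(metaBlockFullyProcessed(cross, used)) // true
    }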
+++++++++++++++---- 1 file changed, 45 insertions(+), 9 deletions(-) diff --git a/integrationTests/multiShard/block/executingRewardMiniblocks_test.go b/integrationTests/multiShard/block/executingRewardMiniblocks_test.go index 1f98ad91bba..ac330319db0 100644 --- a/integrationTests/multiShard/block/executingRewardMiniblocks_test.go +++ b/integrationTests/multiShard/block/executingRewardMiniblocks_test.go @@ -73,7 +73,8 @@ func TestExecuteBlocksWithTransactionsAndCheckRewards(t *testing.T) { randomness := generateInitialRandomness(uint32(nbShards)) var headers map[uint32]data.HeaderHandler var consensusNodes map[uint32][]*integrationTests.TestProcessorNode - mapRewardsForAddress := make(map[string]uint32) + mapRewardsForShardAddresses := make(map[string]uint32) + mapRewardsForMetachainAddresses := make(map[string]uint32) nbTxsForLeaderAddress := make(map[string]uint32) for i := 0; i < nbBlocksProduced; i++ { @@ -82,15 +83,15 @@ func TestExecuteBlocksWithTransactionsAndCheckRewards(t *testing.T) { for shardId, consensusGroup := range consensusNodes { shardRewardData := consensusGroup[0].SpecialAddressHandler.ConsensusShardRewardData() addrRewards := shardRewardData.Addresses - updateExpectedRewards(mapRewardsForAddress, addrRewards) + updateExpectedRewards(mapRewardsForShardAddresses, addrRewards) nbTxs := getTransactionsFromHeaderInShard(t, headers, shardId) - - // without metachain nodes for now if len(addrRewards) > 0 { updateNumberTransactionsProposed(t, nbTxsForLeaderAddress, addrRewards[0], nbTxs) } } + updateRewardsForMetachain(mapRewardsForMetachainAddresses, consensusNodes[0][0]) + indexesProposers := getBlockProposersIndexes(consensusNodes, nodesMap) integrationTests.VerifyNodesHaveHeaders(t, headers, nodesMap) integrationTests.SyncAllShardsWithRoundBlock(t, nodesMap, indexesProposers, round) @@ -100,7 +101,8 @@ func TestExecuteBlocksWithTransactionsAndCheckRewards(t *testing.T) { time.Sleep(time.Second) - verifyRewards(t, nodesMap, mapRewardsForAddress, nbTxsForLeaderAddress, gasPrice, gasLimit) + verifyRewardsForShards(t, nodesMap, mapRewardsForShardAddresses, nbTxsForLeaderAddress, gasPrice, gasLimit) + verifyRewardsForMetachain(t, mapRewardsForMetachainAddresses, nodesMap) } func TestExecuteBlocksWithoutTransactionsAndCheckRewards(t *testing.T) { @@ -148,7 +150,8 @@ func TestExecuteBlocksWithoutTransactionsAndCheckRewards(t *testing.T) { randomness := generateInitialRandomness(uint32(nbShards)) var headers map[uint32]data.HeaderHandler var consensusNodes map[uint32][]*integrationTests.TestProcessorNode - mapRewardsForAddress := make(map[string]uint32) + mapRewardsForShardAddresses := make(map[string]uint32) + mapRewardsForMetachainAddresses := make(map[string]uint32) nbTxsForLeaderAddress := make(map[string]uint32) for i := 0; i < nbBlocksProduced; i++ { @@ -166,9 +169,11 @@ func TestExecuteBlocksWithoutTransactionsAndCheckRewards(t *testing.T) { } addrRewards := shardRewardsData.Addresses - updateExpectedRewards(mapRewardsForAddress, addrRewards) + updateExpectedRewards(mapRewardsForShardAddresses, addrRewards) } + updateRewardsForMetachain(mapRewardsForMetachainAddresses, consensusNodes[0][0]) + indexesProposers := getBlockProposersIndexes(consensusNodes, nodesMap) integrationTests.VerifyNodesHaveHeaders(t, headers, nodesMap) integrationTests.SyncAllShardsWithRoundBlock(t, nodesMap, indexesProposers, round) @@ -178,7 +183,8 @@ func TestExecuteBlocksWithoutTransactionsAndCheckRewards(t *testing.T) { time.Sleep(time.Second) - verifyRewards(t, nodesMap, mapRewardsForAddress, 
nbTxsForLeaderAddress, 0, 0) + verifyRewardsForShards(t, nodesMap, mapRewardsForShardAddresses, nbTxsForLeaderAddress, 0, 0) + verifyRewardsForMetachain(t, mapRewardsForMetachainAddresses, nodesMap) } func generateIntraShardTransactions( @@ -299,7 +305,37 @@ func updateNumberTransactionsProposed( transactionsForLeader[addressProposer] += nbTransactions } -func verifyRewards( +func updateRewardsForMetachain(rewardsMap map[string]uint32, consensusNode *integrationTests.TestProcessorNode) { + metaRewardDataSlice := consensusNode.SpecialAddressHandler.ConsensusMetaRewardData() + if len(metaRewardDataSlice) > 0 { + for _, metaRewardData := range metaRewardDataSlice { + for _, addr := range metaRewardData.Addresses { + rewardsMap[addr]++ + } + } + } +} + +func verifyRewardsForMetachain( + t *testing.T, + mapRewardsForMeta map[string]uint32, + nodes map[uint32][]*integrationTests.TestProcessorNode, +) { + + // TODO this should be read from protocol config + rewardValue := uint32(1000) + + for metaAddr, numOfTimesRewarded := range mapRewardsForMeta { + addrContainer, _ := integrationTests.TestAddressConverter.CreateAddressFromPublicKeyBytes([]byte(metaAddr)) + acc, err := nodes[0][0].AccntState.GetExistingAccount(addrContainer) + assert.Nil(t, err) + + expectedBalance := big.NewInt(int64(numOfTimesRewarded * rewardValue)) + assert.Equal(t, expectedBalance, acc.(*state.Account).Balance) + } +} + +func verifyRewardsForShards( t *testing.T, nodesMap map[uint32][]*integrationTests.TestProcessorNode, mapRewardsForAddress map[string]uint32, From 50b736938e70532ad03c2a19c23520e79c8a0681 Mon Sep 17 00:00:00 2001 From: bogdan-rosianu Date: Tue, 24 Sep 2019 18:04:53 +0300 Subject: [PATCH 131/234] EN-4120: fixes after review --- process/block/preprocess/rewardsHandler_test.go | 1 + process/rewardTransaction/process_test.go | 10 +++++----- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/process/block/preprocess/rewardsHandler_test.go b/process/block/preprocess/rewardsHandler_test.go index 35e0407476c..eb5512fcc4b 100644 --- a/process/block/preprocess/rewardsHandler_test.go +++ b/process/block/preprocess/rewardsHandler_test.go @@ -526,6 +526,7 @@ func TestRewardsHandler_CreateBlockStartedShouldCreateProtocolReward(t *testing. 
) assert.Nil(t, th.protocolRewards) + th.CreateBlockStarted() assert.NotNil(t, th.protocolRewards) } diff --git a/process/rewardTransaction/process_test.go b/process/rewardTransaction/process_test.go index 758706145c1..800940e1431 100644 --- a/process/rewardTransaction/process_test.go +++ b/process/rewardTransaction/process_test.go @@ -14,7 +14,7 @@ import ( "github.com/stretchr/testify/assert" ) -func TestNewRewardTxProcessorNilAccountsDbShouldErr(t *testing.T) { +func TestNewRewardTxProcessor_NilAccountsDbShouldErr(t *testing.T) { t.Parallel() rtp, err := rewardTransaction.NewRewardTxProcessor( @@ -27,7 +27,7 @@ func TestNewRewardTxProcessorNilAccountsDbShouldErr(t *testing.T) { assert.Equal(t, process.ErrNilAccountsAdapter, err) } -func TestNewRewardTxProcessorNilAddressConverterShouldErr(t *testing.T) { +func TestNewRewardTxProcessor_NilAddressConverterShouldErr(t *testing.T) { t.Parallel() rtp, err := rewardTransaction.NewRewardTxProcessor( @@ -40,7 +40,7 @@ func TestNewRewardTxProcessorNilAddressConverterShouldErr(t *testing.T) { assert.Equal(t, process.ErrNilAddressConverter, err) } -func TestNewRewardTxProcessorNilShardCoordinatorShouldErr(t *testing.T) { +func TestNewRewardTxProcessor_NilShardCoordinatorShouldErr(t *testing.T) { t.Parallel() rtp, err := rewardTransaction.NewRewardTxProcessor( @@ -53,7 +53,7 @@ func TestNewRewardTxProcessorNilShardCoordinatorShouldErr(t *testing.T) { assert.Equal(t, process.ErrNilShardCoordinator, err) } -func TestNewRewardTxProcessorNilRewardTxForwarderShouldErr(t *testing.T) { +func TestNewRewardTxProcessor_NilRewardTxForwarderShouldErr(t *testing.T) { t.Parallel() rtp, err := rewardTransaction.NewRewardTxProcessor( @@ -66,7 +66,7 @@ func TestNewRewardTxProcessorNilRewardTxForwarderShouldErr(t *testing.T) { assert.Equal(t, process.ErrNilIntermediateTransactionHandler, err) } -func TestNewRewardTxProcessorOkValsShouldWork(t *testing.T) { +func TestNewRewardTxProcessor_OkValsShouldWork(t *testing.T) { t.Parallel() rtp, err := rewardTransaction.NewRewardTxProcessor( From 1bbe46578dfc0e3dcffd3ddbc0601f46de9f13b2 Mon Sep 17 00:00:00 2001 From: bogdan-rosianu Date: Wed, 25 Sep 2019 13:46:08 +0300 Subject: [PATCH 132/234] EN-4120: fixes after review --- cmd/node/main.go | 7 ++- statusHandler/view/termuic/termuiConsole.go | 55 +++++++++++++++---- .../termuic/termuiRenders/widgetsRender.go | 11 +++- 3 files changed, 57 insertions(+), 16 deletions(-) diff --git a/cmd/node/main.go b/cmd/node/main.go index 1e666436696..21543bd60b1 100644 --- a/cmd/node/main.go +++ b/cmd/node/main.go @@ -558,6 +558,9 @@ func startNode(ctx *cli.Context, log *logger.Logger, version string) error { return err } + txSignPk := factory.GetPkEncoded(cryptoComponents.TxSignPubKey) + coreComponents.StatusHandler.SetStringValue(core.MetricPublicKeyTxSign, txSignPk) + sessionInfoFileOutput := fmt.Sprintf("%s:%s\n%s:%s\n%s:%s\n%s:%v\n%s:%s\n%s:%v\n", "PkBlockSign", factory.GetPkEncoded(pubKey), "PkAccount", factory.GetPkEncoded(cryptoComponents.TxSignPubKey), @@ -575,9 +578,6 @@ func startNode(ctx *cli.Context, log *logger.Logger, version string) error { } } - txSignPk := factory.GetPkEncoded(cryptoComponents.TxSignPubKey) - coreComponents.StatusHandler.SetStringValue(core.MetricPublicKeyTxSign, txSignPk) - err = ioutil.WriteFile(filepath.Join(logDirectory, "session.info"), []byte(sessionInfoFileOutput), os.ModePerm) log.LogIfError(err) @@ -749,6 +749,7 @@ func initMetrics( appStatusHandler.SetUInt64Value(core.MetricNumShardHeadersFromPool, initUint) 
appStatusHandler.SetUInt64Value(core.MetricNumShardHeadersProcessed, initUint) appStatusHandler.SetUInt64Value(core.MetricNumTimesInForkChoice, initUint) + appStatusHandler.SetStringValue(core.MetricPublicKeyTxSign, initString) } func startStatusPolling( diff --git a/statusHandler/view/termuic/termuiConsole.go b/statusHandler/view/termuic/termuiConsole.go index 383a60590b2..bf738008009 100644 --- a/statusHandler/view/termuic/termuiConsole.go +++ b/statusHandler/view/termuic/termuiConsole.go @@ -3,6 +3,8 @@ package termuic import ( "os" "os/signal" + "sync" + "sync/atomic" "syscall" "time" @@ -16,6 +18,10 @@ import ( //refreshInterval is used for a ticker that refresh termui console at a specific interval const refreshInterval = time.Second +// numOfTicksBeforeRedrawing represents the number of ticks which have to pass until a fake resize will be made +// in order to clean the unwanted appeared characters +const numOfTicksBeforeRedrawing = 10 + var log = logger.DefaultLogger() // TermuiConsole data where is store data from handler @@ -23,6 +29,7 @@ type TermuiConsole struct { presenter view.Presenter consoleRender TermuiRender grid *termuiRenders.DrawableContainer + mutRefresh *sync.RWMutex } //NewTermuiConsole method is used to return a new TermuiConsole structure @@ -32,7 +39,8 @@ func NewTermuiConsole(presenter view.Presenter) (*TermuiConsole, error) { } tc := TermuiConsole{ - presenter: presenter, + presenter: presenter, + mutRefresh: &sync.RWMutex{}, } return &tc, nil @@ -75,14 +83,12 @@ func (tc *TermuiConsole) eventLoop() { signal.Notify(sigTerm, os.Interrupt, syscall.SIGTERM) tc.consoleRender.RefreshData() + ticksCounter := uint32(0) for { select { case <-time.After(refreshInterval): - tc.consoleRender.RefreshData() - ui.Clear() - ui.Render(tc.grid.TopLeft(), tc.grid.TopRight(), tc.grid.Bottom()) - + tc.doChanges(&ticksCounter) case <-sigTerm: ui.Clear() return @@ -95,14 +101,43 @@ func (tc *TermuiConsole) eventLoop() { func (tc *TermuiConsole) processUiEvents(e ui.Event) { switch e.ID { case "": - payload := e.Payload.(ui.Resize) - tc.grid.SetRectangle(0, 0, payload.Width, payload.Height) - ui.Clear() - ui.Render(tc.grid.TopLeft(), tc.grid.TopRight(), tc.grid.Bottom()) - + tc.doResizeEvent(e) case "": ui.Close() StopApplication() return } } + +func (tc *TermuiConsole) doChanges(counter *uint32) { + atomic.AddUint32(counter, 1) + if atomic.LoadUint32(counter) > numOfTicksBeforeRedrawing { + tc.doFakeResize() + atomic.StoreUint32(counter, 0) + } else { + tc.refreshWindow() + } +} + +func (tc *TermuiConsole) doResizeEvent(e ui.Event) { + payload := e.Payload.(ui.Resize) + tc.doResize(payload.Width, payload.Height) +} + +func (tc *TermuiConsole) doFakeResize() { + tc.doResize(ui.TerminalDimensions()) +} + +func (tc *TermuiConsole) doResize(width int, height int) { + tc.grid.SetRectangle(0, 0, width, height) + tc.refreshWindow() +} + +func (tc *TermuiConsole) refreshWindow() { + tc.mutRefresh.Lock() + defer tc.mutRefresh.Unlock() + + tc.consoleRender.RefreshData() + ui.Clear() + ui.Render(tc.grid.TopLeft(), tc.grid.TopRight(), tc.grid.Bottom()) +} diff --git a/statusHandler/view/termuic/termuiRenders/widgetsRender.go b/statusHandler/view/termuic/termuiRenders/widgetsRender.go index 4151f6d3a5a..6b4064365c0 100644 --- a/statusHandler/view/termuic/termuiRenders/widgetsRender.go +++ b/statusHandler/view/termuic/termuiRenders/widgetsRender.go @@ -273,10 +273,15 @@ func (wr *WidgetsRender) prepareListWithLogsForDisplay() { wr.lLog.WrapText = true } -func (wr *WidgetsRender) 
prepareLogLines(logData []string, size int) []string { +func (wr *WidgetsRender) prepareLogLines(logData []string, maxSize int) []string { logDataLen := len(logData) - if logDataLen > size { - return logData[logDataLen-size : logDataLen] + maxSize = maxSize - 2 // decrease 2 units as the total maxSize of the log list includes also the header and the footer + if maxSize < 0 { + maxSize = 0 + } + + if logDataLen > maxSize { + return logData[(logDataLen - maxSize):logDataLen] } return logData From 5454d0602d14eacd337b1a8aaf4b3ce6042cfd58 Mon Sep 17 00:00:00 2001 From: miiu96 Date: Wed, 25 Sep 2019 13:51:18 +0300 Subject: [PATCH 133/234] EN-3981 : small improve --- consensus/spos/commonSubround/subroundStartRound.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/consensus/spos/commonSubround/subroundStartRound.go b/consensus/spos/commonSubround/subroundStartRound.go index 6ad76d77530..7589e3e28f5 100644 --- a/consensus/spos/commonSubround/subroundStartRound.go +++ b/consensus/spos/commonSubround/subroundStartRound.go @@ -223,7 +223,8 @@ func (sr *SubroundStartRound) indexRoundIfNeeded(pubKeys []string) { } } - sr.indexer.SaveRoundInfo(sr.Rounder().Index(), signersIndexes) + round := sr.Rounder().Index() + go sr.indexer.SaveRoundInfo(round, signersIndexes) } func (sr *SubroundStartRound) generateNextConsensusGroup(roundIndex int64) error { From 5650ed77e0155d4c68bfa477ddf76a8b7afae949 Mon Sep 17 00:00:00 2001 From: iulianpascalau Date: Wed, 25 Sep 2019 14:51:29 +0300 Subject: [PATCH 134/234] added sync integration test for bootstrapping a meta node that receives a shard block that has a higher round than the current round that the node is syncing --- .../metablock/blocksDissemination_test.go | 107 ++++++++--------- integrationTests/sync/basicSync_test.go | 10 -- integrationTests/sync/common.go | 110 ++++++++++++++++++ integrationTests/sync/edgeCases_test.go | 101 ++++++++++++++++ integrationTests/testInitializer.go | 18 +++ 5 files changed, 284 insertions(+), 62 deletions(-) create mode 100644 integrationTests/sync/common.go create mode 100644 integrationTests/sync/edgeCases_test.go diff --git a/integrationTests/multiShard/metablock/blocksDissemination_test.go b/integrationTests/multiShard/metablock/blocksDissemination_test.go index 23a9e90a2f3..a431f5673e9 100644 --- a/integrationTests/multiShard/metablock/blocksDissemination_test.go +++ b/integrationTests/multiShard/metablock/blocksDissemination_test.go @@ -11,7 +11,7 @@ import ( "github.com/ElrondNetwork/elrond-go/dataRetriever/resolvers" "github.com/ElrondNetwork/elrond-go/integrationTests" "github.com/ElrondNetwork/elrond-go/process/factory" - "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/stretchr/testify/assert" ) @@ -108,13 +108,16 @@ func TestHeadersAreResolvedByMetachainAndShard(t *testing.T) { shardHeaderBytes, _ := integrationTests.TestMarshalizer.Marshal(hdr) shardHeaderHash := integrationTests.TestHasher.Compute(string(shardHeaderBytes)) nodes[0].ShardDataPool.Headers().HasOrAdd(shardHeaderHash, hdr) + m := &dataPool.ShardIdHashSyncMap{} + m.Store(0, shardHeaderHash) + nodes[0].ShardDataPool.HeadersNonces().Merge(1, m) maxNumRequests := 5 for i := 0; i < maxNumRequests; i++ { for j := 0; j < numMetaNodes; j++ { resolver, err := nodes[j+1].ResolverFinder.CrossShardResolver(factory.ShardHeadersForMetachainTopic, senderShard) assert.Nil(t, err) - _ = resolver.RequestDataFromHash(shardHeaderHash) + _ = resolver.(*resolvers.HeaderResolver).RequestDataFromNonce(1) } 
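The reworked dissemination test above registers the proposed shard header under its nonce (Store on a ShardIdHashSyncMap, then Merge into HeadersNonces) so the metachain nodes can fetch it with RequestDataFromNonce. A stripped-down sketch of such a nonce-keyed registry, using plain maps and a mutex instead of the pool types (names are illustrative):

    package main

    import (
        "fmt"
        "sync"
    )

    // headersByNonce keeps, for every nonce, the known header hash per shard,
    // mimicking how the test seeds the headers-nonces pool before requesting by nonce.
    type headersByNonce struct {
        mut   sync.RWMutex
        store map[uint64]map[uint32][]byte
    }

    func newHeadersByNonce() *headersByNonce {
        return &headersByNonce{store: make(map[uint64]map[uint32][]byte)}
    }

    // Merge adds (shardId -> headerHash) entries under the given nonce.
    func (h *headersByNonce) Merge(nonce uint64, entries map[uint32][]byte) {
        h.mut.Lock()
        defer h.mut.Unlock()
        if h.store[nonce] == nil {
            h.store[nonce] = make(map[uint32][]byte)
        }
        for shardId, hash := range entries {
            h.store[nonce][shardId] = hash
        }
    }

    // HashForNonce is the lookup a nonce-based request would consult.
    func (h *headersByNonce) HashForNonce(nonce uint64, shardId uint32) ([]byte, bool) {
        h.mut.RLock()
        defer h.mut.RUnlock()
        hash, ok := h.store[nonce][shardId]
        return hash, ok
    }

    func main() {
        pool := newHeadersByNonce()
        pool.Merge(1, map[uint32][]byte{0: []byte("shardHeaderHash")})
        hash, ok := pool.HashForNonce(1, 0)
        fmt.Println(string(hash), ok) // shardHeaderHash true
    }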
fmt.Println(integrationTests.MakeDisplayTable(nodes)) @@ -127,54 +130,54 @@ func TestHeadersAreResolvedByMetachainAndShard(t *testing.T) { assert.Equal(t, int32(1), atomic.LoadInt32(&n.CounterHdrRecv)) } - fmt.Println("Generating meta header, save it in meta datapools and shard 0 node requests it after its hash...") - _, metaHdr, _ := nodes[1].ProposeBlock(1, 1) - metaHeaderBytes, _ := integrationTests.TestMarshalizer.Marshal(metaHdr) - metaHeaderHash := integrationTests.TestHasher.Compute(string(metaHeaderBytes)) - for i := 0; i < numMetaNodes; i++ { - nodes[i+1].MetaDataPool.MetaChainBlocks().HasOrAdd(metaHeaderHash, metaHdr) - } - - for i := 0; i < maxNumRequests; i++ { - resolver, err := nodes[0].ResolverFinder.MetaChainResolver(factory.MetachainBlocksTopic) - assert.Nil(t, err) - _ = resolver.RequestDataFromHash(metaHeaderHash) - - fmt.Println(integrationTests.MakeDisplayTable(nodes)) - - time.Sleep(time.Second) - } - - //all node should have received the meta header - for _, n := range nodes { - assert.Equal(t, int32(1), atomic.LoadInt32(&n.CounterMetaRcv)) - } - - fmt.Println("Generating meta header, save it in meta datapools and shard 0 node requests it after its nonce...") - _, metaHdr2, _ := nodes[1].ProposeBlock(2, 2) - metaHdr2.SetNonce(64) - metaHeaderBytes2, _ := integrationTests.TestMarshalizer.Marshal(metaHdr2) - metaHeaderHash2 := integrationTests.TestHasher.Compute(string(metaHeaderBytes2)) - for i := 0; i < numMetaNodes; i++ { - nodes[i+1].MetaDataPool.MetaChainBlocks().HasOrAdd(metaHeaderHash2, metaHdr2) - - syncMap := &dataPool.ShardIdHashSyncMap{} - syncMap.Store(sharding.MetachainShardId, metaHeaderHash2) - nodes[i+1].MetaDataPool.HeadersNonces().Merge(metaHdr2.GetNonce(), syncMap) - } - - for i := 0; i < maxNumRequests; i++ { - resolver, err := nodes[0].ResolverFinder.MetaChainResolver(factory.MetachainBlocksTopic) - assert.Nil(t, err) - _ = resolver.(*resolvers.HeaderResolver).RequestDataFromNonce(metaHdr2.GetNonce()) - - fmt.Println(integrationTests.MakeDisplayTable(nodes)) - - time.Sleep(time.Second) - } - - //all node should have received the meta header - for _, n := range nodes { - assert.Equal(t, int32(2), atomic.LoadInt32(&n.CounterMetaRcv)) - } + //fmt.Println("Generating meta header, save it in meta datapools and shard 0 node requests it after its hash...") + //_, metaHdr, _ := nodes[1].ProposeBlock(1, 1) + //metaHeaderBytes, _ := integrationTests.TestMarshalizer.Marshal(metaHdr) + //metaHeaderHash := integrationTests.TestHasher.Compute(string(metaHeaderBytes)) + //for i := 0; i < numMetaNodes; i++ { + // nodes[i+1].MetaDataPool.MetaChainBlocks().HasOrAdd(metaHeaderHash, metaHdr) + //} + // + //for i := 0; i < maxNumRequests; i++ { + // resolver, err := nodes[0].ResolverFinder.MetaChainResolver(factory.MetachainBlocksTopic) + // assert.Nil(t, err) + // _ = resolver.RequestDataFromHash(metaHeaderHash) + // + // fmt.Println(integrationTests.MakeDisplayTable(nodes)) + // + // time.Sleep(time.Second) + //} + // + ////all node should have received the meta header + //for _, n := range nodes { + // assert.Equal(t, int32(1), atomic.LoadInt32(&n.CounterMetaRcv)) + //} + // + //fmt.Println("Generating meta header, save it in meta datapools and shard 0 node requests it after its nonce...") + //_, metaHdr2, _ := nodes[1].ProposeBlock(2, 2) + //metaHdr2.SetNonce(64) + //metaHeaderBytes2, _ := integrationTests.TestMarshalizer.Marshal(metaHdr2) + //metaHeaderHash2 := integrationTests.TestHasher.Compute(string(metaHeaderBytes2)) + //for i := 0; i < numMetaNodes; i++ { + 
// nodes[i+1].MetaDataPool.MetaChainBlocks().HasOrAdd(metaHeaderHash2, metaHdr2) + // + // syncMap := &dataPool.ShardIdHashSyncMap{} + // syncMap.Store(sharding.MetachainShardId, metaHeaderHash2) + // nodes[i+1].MetaDataPool.HeadersNonces().Merge(metaHdr2.GetNonce(), syncMap) + //} + // + //for i := 0; i < maxNumRequests; i++ { + // resolver, err := nodes[0].ResolverFinder.MetaChainResolver(factory.MetachainBlocksTopic) + // assert.Nil(t, err) + // _ = resolver.(*resolvers.HeaderResolver).RequestDataFromNonce(metaHdr2.GetNonce()) + // + // fmt.Println(integrationTests.MakeDisplayTable(nodes)) + // + // time.Sleep(time.Second) + //} + // + ////all node should have received the meta header + //for _, n := range nodes { + // assert.Equal(t, int32(2), atomic.LoadInt32(&n.CounterMetaRcv)) + //} } diff --git a/integrationTests/sync/basicSync_test.go b/integrationTests/sync/basicSync_test.go index 8ed852a0439..c2548151c44 100644 --- a/integrationTests/sync/basicSync_test.go +++ b/integrationTests/sync/basicSync_test.go @@ -13,10 +13,6 @@ import ( "github.com/stretchr/testify/assert" ) -var stepDelay = time.Second -var delayP2pBootstrap = time.Second * 2 -var stepSync = time.Second * 2 - func TestSyncWorksInShard_EmptyBlocksNoForks(t *testing.T) { if testing.Short() { t.Skip("this is not a short test") @@ -197,9 +193,3 @@ func testAllNodesHaveSameLastBlock(t *testing.T, nodes []*integrationTests.TestP assert.Equal(t, 1, len(mapBlocksByHash)) } - -func updateRound(nodes []*integrationTests.TestProcessorNode, round uint64) { - for _, n := range nodes { - n.Rounder.IndexField = int64(round) - } -} diff --git a/integrationTests/sync/common.go b/integrationTests/sync/common.go new file mode 100644 index 00000000000..fcc0659c5be --- /dev/null +++ b/integrationTests/sync/common.go @@ -0,0 +1,110 @@ +package sync + +import ( + "context" + "fmt" + "sync/atomic" + "time" + + "github.com/ElrondNetwork/elrond-go/integrationTests" + "github.com/ElrondNetwork/elrond-go/p2p" + "github.com/ElrondNetwork/elrond-go/sharding" +) + +var stepDelay = time.Second +var delayP2pBootstrap = time.Second * 2 +var stepSync = time.Second * 2 + +func setupSyncNodesOneShardAndMeta( + numNodesPerShard int, + numNodesMeta int) ([]*integrationTests.TestProcessorNode, p2p.Messenger, []int) { + + maxShards := uint32(1) + shardId := uint32(0) + + advertiser := integrationTests.CreateMessengerWithKadDht(context.Background(), "") + _ = advertiser.Bootstrap() + advertiserAddr := integrationTests.GetConnectableAddress(advertiser) + + nodes := make([]*integrationTests.TestProcessorNode, 0) + for i := 0; i < numNodesPerShard; i++ { + shardNode := integrationTests.NewTestSyncNode( + maxShards, + shardId, + shardId, + advertiserAddr, + ) + nodes = append(nodes, shardNode) + } + idxProposerShard0 := 0 + + for i := 0; i < numNodesMeta; i++ { + metaNode := integrationTests.NewTestSyncNode( + maxShards, + sharding.MetachainShardId, + shardId, + advertiserAddr, + ) + nodes = append(nodes, metaNode) + } + idxProposerMeta := numNodesPerShard + + idxProposers := []int{idxProposerShard0, idxProposerMeta} + + return nodes, advertiser, idxProposers +} + +func startSyncingBlocks(nodes []*integrationTests.TestProcessorNode) { + for _, n := range nodes { + _ = n.StartSync() + } + + fmt.Println("Delaying for nodes to start syncing blocks...") + time.Sleep(stepDelay) +} + +func updateRound(nodes []*integrationTests.TestProcessorNode, round uint64) { + for _, n := range nodes { + n.Rounder.IndexField = int64(round) + } +} + +func proposeAndSyncBlocks( + nodes 
[]*integrationTests.TestProcessorNode, + round *uint64, + idxProposers []int, + nonces []*uint64, + numOfRounds int, +) { + + for i := 0; i < numOfRounds; i++ { + crtRound := atomic.LoadUint64(round) + proposeBlocks(nodes, idxProposers, nonces, crtRound) + + time.Sleep(stepSync) + + crtRound = integrationTests.IncrementAndPrintRound(crtRound) + atomic.StoreUint64(round, crtRound) + updateRound(nodes, crtRound) + incrementNonces(nonces) + } + time.Sleep(stepSync) +} + +func incrementNonces(nonces []*uint64) { + for i := 0; i < len(nonces); i++ { + atomic.AddUint64(nonces[i], 1) + } +} + +func proposeBlocks( + nodes []*integrationTests.TestProcessorNode, + idxProposers []int, + nonces []*uint64, + crtRound uint64, +) { + for idx, proposer := range idxProposers { + crtNonce := atomic.LoadUint64(nonces[idx]) + integrationTests.ProposeBlock(nodes, []int{proposer}, crtRound, crtNonce) + } +} diff --git a/integrationTests/sync/edgeCases_test.go b/integrationTests/sync/edgeCases_test.go new file mode 100644 index 00000000000..44fd83a71cf --- /dev/null +++ b/integrationTests/sync/edgeCases_test.go @@ -0,0 +1,101 @@ +package sync + +import ( + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go/integrationTests" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/stretchr/testify/assert" +) + +// TestSyncMetaNodeIsSyncingReceivedHigherRoundBlockFromShard tests the following scenario: +// 1. Meta and shard 0 are in sync, shard 0 stops producing blocks +// 2. Shard 0 resumes producing block, having a new block with nonce 5 in a higher round than notarized by metachain +// 3. A bootstrapping meta node should be able to pass block with nonce 4 +func TestSyncMetaNodeIsSyncingReceivedHigherRoundBlockFromShard(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + numNodesPerShard := 3 + numNodesMeta := 3 + + nodes, advertiser, idxProposers := setupSyncNodesOneShardAndMeta(numNodesPerShard, numNodesMeta) + idxProposerMeta := idxProposers[1] + defer integrationTests.CloseProcessorNodes(nodes, advertiser) + + integrationTests.StartP2pBootstrapOnProcessorNodes(nodes) + startSyncingBlocks(nodes) + + round := uint64(0) + idxNonceMeta := 1 + nonces := []*uint64{new(uint64), new(uint64)} + + round = integrationTests.IncrementAndPrintRound(round) + updateRound(nodes, round) + incrementNonces(nonces) + + numRoundsBlocksAreProposedCorrectly := 2 + proposeAndSyncBlocks( + nodes, + &round, + idxProposers, + nonces, + numRoundsBlocksAreProposedCorrectly, + ) + + numRoundsBlocksAreProposedOnlyByMeta := 2 + proposeAndSyncBlocks( + nodes, + &round, + []int{idxProposerMeta}, + []*uint64{nonces[idxNonceMeta]}, + numRoundsBlocksAreProposedOnlyByMeta, + ) + + secondNumRoundsBlocksAreProposedCorrectly := 2 + proposeAndSyncBlocks( + nodes, + &round, + idxProposers, + nonces, + secondNumRoundsBlocksAreProposedCorrectly, + ) + + maxShards := uint32(1) + shardId := uint32(0) + advertiserAddr := integrationTests.GetConnectableAddress(advertiser) + syncMetaNode := integrationTests.NewTestSyncNode( + maxShards, + sharding.MetachainShardId, + shardId, + advertiserAddr, + ) + nodes = append(nodes, syncMetaNode) + syncMetaNode.Rounder.IndexField = int64(round) + + syncNodesSlice := []*integrationTests.TestProcessorNode{syncMetaNode} + integrationTests.StartP2pBootstrapOnProcessorNodes(syncNodesSlice) + startSyncingBlocks(syncNodesSlice) + + //after joining the network we must propose a new block on the metachain as to be received by the sync + //node and to start the bootstrapping process + 
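The sync-test helpers above drive several proposers in lockstep: each simulated round proposes one block per proposer, waits for syncing, then bumps the shared round and the per-proposer nonces atomically. A minimal sketch of that bookkeeping without the networking (the propose step is a stub; assumes one nonce counter per proposer):

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    // advanceRounds proposes numOfRounds blocks for every proposer, incrementing
    // the shared round once per iteration and each proposer's nonce afterwards.
    func advanceRounds(round *uint64, nonces []*uint64, numOfRounds int, propose func(proposer int, round uint64, nonce uint64)) {
        for i := 0; i < numOfRounds; i++ {
            crtRound := atomic.LoadUint64(round)
            for idx := range nonces {
                propose(idx, crtRound, atomic.LoadUint64(nonces[idx]))
            }
            atomic.AddUint64(round, 1)
            for idx := range nonces {
                atomic.AddUint64(nonces[idx], 1)
            }
        }
    }

    func main() {
        round := uint64(1)
        nonces := []*uint64{new(uint64), new(uint64)}
        *nonces[0], *nonces[1] = 1, 1
        advanceRounds(&round, nonces, 2, func(p int, r uint64, n uint64) {
            fmt.Printf("proposer %d: round %d nonce %d\n", p, r, n)
        })
    }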
proposeAndSyncBlocks( + nodes, + &round, + []int{idxProposerMeta}, + []*uint64{nonces[idxNonceMeta]}, + 1, + ) + + numOfRoundsToWaitToCatchUp := numRoundsBlocksAreProposedCorrectly + + numRoundsBlocksAreProposedOnlyByMeta + + secondNumRoundsBlocksAreProposedCorrectly + time.Sleep(stepSync * time.Duration(numOfRoundsToWaitToCatchUp)) + updateRound(nodes, round) + + nonceProposerMeta := nodes[idxProposerMeta].BlockChain.GetCurrentBlockHeader().GetNonce() + nonceSyncNode := syncMetaNode.BlockChain.GetCurrentBlockHeader().GetNonce() + assert.Equal(t, nonceProposerMeta, nonceSyncNode) +} diff --git a/integrationTests/testInitializer.go b/integrationTests/testInitializer.go index f2cc989a9ce..d11969c3c08 100644 --- a/integrationTests/testInitializer.go +++ b/integrationTests/testInitializer.go @@ -1116,3 +1116,21 @@ func WaitForBootstrapAndShowConnected(peers []p2p.Messenger, durationBootstrapin fmt.Printf("Peer %s is connected to %d peers\n", peer.ID().Pretty(), len(peer.ConnectedPeers())) } } + +// CloseProcessorNodes closes the used TestProcessorNodes and advertiser +func CloseProcessorNodes(nodes []*TestProcessorNode, advertiser p2p.Messenger) { + _ = advertiser.Close() + for _, n := range nodes { + _ = n.Messenger.Close() + } +} + +// StartP2pBootstrapOnProcessorNodes will start the p2p discovery on processor nodes and wait a predefined time +func StartP2pBootstrapOnProcessorNodes(nodes []*TestProcessorNode) { + for _, n := range nodes { + _ = n.Messenger.Bootstrap() + } + + fmt.Println("Delaying for nodes p2p bootstrap...") + time.Sleep(p2pBootstrapStepDelay) +} From 277726be4af72892a693b536680341aa431e99ca Mon Sep 17 00:00:00 2001 From: miiu96 Date: Wed, 25 Sep 2019 15:06:01 +0300 Subject: [PATCH 135/234] EN-3981 : add shardId in elastic search --- consensus/spos/commonSubround/subroundStartRound.go | 2 +- core/indexer/data.go | 5 +++-- core/indexer/elasticsearch.go | 4 ++-- core/indexer/interface.go | 2 +- core/mock/indexerMock.go | 2 +- process/mock/indexerMock.go | 2 +- 6 files changed, 9 insertions(+), 8 deletions(-) diff --git a/consensus/spos/commonSubround/subroundStartRound.go b/consensus/spos/commonSubround/subroundStartRound.go index 7589e3e28f5..36dbf90502e 100644 --- a/consensus/spos/commonSubround/subroundStartRound.go +++ b/consensus/spos/commonSubround/subroundStartRound.go @@ -224,7 +224,7 @@ func (sr *SubroundStartRound) indexRoundIfNeeded(pubKeys []string) { } round := sr.Rounder().Index() - go sr.indexer.SaveRoundInfo(round, signersIndexes) + go sr.indexer.SaveRoundInfo(round, shardId, signersIndexes) } func (sr *SubroundStartRound) generateNextConsensusGroup(roundIndex int64) error { diff --git a/core/indexer/data.go b/core/indexer/data.go index 3c7c07ba767..98d6991e681 100644 --- a/core/indexer/data.go +++ b/core/indexer/data.go @@ -45,9 +45,10 @@ type Block struct { PrevHash string `json:"prevHash"` } -// SignersIndexes is a structure containing block signers -type SignersIndexes struct { +// RoundInfo is a structure containing block signers and shard id +type RoundInfo struct { SignersIndexes []uint64 `json:"signersIndexes"` + ShardId uint32 `json:"shardId"` } // TPS is a structure containing all the fields that need to diff --git a/core/indexer/elasticsearch.go b/core/indexer/elasticsearch.go index 2278fe3b2c5..224137e40c7 100644 --- a/core/indexer/elasticsearch.go +++ b/core/indexer/elasticsearch.go @@ -238,10 +238,10 @@ func (ei *elasticIndexer) SaveBlock( } // SaveRoundInfo will save on elastic search information about round -func (ei *elasticIndexer) 
SaveRoundInfo(round int64, signersIndexes []uint64) { +func (ei *elasticIndexer) SaveRoundInfo(round int64, shardId uint32, signersIndexes []uint64) { var buff bytes.Buffer - serializedSignersIndexes, err := ei.marshalizer.Marshal(SignersIndexes{SignersIndexes: signersIndexes}) + serializedSignersIndexes, err := ei.marshalizer.Marshal(RoundInfo{SignersIndexes: signersIndexes, ShardId: shardId}) if err != nil { ei.logger.Warn("could not marshal signers indexes") return diff --git a/core/indexer/interface.go b/core/indexer/interface.go index e1f3da8646b..a176ce4ca77 100644 --- a/core/indexer/interface.go +++ b/core/indexer/interface.go @@ -9,7 +9,7 @@ import ( // This could be an elasticsearch index, a MySql database or any other external services. type Indexer interface { SaveBlock(body data.BodyHandler, header data.HeaderHandler, txPool map[string]data.TransactionHandler) - SaveRoundInfo(round int64, signersIndexes []uint64) + SaveRoundInfo(round int64, shardId uint32, signersIndexes []uint64) UpdateTPS(tpsBenchmark statistics.TPSBenchmark) SaveValidatorsPubKeys(validatorsPubKeys map[uint32][][]byte) IsInterfaceNil() bool diff --git a/core/mock/indexerMock.go b/core/mock/indexerMock.go index 573953ca880..c76acca4252 100644 --- a/core/mock/indexerMock.go +++ b/core/mock/indexerMock.go @@ -19,7 +19,7 @@ func (im *IndexerMock) UpdateTPS(tpsBenchmark statistics.TPSBenchmark) { panic("implement me") } -func (im *IndexerMock) SaveRoundInfo(round int64, signersIndexes []uint64) { +func (im *IndexerMock) SaveRoundInfo(round int64, shardId uint32, signersIndexes []uint64) { panic("implement me") } diff --git a/process/mock/indexerMock.go b/process/mock/indexerMock.go index 687a191c981..853ff65f970 100644 --- a/process/mock/indexerMock.go +++ b/process/mock/indexerMock.go @@ -20,7 +20,7 @@ func (im *IndexerMock) UpdateTPS(tpsBenchmark statistics.TPSBenchmark) { panic("implement me") } -func (im *IndexerMock) SaveRoundInfo(round int64, signersIndexes []uint64) { +func (im *IndexerMock) SaveRoundInfo(round int64, shardId uint32, signersIndexes []uint64) { panic("implement me") } From 18b844df77bcbfa41735f4e815a473ebb258b053 Mon Sep 17 00:00:00 2001 From: miiu96 Date: Wed, 25 Sep 2019 16:12:44 +0300 Subject: [PATCH 136/234] EN-3981 : add shardId in elastic search --- core/indexer/data.go | 25 +++++++++++++------------ core/indexer/elasticsearch.go | 22 ++++++++++++---------- core/indexer/elasticsearch_test.go | 2 +- core/indexer/export_test.go | 4 ++-- core/indexer/interface.go | 2 +- core/mock/indexerMock.go | 2 +- process/block/shardblock.go | 19 ++++++++++++++++++- process/mock/indexerMock.go | 2 +- 8 files changed, 49 insertions(+), 29 deletions(-) diff --git a/core/indexer/data.go b/core/indexer/data.go index 98d6991e681..53dcc57f2db 100644 --- a/core/indexer/data.go +++ b/core/indexer/data.go @@ -31,18 +31,19 @@ type Transaction struct { // to be saved for a block. 
It has all the default fields // plus some extra information for ease of search and filter type Block struct { - Nonce uint64 `json:"nonce"` - Round uint64 `json:"round"` - ShardID uint32 `json:"shardId"` - Hash string `json:"hash"` - Proposer string `json:"proposer"` - Validators []string `json:"validators"` - PubKeyBitmap string `json:"pubKeyBitmap"` - Size int64 `json:"size"` - Timestamp time.Duration `json:"timestamp"` - TxCount uint32 `json:"txCount"` - StateRootHash string `json:"stateRootHash"` - PrevHash string `json:"prevHash"` + Nonce uint64 `json:"nonce"` + Round uint64 `json:"round"` + ShardID uint32 `json:"shardId"` + Hash string `json:"hash"` + Proposer string `json:"proposer"` + Validators []string `json:"validators"` + PubKeyBitmap string `json:"pubKeyBitmap"` + Size int64 `json:"size"` + Timestamp time.Duration `json:"timestamp"` + TxCount uint32 `json:"txCount"` + StateRootHash string `json:"stateRootHash"` + PrevHash string `json:"prevHash"` + SignersIndexes []uint64 `json:"signersIndexes"` } // RoundInfo is a structure containing block signers and shard id diff --git a/core/indexer/elasticsearch.go b/core/indexer/elasticsearch.go index 224137e40c7..922349da772 100644 --- a/core/indexer/elasticsearch.go +++ b/core/indexer/elasticsearch.go @@ -212,6 +212,7 @@ func (ei *elasticIndexer) SaveBlock( bodyHandler data.BodyHandler, headerhandler data.HeaderHandler, txPool map[string]data.TransactionHandler, + signersIndexes []uint64, ) { if headerhandler == nil || headerhandler.IsInterfaceNil() { @@ -225,7 +226,7 @@ func (ei *elasticIndexer) SaveBlock( return } - go ei.saveHeader(headerhandler) + go ei.saveHeader(headerhandler, signersIndexes) if len(body) == 0 { ei.logger.Warn(ErrNoMiniblocks.Error()) @@ -303,7 +304,7 @@ func (ei *elasticIndexer) SaveValidatorsPubKeys(validatorsPubKeys map[uint32][][ } } -func (ei *elasticIndexer) getSerializedElasticBlockAndHeaderHash(header data.HeaderHandler) ([]byte, []byte) { +func (ei *elasticIndexer) getSerializedElasticBlockAndHeaderHash(header data.HeaderHandler, signersIndexes []uint64) ([]byte, []byte) { h, err := ei.marshalizer.Marshal(header) if err != nil { ei.logger.Warn("could not marshal header") @@ -319,12 +320,13 @@ func (ei *elasticIndexer) getSerializedElasticBlockAndHeaderHash(header data.Hea // TODO: We should add functionality for proposer and validators Proposer: hex.EncodeToString([]byte("mock proposer")), //Validators: "mock validators", - PubKeyBitmap: hex.EncodeToString(header.GetPubKeysBitmap()), - Size: int64(len(h)), - Timestamp: time.Duration(header.GetTimeStamp()), - TxCount: header.GetTxCount(), - StateRootHash: hex.EncodeToString(header.GetRootHash()), - PrevHash: hex.EncodeToString(header.GetPrevHash()), + PubKeyBitmap: hex.EncodeToString(header.GetPubKeysBitmap()), + Size: int64(len(h)), + Timestamp: time.Duration(header.GetTimeStamp()), + TxCount: header.GetTxCount(), + StateRootHash: hex.EncodeToString(header.GetRootHash()), + PrevHash: hex.EncodeToString(header.GetPrevHash()), + SignersIndexes: signersIndexes, } serializedBlock, err := json.Marshal(elasticBlock) @@ -336,10 +338,10 @@ func (ei *elasticIndexer) getSerializedElasticBlockAndHeaderHash(header data.Hea return serializedBlock, headerHash } -func (ei *elasticIndexer) saveHeader(header data.HeaderHandler) { +func (ei *elasticIndexer) saveHeader(header data.HeaderHandler, signersIndexes []uint64) { var buff bytes.Buffer - serializedBlock, headerHash := ei.getSerializedElasticBlockAndHeaderHash(header) + serializedBlock, headerHash := 
ei.getSerializedElasticBlockAndHeaderHash(header, signersIndexes) buff.Grow(len(serializedBlock)) buff.Write(serializedBlock) diff --git a/core/indexer/elasticsearch_test.go b/core/indexer/elasticsearch_test.go index 58cc8872c2f..51fc51ab3ee 100644 --- a/core/indexer/elasticsearch_test.go +++ b/core/indexer/elasticsearch_test.go @@ -268,7 +268,7 @@ func TestElasticIndexer_getSerializedElasticBlockAndHeaderHash(t *testing.T) { ei := indexer.NewTestElasticIndexer(url, username, password, shardCoordinator, marshalizer, hasher, log, &indexer.Options{}) header := newTestBlockHeader() - serializedBlock, headerHash := ei.GetSerializedElasticBlockAndHeaderHash(header) + serializedBlock, headerHash := ei.GetSerializedElasticBlockAndHeaderHash(header, nil) h, _ := marshalizer.Marshal(header) expectedHeaderHash := hasher.Compute(string(h)) diff --git a/core/indexer/export_test.go b/core/indexer/export_test.go index cd9913c9e24..ae0004c0a8d 100644 --- a/core/indexer/export_test.go +++ b/core/indexer/export_test.go @@ -42,8 +42,8 @@ func NewTestElasticIndexer( return ElasticIndexer{indexer} } -func (ei *ElasticIndexer) GetSerializedElasticBlockAndHeaderHash(header data.HeaderHandler) ([]byte, []byte) { - return ei.getSerializedElasticBlockAndHeaderHash(header) +func (ei *ElasticIndexer) GetSerializedElasticBlockAndHeaderHash(header data.HeaderHandler, signersIndexes []uint64) ([]byte, []byte) { + return ei.getSerializedElasticBlockAndHeaderHash(header, signersIndexes) } func (ei *ElasticIndexer) BuildTransactionBulks( diff --git a/core/indexer/interface.go b/core/indexer/interface.go index a176ce4ca77..311a4a9de6f 100644 --- a/core/indexer/interface.go +++ b/core/indexer/interface.go @@ -8,7 +8,7 @@ import ( // Indexer is an interface for saving node specific data to other storage. // This could be an elasticsearch index, a MySql database or any other external services. 
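With the change above, each indexed block document also carries the signer positions; saveHeader simply JSON-serializes the struct and ships it in a bulk request. A reduced sketch of building such a document (field set trimmed to the relevant part; hashes are pre-encoded strings here):

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // blockDoc is a trimmed version of the indexer's Block structure: only the
    // fields relevant to the signers-indexes change are kept.
    type blockDoc struct {
        Nonce          uint64   `json:"nonce"`
        Round          uint64   `json:"round"`
        ShardID        uint32   `json:"shardId"`
        Hash           string   `json:"hash"`
        SignersIndexes []uint64 `json:"signersIndexes"`
    }

    func main() {
        doc := blockDoc{
            Nonce:          42,
            Round:          45,
            ShardID:        0,
            Hash:           "0a1b2c",
            SignersIndexes: []uint64{2, 0, 5},
        }
        serialized, err := json.Marshal(doc)
        if err != nil {
            panic(err)
        }
        fmt.Println(string(serialized))
    }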
type Indexer interface { - SaveBlock(body data.BodyHandler, header data.HeaderHandler, txPool map[string]data.TransactionHandler) + SaveBlock(body data.BodyHandler, header data.HeaderHandler, txPool map[string]data.TransactionHandler, signersIndexes []uint64) SaveRoundInfo(round int64, shardId uint32, signersIndexes []uint64) UpdateTPS(tpsBenchmark statistics.TPSBenchmark) SaveValidatorsPubKeys(validatorsPubKeys map[uint32][][]byte) diff --git a/core/mock/indexerMock.go b/core/mock/indexerMock.go index c76acca4252..5440361c6be 100644 --- a/core/mock/indexerMock.go +++ b/core/mock/indexerMock.go @@ -11,7 +11,7 @@ type IndexerMock struct { SaveBlockCalled func(body block.Body, header *block.Header) } -func (im *IndexerMock) SaveBlock(body data.BodyHandler, header data.HeaderHandler, txPool map[string]data.TransactionHandler) { +func (im *IndexerMock) SaveBlock(body data.BodyHandler, header data.HeaderHandler, txPool map[string]data.TransactionHandler, signersIndexes []uint64) { panic("implement me") } diff --git a/process/block/shardblock.go b/process/block/shardblock.go index a3fce3c0a30..3d71a6f2797 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -1,6 +1,7 @@ package block import ( + "bytes" "fmt" "sort" "sync" @@ -429,7 +430,23 @@ func (sp *shardProcessor) indexBlockIfNeeded( txPool[hash] = tx } - go sp.core.Indexer().SaveBlock(body, header, txPool) + shardId := sp.shardCoordinator.SelfId() + pubKeys, err := sp.nodesCoordinator.GetValidatorsPublicKeys(header.GetPrevRandSeed(), header.GetRound(), shardId) + if err != nil { + return + } + validatorsPubKeys := sp.nodesCoordinator.GetAllValidatorsPublicKeys() + signersIndexes := make([]uint64, 0) + + for _, pubKey := range pubKeys { + for index, value := range validatorsPubKeys[shardId] { + if bytes.Equal([]byte(pubKey), value) { + signersIndexes = append(signersIndexes, uint64(index)) + } + } + } + + go sp.core.Indexer().SaveBlock(body, header, txPool, signersIndexes) } // RestoreBlockIntoPools restores the TxBlock and MetaBlock into associated pools diff --git a/process/mock/indexerMock.go b/process/mock/indexerMock.go index 853ff65f970..e332c3c6f3d 100644 --- a/process/mock/indexerMock.go +++ b/process/mock/indexerMock.go @@ -10,7 +10,7 @@ type IndexerMock struct { SaveBlockCalled func(body data.BodyHandler, header data.HeaderHandler, txPool map[string]data.TransactionHandler) } -func (im *IndexerMock) SaveBlock(body data.BodyHandler, header data.HeaderHandler, txPool map[string]data.TransactionHandler) { +func (im *IndexerMock) SaveBlock(body data.BodyHandler, header data.HeaderHandler, txPool map[string]data.TransactionHandler, signersIndexes []uint64) { if im.SaveBlockCalled != nil { im.SaveBlockCalled(body, header, txPool) } From 4df55915fc4eee3c1214d56f9d5f97df84753a8c Mon Sep 17 00:00:00 2001 From: Sebastian Marian <36901555+SebastianMarian@users.noreply.github.com> Date: Wed, 25 Sep 2019 18:35:17 +0300 Subject: [PATCH 137/234] En 4102 improve fork detector (#480) * Fixed bug * Added unit tests and refactored some variables names * Restored old config.toml * Fixed a typo * Changed some print info * Fixed after review --- process/block/baseProcess.go | 44 ++++++---- process/block/baseProcess_test.go | 15 +--- process/block/metablock.go | 79 +++++++++++++++-- process/block/metablock_test.go | 23 +++-- process/block/preprocess/basePreProcess.go | 15 ---- .../block/preprocess/smartContractResults.go | 21 ++--- .../preprocess/smartContractResults_test.go | 5 +- process/block/preprocess/transactions.go | 88 
++++++++++++++----- process/block/shardblock.go | 85 +++++++++++++++--- process/block/shardblock_test.go | 21 ++--- process/constants.go | 1 + process/coordinator/process.go | 13 +-- process/coordinator/process_test.go | 9 +- process/errors.go | 32 ++++--- process/interface.go | 4 +- process/mock/preprocessorMock.go | 6 +- process/mock/transactionCoordinatorMock.go | 6 +- process/sync/baseForkDetector.go | 20 +++++ process/sync/baseForkDetector_test.go | 76 ++++++++++++++++ process/sync/export_test.go | 8 ++ process/sync/metaForkDetector.go | 11 +-- process/sync/shardForkDetector.go | 5 ++ 22 files changed, 422 insertions(+), 165 deletions(-) diff --git a/process/block/baseProcess.go b/process/block/baseProcess.go index 5a804c0b503..590651953e9 100644 --- a/process/block/baseProcess.go +++ b/process/block/baseProcess.go @@ -112,28 +112,28 @@ func (bp *baseProcessor) checkBlockValidity( return nil } - log.Info(fmt.Sprintf("hash not match: local block hash is %s and node received block with previous hash %s\n", + log.Info(fmt.Sprintf("hash does not match: local block hash is %s and node received block with previous hash %s\n", core.ToB64(chainHandler.GetGenesisHeaderHash()), core.ToB64(headerHandler.GetPrevHash()))) return process.ErrBlockHashDoesNotMatch } - log.Info(fmt.Sprintf("nonce not match: local block nonce is 0 and node received block with nonce %d\n", + log.Info(fmt.Sprintf("nonce does not match: local block nonce is 0 and node received block with nonce %d\n", headerHandler.GetNonce())) return process.ErrWrongNonceInBlock } if headerHandler.GetRound() <= currentBlockHeader.GetRound() { - log.Info(fmt.Sprintf("round not match: local block round is %d and node received block with round %d\n", + log.Info(fmt.Sprintf("round does not match: local block round is %d and node received block with round %d\n", currentBlockHeader.GetRound(), headerHandler.GetRound())) return process.ErrLowerRoundInBlock } if headerHandler.GetNonce() != currentBlockHeader.GetNonce()+1 { - log.Info(fmt.Sprintf("nonce not match: local block nonce is %d and node received block with nonce %d\n", + log.Info(fmt.Sprintf("nonce does not match: local block nonce is %d and node received block with nonce %d\n", currentBlockHeader.GetNonce(), headerHandler.GetNonce())) return process.ErrWrongNonceInBlock @@ -144,20 +144,20 @@ func (bp *baseProcessor) checkBlockValidity( return err } - if !bytes.Equal(headerHandler.GetPrevRandSeed(), currentBlockHeader.GetRandSeed()) { - log.Info(fmt.Sprintf("random seed not match: local block random seed is %s and node received block with previous random seed %s\n", - core.ToB64(currentBlockHeader.GetRandSeed()), core.ToB64(headerHandler.GetPrevRandSeed()))) - - return process.ErrRandSeedMismatch - } - if !bytes.Equal(headerHandler.GetPrevHash(), prevHeaderHash) { - log.Info(fmt.Sprintf("hash not match: local block hash is %s and node received block with previous hash %s\n", + log.Info(fmt.Sprintf("hash does not match: local block hash is %s and node received block with previous hash %s\n", core.ToB64(prevHeaderHash), core.ToB64(headerHandler.GetPrevHash()))) return process.ErrBlockHashDoesNotMatch } + if !bytes.Equal(headerHandler.GetPrevRandSeed(), currentBlockHeader.GetRandSeed()) { + log.Info(fmt.Sprintf("random seed does not match: local block random seed is %s and node received block with previous random seed %s\n", + core.ToB64(currentBlockHeader.GetRandSeed()), core.ToB64(headerHandler.GetPrevRandSeed()))) + + return process.ErrRandSeedDoesNotMatch + } + if bodyHandler != nil { 
// TODO: add bodyHandler verification here } @@ -202,7 +202,7 @@ func (bp *baseProcessor) isHdrConstructionValid(currHdr, prevHdr data.HeaderHand } // block with nonce 0 was already saved if prevHdr.GetRootHash() != nil { - return process.ErrRootStateMissmatch + return process.ErrRootStateDoesNotMatch } return nil } @@ -210,10 +210,14 @@ func (bp *baseProcessor) isHdrConstructionValid(currHdr, prevHdr data.HeaderHand //TODO: add verification if rand seed was correctly computed add other verification //TODO: check here if the 2 header blocks were correctly signed and the consensus group was correctly elected if prevHdr.GetRound() >= currHdr.GetRound() { - return process.ErrLowerRoundInOtherChainBlock + log.Info(fmt.Sprintf("round does not match in shard %d: local block round is %d and node received block with round %d\n", + currHdr.GetShardID(), prevHdr.GetRound(), currHdr.GetRound())) + return process.ErrLowerRoundInBlock } if currHdr.GetNonce() != prevHdr.GetNonce()+1 { + log.Info(fmt.Sprintf("nonce does not match in shard %d: local block nonce is %d and node received block with nonce %d\n", + currHdr.GetShardID(), prevHdr.GetNonce(), currHdr.GetNonce())) return process.ErrWrongNonceInBlock } @@ -222,12 +226,16 @@ func (bp *baseProcessor) isHdrConstructionValid(currHdr, prevHdr data.HeaderHand return err } - if !bytes.Equal(currHdr.GetPrevRandSeed(), prevHdr.GetRandSeed()) { - return process.ErrRandSeedMismatch + if !bytes.Equal(currHdr.GetPrevHash(), prevHeaderHash) { + log.Info(fmt.Sprintf("block hash does not match in shard %d: local block hash is %s and node received block with previous hash %s\n", + currHdr.GetShardID(), core.ToB64(prevHeaderHash), core.ToB64(currHdr.GetPrevHash()))) + return process.ErrBlockHashDoesNotMatch } - if !bytes.Equal(currHdr.GetPrevHash(), prevHeaderHash) { - return process.ErrHashDoesNotMatchInOtherChainBlock + if !bytes.Equal(currHdr.GetPrevRandSeed(), prevHdr.GetRandSeed()) { + log.Info(fmt.Sprintf("random seed does not match in shard %d: local block random seed is %s and node received block with previous random seed %s\n", + currHdr.GetShardID(), core.ToB64(prevHdr.GetRandSeed()), core.ToB64(currHdr.GetPrevRandSeed()))) + return process.ErrRandSeedDoesNotMatch } return nil diff --git a/process/block/baseProcess_test.go b/process/block/baseProcess_test.go index 898ec279889..01ef310073f 100644 --- a/process/block/baseProcess_test.go +++ b/process/block/baseProcess_test.go @@ -375,26 +375,19 @@ func TestBlockProcessor_CheckBlockValidity(t *testing.T) { assert.Equal(t, process.ErrWrongNonceInBlock, err) hdr.Nonce = 2 - hdr.PrevRandSeed = []byte("X") - err = bp.CheckBlockValidity(blkc, hdr, body) - assert.Equal(t, process.ErrRandSeedMismatch, err) - - hdr.PrevRandSeed = []byte("") hdr.PrevHash = []byte("X") err = bp.CheckBlockValidity(blkc, hdr, body) assert.Equal(t, process.ErrBlockHashDoesNotMatch, err) - hdr.Nonce = 3 - hdr.PrevHash = []byte("") - err = bp.CheckBlockValidity(blkc, hdr, body) - assert.Equal(t, process.ErrWrongNonceInBlock, err) - - hdr.Nonce = 2 marshalizerMock := mock.MarshalizerMock{} hasherMock := mock.HasherMock{} prevHeader, _ := marshalizerMock.Marshal(blkc.GetCurrentBlockHeader()) hdr.PrevHash = hasherMock.Compute(string(prevHeader)) + hdr.PrevRandSeed = []byte("X") + err = bp.CheckBlockValidity(blkc, hdr, body) + assert.Equal(t, process.ErrRandSeedDoesNotMatch, err) + hdr.PrevRandSeed = []byte("") err = bp.CheckBlockValidity(blkc, hdr, body) assert.Nil(t, err) } diff --git a/process/block/metablock.go b/process/block/metablock.go 
index 476d74945ad..c46dbeac945 100644 --- a/process/block/metablock.go +++ b/process/block/metablock.go @@ -140,6 +140,14 @@ func (mp *metaProcessor) ProcessBlock( err := mp.checkBlockValidity(chainHandler, headerHandler, bodyHandler) if err != nil { + if err == process.ErrBlockHashDoesNotMatch { + log.Info(fmt.Sprintf("requested missing meta header with hash %s for shard %d\n", + core.ToB64(headerHandler.GetPrevHash()), + headerHandler.GetShardID())) + + go mp.onRequestHeaderHandler(headerHandler.GetShardID(), headerHandler.GetPrevHash()) + } + return err } @@ -211,7 +219,7 @@ func (mp *metaProcessor) ProcessBlock( } if !mp.verifyStateRoot(header.GetRootHash()) { - err = process.ErrRootStateMissmatch + err = process.ErrRootStateDoesNotMatch return err } @@ -501,7 +509,7 @@ func (mp *metaProcessor) CommitBlock( return err } - log.Info(fmt.Sprintf("metaBlock with nonce %d and hash %s has been committed successfully\n", + log.Info(fmt.Sprintf("meta block with nonce %d and hash %s has been committed successfully\n", header.Nonce, core.ToB64(headerHash))) @@ -515,6 +523,10 @@ func (mp *metaProcessor) CommitBlock( log.Debug(errNotCritical.Error()) } + log.Info(fmt.Sprintf("meta block with nonce %d is the highest final block in shard %d\n", + mp.forkDetector.GetHighestFinalBlockNonce(), + mp.shardCoordinator.SelfId())) + hdrsToAttestPreviousFinal := mp.nextKValidity + 1 mp.removeNotarizedHdrsBehindPreviousFinal(hdrsToAttestPreviousFinal) @@ -706,7 +718,7 @@ func (mp *metaProcessor) checkShardHeadersFinality(header *block.MetaBlock, high } //TODO: change this to look at the pool where values are saved by prevHash. can be done after resolver is done - _, _, sortedHdrPerShard, err := mp.getOrderedHdrs(header.GetRound()) + sortedHdrPerShard, err := mp.getFinalityAttestingHeaders(highestNonceHdrs, process.ShardBlockFinality) if err != nil { return err } @@ -746,6 +758,57 @@ func (mp *metaProcessor) checkShardHeadersFinality(header *block.MetaBlock, high return nil } +func (mp *metaProcessor) getFinalityAttestingHeaders( + highestNonceHdrs map[uint32]data.HeaderHandler, + finality uint64, +) (map[uint32][]*block.Header, error) { + + shardHeadersPool := mp.dataPool.ShardHeaders() + if shardHeadersPool == nil { + return nil, process.ErrNilShardBlockPool + } + + headersMap := make(map[uint32][]*block.Header) + // get keys and arrange them into shards + for _, key := range shardHeadersPool.Keys() { + val, _ := shardHeadersPool.Peek(key) + if val == nil { + continue + } + + hdr, ok := val.(*block.Header) + if !ok { + continue + } + + if highestNonceHdrs[hdr.ShardId] == nil { + continue + } + + isHdrNonceLowerOrEqualThanHighestNonce := hdr.GetNonce() <= highestNonceHdrs[hdr.ShardId].GetNonce() + isHdrNonceHigherThanFinalNonce := hdr.GetNonce() > highestNonceHdrs[hdr.ShardId].GetNonce()+finality + + if isHdrNonceLowerOrEqualThanHighestNonce || + isHdrNonceHigherThanFinalNonce { + continue + } + + headersMap[hdr.ShardId] = append(headersMap[hdr.ShardId], hdr) + } + + // sort headers for each shard + for shardId := uint32(0); shardId < mp.shardCoordinator.NumberOfShards(); shardId++ { + hdrsForShard := headersMap[shardId] + if len(hdrsForShard) > 1 { + sort.Slice(hdrsForShard, func(i, j int) bool { + return hdrsForShard[i].GetNonce() < hdrsForShard[j].GetNonce() + }) + } + } + + return headersMap, nil +} + func (mp *metaProcessor) isShardHeaderValidFinal(currHdr *block.Header, lastHdr *block.Header, sortedShardHdrs []*block.Header) (bool, []uint32) { if currHdr == nil { return false, nil @@ -1169,9 +1232,9 @@ 
func (mp *metaProcessor) MarshalizedDataToBroadcast( } func (mp *metaProcessor) getOrderedHdrs(round uint64) ([]*block.Header, [][]byte, map[uint32][]*block.Header, error) { - hdrStore := mp.dataPool.ShardHeaders() - if hdrStore == nil { - return nil, nil, nil, process.ErrNilCacher + shardBlocksPool := mp.dataPool.ShardHeaders() + if shardBlocksPool == nil { + return nil, nil, nil, process.ErrNilShardBlockPool } hashAndBlockMap := make(map[uint32][]*hashAndHdr, mp.shardCoordinator.NumberOfShards()) @@ -1186,8 +1249,8 @@ func (mp *metaProcessor) getOrderedHdrs(round uint64) ([]*block.Header, [][]byte } // get keys and arrange them into shards - for _, key := range hdrStore.Keys() { - val, _ := hdrStore.Peek(key) + for _, key := range shardBlocksPool.Keys() { + val, _ := shardBlocksPool.Peek(key) if val == nil { continue } diff --git a/process/block/metablock_test.go b/process/block/metablock_test.go index 741ec1aab42..dbf2b4bd0c6 100644 --- a/process/block/metablock_test.go +++ b/process/block/metablock_test.go @@ -493,7 +493,7 @@ func TestMetaProcessor_ProcessBlockWithErrOnVerifyStateRootCallShouldRevertState hdr.ShardInfo = make([]block.ShardData, 0) err := mp.ProcessBlock(blkc, hdr, body, haveTime) - assert.Equal(t, process.ErrRootStateMissmatch, err) + assert.Equal(t, process.ErrRootStateDoesNotMatch, err) assert.True(t, wasCalled) } @@ -689,6 +689,9 @@ func TestMetaProcessor_CommitBlockStorageFailsForHeaderShouldErr(t *testing.T) { AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeaders []data.HeaderHandler, finalHeadersHashes [][]byte) error { return nil }, + GetHighestFinalBlockNonceCalled: func() uint64 { + return 0 + }, }, mock.NewOneShardCoordinatorMock(), &mock.HasherStub{}, @@ -810,6 +813,9 @@ func TestMetaProcessor_CommitBlockOkValsShouldWork(t *testing.T) { return errors.New("should have not got here") }, + GetHighestFinalBlockNonceCalled: func() uint64 { + return 0 + }, } hasher := &mock.HasherStub{} blockHeaderUnit := &mock.StorerStub{ @@ -2197,7 +2203,7 @@ func TestMetaProcessor_IsHdrConstructionValid(t *testing.T) { currHdr.Nonce = 0 prevHdr.Nonce = 0 err = mp.IsHdrConstructionValid(currHdr, prevHdr) - assert.Equal(t, err, process.ErrRootStateMissmatch) + assert.Equal(t, err, process.ErrRootStateDoesNotMatch) currHdr.Nonce = 0 prevHdr.Nonce = 0 @@ -2209,7 +2215,7 @@ func TestMetaProcessor_IsHdrConstructionValid(t *testing.T) { prevHdr.Nonce = 45 prevHdr.Round = currHdr.Round + 1 err = mp.IsHdrConstructionValid(currHdr, prevHdr) - assert.Equal(t, err, process.ErrLowerRoundInOtherChainBlock) + assert.Equal(t, err, process.ErrLowerRoundInBlock) prevHdr.Round = currHdr.Round - 1 currHdr.Nonce = prevHdr.Nonce + 2 @@ -2217,16 +2223,17 @@ func TestMetaProcessor_IsHdrConstructionValid(t *testing.T) { assert.Equal(t, err, process.ErrWrongNonceInBlock) currHdr.Nonce = prevHdr.Nonce + 1 - prevHdr.RandSeed = []byte("randomwrong") + currHdr.PrevHash = []byte("wronghash") err = mp.IsHdrConstructionValid(currHdr, prevHdr) - assert.Equal(t, err, process.ErrRandSeedMismatch) + assert.Equal(t, err, process.ErrBlockHashDoesNotMatch) - prevHdr.RandSeed = currRandSeed - currHdr.PrevHash = []byte("wronghash") + prevHdr.RandSeed = []byte("randomwrong") + currHdr.PrevHash, _ = mp.ComputeHeaderHash(prevHdr) err = mp.IsHdrConstructionValid(currHdr, prevHdr) - assert.Equal(t, err, process.ErrHashDoesNotMatchInOtherChainBlock) + assert.Equal(t, err, process.ErrRandSeedDoesNotMatch) currHdr.PrevHash = prevHash + prevHdr.RandSeed = currRandSeed 
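Both the metachain and the shard variants of getFinalityAttestingHeaders introduced in this patch keep only pooled headers whose nonce lies in a small window just above the highest notarized nonce. A compact, runnable sketch of that predicate (illustrative code, not part of the patch):

package main

import "fmt"

// isFinalityAttesting reports whether a pooled header can attest the finality
// of the header with highestNonce: it must be built strictly on top of it and
// be at most `finality` blocks ahead of it.
func isFinalityAttesting(hdrNonce, highestNonce, finality uint64) bool {
	return hdrNonce > highestNonce && hdrNonce <= highestNonce+finality
}

func main() {
	// with highestNonce = 10 and finality = 1, only the header with nonce 11 qualifies
	for nonce := uint64(9); nonce <= 12; nonce++ {
		fmt.Printf("nonce %d -> %v\n", nonce, isFinalityAttesting(nonce, 10, 1))
	}
}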
prevHdr.RootHash = []byte("prevRootHash") err = mp.IsHdrConstructionValid(currHdr, prevHdr) assert.Nil(t, err) diff --git a/process/block/preprocess/basePreProcess.go b/process/block/preprocess/basePreProcess.go index 1c3d0426a6e..b1355dc0f20 100644 --- a/process/block/preprocess/basePreProcess.go +++ b/process/block/preprocess/basePreProcess.go @@ -71,21 +71,6 @@ func (bpp *basePreProcess) removeDataFromPools(body block.Body, miniBlockPool st return nil } -func (bpp *basePreProcess) restoreMiniBlock( - miniBlock *block.MiniBlock, - miniBlockHash []byte, - miniBlockPool storage.Cacher, -) []byte { - - miniBlockPool.Put(miniBlockHash, miniBlock) - //TODO: Analyze what is the scope of this check and return besides tests. Refactor this method - if miniBlock.SenderShardID != bpp.shardCoordinator.SelfId() { - return miniBlockHash - } - - return nil -} - func (bpp *basePreProcess) createMarshalizedData(txHashes [][]byte, forBlock *txsForBlock) ([][]byte, error) { mrsTxs := make([][]byte, 0) for _, txHash := range txHashes { diff --git a/process/block/preprocess/smartContractResults.go b/process/block/preprocess/smartContractResults.go index 4b1b20ca62f..8f50af74e88 100644 --- a/process/block/preprocess/smartContractResults.go +++ b/process/block/preprocess/smartContractResults.go @@ -130,13 +130,11 @@ func (scr *smartContractResults) RemoveTxBlockFromPools(body block.Body, miniBlo func (scr *smartContractResults) RestoreTxBlockIntoPools( body block.Body, miniBlockPool storage.Cacher, -) (int, map[int][]byte, error) { +) (int, error) { if miniBlockPool == nil || miniBlockPool.IsInterfaceNil() { - return 0, nil, process.ErrNilMiniBlockPool + return 0, process.ErrNilMiniBlockPool } - miniBlockHashes := make(map[int][]byte) - scrRestored := 0 for i := 0; i < len(body); i++ { miniBlock := body[i] @@ -147,41 +145,40 @@ func (scr *smartContractResults) RestoreTxBlockIntoPools( strCache := process.ShardCacherIdentifier(miniBlock.SenderShardID, miniBlock.ReceiverShardID) scrBuff, err := scr.storage.GetAll(dataRetriever.UnsignedTransactionUnit, miniBlock.TxHashes) if err != nil { - return scrRestored, miniBlockHashes, err + return scrRestored, err } for txHash, txBuff := range scrBuff { tx := smartContractResult.SmartContractResult{} err = scr.marshalizer.Unmarshal(&tx, txBuff) if err != nil { - return scrRestored, miniBlockHashes, err + return scrRestored, err } scr.scrPool.AddData([]byte(txHash), &tx, strCache) err = scr.storage.GetStorer(dataRetriever.UnsignedTransactionUnit).Remove([]byte(txHash)) if err != nil { - return scrRestored, miniBlockHashes, err + return scrRestored, err } } miniBlockHash, err := core.CalculateHash(scr.marshalizer, scr.hasher, miniBlock) if err != nil { - return scrRestored, miniBlockHashes, err + return scrRestored, err } - restoredHash := scr.restoreMiniBlock(miniBlock, miniBlockHash, miniBlockPool) + miniBlockPool.Put(miniBlockHash, miniBlock) err = scr.storage.GetStorer(dataRetriever.MiniBlockUnit).Remove(miniBlockHash) if err != nil { - return scrRestored, miniBlockHashes, err + return scrRestored, err } - miniBlockHashes[i] = restoredHash scrRestored += len(miniBlock.TxHashes) } - return scrRestored, miniBlockHashes, nil + return scrRestored, nil } // ProcessBlockTransactions processes all the smartContractResult from the block.Body, updates the state diff --git a/process/block/preprocess/smartContractResults_test.go b/process/block/preprocess/smartContractResults_test.go index b6686f174b8..2452bc28878 100644 --- a/process/block/preprocess/smartContractResults_test.go 
+++ b/process/block/preprocess/smartContractResults_test.go @@ -794,9 +794,8 @@ func TestScrsPreprocessor_RestoreTxBlockIntoPools(t *testing.T) { body = append(body, &miniblock) miniblockPool := mock.NewCacherMock() - scrRestored, miniBlockHashes, err := scr.RestoreTxBlockIntoPools(body, miniblockPool) + scrRestored, err := scr.RestoreTxBlockIntoPools(body, miniblockPool) - assert.Equal(t, miniBlockHashes[0], []uint8([]byte(nil))) assert.Equal(t, scrRestored, 1) assert.Nil(t, err) } @@ -826,7 +825,7 @@ func TestScrsPreprocessor__RestoreTxBlockIntoPoolsNilMiniblockPoolShouldErr(t *t miniblockPool := storage.Cacher(nil) - _, _, err := scr.RestoreTxBlockIntoPools(body, miniblockPool) + _, err := scr.RestoreTxBlockIntoPools(body, miniblockPool) assert.NotNil(t, err) assert.Equal(t, err, process.ErrNilMiniBlockPool) diff --git a/process/block/preprocess/transactions.go b/process/block/preprocess/transactions.go index 8eee907dcb5..02850fcb6ac 100644 --- a/process/block/preprocess/transactions.go +++ b/process/block/preprocess/transactions.go @@ -4,6 +4,7 @@ import ( "bytes" "fmt" "sort" + "sync" "time" "github.com/ElrondNetwork/elrond-go/core" @@ -34,6 +35,9 @@ type transactions struct { storage dataRetriever.StorageService txProcessor process.TransactionProcessor accounts state.AccountsAdapter + orderedTxs map[string][]*transaction.Transaction + orderedTxHashes map[string][][]byte + mutOrderedTxs sync.RWMutex } // NewTransactionPreprocessor creates a new transaction preprocessor object @@ -92,6 +96,8 @@ func NewTransactionPreprocessor( txs.txPool.RegisterHandler(txs.receivedTransaction) txs.txsForCurrBlock.txHashAndInfo = make(map[string]*txInfo) + txs.orderedTxs = make(map[string][]*transaction.Transaction) + txs.orderedTxHashes = make(map[string][][]byte) return &txs, nil } @@ -142,8 +148,7 @@ func (txs *transactions) RemoveTxBlockFromPools(body block.Body, miniBlockPool s func (txs *transactions) RestoreTxBlockIntoPools( body block.Body, miniBlockPool storage.Cacher, -) (int, map[int][]byte, error) { - miniBlockHashes := make(map[int][]byte) +) (int, error) { txsRestored := 0 for i := 0; i < len(body); i++ { @@ -151,41 +156,40 @@ func (txs *transactions) RestoreTxBlockIntoPools( strCache := process.ShardCacherIdentifier(miniBlock.SenderShardID, miniBlock.ReceiverShardID) txsBuff, err := txs.storage.GetAll(dataRetriever.TransactionUnit, miniBlock.TxHashes) if err != nil { - return txsRestored, miniBlockHashes, err + return txsRestored, err } for txHash, txBuff := range txsBuff { tx := transaction.Transaction{} err = txs.marshalizer.Unmarshal(&tx, txBuff) if err != nil { - return txsRestored, miniBlockHashes, err + return txsRestored, err } txs.txPool.AddData([]byte(txHash), &tx, strCache) err = txs.storage.GetStorer(dataRetriever.TransactionUnit).Remove([]byte(txHash)) if err != nil { - return txsRestored, miniBlockHashes, err + return txsRestored, err } } miniBlockHash, err := core.CalculateHash(txs.marshalizer, txs.hasher, miniBlock) if err != nil { - return txsRestored, miniBlockHashes, err + return txsRestored, err } - restoredHash := txs.restoreMiniBlock(miniBlock, miniBlockHash, miniBlockPool) + miniBlockPool.Put(miniBlockHash, miniBlock) err = txs.storage.GetStorer(dataRetriever.MiniBlockUnit).Remove(miniBlockHash) if err != nil { - return txsRestored, miniBlockHashes, err + return txsRestored, err } - miniBlockHashes[i] = restoredHash txsRestored += len(miniBlock.TxHashes) } - return txsRestored, miniBlockHashes, nil + return txsRestored, nil } // ProcessBlockTransactions processes 
all the transaction from the block.Body, updates the state @@ -267,6 +271,11 @@ func (txs *transactions) CreateBlockStarted() { txs.txsForCurrBlock.missingTxs = 0 txs.txsForCurrBlock.txHashAndInfo = make(map[string]*txInfo) txs.txsForCurrBlock.mutTxsForBlock.Unlock() + + txs.mutOrderedTxs.Lock() + txs.orderedTxs = make(map[string][]*transaction.Transaction) + txs.orderedTxHashes = make(map[string][][]byte) + txs.mutOrderedTxs.Unlock() } // RequestBlockTransactions request for transactions if missing from a block.Body @@ -420,12 +429,19 @@ func isSmartContractAddress(rcvAddress []byte) bool { } // CreateAndProcessMiniBlock creates the miniblock from storage and processes the transactions added into the miniblock -func (txs *transactions) CreateAndProcessMiniBlock(sndShardId, dstShardId uint32, spaceRemained int, haveTime func() bool, round uint64) (*block.MiniBlock, error) { - strCache := process.ShardCacherIdentifier(sndShardId, dstShardId) - txStore := txs.txPool.ShardDataStore(strCache) +func (txs *transactions) CreateAndProcessMiniBlock( + sndShardId uint32, + dstShardId uint32, + spaceRemained int, + haveTime func() bool, + round uint64, +) (*block.MiniBlock, error) { + + var orderedTxs []*transaction.Transaction + var orderedTxHashes [][]byte timeBefore := time.Now() - orderedTxes, orderedTxHashes, err := SortTxByNonce(txStore) + orderedTxs, orderedTxHashes, err := txs.computeOrderedTxs(sndShardId, dstShardId) timeAfter := time.Now() if err != nil { @@ -434,22 +450,22 @@ func (txs *transactions) CreateAndProcessMiniBlock(sndShardId, dstShardId uint32 } if !haveTime() { - log.Info(fmt.Sprintf("time is up after ordered %d txs in %v sec\n", len(orderedTxes), timeAfter.Sub(timeBefore).Seconds())) + log.Info(fmt.Sprintf("time is up after ordered %d txs in %v sec\n", len(orderedTxs), timeAfter.Sub(timeBefore).Seconds())) return nil, process.ErrTimeIsOut } - log.Debug(fmt.Sprintf("time elapsed to ordered %d txs: %v sec\n", len(orderedTxes), timeAfter.Sub(timeBefore).Seconds())) + log.Debug(fmt.Sprintf("time elapsed to ordered %d txs: %v sec\n", len(orderedTxs), timeAfter.Sub(timeBefore).Seconds())) miniBlock := &block.MiniBlock{} miniBlock.SenderShardID = sndShardId miniBlock.ReceiverShardID = dstShardId miniBlock.TxHashes = make([][]byte, 0) miniBlock.Type = block.TxBlock - log.Info(fmt.Sprintf("creating mini blocks has been started: have %d txs in pool for shard id %d\n", len(orderedTxes), miniBlock.ReceiverShardID)) + log.Info(fmt.Sprintf("creating mini blocks has been started: have %d txs in pool for shard id %d\n", len(orderedTxs), miniBlock.ReceiverShardID)) addedTxs := 0 addedGasLimitPerCrossShardMiniblock := uint64(0) - for index := range orderedTxes { + for index := range orderedTxs { if !haveTime() { break } @@ -459,8 +475,8 @@ func (txs *transactions) CreateAndProcessMiniBlock(sndShardId, dstShardId uint32 } currTxGasLimit := minGasLimitForTx - if isSmartContractAddress(orderedTxes[index].RcvAddr) { - currTxGasLimit = orderedTxes[index].GasLimit + if isSmartContractAddress(orderedTxs[index].RcvAddr) { + currTxGasLimit = orderedTxs[index].GasLimit } if addedGasLimitPerCrossShardMiniblock+currTxGasLimit > process.MaxGasLimitPerMiniBlock { @@ -472,7 +488,7 @@ func (txs *transactions) CreateAndProcessMiniBlock(sndShardId, dstShardId uint32 // execute transaction to change the trie root hash err := txs.processAndRemoveBadTransaction( orderedTxHashes[index], - orderedTxes[index], + orderedTxs[index], round, miniBlock.SenderShardID, miniBlock.ReceiverShardID, @@ -492,7 +508,7 @@ func 
(txs *transactions) CreateAndProcessMiniBlock(sndShardId, dstShardId uint32 addedGasLimitPerCrossShardMiniblock += currTxGasLimit if addedTxs >= spaceRemained { // max transactions count in one block was reached - log.Info(fmt.Sprintf("max txs accepted in one block is reached: added %d txs from %d txs\n", len(miniBlock.TxHashes), len(orderedTxes))) + log.Info(fmt.Sprintf("max txs accepted in one block is reached: added %d txs from %d txs\n", len(miniBlock.TxHashes), len(orderedTxs))) return miniBlock, nil } } @@ -500,6 +516,34 @@ func (txs *transactions) CreateAndProcessMiniBlock(sndShardId, dstShardId uint32 return miniBlock, nil } +func (txs *transactions) computeOrderedTxs( + sndShardId uint32, + dstShardId uint32, +) ([]*transaction.Transaction, [][]byte, error) { + + var err error + + strCache := process.ShardCacherIdentifier(sndShardId, dstShardId) + txStore := txs.txPool.ShardDataStore(strCache) + + txs.mutOrderedTxs.RLock() + orderedTxs := txs.orderedTxs[strCache] + orderedTxHashes := txs.orderedTxHashes[strCache] + txs.mutOrderedTxs.RUnlock() + + alreadyOrdered := len(orderedTxs) > 0 + if !alreadyOrdered { + orderedTxs, orderedTxHashes, err = SortTxByNonce(txStore) + + txs.mutOrderedTxs.Lock() + txs.orderedTxs[strCache] = orderedTxs + txs.orderedTxHashes[strCache] = orderedTxHashes + txs.mutOrderedTxs.Unlock() + } + + return orderedTxs, orderedTxHashes, err +} + // ProcessMiniBlock processes all the transactions from a and saves the processed transactions in local cache complete miniblock func (txs *transactions) ProcessMiniBlock(miniBlock *block.MiniBlock, haveTime func() bool, round uint64) error { if miniBlock.Type != block.TxBlock { diff --git a/process/block/shardblock.go b/process/block/shardblock.go index a3f8392307f..5889aa8e551 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -143,6 +143,14 @@ func (sp *shardProcessor) ProcessBlock( err := sp.checkBlockValidity(chainHandler, headerHandler, bodyHandler) if err != nil { + if err == process.ErrBlockHashDoesNotMatch { + log.Info(fmt.Sprintf("requested missing shard header with hash %s for shard %d\n", + core.ToB64(headerHandler.GetPrevHash()), + headerHandler.GetShardID())) + + go sp.onRequestHeaderHandler(headerHandler.GetShardID(), headerHandler.GetPrevHash()) + } + return err } @@ -231,7 +239,7 @@ func (sp *shardProcessor) ProcessBlock( } if !sp.verifyStateRoot(header.GetRootHash()) { - err = process.ErrRootStateMissmatch + err = process.ErrRootStateDoesNotMatch return err } @@ -286,7 +294,7 @@ func (sp *shardProcessor) checkMetaHeadersValidityAndFinality(header *block.Head tmpNotedHdr = metaHdr } - err = sp.checkMetaHdrFinality(tmpNotedHdr, header.Round) + err = sp.checkMetaHdrFinality(tmpNotedHdr) if err != nil { return err } @@ -295,12 +303,12 @@ func (sp *shardProcessor) checkMetaHeadersValidityAndFinality(header *block.Head } // check if shard headers are final by checking if newer headers were constructed upon them -func (sp *shardProcessor) checkMetaHdrFinality(header data.HeaderHandler, round uint64) error { +func (sp *shardProcessor) checkMetaHdrFinality(header data.HeaderHandler) error { if header == nil || header.IsInterfaceNil() { return process.ErrNilBlockHeader } - sortedMetaHdrs, err := sp.getOrderedMetaBlocks(round) + sortedMetaHdrs, err := sp.getFinalityAttestingHeaders(header, process.MetaBlockFinality) if err != nil { return err } @@ -334,6 +342,53 @@ func (sp *shardProcessor) checkMetaHdrFinality(header data.HeaderHandler, round return nil } +func (sp *shardProcessor) 
getFinalityAttestingHeaders( + highestNonceHdr data.HeaderHandler, + finality uint64, +) ([]*hashAndHdr, error) { + + if highestNonceHdr == nil || highestNonceHdr.IsInterfaceNil() { + return nil, process.ErrNilBlockHeader + } + + metaBlockPool := sp.dataPool.MetaBlocks() + if metaBlockPool == nil { + return nil, process.ErrNilMetaBlockPool + } + + orderedMetaBlocks := make([]*hashAndHdr, 0) + // get keys and arrange them into shards + for _, key := range metaBlockPool.Keys() { + val, _ := metaBlockPool.Peek(key) + if val == nil { + continue + } + + hdr, ok := val.(*block.MetaBlock) + if !ok { + continue + } + + isHdrNonceLowerOrEqualThanHighestNonce := hdr.GetNonce() <= highestNonceHdr.GetNonce() + isHdrNonceHigherThanFinalNonce := hdr.GetNonce() > highestNonceHdr.GetNonce()+finality + + if isHdrNonceLowerOrEqualThanHighestNonce || + isHdrNonceHigherThanFinalNonce { + continue + } + + orderedMetaBlocks = append(orderedMetaBlocks, &hashAndHdr{hdr: hdr, hash: key}) + } + + if len(orderedMetaBlocks) > 1 { + sort.Slice(orderedMetaBlocks, func(i, j int) bool { + return orderedMetaBlocks[i].hdr.GetNonce() < orderedMetaBlocks[j].hdr.GetNonce() + }) + } + + return orderedMetaBlocks, nil +} + // check if header has the same miniblocks as presented in body func (sp *shardProcessor) checkHeaderBodyCorrelation(hdr *block.Header, body block.Body) error { mbHashesFromHdr := make(map[string]*block.MiniBlockHeader) @@ -437,7 +492,7 @@ func (sp *shardProcessor) RestoreBlockIntoPools(headerHandler data.HeaderHandler return process.ErrWrongTypeAssertion } - restoredTxNr, _, err := sp.txCoordinator.RestoreBlockDataFromStorage(body) + restoredTxNr, err := sp.txCoordinator.RestoreBlockDataFromStorage(body) go sp.txCounter.subtractRestoredTxs(restoredTxNr) if err != nil { return err @@ -653,7 +708,7 @@ func (sp *shardProcessor) CommitBlock( return err } - log.Info(fmt.Sprintf("shardBlock with nonce %d and hash %s has been committed successfully\n", + log.Info(fmt.Sprintf("shard block with nonce %d and hash %s has been committed successfully\n", header.Nonce, core.ToB64(headerHash))) @@ -674,7 +729,7 @@ func (sp *shardProcessor) CommitBlock( log.Debug(errNotCritical.Error()) } - log.Info(fmt.Sprintf("shardBlock with nonce %d is the highest block notarized by metachain for shard %d\n", + log.Info(fmt.Sprintf("shard block with nonce %d is the highest final block in shard %d\n", sp.forkDetector.GetHighestFinalBlockNonce(), sp.shardCoordinator.SelfId())) @@ -1134,8 +1189,8 @@ func (sp *shardProcessor) getAllMiniBlockDstMeFromMeta(round uint64, metaHashes } func (sp *shardProcessor) getOrderedMetaBlocks(round uint64) ([]*hashAndHdr, error) { - metaBlockCache := sp.dataPool.MetaBlocks() - if metaBlockCache == nil { + metaBlocksPool := sp.dataPool.MetaBlocks() + if metaBlocksPool == nil { return nil, process.ErrNilMetaBlockPool } @@ -1145,8 +1200,8 @@ func (sp *shardProcessor) getOrderedMetaBlocks(round uint64) ([]*hashAndHdr, err } orderedMetaBlocks := make([]*hashAndHdr, 0) - for _, key := range metaBlockCache.Keys() { - val, _ := metaBlockCache.Peek(key) + for _, key := range metaBlocksPool.Keys() { + val, _ := metaBlocksPool.Peek(key) if val == nil { continue } @@ -1169,9 +1224,11 @@ func (sp *shardProcessor) getOrderedMetaBlocks(round uint64) ([]*hashAndHdr, err orderedMetaBlocks = append(orderedMetaBlocks, &hashAndHdr{hdr: hdr, hash: key}) } - sort.Slice(orderedMetaBlocks, func(i, j int) bool { - return orderedMetaBlocks[i].hdr.GetNonce() < orderedMetaBlocks[j].hdr.GetNonce() - }) + if len(orderedMetaBlocks) > 1 { 
+ sort.Slice(orderedMetaBlocks, func(i, j int) bool { + return orderedMetaBlocks[i].hdr.GetNonce() < orderedMetaBlocks[j].hdr.GetNonce() + }) + } return orderedMetaBlocks, nil } diff --git a/process/block/shardblock_test.go b/process/block/shardblock_test.go index 5116eb7bd3e..390ec1f7f78 100644 --- a/process/block/shardblock_test.go +++ b/process/block/shardblock_test.go @@ -714,7 +714,7 @@ func TestShardProcessor_ProcessBlockWithErrOnVerifyStateRootCallShouldRevertStat sp, _ := blproc.NewShardProcessor(arguments) // should return err err := sp.ProcessBlock(blkc, &hdr, body, haveTime) - assert.Equal(t, process.ErrRootStateMissmatch, err) + assert.Equal(t, process.ErrRootStateDoesNotMatch, err) assert.True(t, wasCalled) } @@ -1603,7 +1603,7 @@ func TestShardProcessor_CommitBlockStorageFailsForBodyShouldWork(t *testing.T) { return nil }, GetHighestFinalBlockNonceCalled: func() uint64 { - return 0 + return 0 }, } arguments.BlocksTracker = &mock.BlocksTrackerMock{ @@ -3174,7 +3174,7 @@ func TestShardProcessor_IsHdrConstructionValid(t *testing.T) { currHdr.Nonce = 0 prevHdr.Nonce = 0 err = sp.IsHdrConstructionValid(currHdr, prevHdr) - assert.Equal(t, err, process.ErrRootStateMissmatch) + assert.Equal(t, err, process.ErrRootStateDoesNotMatch) currHdr.Nonce = 0 prevHdr.Nonce = 0 @@ -3186,7 +3186,7 @@ func TestShardProcessor_IsHdrConstructionValid(t *testing.T) { prevHdr.Nonce = 45 prevHdr.Round = currHdr.Round + 1 err = sp.IsHdrConstructionValid(currHdr, prevHdr) - assert.Equal(t, err, process.ErrLowerRoundInOtherChainBlock) + assert.Equal(t, err, process.ErrLowerRoundInBlock) prevHdr.Round = currHdr.Round - 1 currHdr.Nonce = prevHdr.Nonce + 2 @@ -3194,16 +3194,17 @@ func TestShardProcessor_IsHdrConstructionValid(t *testing.T) { assert.Equal(t, err, process.ErrWrongNonceInBlock) currHdr.Nonce = prevHdr.Nonce + 1 - prevHdr.RandSeed = []byte("randomwrong") + currHdr.PrevHash = []byte("wronghash") err = sp.IsHdrConstructionValid(currHdr, prevHdr) - assert.Equal(t, err, process.ErrRandSeedMismatch) + assert.Equal(t, err, process.ErrBlockHashDoesNotMatch) - prevHdr.RandSeed = currRandSeed - currHdr.PrevHash = []byte("wronghash") + prevHdr.RandSeed = []byte("randomwrong") + currHdr.PrevHash, _ = sp.ComputeHeaderHash(prevHdr) err = sp.IsHdrConstructionValid(currHdr, prevHdr) - assert.Equal(t, err, process.ErrHashDoesNotMatchInOtherChainBlock) + assert.Equal(t, err, process.ErrRandSeedDoesNotMatch) currHdr.PrevHash = prevHash + prevHdr.RandSeed = currRandSeed prevHdr.RootHash = []byte("prevRootHash") err = sp.IsHdrConstructionValid(currHdr, prevHdr) assert.Nil(t, err) @@ -3895,7 +3896,7 @@ func TestShardProcessor_GetHighestHdrForOwnShardFromMetachainNothingToProcess(t arguments := CreateMockArgumentsMultiShard() sp, _ := blproc.NewShardProcessor(arguments) - hdrs,_,_ := sp.GetHighestHdrForOwnShardFromMetachain(nil) + hdrs, _, _ := sp.GetHighestHdrForOwnShardFromMetachain(nil) assert.NotNil(t, hdrs) assert.Equal(t, uint64(0), hdrs[0].GetNonce()) diff --git a/process/constants.go b/process/constants.go index 2a5be77344e..b7653e18c3f 100644 --- a/process/constants.go +++ b/process/constants.go @@ -33,6 +33,7 @@ const MetaBlockFinality = 1 const MaxHeaderRequestsAllowed = 10 const MaxItemsInBlock = 15000 const MinItemsInBlock = 1000 +const MaxNoncesDifference = 5 // TODO - calculate exactly in case of the VM, for every VM to have a similar constant, operations / seconds const MaxGasLimitPerMiniBlock = uint64(100000) diff --git a/process/coordinator/process.go b/process/coordinator/process.go index 
a13526cade9..d29817d57f9 100644 --- a/process/coordinator/process.go +++ b/process/coordinator/process.go @@ -250,28 +250,25 @@ func (tc *transactionCoordinator) SaveBlockDataToStorage(body block.Body) error } // RestoreBlockDataFromStorage restores block data from storage to pool -func (tc *transactionCoordinator) RestoreBlockDataFromStorage(body block.Body) (int, map[int][][]byte, error) { +func (tc *transactionCoordinator) RestoreBlockDataFromStorage(body block.Body) (int, error) { separatedBodies := tc.separateBodyByType(body) var errFound error localMutex := sync.Mutex{} totalRestoredTx := 0 - restoredMbHashes := make(map[int][][]byte) wg := sync.WaitGroup{} wg.Add(len(separatedBodies)) for key, value := range separatedBodies { go func(blockType block.Type, blockBody block.Body) { - restoredMbs := make(map[int][]byte) - preproc := tc.getPreProcessor(blockType) if preproc == nil { wg.Done() return } - restoredTxs, restoredMbs, err := preproc.RestoreTxBlockIntoPools(blockBody, tc.miniBlockPool) + restoredTxs, err := preproc.RestoreTxBlockIntoPools(blockBody, tc.miniBlockPool) if err != nil { log.Debug(err.Error()) @@ -283,10 +280,6 @@ func (tc *transactionCoordinator) RestoreBlockDataFromStorage(body block.Body) ( localMutex.Lock() totalRestoredTx += restoredTxs - for shId, mbHash := range restoredMbs { - restoredMbHashes[shId] = append(restoredMbHashes[shId], mbHash) - } - localMutex.Unlock() wg.Done() @@ -295,7 +288,7 @@ func (tc *transactionCoordinator) RestoreBlockDataFromStorage(body block.Body) ( wg.Wait() - return totalRestoredTx, restoredMbHashes, errFound + return totalRestoredTx, errFound } // RemoveBlockDataFromPool deletes block data from pools diff --git a/process/coordinator/process_test.go b/process/coordinator/process_test.go index 83b69dfee57..34fce3950cd 100644 --- a/process/coordinator/process_test.go +++ b/process/coordinator/process_test.go @@ -1204,10 +1204,9 @@ func TestTransactionCoordinator_RestoreBlockDataFromStorage(t *testing.T) { assert.Nil(t, err) assert.NotNil(t, tc) - nrTxs, mbs, err := tc.RestoreBlockDataFromStorage(nil) + nrTxs, err := tc.RestoreBlockDataFromStorage(nil) assert.Nil(t, err) assert.Equal(t, 0, nrTxs) - assert.Equal(t, 0, len(mbs)) body := block.Body{} miniBlock := &block.MiniBlock{SenderShardID: 1, ReceiverShardID: 0, Type: block.TxBlock, TxHashes: [][]byte{txHash}} @@ -1216,9 +1215,8 @@ func TestTransactionCoordinator_RestoreBlockDataFromStorage(t *testing.T) { tc.RequestBlockTransactions(body) err = tc.SaveBlockDataToStorage(body) assert.Nil(t, err) - nrTxs, mbs, err = tc.RestoreBlockDataFromStorage(body) + nrTxs, err = tc.RestoreBlockDataFromStorage(body) assert.Equal(t, 1, nrTxs) - assert.Equal(t, 1, len(mbs)) assert.Nil(t, err) txHashToAsk := []byte("tx_hashnotinPool") @@ -1228,9 +1226,8 @@ func TestTransactionCoordinator_RestoreBlockDataFromStorage(t *testing.T) { err = tc.SaveBlockDataToStorage(body) assert.Equal(t, process.ErrMissingTransaction, err) - nrTxs, mbs, err = tc.RestoreBlockDataFromStorage(body) + nrTxs, err = tc.RestoreBlockDataFromStorage(body) assert.Equal(t, 1, nrTxs) - assert.Equal(t, 1, len(mbs)) assert.NotNil(t, err) } diff --git a/process/errors.go b/process/errors.go index e111176f202..d6833eebfcd 100644 --- a/process/errors.go +++ b/process/errors.go @@ -91,11 +91,14 @@ var ErrNilRootHash = errors.New("root hash is nil") // ErrWrongNonceInBlock signals the nonce in block is different than expected nonce var ErrWrongNonceInBlock = errors.New("wrong nonce in block") -// ErrBlockHashDoesNotMatch signals the hash 
of the block is not matching with the previous one +// ErrWrongNonceInOtherChainBlock signals the nonce in block is different than expected nonce +var ErrWrongNonceInOtherChainBlock = errors.New("wrong nonce in other chain block") + +// ErrBlockHashDoesNotMatch signals that header hash does not match with the previous one var ErrBlockHashDoesNotMatch = errors.New("block hash does not match") -// ErrHashDoesNotMatchInOtherChainBlock signals that header hash for one shard is not match with the previous one -var ErrHashDoesNotMatchInOtherChainBlock = errors.New("block hash does not match with the last committed for this shard") +// ErrBlockHashDoesNotMatchInOtherChainBlock signals that header hash does not match with the previous one +var ErrBlockHashDoesNotMatchInOtherChainBlock = errors.New("block hash does not match in other chain block") // ErrMissingTransaction signals that one transaction is missing var ErrMissingTransaction = errors.New("missing transaction") @@ -106,8 +109,11 @@ var ErrMarshalWithoutSuccess = errors.New("marshal without success") // ErrUnmarshalWithoutSuccess signals that unmarshal some data was not done with success var ErrUnmarshalWithoutSuccess = errors.New("unmarshal without success") -// ErrRootStateMissmatch signals that persist some data was not done with success -var ErrRootStateMissmatch = errors.New("root state does not match") +// ErrRootStateDoesNotMatch signals that root state does not match +var ErrRootStateDoesNotMatch = errors.New("root state does not match") + +// ErrRootStateDoesNotMatchInOtherChainBlock signals that root state does not match +var ErrRootStateDoesNotMatchInOtherChainBlock = errors.New("root state does not match in other chain block") // ErrAccountStateDirty signals that the accounts were modified before starting the current modification var ErrAccountStateDirty = errors.New("accountState was dirty before starting to change") @@ -229,6 +235,9 @@ var ErrNilMiniBlockPool = errors.New("nil mini block pool") // ErrNilMetaBlockPool signals that a nil meta blocks pool was used var ErrNilMetaBlockPool = errors.New("nil meta block pool") +// ErrNilShardBlockPool signals that a nil shard blocks pool was used +var ErrNilShardBlockPool = errors.New("nil shard block pool") + // ErrNilTxProcessor signals that a nil transactions processor was used var ErrNilTxProcessor = errors.New("nil transactions processor") @@ -292,14 +301,17 @@ var ErrNilPrevRandSeed = errors.New("provided previous rand seed is nil") // ErrNilRequestHeaderHandlerByNonce signals that a nil header request handler by nonce func was provided var ErrNilRequestHeaderHandlerByNonce = errors.New("nil request header handler by nonce") -// ErrLowerRoundInOtherChainBlock signals that header round for one shard is too low for processing it -var ErrLowerRoundInOtherChainBlock = errors.New("header round is lower than last committed for this shard") +// ErrLowerRoundInOtherChainBlock signals that header round too low for processing it +var ErrLowerRoundInOtherChainBlock = errors.New("header round is lower than last committed in other chain block") -// ErrLowerRoundInBlock signals that a header round is too low for processing +// ErrLowerRoundInBlock signals that a header round is too low for processing it var ErrLowerRoundInBlock = errors.New("header round is lower than last committed") -// ErrRandSeedMismatch signals that random seeds are not equal -var ErrRandSeedMismatch = errors.New("random seeds do not match") +// ErrRandSeedDoesNotMatch signals that random seed does not match with the 
previous one +var ErrRandSeedDoesNotMatch = errors.New("random seed do not match") + +// ErrRandSeedDoesNotMatchInOtherChainBlock signals that seed does not match with the previous one +var ErrRandSeedDoesNotMatchInOtherChainBlock = errors.New("random seed does not match in other chain block") // ErrHeaderNotFinal signals that header is not final and it should be var ErrHeaderNotFinal = errors.New("header in metablock is not final") diff --git a/process/interface.go b/process/interface.go index cbcd56556e2..8786577153b 100644 --- a/process/interface.go +++ b/process/interface.go @@ -55,7 +55,7 @@ type TransactionCoordinator interface { IsDataPreparedForProcessing(haveTime func() time.Duration) error SaveBlockDataToStorage(body block.Body) error - RestoreBlockDataFromStorage(body block.Body) (int, map[int][][]byte, error) + RestoreBlockDataFromStorage(body block.Body) (int, error) RemoveBlockDataFromPool(body block.Body) error ProcessBlockTransaction(body block.Body, round uint64, haveTime func() time.Duration) error @@ -98,7 +98,7 @@ type PreProcessor interface { IsDataPrepared(requestedTxs int, haveTime func() time.Duration) error RemoveTxBlockFromPools(body block.Body, miniBlockPool storage.Cacher) error - RestoreTxBlockIntoPools(body block.Body, miniBlockPool storage.Cacher) (int, map[int][]byte, error) + RestoreTxBlockIntoPools(body block.Body, miniBlockPool storage.Cacher) (int, error) SaveTxBlockToStorage(body block.Body) error ProcessBlockTransactions(body block.Body, round uint64, haveTime func() time.Duration) error diff --git a/process/mock/preprocessorMock.go b/process/mock/preprocessorMock.go index c4e069a0bf9..f3a1484430a 100644 --- a/process/mock/preprocessorMock.go +++ b/process/mock/preprocessorMock.go @@ -12,7 +12,7 @@ type PreProcessorMock struct { CreateBlockStartedCalled func() IsDataPreparedCalled func(requestedTxs int, haveTime func() time.Duration) error RemoveTxBlockFromPoolsCalled func(body block.Body, miniBlockPool storage.Cacher) error - RestoreTxBlockIntoPoolsCalled func(body block.Body, miniBlockPool storage.Cacher) (int, map[int][]byte, error) + RestoreTxBlockIntoPoolsCalled func(body block.Body, miniBlockPool storage.Cacher) (int, error) SaveTxBlockToStorageCalled func(body block.Body) error ProcessBlockTransactionsCalled func(body block.Body, round uint64, haveTime func() time.Duration) error RequestBlockTransactionsCalled func(body block.Body) int @@ -44,9 +44,9 @@ func (ppm *PreProcessorMock) RemoveTxBlockFromPools(body block.Body, miniBlockPo return ppm.RemoveTxBlockFromPoolsCalled(body, miniBlockPool) } -func (ppm *PreProcessorMock) RestoreTxBlockIntoPools(body block.Body, miniBlockPool storage.Cacher) (int, map[int][]byte, error) { +func (ppm *PreProcessorMock) RestoreTxBlockIntoPools(body block.Body, miniBlockPool storage.Cacher) (int, error) { if ppm.RestoreTxBlockIntoPoolsCalled == nil { - return 0, nil, nil + return 0, nil } return ppm.RestoreTxBlockIntoPoolsCalled(body, miniBlockPool) } diff --git a/process/mock/transactionCoordinatorMock.go b/process/mock/transactionCoordinatorMock.go index 7437c451aad..34746504a93 100644 --- a/process/mock/transactionCoordinatorMock.go +++ b/process/mock/transactionCoordinatorMock.go @@ -14,7 +14,7 @@ type TransactionCoordinatorMock struct { RequestBlockTransactionsCalled func(body block.Body) IsDataPreparedForProcessingCalled func(haveTime func() time.Duration) error SaveBlockDataToStorageCalled func(body block.Body) error - RestoreBlockDataFromStorageCalled func(body block.Body) (int, map[int][][]byte, error) + 
RestoreBlockDataFromStorageCalled func(body block.Body) (int, error) RemoveBlockDataFromPoolCalled func(body block.Body) error ProcessBlockTransactionCalled func(body block.Body, round uint64, haveTime func() time.Duration) error CreateBlockStartedCalled func() @@ -65,9 +65,9 @@ func (tcm *TransactionCoordinatorMock) SaveBlockDataToStorage(body block.Body) e return tcm.SaveBlockDataToStorageCalled(body) } -func (tcm *TransactionCoordinatorMock) RestoreBlockDataFromStorage(body block.Body) (int, map[int][][]byte, error) { +func (tcm *TransactionCoordinatorMock) RestoreBlockDataFromStorage(body block.Body) (int, error) { if tcm.RestoreBlockDataFromStorageCalled == nil { - return 0, nil, nil + return 0, nil } return tcm.RestoreBlockDataFromStorageCalled(body) diff --git a/process/sync/baseForkDetector.go b/process/sync/baseForkDetector.go index aa082ceac5b..d267f1cb00b 100644 --- a/process/sync/baseForkDetector.go +++ b/process/sync/baseForkDetector.go @@ -450,3 +450,23 @@ func (bfd *baseForkDetector) shouldSignalFork( return shouldSignalFork } + +func (bfd *baseForkDetector) shouldAddBlockInForkDetector( + header data.HeaderHandler, + state process.BlockHeaderState, + finality int64, +) error { + + noncesDifference := int64(bfd.ProbableHighestNonce()) - int64(header.GetNonce()) + isSyncing := state == process.BHReceived && noncesDifference > process.MaxNoncesDifference + if state == process.BHProcessed || isSyncing { + return nil + } + + roundTooOld := int64(header.GetRound()) < bfd.rounder.Index()-finality + if roundTooOld { + return ErrLowerRoundInBlock + } + + return nil +} diff --git a/process/sync/baseForkDetector_test.go b/process/sync/baseForkDetector_test.go index 85a189e2c13..1c901893c81 100644 --- a/process/sync/baseForkDetector_test.go +++ b/process/sync/baseForkDetector_test.go @@ -761,3 +761,79 @@ func TestBasicForkDetector_GetProbableHighestNonce(t *testing.T) { hInfos = bfd.GetHeaders(3) assert.Equal(t, uint64(3), bfd.GetProbableHighestNonce(hInfos)) } + +func TestShardForkDetector_ShouldAddBlockInForkDetectorShouldWork(t *testing.T) { + t.Parallel() + rounderMock := &mock.RounderMock{RoundIndex: 10} + sfd, _ := sync.NewShardForkDetector(rounderMock) + + hdr := &block.Header{Nonce: 1, Round: 1} + err := sfd.ShouldAddBlockInForkDetector(hdr, process.BHProcessed, process.ShardBlockFinality) + assert.Nil(t, err) + + sfd.SetProbableHighestNonce(hdr.GetNonce() + process.MaxNoncesDifference + 1) + err = sfd.ShouldAddBlockInForkDetector(hdr, process.BHReceived, process.ShardBlockFinality) + assert.Nil(t, err) + + sfd.SetProbableHighestNonce(hdr.GetNonce() + process.MaxNoncesDifference) + hdr.Round = uint64(rounderMock.RoundIndex - process.ShardBlockFinality) + err = sfd.ShouldAddBlockInForkDetector(hdr, process.BHReceived, process.ShardBlockFinality) + assert.Nil(t, err) + + sfd.SetProbableHighestNonce(hdr.GetNonce() + process.MaxNoncesDifference + 1) + err = sfd.ShouldAddBlockInForkDetector(hdr, process.BHProposed, process.ShardBlockFinality) + assert.Nil(t, err) +} + +func TestShardForkDetector_ShouldAddBlockInForkDetectorShouldErrLowerRoundInBlock(t *testing.T) { + t.Parallel() + rounderMock := &mock.RounderMock{RoundIndex: 10} + sfd, _ := sync.NewShardForkDetector(rounderMock) + hdr := &block.Header{Nonce: 1, Round: 1} + + hdr.Round = uint64(rounderMock.RoundIndex - process.ShardBlockFinality - 1) + err := sfd.ShouldAddBlockInForkDetector(hdr, process.BHReceived, process.ShardBlockFinality) + assert.Equal(t, sync.ErrLowerRoundInBlock, err) + + 
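The rule exercised by these tests, shouldAddBlockInForkDetector, reduces to the following self-contained sketch (simplified types, detector state passed explicitly; illustrative only): processed headers, and headers received while the node is clearly syncing, are always accepted; otherwise a header whose round is older than the current round minus the finality is rejected.

package main

import (
	"errors"
	"fmt"
)

const maxNoncesDifference = 5 // mirrors process.MaxNoncesDifference

var errLowerRoundInBlock = errors.New("header round is lower than last committed")

type headerState int

const (
	bhProposed headerState = iota
	bhReceived
	bhProcessed
)

// shouldAddBlock reproduces the acceptance rule added to baseForkDetector,
// with the detector state supplied as parameters instead of struct fields.
func shouldAddBlock(hdrNonce, hdrRound, probableHighestNonce uint64, roundIndex int64, state headerState, finality int64) error {
	noncesDifference := int64(probableHighestNonce) - int64(hdrNonce)
	isSyncing := state == bhReceived && noncesDifference > maxNoncesDifference
	if state == bhProcessed || isSyncing {
		return nil
	}
	if int64(hdrRound) < roundIndex-finality {
		return errLowerRoundInBlock
	}
	return nil
}

func main() {
	// current round 10, shard finality 1: a received header from round 8 is too old...
	fmt.Println(shouldAddBlock(1, 8, 1, 10, bhReceived, 1))
	// ...but it is still accepted when the node is far behind (probable highest nonce well ahead)
	fmt.Println(shouldAddBlock(1, 8, 7, 10, bhReceived, 1))
}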
sfd.SetProbableHighestNonce(hdr.GetNonce() + process.MaxNoncesDifference + 1) + err = sfd.ShouldAddBlockInForkDetector(hdr, process.BHProposed, process.ShardBlockFinality) + assert.Equal(t, sync.ErrLowerRoundInBlock, err) +} + +func TestMetaForkDetector_ShouldAddBlockInForkDetectorShouldWork(t *testing.T) { + t.Parallel() + rounderMock := &mock.RounderMock{RoundIndex: 10} + mfd, _ := sync.NewMetaForkDetector(rounderMock) + + hdr := &block.MetaBlock{Nonce: 1, Round: 1} + err := mfd.ShouldAddBlockInForkDetector(hdr, process.BHProcessed, process.MetaBlockFinality) + assert.Nil(t, err) + + mfd.SetProbableHighestNonce(hdr.GetNonce() + process.MaxNoncesDifference + 1) + err = mfd.ShouldAddBlockInForkDetector(hdr, process.BHReceived, process.MetaBlockFinality) + assert.Nil(t, err) + + mfd.SetProbableHighestNonce(hdr.GetNonce() + process.MaxNoncesDifference) + hdr.Round = uint64(rounderMock.RoundIndex - process.MetaBlockFinality) + err = mfd.ShouldAddBlockInForkDetector(hdr, process.BHReceived, process.MetaBlockFinality) + assert.Nil(t, err) + + mfd.SetProbableHighestNonce(hdr.GetNonce() + process.MaxNoncesDifference + 1) + err = mfd.ShouldAddBlockInForkDetector(hdr, process.BHProposed, process.MetaBlockFinality) + assert.Nil(t, err) +} + +func TestMetaForkDetector_ShouldAddBlockInForkDetectorShouldErrLowerRoundInBlock(t *testing.T) { + t.Parallel() + rounderMock := &mock.RounderMock{RoundIndex: 10} + mfd, _ := sync.NewMetaForkDetector(rounderMock) + hdr := &block.MetaBlock{Nonce: 1, Round: 1} + + hdr.Round = uint64(rounderMock.RoundIndex - process.MetaBlockFinality - 1) + err := mfd.ShouldAddBlockInForkDetector(hdr, process.BHReceived, process.MetaBlockFinality) + assert.Equal(t, sync.ErrLowerRoundInBlock, err) + + mfd.SetProbableHighestNonce(hdr.GetNonce() + process.MaxNoncesDifference + 1) + err = mfd.ShouldAddBlockInForkDetector(hdr, process.BHProposed, process.MetaBlockFinality) + assert.Equal(t, sync.ErrLowerRoundInBlock, err) +} diff --git a/process/sync/export_test.go b/process/sync/export_test.go index 0273107dd62..d99cdcbc29e 100644 --- a/process/sync/export_test.go +++ b/process/sync/export_test.go @@ -272,3 +272,11 @@ func (sbm *StorageBootstrapperMock) IsInterfaceNil() bool { } return false } + +func (bfd *baseForkDetector) ShouldAddBlockInForkDetector(header data.HeaderHandler, state process.BlockHeaderState, finality int64) error { + return bfd.shouldAddBlockInForkDetector(header, state, finality) +} + +func (bfd *baseForkDetector) SetProbableHighestNonce(nonce uint64) { + bfd.setProbableHighestNonce(nonce) +} diff --git a/process/sync/metaForkDetector.go b/process/sync/metaForkDetector.go index b2b63348a58..53c1cafea6a 100644 --- a/process/sync/metaForkDetector.go +++ b/process/sync/metaForkDetector.go @@ -54,7 +54,7 @@ func (mfd *metaForkDetector) AddHeader( return err } - err = mfd.checkMetaBlockValidity(header) + err = mfd.shouldAddBlockInForkDetector(header, state, process.MetaBlockFinality) if err != nil { return err } @@ -78,12 +78,3 @@ func (mfd *metaForkDetector) AddHeader( return nil } - -func (mfd *metaForkDetector) checkMetaBlockValidity(header data.HeaderHandler) error { - roundTooOld := int64(header.GetRound()) < mfd.rounder.Index()-process.MetaBlockFinality - if roundTooOld { - return ErrLowerRoundInBlock - } - - return nil -} diff --git a/process/sync/shardForkDetector.go b/process/sync/shardForkDetector.go index e2cd33c2930..334f8d0fdae 100644 --- a/process/sync/shardForkDetector.go +++ b/process/sync/shardForkDetector.go @@ -54,6 +54,11 @@ func (sfd 
*shardForkDetector) AddHeader( return err } + err = sfd.shouldAddBlockInForkDetector(header, state, process.ShardBlockFinality) + if err != nil { + return err + } + if state == process.BHProcessed { sfd.addFinalHeaders(finalHeaders, finalHeadersHashes) sfd.addCheckpoint(&checkpointInfo{nonce: header.GetNonce(), round: header.GetRound()}) From e030f5c6ac2af25a99d42123c33c1b908c592d52 Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Wed, 25 Sep 2019 20:36:14 +0300 Subject: [PATCH 138/234] process, consensus, integrationTests: fix review findings --- cmd/node/factory/structs.go | 4 +- consensus/mock/blockProcessorMock.go | 2 +- .../spos/commonSubround/subroundStartRound.go | 4 +- data/address/specialAddresses.go | 52 ++++++++++++++----- data/errors.go | 3 ++ integrationTests/mock/blockProcessorMock.go | 6 +-- .../mock/specialAddressHandlerMock.go | 27 ++++++++-- .../block/executingRewardMiniblocks_test.go | 2 - .../smartContract/testInitilalizer.go | 3 ++ integrationTests/testInitializer.go | 3 +- integrationTests/testProcessorNode.go | 1 + .../testProcessorNodeWithMultisigner.go | 4 +- integrationTests/testSyncNode.go | 23 ++++---- node/mock/blockProcessorStub.go | 2 +- process/block/baseProcess_test.go | 13 +++-- process/block/export_test.go | 15 ++++-- process/block/metablock.go | 2 +- .../block/preprocess/rewardsHandler_test.go | 40 +++++++------- process/block/shardblock.go | 44 ++++++---------- process/block/shardblock_test.go | 6 +-- process/errors.go | 3 -- process/interface.go | 12 ++--- process/mock/blockProcessorMock.go | 2 +- process/mock/specialAddressHandlerMock.go | 25 +++++++-- 24 files changed, 179 insertions(+), 119 deletions(-) diff --git a/cmd/node/factory/structs.go b/cmd/node/factory/structs.go index efb219ed987..a27a1d16224 100644 --- a/cmd/node/factory/structs.go +++ b/cmd/node/factory/structs.go @@ -1462,7 +1462,9 @@ func newBlockProcessorAndTracker( communityAddress, burnAddress, state.AddressConverter, - shardCoordinator) + shardCoordinator, + nodesCoordinator, + ) if err != nil { return nil, nil, err } diff --git a/consensus/mock/blockProcessorMock.go b/consensus/mock/blockProcessorMock.go index 7d782fe1078..d77ac805df7 100644 --- a/consensus/mock/blockProcessorMock.go +++ b/consensus/mock/blockProcessorMock.go @@ -67,7 +67,7 @@ func (blProcMock BlockProcessorMock) AddLastNotarizedHdr(shardId uint32, process blProcMock.AddLastNotarizedHdrCalled(shardId, processedHdr) } -func (blProcMock BlockProcessorMock) SetConsensusData(consensusRewardAddresses []string, round uint64) { +func (blProcMock BlockProcessorMock) SetConsensusData(randomness []byte, round uint64, epoch uint32, shardId uint32) { } // IsInterfaceNil returns true if there is no value under the interface diff --git a/consensus/spos/commonSubround/subroundStartRound.go b/consensus/spos/commonSubround/subroundStartRound.go index 4dc9a782458..c4a164fcfbd 100644 --- a/consensus/spos/commonSubround/subroundStartRound.go +++ b/consensus/spos/commonSubround/subroundStartRound.go @@ -212,7 +212,7 @@ func (sr *SubroundStartRound) generateNextConsensusGroup(roundIndex int64) error shardId := sr.ShardCoordinator().SelfId() - nextConsensusGroup, rewardsAddresses, err := sr.GetNextConsensusGroup( + nextConsensusGroup, _, err := sr.GetNextConsensusGroup( randomSeed, uint64(sr.RoundIndex), shardId, @@ -233,7 +233,7 @@ func (sr *SubroundStartRound) generateNextConsensusGroup(roundIndex int64) error sr.SetConsensusGroup(nextConsensusGroup) - sr.BlockProcessor().SetConsensusData(rewardsAddresses, uint64(sr.RoundIndex)) + 
sr.BlockProcessor().SetConsensusData(randomSeed, uint64(sr.RoundIndex), currentHeader.GetEpoch(), shardId) return nil } diff --git a/data/address/specialAddresses.go b/data/address/specialAddresses.go index fd7a8cce71a..db5df6a5a28 100644 --- a/data/address/specialAddresses.go +++ b/data/address/specialAddresses.go @@ -7,13 +7,14 @@ import ( ) type specialAddresses struct { - elrondAddress []byte shardConsensusData *data.ConsensusRewardData metaConsensusData []*data.ConsensusRewardData + elrondAddress []byte burnAddress []byte adrConv state.AddressConverter shardCoordinator sharding.Coordinator + nodesCoordinator sharding.NodesCoordinator } // NewSpecialAddressHolder creates a special address holder @@ -22,6 +23,7 @@ func NewSpecialAddressHolder( burnAddress []byte, adrConv state.AddressConverter, shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, ) (*specialAddresses, error) { if elrondAddress == nil { return nil, data.ErrNilElrondAddress @@ -35,18 +37,41 @@ func NewSpecialAddressHolder( if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { return nil, data.ErrNilShardCoordinator } + if nodesCoordinator == nil || nodesCoordinator.IsInterfaceNil() { + return nil, data.ErrNilNodesCoordinator + } sp := &specialAddresses{ elrondAddress: elrondAddress, burnAddress: burnAddress, adrConv: adrConv, shardCoordinator: shardCoordinator, + nodesCoordinator: nodesCoordinator, metaConsensusData: make([]*data.ConsensusRewardData, 0), } return sp, nil } +// SetShardConsensusData - sets the reward addresses for the current consensus group +func (sp *specialAddresses) SetShardConsensusData(randomness []byte, round uint64, epoch uint32, shardID uint32) error { + // give transaction coordinator the consensus group validators addresses where to send the rewards. 
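With this change the consensus layer no longer hands precomputed reward addresses to the block processor; it forwards the randomness (previous random seed), round, epoch and shard id, and the special address holder resolves the addresses through the nodes coordinator, as the implementation below does. A condensed, runnable sketch of that call chain using simplified stand-in types (names and shapes are illustrative only, not the real sharding.NodesCoordinator API):

package main

import "fmt"

// nodesCoordinator is a simplified stand-in for the nodes coordinator dependency.
type nodesCoordinator interface {
	GetValidatorsRewardsAddresses(randomness []byte, round uint64, shardID uint32) ([]string, error)
}

type stubCoordinator struct{}

func (stubCoordinator) GetValidatorsRewardsAddresses(randomness []byte, round uint64, shardID uint32) ([]string, error) {
	// a real coordinator derives the consensus group from the randomness;
	// fixed addresses are returned here only to keep the example runnable
	return []string{"rewardAddr0", "rewardAddr1"}, nil
}

type consensusRewardData struct {
	Round     uint64
	Epoch     uint32
	Addresses []string
}

// specialAddressHolder mirrors the role of the specialAddresses component.
type specialAddressHolder struct {
	coordinator        nodesCoordinator
	shardConsensusData *consensusRewardData
}

func (sp *specialAddressHolder) SetShardConsensusData(randomness []byte, round uint64, epoch uint32, shardID uint32) error {
	addresses, err := sp.coordinator.GetValidatorsRewardsAddresses(randomness, round, shardID)
	if err != nil {
		return err
	}
	sp.shardConsensusData = &consensusRewardData{Round: round, Epoch: epoch, Addresses: addresses}
	return nil
}

func main() {
	// the consensus start-round step now only forwards the randomness and round info
	holder := &specialAddressHolder{coordinator: stubCoordinator{}}
	if err := holder.SetShardConsensusData([]byte("prev rand seed"), 42, 0, 1); err != nil {
		fmt.Println("set consensus data failed:", err)
		return
	}
	fmt.Printf("%+v\n", *holder.shardConsensusData)
}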
+ consensusAddresses, err := sp.nodesCoordinator.GetValidatorsRewardsAddresses( + randomness, round, shardID, + ) + if err != nil { + return err + } + + sp.shardConsensusData = &data.ConsensusRewardData{ + Round: round, + Epoch: epoch, + Addresses: consensusAddresses, + } + + return nil +} + // SetElrondCommunityAddress sets elrond address func (sp *specialAddresses) SetElrondCommunityAddress(elrond []byte) { sp.elrondAddress = elrond @@ -62,27 +87,29 @@ func (sp *specialAddresses) BurnAddress() []byte { return sp.burnAddress } -// SetConsensusData sets the consensus rewards addresses for the round -func (sp *specialAddresses) SetConsensusData(rewardAddresses []string, round uint64, epoch uint32) { - sp.shardConsensusData = &data.ConsensusRewardData{ - Round: round, - Epoch: epoch, - Addresses: rewardAddresses, - } -} - -// ConsensusShardRewardAddresses provides the consensus reward addresses +// ConsensusShardRewardData provides the consensus data required for generating the rewards for shard nodes func (sp *specialAddresses) ConsensusShardRewardData() *data.ConsensusRewardData { return sp.shardConsensusData } // SetMetaConsensusData sets the rewards addresses for the metachain nodes -func (sp *specialAddresses) SetMetaConsensusData(rewardAddresses []string, round uint64, epoch uint32) { +func (sp *specialAddresses) SetMetaConsensusData(randomness []byte, round uint64, epoch uint32) error { + rewardAddresses, err := sp.nodesCoordinator.GetValidatorsRewardsAddresses( + randomness, + round, + sharding.MetachainShardId, + ) + if err != nil { + return err + } + sp.metaConsensusData = append(sp.metaConsensusData, &data.ConsensusRewardData{ Round: round, Epoch: epoch, Addresses: rewardAddresses, }) + + return nil } // ClearMetaConsensusData clears the previously set addresses for rewarding metachain nodes @@ -90,6 +117,7 @@ func (sp *specialAddresses) ClearMetaConsensusData() { sp.metaConsensusData = make([]*data.ConsensusRewardData, 0) } +// ConsensusMetaRewardData provides the consensus data required for generating the rewards for metachain nodes func (sp *specialAddresses) ConsensusMetaRewardData() []*data.ConsensusRewardData { return sp.metaConsensusData } diff --git a/data/errors.go b/data/errors.go index b1e0620102f..57509288aeb 100644 --- a/data/errors.go +++ b/data/errors.go @@ -39,3 +39,6 @@ var ErrNilAddressConverter = errors.New("nil address converter") // ErrNilShardCoordinator signals that nil shard coordinator was provided var ErrNilShardCoordinator = errors.New("nil shard coordinator") + +// ErrNilNodesCoordinator signals that nil shard coordinator was provided +var ErrNilNodesCoordinator = errors.New("nil nodes coordinator") diff --git a/integrationTests/mock/blockProcessorMock.go b/integrationTests/mock/blockProcessorMock.go index b5a75b87b35..65a48103c79 100644 --- a/integrationTests/mock/blockProcessorMock.go +++ b/integrationTests/mock/blockProcessorMock.go @@ -22,7 +22,7 @@ type BlockProcessorMock struct { DecodeBlockBodyCalled func(dta []byte) data.BodyHandler DecodeBlockHeaderCalled func(dta []byte) data.HeaderHandler AddLastNotarizedHdrCalled func(shardId uint32, processedHdr data.HeaderHandler) - SetConsensusDataCalled func([]string) + SetConsensusDataCalled func(randomness []byte, round uint64, epoch uint32, shardId uint32) } // ProcessBlock mocks pocessing a block @@ -93,9 +93,9 @@ func (blProcMock BlockProcessorMock) AddLastNotarizedHdr(shardId uint32, process blProcMock.AddLastNotarizedHdrCalled(shardId, processedHdr) } -func (blProcMock BlockProcessorMock) 
SetConsensusData(consensusRewardAddresses []string, round uint64) { +func (blProcMock BlockProcessorMock) SetConsensusData(randomness []byte, round uint64, epoch uint32, shardId uint32) { if blProcMock.SetConsensusDataCalled != nil { - blProcMock.SetConsensusDataCalled(consensusRewardAddresses) + blProcMock.SetConsensusDataCalled(randomness, round, epoch, shardId) } } diff --git a/integrationTests/mock/specialAddressHandlerMock.go b/integrationTests/mock/specialAddressHandlerMock.go index 7df8bd7dced..26f70f560c8 100644 --- a/integrationTests/mock/specialAddressHandlerMock.go +++ b/integrationTests/mock/specialAddressHandlerMock.go @@ -13,6 +13,7 @@ type SpecialAddressHandlerMock struct { ShardIdForAddressCalled func([]byte) (uint32, error) AdrConv state.AddressConverter ShardCoordinator sharding.Coordinator + NodesCoordinator sharding.NodesCoordinator shardConsensusData *data.ConsensusRewardData metaConsensusData []*data.ConsensusRewardData @@ -21,6 +22,7 @@ type SpecialAddressHandlerMock struct { func NewSpecialAddressHandlerMock( addrConv state.AddressConverter, shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, ) *SpecialAddressHandlerMock { return &SpecialAddressHandlerMock{ ElrondCommunityAddressCalled: nil, @@ -29,6 +31,7 @@ func NewSpecialAddressHandlerMock( ShardIdForAddressCalled: nil, AdrConv: addrConv, ShardCoordinator: shardCoordinator, + NodesCoordinator: nodesCoordinator, shardConsensusData: &data.ConsensusRewardData{ Round: 0, Epoch: 0, @@ -41,28 +44,44 @@ func NewSpecialAddressHandlerMock( func (sh *SpecialAddressHandlerMock) SetElrondCommunityAddress(elrond []byte) { } -func (sh *SpecialAddressHandlerMock) SetConsensusData(consensusRewardAddresses []string, round uint64, epoch uint32) { +func (sh *SpecialAddressHandlerMock) SetShardConsensusData(randomness []byte, round uint64, epoch uint32, shardId uint32) error { + addresses, err := sh.NodesCoordinator.GetValidatorsRewardsAddresses(randomness, round, shardId) + if err != nil { + return err + } + sh.shardConsensusData = &data.ConsensusRewardData{ Round: round, Epoch: epoch, - Addresses: consensusRewardAddresses, + Addresses: addresses, } + + return nil + } func (sh *SpecialAddressHandlerMock) ConsensusShardRewardData() *data.ConsensusRewardData { return sh.shardConsensusData } -func (sh *SpecialAddressHandlerMock) SetMetaConsensusData(rewardAddresses []string, round uint64, epoch uint32) { +func (sh *SpecialAddressHandlerMock) SetMetaConsensusData(randomness []byte, round uint64, epoch uint32) error { if sh.metaConsensusData == nil { sh.metaConsensusData = make([]*data.ConsensusRewardData, 0) } + addresses, err := sh.NodesCoordinator.GetValidatorsRewardsAddresses(randomness, round, sharding.MetachainShardId) + if err != nil { + return err + } + sh.metaConsensusData = append(sh.metaConsensusData, &data.ConsensusRewardData{ Round: round, Epoch: epoch, - Addresses: rewardAddresses, + Addresses: addresses, }) + + return nil + } func (sh *SpecialAddressHandlerMock) ClearMetaConsensusData() { diff --git a/integrationTests/multiShard/block/executingRewardMiniblocks_test.go b/integrationTests/multiShard/block/executingRewardMiniblocks_test.go index 1f98ad91bba..579ce523a09 100644 --- a/integrationTests/multiShard/block/executingRewardMiniblocks_test.go +++ b/integrationTests/multiShard/block/executingRewardMiniblocks_test.go @@ -16,7 +16,6 @@ import ( "github.com/ElrondNetwork/elrond-go/integrationTests" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/sharding" - 
"github.com/opentracing/opentracing-go/log" "github.com/stretchr/testify/assert" ) @@ -161,7 +160,6 @@ func TestExecuteBlocksWithoutTransactionsAndCheckRewards(t *testing.T) { shardRewardsData := consensusGroup[0].SpecialAddressHandler.ConsensusShardRewardData() if shardRewardsData == nil { - log.Error(errors.New("nil shard rewards data")) shardRewardsData = &data.ConsensusRewardData{} } diff --git a/integrationTests/multiShard/smartContract/testInitilalizer.go b/integrationTests/multiShard/smartContract/testInitilalizer.go index 617cf34bab0..e35875384ab 100644 --- a/integrationTests/multiShard/smartContract/testInitilalizer.go +++ b/integrationTests/multiShard/smartContract/testInitilalizer.go @@ -344,6 +344,7 @@ func createNetNode( mock.NewSpecialAddressHandlerMock( testAddressConverter, shardCoordinator, + nodesCoordinator, ), store, dPool, @@ -441,6 +442,7 @@ func createNetNode( SpecialAddressHandler: mock.NewSpecialAddressHandlerMock( testAddressConverter, shardCoordinator, + nodesCoordinator, ), Uint64Converter: uint64Converter, StartHeaders: genesisBlocks, @@ -777,6 +779,7 @@ func createMetaNetNode( mock.NewSpecialAddressHandlerMock( testAddressConverter, shardCoordinator, + nodesCoordinator, ), testHasher, testMarshalizer, diff --git a/integrationTests/testInitializer.go b/integrationTests/testInitializer.go index 4f0b6246593..9b4a477afb9 100644 --- a/integrationTests/testInitializer.go +++ b/integrationTests/testInitializer.go @@ -506,10 +506,9 @@ func ProposeBlock(nodes []*TestProcessorNode, idxProposers []int, round uint64, fmt.Println("All shards propose blocks...") for idx, n := range nodes { - consensusRewardsData := n.SpecialAddressHandler.ConsensusShardRewardData() // set the consensus reward addresses as rewards processor expects at least valid round // otherwise the produced rewards will not be valid on verification - n.BlockProcessor.SetConsensusData(consensusRewardsData.Addresses, round) + n.BlockProcessor.SetConsensusData([]byte("randomness"), round, 0, n.ShardCoordinator.SelfId()) if !IsIntInSlice(idx, idxProposers) { continue diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index 86f5dfe0a1f..8e176a21f13 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -193,6 +193,7 @@ func (tpn *TestProcessorNode) initTestNode() { tpn.SpecialAddressHandler = mock.NewSpecialAddressHandlerMock( TestAddressConverter, tpn.ShardCoordinator, + tpn.NodesCoordinator, ) tpn.initStorage() tpn.AccntState, _, _ = CreateAccountsDB(0) diff --git a/integrationTests/testProcessorNodeWithMultisigner.go b/integrationTests/testProcessorNodeWithMultisigner.go index 8d33125961d..c25c6c1864f 100644 --- a/integrationTests/testProcessorNodeWithMultisigner.go +++ b/integrationTests/testProcessorNodeWithMultisigner.go @@ -125,11 +125,9 @@ func ProposeBlockWithConsensusSignature( fmt.Println("Error getting the validators public keys: ", err) } - adddresses, err := nodesCoordinator.GetValidatorsRewardsAddresses(randomness, round, shardId) - // set the consensus reward addresses for _, node := range nodesMap[shardId] { - node.BlockProcessor.SetConsensusData(adddresses, round) + node.BlockProcessor.SetConsensusData(randomness, round, 0, shardId) } consensusNodes := selectTestNodesForPubKeys(nodesMap[shardId], pubKeys) diff --git a/integrationTests/testSyncNode.go b/integrationTests/testSyncNode.go index bc8e4ebf04b..7f0ec7ac890 100644 --- a/integrationTests/testSyncNode.go +++ b/integrationTests/testSyncNode.go @@ -57,6 
+57,7 @@ func (tpn *TestProcessorNode) initTestNodeWithSync() { tpn.SpecialAddressHandler = mock.NewSpecialAddressHandlerMock( TestAddressConverter, tpn.ShardCoordinator, + tpn.NodesCoordinator, ) tpn.initInterceptors() tpn.initResolvers() @@ -112,18 +113,18 @@ func (tpn *TestProcessorNode) initBlockProcessorWithSync() { tpn.ForkDetector, _ = sync.NewShardForkDetector(tpn.Rounder) arguments := block.ArgShardProcessor{ ArgBaseProcessor: &block.ArgBaseProcessor{ - Accounts: tpn.AccntState, - ForkDetector: tpn.ForkDetector, - Hasher: TestHasher, - Marshalizer: TestMarshalizer, - Store: tpn.Storage, - ShardCoordinator: tpn.ShardCoordinator, - NodesCoordinator: tpn.NodesCoordinator, + Accounts: tpn.AccntState, + ForkDetector: tpn.ForkDetector, + Hasher: TestHasher, + Marshalizer: TestMarshalizer, + Store: tpn.Storage, + ShardCoordinator: tpn.ShardCoordinator, + NodesCoordinator: tpn.NodesCoordinator, SpecialAddressHandler: tpn.SpecialAddressHandler, - Uint64Converter: TestUint64Converter, - StartHeaders: tpn.GenesisBlocks, - RequestHandler: tpn.RequestHandler, - Core: nil, + Uint64Converter: TestUint64Converter, + StartHeaders: tpn.GenesisBlocks, + RequestHandler: tpn.RequestHandler, + Core: nil, }, DataPool: tpn.ShardDataPool, BlocksTracker: tpn.BlockTracker, diff --git a/node/mock/blockProcessorStub.go b/node/mock/blockProcessorStub.go index 04a97928844..f8033ae7f27 100644 --- a/node/mock/blockProcessorStub.go +++ b/node/mock/blockProcessorStub.go @@ -72,7 +72,7 @@ func (blProcMock BlockProcessorStub) AddLastNotarizedHdr(shardId uint32, process blProcMock.AddLastNotarizedHdrCalled(shardId, processedHdr) } -func (blProcMock BlockProcessorStub) SetConsensusData([]string, uint64) { +func (blProcMock BlockProcessorStub) SetConsensusData(randomness []byte, round uint64, epoch uint32, shardId uint32) { panic("implement me") } diff --git a/process/block/baseProcess_test.go b/process/block/baseProcess_test.go index 318c11c2a5b..28a51e65def 100644 --- a/process/block/baseProcess_test.go +++ b/process/block/baseProcess_test.go @@ -307,6 +307,13 @@ func (wr *wrongBody) IsInterfaceNil() bool { } func CreateMockArguments() blproc.ArgShardProcessor { + nodesCoordinator := mock.NewNodesCoordinatorMock() + shardCoordinator := mock.NewOneShardCoordinatorMock() + specialAddressHandler := mock.NewSpecialAddressHandlerMock( + &mock.AddressConverterMock{}, + shardCoordinator, + nodesCoordinator, + ) arguments := blproc.ArgShardProcessor{ ArgBaseProcessor: &blproc.ArgBaseProcessor{ Accounts: &mock.AccountsStub{}, @@ -314,9 +321,9 @@ func CreateMockArguments() blproc.ArgShardProcessor { Hasher: &mock.HasherStub{}, Marshalizer: &mock.MarshalizerMock{}, Store: initStore(), - ShardCoordinator: mock.NewOneShardCoordinatorMock(), - NodesCoordinator: mock.NewNodesCoordinatorMock(), - SpecialAddressHandler: &mock.SpecialAddressHandlerMock{}, + ShardCoordinator: shardCoordinator, + NodesCoordinator: nodesCoordinator, + SpecialAddressHandler: specialAddressHandler, Uint64Converter: &mock.Uint64ByteSliceConverterMock{}, StartHeaders: createGenesisBlocks(mock.NewOneShardCoordinatorMock()), RequestHandler: &mock.RequestHandlerMock{}, diff --git a/process/block/export_test.go b/process/block/export_test.go index 033286f5fdf..1e99c18c592 100644 --- a/process/block/export_test.go +++ b/process/block/export_test.go @@ -45,7 +45,6 @@ func (sp *shardProcessor) CreateMiniBlocks(noShards uint32, maxItemsInBlock uint } func (sp *shardProcessor) GetProcessedMetaBlocksFromHeader(header *block.Header) ([]data.HeaderHandler, error) { - 
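	// test-only export: it simply delegates to the unexported shardProcessor implementation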
return sp.getProcessedMetaBlocksFromHeader(header) } @@ -54,7 +53,13 @@ func (sp *shardProcessor) RemoveProcessedMetablocksFromPool(processedMetaHdrs [] } func NewShardProcessorEmptyWith3shards(tdp dataRetriever.PoolsHolder, genesisBlocks map[uint32]data.HeaderHandler) (*shardProcessor, error) { - + shardCoordinator := mock.NewMultiShardsCoordinatorMock(3) + nodesCoordinator := mock.NewNodesCoordinatorMock() + specialAddressHandler := mock.NewSpecialAddressHandlerMock( + &mock.AddressConverterMock{}, + shardCoordinator, + nodesCoordinator, + ) arguments := ArgShardProcessor{ ArgBaseProcessor: &ArgBaseProcessor{ Accounts: &mock.AccountsStub{}, @@ -62,9 +67,9 @@ func NewShardProcessorEmptyWith3shards(tdp dataRetriever.PoolsHolder, genesisBlo Hasher: &mock.HasherMock{}, Marshalizer: &mock.MarshalizerMock{}, Store: &mock.ChainStorerMock{}, - ShardCoordinator: mock.NewMultiShardsCoordinatorMock(3), - NodesCoordinator: mock.NewNodesCoordinatorMock(), - SpecialAddressHandler: &mock.SpecialAddressHandlerMock{}, + ShardCoordinator: shardCoordinator, + NodesCoordinator: nodesCoordinator, + SpecialAddressHandler: specialAddressHandler, Uint64Converter: &mock.Uint64ByteSliceConverterMock{}, StartHeaders: genesisBlocks, RequestHandler: &mock.RequestHandlerMock{}, diff --git a/process/block/metablock.go b/process/block/metablock.go index 99a6bbb53b7..98a026879cc 100644 --- a/process/block/metablock.go +++ b/process/block/metablock.go @@ -225,7 +225,7 @@ func (mp *metaProcessor) ProcessBlock( } // SetConsensusData - sets the reward addresses for the current consensus group -func (mp *metaProcessor) SetConsensusData(consensusRewardAddresses []string, round uint64) { +func (mp *metaProcessor) SetConsensusData(randomness []byte, round uint64, epoch uint32, shardId uint32) { // nothing to do } diff --git a/process/block/preprocess/rewardsHandler_test.go b/process/block/preprocess/rewardsHandler_test.go index 3275273227f..f6f556083ef 100644 --- a/process/block/preprocess/rewardsHandler_test.go +++ b/process/block/preprocess/rewardsHandler_test.go @@ -269,7 +269,8 @@ func TestRewardsHandler_VerifyCreatedRewardsTxsRewardTxNotFound(t *testing.T) { tdp := initDataPool() adrConv := &mock.AddressConverterMock{} shardCoordinator := mock.NewMultiShardsCoordinatorMock(3) - addr := mock.NewSpecialAddressHandlerMock(adrConv, shardCoordinator) + nodesCoordinator := mock.NewNodesCoordinatorMock() + addr := mock.NewSpecialAddressHandlerMock(adrConv, shardCoordinator, nodesCoordinator) th, err := NewRewardTxHandler( addr, &mock.HasherMock{}, @@ -302,7 +303,8 @@ func TestRewardsHandler_VerifyCreatedRewardsTxsTotalTxsFeesDoNotMatch(t *testing tdp := initDataPool() adrConv := &mock.AddressConverterMock{} shardCoordinator := mock.NewMultiShardsCoordinatorMock(3) - addr := mock.NewSpecialAddressHandlerMock(adrConv, shardCoordinator) + nodesCoordinator := mock.NewNodesCoordinatorMock() + addr := mock.NewSpecialAddressHandlerMock(adrConv, shardCoordinator, nodesCoordinator) th, err := NewRewardTxHandler( addr, &mock.HasherMock{}, @@ -337,7 +339,8 @@ func TestRewardsHandler_VerifyCreatedRewardsTxsOK(t *testing.T) { tdp := initDataPool() adrConv := &mock.AddressConverterMock{} shardCoordinator := mock.NewMultiShardsCoordinatorMock(3) - addr := mock.NewSpecialAddressHandlerMock(adrConv, shardCoordinator) + nodesCoordinator := mock.NewNodesCoordinatorMock() + addr := mock.NewSpecialAddressHandlerMock(adrConv, shardCoordinator, nodesCoordinator) th, err := NewRewardTxHandler( addr, &mock.HasherMock{}, @@ -365,11 +368,13 @@ func 
TestRewardsHandler_CreateAllInterMiniBlocksOK(t *testing.T) { t.Parallel() shardCoordinator := mock.NewMultiShardsCoordinatorMock(1) + nodesCoordinator := mock.NewNodesCoordinatorMock() tdp := initDataPool() th, err := NewRewardTxHandler( mock.NewSpecialAddressHandlerMock( &mock.AddressConverterMock{}, shardCoordinator, + nodesCoordinator, ), &mock.HasherMock{}, &mock.MarshalizerMock{}, @@ -395,19 +400,17 @@ func TestRewardsHandler_CreateAllInterMiniBlocksOK(t *testing.T) { func TestRewardsHandler_GetAllCurrentFinishedTxs(t *testing.T) { t.Parallel() + nodesCoordinator := mock.NewNodesCoordinatorMock() shardCoordinator := mock.NewMultiShardsCoordinatorMock(1) tdp := initDataPool() specialAddress := &mock.SpecialAddressHandlerMock{ AdrConv: &mock.AddressConverterMock{}, ShardCoordinator: shardCoordinator, + NodesCoordinator: nodesCoordinator, } - consensusAddresses := []string{ - "1000000000000000000000000000000000000000000000000000000000000000", - "2000000000000000000000000000000000000000000000000000000000000000", - } - - specialAddress.SetConsensusData(consensusAddresses, 0, 0) + _ = specialAddress.SetShardConsensusData([]byte("random"), 0, 0, shardCoordinator.SelfId()) + rewardData := specialAddress.ConsensusShardRewardData() th, err := NewRewardTxHandler( specialAddress, @@ -422,28 +425,23 @@ func TestRewardsHandler_GetAllCurrentFinishedTxs(t *testing.T) { assert.Nil(t, err) assert.NotNil(t, th) - txs := []data.TransactionHandler{ - &rewardTx.RewardTx{ - Round: 0, - Epoch: 0, - Value: big.NewInt(1), - RcvAddr: []byte(consensusAddresses[0]), - ShardId: 0, - }, - &rewardTx.RewardTx{ + txs := make([]data.TransactionHandler, len(rewardData.Addresses)) + for i := 0; i < len(rewardData.Addresses); i++ { + txs[i] = &rewardTx.RewardTx{ Round: 0, Epoch: 0, Value: big.NewInt(1), - RcvAddr: []byte(consensusAddresses[1]), + RcvAddr: []byte(rewardData.Addresses[i]), ShardId: 0, - }, + } + } err = th.AddIntermediateTransactions(txs) assert.Nil(t, err) finishedTxs := th.GetAllCurrentFinishedTxs() - assert.Equal(t, 2, len(txs)) + assert.Equal(t, len(txs), len(finishedTxs)) for _, ftx := range finishedTxs { found := false diff --git a/process/block/shardblock.go b/process/block/shardblock.go index 33bb32469db..e9d956a7530 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -169,7 +169,12 @@ func (sp *shardProcessor) ProcessBlock( log.Info(fmt.Sprintf("Total txs in pool: %d\n", numTxWithDst)) - err = sp.setShardConsensusData(headerHandler) + err = sp.specialAddressHandler.SetShardConsensusData( + headerHandler.GetPrevRandSeed(), + headerHandler.GetRound(), + headerHandler.GetEpoch(), + headerHandler.GetShardID(), + ) if err != nil { return err } @@ -254,22 +259,6 @@ func (sp *shardProcessor) ProcessBlock( return nil } -func (sp *shardProcessor) setShardConsensusData(headerHandler data.HeaderHandler) error { - // give transaction coordinator the consensus group validators addresses where to send the rewards. 
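	// (the logic removed here now lives in specialAddresses.SetShardConsensusData, which the
	// shard processor reaches through the SpecialAddressHandler interface)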
- consensusAddresses, err := sp.nodesCoordinator.GetValidatorsRewardsAddresses( - headerHandler.GetPrevRandSeed(), - headerHandler.GetRound(), - sp.shardCoordinator.SelfId(), - ) - if err != nil { - return err - } - - sp.SetConsensusData(consensusAddresses, headerHandler.GetRound()) - - return nil -} - func (sp *shardProcessor) setMetaConsensusData(finalizedMetaBlocks []data.HeaderHandler) error { sp.specialAddressHandler.ClearMetaConsensusData() @@ -277,24 +266,21 @@ func (sp *shardProcessor) setMetaConsensusData(finalizedMetaBlocks []data.Header for _, metaBlock := range finalizedMetaBlocks { round := metaBlock.GetRound() epoch := metaBlock.GetEpoch() - consensusAddresses, err := sp.nodesCoordinator.GetValidatorsRewardsAddresses( - metaBlock.GetPrevRandSeed(), - round, - metaBlock.GetShardID(), - ) + err := sp.specialAddressHandler.SetMetaConsensusData(metaBlock.GetPrevRandSeed(), round, epoch) if err != nil { return err } - - sp.specialAddressHandler.SetMetaConsensusData(consensusAddresses, round, epoch) } return nil } -// SetConsensusData - sets the reward addresses for the current consensus group -func (sp *shardProcessor) SetConsensusData(consensusRewardAddresses []string, round uint64) { - sp.specialAddressHandler.SetConsensusData(consensusRewardAddresses, round, 0) +// SetConsensusData - sets the reward data for the current consensus group +func (sp *shardProcessor) SetConsensusData(randomness []byte, round uint64, epoch uint32, shardId uint32) { + err := sp.specialAddressHandler.SetShardConsensusData(randomness, round, epoch, shardId) + if err != nil { + log.Error(err.Error()) + } } // checkMetaHeadersValidity - checks if listed metaheaders are valid as construction @@ -575,7 +561,7 @@ func (sp *shardProcessor) restoreMetaBlockIntoPool(miniBlockHashes map[string]ui crossMiniBlockHashes := hdr.GetMiniBlockHeadersWithDst(sp.shardCoordinator.SelfId()) for key := range miniBlockHashes { - _, ok := crossMiniBlockHashes[key] + _, ok = crossMiniBlockHashes[key] if !ok { continue } @@ -949,7 +935,7 @@ func (sp *shardProcessor) getProcessedMetaBlocks( } for key := range miniBlockHashes { - _, ok = crossMiniBlockHashes[string(miniBlockHashes[key])] + _, ok = crossMiniBlockHashes[string(miniBlockHashes[key])] if !ok { continue } diff --git a/process/block/shardblock_test.go b/process/block/shardblock_test.go index 518160e08ef..d817e266749 100644 --- a/process/block/shardblock_test.go +++ b/process/block/shardblock_test.go @@ -91,8 +91,6 @@ func CreateMockArgumentsMultiShard() blproc.ArgShardProcessor { arguments.DataPool = initDataPool([]byte("tx_hash1")) arguments.Accounts = initAccountsMock() arguments.ShardCoordinator = mock.NewMultiShardsCoordinatorMock(3) - arguments.NodesCoordinator = mock.NewNodesCoordinatorMock() - arguments.SpecialAddressHandler = &mock.SpecialAddressHandlerMock{} arguments.StartHeaders = createGenesisBlocks(arguments.ShardCoordinator) return arguments @@ -1660,7 +1658,7 @@ func TestShardProcessor_CommitBlockStorageFailsForBodyShouldWork(t *testing.T) { return nil }, GetHighestFinalBlockNonceCalled: func() uint64 { - return 0 + return 0 }, } arguments.BlocksTracker = &mock.BlocksTrackerMock{ @@ -3966,7 +3964,7 @@ func TestShardProcessor_GetHighestHdrForOwnShardFromMetachainNothingToProcess(t arguments := CreateMockArgumentsMultiShard() sp, _ := blproc.NewShardProcessor(arguments) - hdrs,_,_ := sp.GetHighestHdrForOwnShardFromMetachain(nil) + hdrs, _, _ := sp.GetHighestHdrForOwnShardFromMetachain(nil) assert.NotNil(t, hdrs) assert.Equal(t, uint64(0), 
hdrs[0].GetNonce()) diff --git a/process/errors.go b/process/errors.go index 6ae9806cca5..348fdca5f18 100644 --- a/process/errors.go +++ b/process/errors.go @@ -46,9 +46,6 @@ var ErrNilBlockChain = errors.New("nil block chain") // ErrNilMetaBlockHeader signals that an operation has been attempted to or with a nil metablock var ErrNilMetaBlockHeader = errors.New("nil metablock header") -// ErrNilMetaBlockHashes signals that a nil array of metablock header hashes was provided -var ErrNilMetaBlockHashes = errors.New("nil metablock header hashes") - // ErrNilTxBlockBody signals that an operation has been attempted to or with a nil tx block body var ErrNilTxBlockBody = errors.New("nil tx block body") diff --git a/process/interface.go b/process/interface.go index e2afbbde788..797299ee5ea 100644 --- a/process/interface.go +++ b/process/interface.go @@ -124,15 +124,15 @@ type TransactionFeeHandler interface { // SpecialAddressHandler responds with needed special addresses type SpecialAddressHandler interface { - SetElrondCommunityAddress(elrond []byte) - ElrondCommunityAddress() []byte - SetConsensusData(consensusRewardAddresses []string, round uint64, epoch uint32) + SetShardConsensusData(randomness []byte, round uint64, epoch uint32, shardID uint32) error + SetMetaConsensusData(randomness []byte, round uint64, epoch uint32) error ConsensusShardRewardData() *data.ConsensusRewardData - SetMetaConsensusData(rewardAddresses []string, round uint64, epoch uint32) - ClearMetaConsensusData() ConsensusMetaRewardData() []*data.ConsensusRewardData + ClearMetaConsensusData() + ElrondCommunityAddress() []byte LeaderAddress() []byte BurnAddress() []byte + SetElrondCommunityAddress(elrond []byte) ShardIdForAddress([]byte) (uint32, error) Epoch() uint32 Round() uint64 @@ -174,7 +174,7 @@ type BlockProcessor interface { DecodeBlockBody(dta []byte) data.BodyHandler DecodeBlockHeader(dta []byte) data.HeaderHandler AddLastNotarizedHdr(shardId uint32, processedHdr data.HeaderHandler) - SetConsensusData(consensusRewardAddresses []string, round uint64) + SetConsensusData(randomness []byte, round uint64, epoch uint32, shardId uint32) IsInterfaceNil() bool } diff --git a/process/mock/blockProcessorMock.go b/process/mock/blockProcessorMock.go index a2127363b25..100e94f5e9b 100644 --- a/process/mock/blockProcessorMock.go +++ b/process/mock/blockProcessorMock.go @@ -67,7 +67,7 @@ func (blProcMock BlockProcessorMock) AddLastNotarizedHdr(shardId uint32, process blProcMock.AddLastNotarizedHdrCalled(shardId, processedHdr) } -func (blProcMock BlockProcessorMock) SetConsensusData([]string, uint64) { +func (blProcMock BlockProcessorMock) SetConsensusData(randomness []byte, round uint64, epoch uint32, shardId uint32) { panic("implement me") } diff --git a/process/mock/specialAddressHandlerMock.go b/process/mock/specialAddressHandlerMock.go index 8af0bd4c074..661756611ef 100644 --- a/process/mock/specialAddressHandlerMock.go +++ b/process/mock/specialAddressHandlerMock.go @@ -13,6 +13,7 @@ type SpecialAddressHandlerMock struct { ShardIdForAddressCalled func([]byte) (uint32, error) AdrConv state.AddressConverter ShardCoordinator sharding.Coordinator + NodesCoordinator sharding.NodesCoordinator shardConsensusData *data.ConsensusRewardData metaConsensusData []*data.ConsensusRewardData @@ -21,6 +22,7 @@ type SpecialAddressHandlerMock struct { func NewSpecialAddressHandlerMock( addrConv state.AddressConverter, shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, ) *SpecialAddressHandlerMock { return 
&SpecialAddressHandlerMock{ ElrondCommunityAddressCalled: nil, @@ -29,6 +31,7 @@ func NewSpecialAddressHandlerMock( ShardIdForAddressCalled: nil, AdrConv: addrConv, ShardCoordinator: shardCoordinator, + NodesCoordinator: nodesCoordinator, shardConsensusData: &data.ConsensusRewardData{ Round: 0, Epoch: 0, @@ -41,28 +44,42 @@ func NewSpecialAddressHandlerMock( func (sh *SpecialAddressHandlerMock) SetElrondCommunityAddress(elrond []byte) { } -func (sh *SpecialAddressHandlerMock) SetConsensusData(consensusRewardAddresses []string, round uint64, epoch uint32) { +func (sh *SpecialAddressHandlerMock) SetShardConsensusData(randomness []byte, round uint64, epoch uint32, shardId uint32) error { + addresses, err := sh.NodesCoordinator.GetValidatorsRewardsAddresses(randomness, round, shardId) + if err != nil { + return err + } + sh.shardConsensusData = &data.ConsensusRewardData{ Round: round, Epoch: epoch, - Addresses: consensusRewardAddresses, + Addresses: addresses, } + + return nil } func (sh *SpecialAddressHandlerMock) ConsensusShardRewardData() *data.ConsensusRewardData { return sh.shardConsensusData } -func (sh *SpecialAddressHandlerMock) SetMetaConsensusData(rewardAddresses []string, round uint64, epoch uint32) { +func (sh *SpecialAddressHandlerMock) SetMetaConsensusData(randomness []byte, round uint64, epoch uint32) error { if sh.metaConsensusData == nil { sh.metaConsensusData = make([]*data.ConsensusRewardData, 0) } + addresses, err := sh.NodesCoordinator.GetValidatorsRewardsAddresses(randomness, round, sharding.MetachainShardId) + if err != nil { + return err + } + sh.metaConsensusData = append(sh.metaConsensusData, &data.ConsensusRewardData{ Round: round, Epoch: epoch, - Addresses: rewardAddresses, + Addresses: addresses, }) + + return nil } func (sh *SpecialAddressHandlerMock) ClearMetaConsensusData() { From 349e18aeee72febf2f06241f49512012a8d9f786 Mon Sep 17 00:00:00 2001 From: iulianpascalau Date: Wed, 25 Sep 2019 21:47:55 +0300 Subject: [PATCH 139/234] added test bootstrapper wrappers for easier testing refactored integration test --- consensus/mock/bootstrapMock.go | 5 ++ integrationTests/interface.go | 11 ++++ integrationTests/sync/common.go | 72 +++++++++++++++++++++++++ integrationTests/sync/edgeCases_test.go | 12 ++++- integrationTests/testProcessorNode.go | 4 +- integrationTests/testSyncNode.go | 13 +++-- process/sync/testMetaBootstrap.go | 20 +++++++ process/sync/testShardBootstrap.go | 20 +++++++ 8 files changed, 149 insertions(+), 8 deletions(-) create mode 100644 integrationTests/interface.go create mode 100644 process/sync/testMetaBootstrap.go create mode 100644 process/sync/testShardBootstrap.go diff --git a/consensus/mock/bootstrapMock.go b/consensus/mock/bootstrapMock.go index 342eead37f7..f297bb9ea66 100644 --- a/consensus/mock/bootstrapMock.go +++ b/consensus/mock/bootstrapMock.go @@ -14,6 +14,7 @@ type BootstrapperMock struct { StartSyncCalled func() StopSyncCalled func() SetStatusHandlerCalled func(handler core.AppStatusHandler) error + ManualRollbackCalled func() error } func (boot *BootstrapperMock) CreateAndCommitEmptyBlock(shardForCurrentNode uint32) (data.BodyHandler, data.HeaderHandler, error) { @@ -54,6 +55,10 @@ func (boot *BootstrapperMock) SetStatusHandler(handler core.AppStatusHandler) er return boot.SetStatusHandlerCalled(handler) } +func (boot *BootstrapperMock) ManualRollback() error { + return boot.ManualRollbackCalled() +} + // IsInterfaceNil returns true if there is no value under the interface func (boot *BootstrapperMock) IsInterfaceNil() bool { if 
boot == nil { diff --git a/integrationTests/interface.go b/integrationTests/interface.go new file mode 100644 index 00000000000..455fccfa8be --- /dev/null +++ b/integrationTests/interface.go @@ -0,0 +1,11 @@ +package integrationTests + +import "github.com/ElrondNetwork/elrond-go/process" + +// TestBootstrapper extends the Bootstrapper interface with some funcs intended to be used only in tests +// as it simplifies the reproduction of edge cases +type TestBootstrapper interface { + process.Bootstrapper + ManualRollback() error + SetProbableHighestNonce(nonce uint64) +} diff --git a/integrationTests/sync/common.go b/integrationTests/sync/common.go index fcc0659c5be..7fe0c676791 100644 --- a/integrationTests/sync/common.go +++ b/integrationTests/sync/common.go @@ -6,6 +6,7 @@ import ( "sync/atomic" "time" + "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/integrationTests" "github.com/ElrondNetwork/elrond-go/p2p" "github.com/ElrondNetwork/elrond-go/sharding" @@ -108,3 +109,74 @@ func proposeBlocks( integrationTests.ProposeBlock(nodes, []int{proposer}, crtRound, crtNonce) } } + +func manualRollback(nodes []*integrationTests.TestProcessorNode, shardId uint32, targetNonce uint64) { + for idx, n := range nodes { + if n.ShardCoordinator.SelfId() != shardId { + continue + } + + if n.BlockChain.GetCurrentBlockHeader().GetNonce() != targetNonce { + continue + } + + oldNonce := n.BlockChain.GetCurrentBlockHeader().GetNonce() + err := n.Bootstrapper.ManualRollback() + if err != nil { + fmt.Println(err) + } + newNonce := n.BlockChain.GetCurrentBlockHeader().GetNonce() + fmt.Printf("Node's id %d had nonce %d, now is %d\n", idx, oldNonce, newNonce) + } +} + +func emptyDataPools(nodes []*integrationTests.TestProcessorNode, shardId uint32) { + for _, n := range nodes { + if n.ShardCoordinator.SelfId() != shardId { + continue + } + + emptyNodeDataPool(n) + } +} + +func emptyNodeDataPool(node *integrationTests.TestProcessorNode) { + if node.ShardDataPool != nil { + emptyShardDataPool(node.ShardDataPool) + } + + if node.MetaDataPool != nil { + emptyMetaDataPool(node.MetaDataPool) + } +} + +func emptyShardDataPool(sdp dataRetriever.PoolsHolder) { + sdp.HeadersNonces().Clear() + sdp.Headers().Clear() + sdp.UnsignedTransactions().Clear() + sdp.Transactions().Clear() + sdp.MetaBlocks().Clear() + sdp.MiniBlocks().Clear() + sdp.PeerChangesBlocks().Clear() +} + +func emptyMetaDataPool(holder dataRetriever.MetaPoolsHolder) { + holder.HeadersNonces().Clear() + holder.MetaChainBlocks().Clear() + holder.MiniBlockHashes().Clear() + holder.ShardHeaders().Clear() +} + +func resetHighestProbableNonce(nodes []*integrationTests.TestProcessorNode, shardId uint32, targetNonce uint64) { + for _, n := range nodes { + if n.ShardCoordinator.SelfId() != shardId { + continue + } + + if n.BlockChain.GetCurrentBlockHeader().GetNonce() != targetNonce { + continue + } + + n.Bootstrapper.SetProbableHighestNonce(targetNonce) + } +} diff --git a/integrationTests/sync/edgeCases_test.go b/integrationTests/sync/edgeCases_test.go index 44fd83a71cf..83d78bac2cc 100644 --- a/integrationTests/sync/edgeCases_test.go +++ b/integrationTests/sync/edgeCases_test.go @@ -1,6 +1,7 @@ package sync import ( + "sync/atomic" "testing" "time" @@ -29,6 +30,7 @@ func TestSyncMetaNodeIsSyncingReceivedHigherRoundBlockFromShard(t *testing.T) { startSyncingBlocks(nodes) round := uint64(0) + idxNonceShard := 0 idxNonceMeta := 1 nonces := []*uint64{new(uint64), new(uint64)} @@ -36,7 +38,7 @@ func 
TestSyncMetaNodeIsSyncingReceivedHigherRoundBlockFromShard(t *testing.T) { updateRound(nodes, round) incrementNonces(nonces) - numRoundsBlocksAreProposedCorrectly := 2 + numRoundsBlocksAreProposedCorrectly := 3 proposeAndSyncBlocks( nodes, &round, @@ -45,6 +47,14 @@ func TestSyncMetaNodeIsSyncingReceivedHigherRoundBlockFromShard(t *testing.T) { numRoundsBlocksAreProposedCorrectly, ) + shardIdToRollbackLastBlock := uint32(0) + manualRollback(nodes, shardIdToRollbackLastBlock, 3) + resetHighestProbableNonce(nodes, shardIdToRollbackLastBlock, 2) + emptyDataPools(nodes, shardIdToRollbackLastBlock) + + //revert also the nonce, so the same block nonce will be used when shard will propose the next block + atomic.AddUint64(nonces[idxNonceShard], ^uint64(0)) + numRoundsBlocksAreProposedOnlyByMeta := 2 proposeAndSyncBlocks( nodes, diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index 9b8f138e970..0bbe7ecc118 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -92,7 +92,7 @@ type TestProcessorNode struct { BlockTracker process.BlocksTracker BlockProcessor process.BlockProcessor BroadcastMessenger consensus.BroadcastMessenger - Bootstrapper process.Bootstrapper + Bootstrapper TestBootstrapper Rounder *mock.RounderMock //Node is used to call the functionality already implemented in it @@ -252,8 +252,8 @@ func (tpn *TestProcessorNode) initResolvers() { tpn.ResolverFinder, _ = containers.NewResolversFinder(tpn.ResolversContainer, tpn.ShardCoordinator) tpn.RequestHandler, _ = requestHandlers.NewMetaResolverRequestHandler( tpn.ResolverFinder, - factory.HeadersTopic, factory.ShardHeadersForMetachainTopic, + factory.MetachainBlocksTopic, ) } else { resolversContainerFactory, _ := factoryDataRetriever.NewResolversContainerFactory( diff --git a/integrationTests/testSyncNode.go b/integrationTests/testSyncNode.go index 81f4e20ba52..7ab9aab0333 100644 --- a/integrationTests/testSyncNode.go +++ b/integrationTests/testSyncNode.go @@ -7,7 +7,6 @@ import ( "github.com/ElrondNetwork/elrond-go/consensus/spos/sposFactory" "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/integrationTests/mock" - "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/block" "github.com/ElrondNetwork/elrond-go/process/smartContract" "github.com/ElrondNetwork/elrond-go/process/sync" @@ -119,7 +118,7 @@ func (tpn *TestProcessorNode) initBlockProcessorWithSync() { } } -func (tpn *TestProcessorNode) createShardBootstrapper() (process.Bootstrapper, error) { +func (tpn *TestProcessorNode) createShardBootstrapper() (TestBootstrapper, error) { bootstrap, err := sync.NewShardBootstrap( tpn.ShardDataPool, tpn.Storage, @@ -139,10 +138,12 @@ func (tpn *TestProcessorNode) createShardBootstrapper() (process.Bootstrapper, e return nil, err } - return bootstrap, nil + return &sync.TestShardBootstrap{ + ShardBootstrap: bootstrap, + }, nil } -func (tpn *TestProcessorNode) createMetaChainBootstrapper() (process.Bootstrapper, error) { +func (tpn *TestProcessorNode) createMetaChainBootstrapper() (TestBootstrapper, error) { bootstrap, err := sync.NewMetaBootstrap( tpn.MetaDataPool, tpn.Storage, @@ -163,7 +164,9 @@ func (tpn *TestProcessorNode) createMetaChainBootstrapper() (process.Bootstrappe return nil, err } - return bootstrap, nil + return &sync.TestMetaBootstrap{ + MetaBootstrap: bootstrap, + }, nil } func (tpn *TestProcessorNode) initBootstrapper() { diff --git a/process/sync/testMetaBootstrap.go 
b/process/sync/testMetaBootstrap.go new file mode 100644 index 00000000000..d035eb9c779 --- /dev/null +++ b/process/sync/testMetaBootstrap.go @@ -0,0 +1,20 @@ +package sync + +import "github.com/ElrondNetwork/elrond-go/data/block" + +// TestMetaBootstrap extends MetaBootstrap and is used in integration tests as it exposes some funcs +// that are not supposed to be used in production code +// Exported funcs simplify the reproduction of edge cases +type TestMetaBootstrap struct { + *MetaBootstrap +} + +// ManualRollback calls the rollback on the current block from the blockchain structure +func (tmb *TestMetaBootstrap) ManualRollback() error { + return tmb.rollback(tmb.blkc.GetCurrentBlockHeader().(*block.MetaBlock)) +} + +// SetProbableHighestNonce sets the probable highest nonce in the contained fork detector +func (tmb *TestMetaBootstrap) SetProbableHighestNonce(nonce uint64) { + tmb.forkDetector.(*metaForkDetector).setProbableHighestNonce(nonce) +} diff --git a/process/sync/testShardBootstrap.go b/process/sync/testShardBootstrap.go new file mode 100644 index 00000000000..43de789d2b4 --- /dev/null +++ b/process/sync/testShardBootstrap.go @@ -0,0 +1,20 @@ +package sync + +import "github.com/ElrondNetwork/elrond-go/data/block" + +// TestShardBootstrap extends ShardBootstrap and is used in integration tests as it exposes some funcs +// that are not supposed to be used in production code +// Exported funcs simplify the reproduction of edge cases +type TestShardBootstrap struct { + *ShardBootstrap +} + +// ManualRollback calls the rollback on the current block from the blockchain structure +func (tsb *TestShardBootstrap) ManualRollback() error { + return tsb.rollback(tsb.blkc.GetCurrentBlockHeader().(*block.Header)) +} + +// SetProbableHighestNonce sets the probable highest nonce in the contained fork detector +func (tsb *TestShardBootstrap) SetProbableHighestNonce(nonce uint64) { + tsb.forkDetector.(*shardForkDetector).setProbableHighestNonce(nonce) +} From 1122e54a8bf3cdb7492a831ecdc451337d2578b4 Mon Sep 17 00:00:00 2001 From: miiu96 Date: Thu, 26 Sep 2019 10:22:37 +0300 Subject: [PATCH 140/234] EN-3981 : fix after review --- cmd/node/main.go | 7 ++--- consensus/spos/bls/blsSubroundsFactory.go | 4 +-- consensus/spos/bn/bnSubroundsFactory.go | 4 +-- consensus/spos/sposFactory/sposFactory.go | 4 +-- core/indexer/elasticsearch.go | 23 ++++++++++----- node/defineOptions.go | 1 - node/defineOptions_test.go | 13 ++++++++ node/mock/indexerMock.go | 36 +++++++++++++++++++++++ 8 files changed, 73 insertions(+), 19 deletions(-) create mode 100644 node/mock/indexerMock.go diff --git a/cmd/node/main.go b/cmd/node/main.go index 770edd8b5d0..006ae7e7647 100644 --- a/cmd/node/main.go +++ b/cmd/node/main.go @@ -682,15 +682,14 @@ func startNode(ctx *cli.Context, log *logger.Logger, version string) error { return err } - if shardCoordinator.SelfId() == sharding.MetachainShardId { - indexValidatorsListIfNeeded(elasticIndexer, nodesCoordinator) - } + if shardCoordinator.SelfId() == sharding.MetachainShardId { + indexValidatorsListIfNeeded(elasticIndexer, nodesCoordinator) + } vmAccountsDB, err := hooks.NewVMAccountsDB( stateComponents.AccountsAdapter, stateComponents.AddressConverter, ) - if err != nil { return err } diff --git a/consensus/spos/bls/blsSubroundsFactory.go b/consensus/spos/bls/blsSubroundsFactory.go index b53d3d9ebd1..836e48bcc58 100644 --- a/consensus/spos/bls/blsSubroundsFactory.go +++ b/consensus/spos/bls/blsSubroundsFactory.go @@ -65,7 +65,7 @@ func checkNewFactoryParams( return nil } -// 
SetAppStatusHandler method set appStatusHandler +// SetAppStatusHandler method will update the value of the factory's appStatusHandler func (fct *factory) SetAppStatusHandler(ash core.AppStatusHandler) error { if ash == nil || ash.IsInterfaceNil() { return spos.ErrNilAppStatusHandler @@ -75,7 +75,7 @@ func (fct *factory) SetAppStatusHandler(ash core.AppStatusHandler) error { return nil } -// SetIndexer method set indexer +// SetIndexer method will update the value of the factory's indexer func (fct *factory) SetIndexer(indexer indexer.Indexer) { fct.indexer = indexer } diff --git a/consensus/spos/bn/bnSubroundsFactory.go b/consensus/spos/bn/bnSubroundsFactory.go index 81928fe94fa..816f0d173a7 100644 --- a/consensus/spos/bn/bnSubroundsFactory.go +++ b/consensus/spos/bn/bnSubroundsFactory.go @@ -69,7 +69,7 @@ func checkNewFactoryParams( return nil } -// SetAppStatusHandler method set appStatusHandler +// SetAppStatusHandler method will update the value of the factory's appStatusHandler func (fct *factory) SetAppStatusHandler(ash core.AppStatusHandler) error { if ash == nil || ash.IsInterfaceNil() { return spos.ErrNilAppStatusHandler @@ -79,7 +79,7 @@ func (fct *factory) SetAppStatusHandler(ash core.AppStatusHandler) error { return nil } -// SetIndexer method set indexer +// SetIndexer method will update the value of the factory's indexer func (fct *factory) SetIndexer(indexer indexer.Indexer) { fct.indexer = indexer } diff --git a/consensus/spos/sposFactory/sposFactory.go b/consensus/spos/sposFactory/sposFactory.go index 776c0478d29..394950911e9 100644 --- a/consensus/spos/sposFactory/sposFactory.go +++ b/consensus/spos/sposFactory/sposFactory.go @@ -31,12 +31,12 @@ func GetSubroundsFactory( } err = subRoundFactoryBls.SetAppStatusHandler(appStatusHandler) - - subRoundFactoryBls.SetIndexer(indexer) if err != nil { return nil, err } + subRoundFactoryBls.SetIndexer(indexer) + return subRoundFactoryBls, nil case bnConsensusType: subRoundFactoryBn, err := bn.NewSubroundsFactory(consensusDataContainer, consensusState, worker) diff --git a/core/indexer/elasticsearch.go b/core/indexer/elasticsearch.go index 922349da772..35356167a14 100644 --- a/core/indexer/elasticsearch.go +++ b/core/indexer/elasticsearch.go @@ -238,18 +238,18 @@ func (ei *elasticIndexer) SaveBlock( } } -// SaveRoundInfo will save on elastic search information about round +// SaveRoundInfo will save data about a round on elastic search sounds better func (ei *elasticIndexer) SaveRoundInfo(round int64, shardId uint32, signersIndexes []uint64) { var buff bytes.Buffer - serializedSignersIndexes, err := ei.marshalizer.Marshal(RoundInfo{SignersIndexes: signersIndexes, ShardId: shardId}) + marshalizedSignersIndexes, err := ei.marshalizer.Marshal(RoundInfo{SignersIndexes: signersIndexes, ShardId: shardId}) if err != nil { ei.logger.Warn("could not marshal signers indexes") return } - buff.Grow(len(serializedSignersIndexes)) - buff.Write(serializedSignersIndexes) + buff.Grow(len(marshalizedSignersIndexes)) + buff.Write(marshalizedSignersIndexes) req := esapi.IndexRequest{ Index: roundIndex, @@ -271,18 +271,25 @@ func (ei *elasticIndexer) SaveRoundInfo(round int64, shardId uint32, signersInde } } -//SaveValidatorsPubKeys will sent all validators public keys to elastic search +//SaveValidatorsPubKeys will send all validators public keys to elastic search func (ei *elasticIndexer) SaveValidatorsPubKeys(validatorsPubKeys map[uint32][][]byte) { var buff bytes.Buffer - serializedValidatorPubKeys, err := ei.marshalizer.Marshal(validatorsPubKeys) 
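+	// hex-encode each raw public key before marshaling, presumably so the documents
+	// stored in elastic search stay human readable and searchable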
+ valPubKeys := make(map[uint32][]string, 0) + for shardId, shardPubKeys := range validatorsPubKeys { + for _, pubKey := range shardPubKeys { + valPubKeys[shardId] = append(valPubKeys[shardId], hex.EncodeToString(pubKey)) + } + } + + marshalizedValidatorPubKeys, err := ei.marshalizer.Marshal(valPubKeys) if err != nil { ei.logger.Warn("could not marshal validators public keys") return } - buff.Grow(len(serializedValidatorPubKeys)) - buff.Write(serializedValidatorPubKeys) + buff.Grow(len(marshalizedValidatorPubKeys)) + buff.Write(marshalizedValidatorPubKeys) req := esapi.IndexRequest{ Index: validatorsIndex, diff --git a/node/defineOptions.go b/node/defineOptions.go index decc3ccdffb..7995fc354cb 100644 --- a/node/defineOptions.go +++ b/node/defineOptions.go @@ -406,7 +406,6 @@ func WithAppStatusHandler(aph core.AppStatusHandler) Option { func WithIndexer(indexer indexer.Indexer) Option { return func(n *Node) error { n.indexer = indexer - return nil } } diff --git a/node/defineOptions_test.go b/node/defineOptions_test.go index 4be5f82c24e..408395da9f1 100644 --- a/node/defineOptions_test.go +++ b/node/defineOptions_test.go @@ -789,3 +789,16 @@ func TestWithAppStatusHandler_OkAshShouldPass(t *testing.T) { assert.IsType(t, &statusHandler.NilStatusHandler{}, node.appStatusHandler) assert.Nil(t, err) } + +func TestWithIndexer_ShouldWork(t *testing.T) { + t.Parallel() + + node, _ := NewNode() + + indexer := &mock.IndexerMock{} + opt := WithIndexer(indexer) + err := opt(node) + + assert.True(t, node.indexer == indexer) + assert.Nil(t, err) +} diff --git a/node/mock/indexerMock.go b/node/mock/indexerMock.go new file mode 100644 index 00000000000..5440361c6be --- /dev/null +++ b/node/mock/indexerMock.go @@ -0,0 +1,36 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go/core/statistics" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/block" +) + +// IndexerMock is a mock implementation fot the Indexer interface +type IndexerMock struct { + SaveBlockCalled func(body block.Body, header *block.Header) +} + +func (im *IndexerMock) SaveBlock(body data.BodyHandler, header data.HeaderHandler, txPool map[string]data.TransactionHandler, signersIndexes []uint64) { + panic("implement me") +} + +func (im *IndexerMock) UpdateTPS(tpsBenchmark statistics.TPSBenchmark) { + panic("implement me") +} + +func (im *IndexerMock) SaveRoundInfo(round int64, shardId uint32, signersIndexes []uint64) { + panic("implement me") +} + +func (im *IndexerMock) SaveValidatorsPubKeys(validatorsPubKeys map[uint32][][]byte) { + panic("implement me") +} + +// IsInterfaceNil returns true if there is no value under the interface +func (im *IndexerMock) IsInterfaceNil() bool { + if im == nil { + return true + } + return false +} From b5c121564d42eaba582504c0c4e16ef0fb967346 Mon Sep 17 00:00:00 2001 From: iulianpascalau Date: Thu, 26 Sep 2019 10:29:05 +0300 Subject: [PATCH 141/234] updated test comment --- integrationTests/sync/edgeCases_test.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/integrationTests/sync/edgeCases_test.go b/integrationTests/sync/edgeCases_test.go index 83d78bac2cc..e703862b948 100644 --- a/integrationTests/sync/edgeCases_test.go +++ b/integrationTests/sync/edgeCases_test.go @@ -11,9 +11,10 @@ import ( ) // TestSyncMetaNodeIsSyncingReceivedHigherRoundBlockFromShard tests the following scenario: -// 1. Meta and shard 0 are in sync, shard 0 stops producing blocks -// 2. 
Shard 0 resumes producing block, having a new block with nonce 5 in a higher round than notarized by metachain -// 3. A bootstrapping meta node should be able to pass block with nonce 4 +// 1. Meta and shard 0 are in sync, producing blocks +// 2. At nonce 3, shard 0 makes a rollback and stops producing blocks for 2 rounds, meta keeps producing blocks +// 3. Shard 0 resumes creating blocks starting with nonce 3 +// 3. A bootstrapping meta node should be able to pass meta block with nonce 2 func TestSyncMetaNodeIsSyncingReceivedHigherRoundBlockFromShard(t *testing.T) { if testing.Short() { t.Skip("this is not a short test") From 5520f833ddf3ce80ff41c509c09656f6d57486ed Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Thu, 26 Sep 2019 11:11:16 +0300 Subject: [PATCH 142/234] process: remove duplicated code --- process/block/shardblock.go | 67 ++++++++++++++++--------------------- 1 file changed, 28 insertions(+), 39 deletions(-) diff --git a/process/block/shardblock.go b/process/block/shardblock.go index e9d956a7530..c0818ecee35 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -231,7 +231,7 @@ func (sp *shardProcessor) ProcessBlock( } }() - processedMetaHdrs, err := sp.getProcessedMetaBlocks(body, header.MetaBlockHashes) + processedMetaHdrs, err := sp.getProcessedMetaBlocksFromMiniBlocks(body, header.MetaBlockHashes) if err != nil { return err } @@ -843,51 +843,31 @@ func (sp *shardProcessor) getProcessedMetaBlocksFromHeader(header *block.Header) log.Debug(fmt.Sprintf("cross mini blocks in body: %d\n", len(miniBlockHashes))) - processedMetaHdrs := make([]data.HeaderHandler, 0) - for _, metaBlockKey := range header.MetaBlockHashes { - obj, _ := sp.dataPool.MetaBlocks().Peek(metaBlockKey) - if obj == nil { - return nil, process.ErrNilMetaBlockHeader - } + processedMetaHeaders, usedMbs, err := sp.getProcessedMetaBlocksFromMiniBlockHashes(miniBlockHashes, header.MetaBlockHashes) + if err != nil { + return nil, err + } - metaBlock, ok := obj.(*block.MetaBlock) + for _, metaBlockKey := range header.MetaBlockHashes { + obj, ok := sp.dataPool.MetaBlocks().Peek(metaBlockKey) if !ok { - return nil, process.ErrWrongTypeAssertion + return nil, process.ErrNilMetaBlockHeader } - // todo: change to debug after test - log.Info(fmt.Sprintf("meta header nonce: %d\n", metaBlock.Nonce)) + metaBlock := obj.(*block.MetaBlock) crossMiniBlockHashes := metaBlock.GetMiniBlockHeadersWithDst(sp.shardCoordinator.SelfId()) - for key := range miniBlockHashes { - _, ok := crossMiniBlockHashes[string(miniBlockHashes[key])] - if !ok { - continue - } - - metaBlock.SetMiniBlockProcessed(miniBlockHashes[key], true) - delete(miniBlockHashes, key) - } - - log.Debug(fmt.Sprintf("cross mini blocks in meta header: %d\n", len(crossMiniBlockHashes))) - - processedAll := true for key := range crossMiniBlockHashes { - if !metaBlock.GetMiniBlockProcessed([]byte(key)) { - processedAll = false - break + if usedMbs[key] { + metaBlock.SetMiniBlockProcessed([]byte(key), true) } } - - if processedAll { - processedMetaHdrs = append(processedMetaHdrs, metaBlock) - } } - return processedMetaHdrs, nil + return processedMetaHeaders, nil } // getProcessedMetaBlocks returns all the meta blocks fully processed -func (sp *shardProcessor) getProcessedMetaBlocks( +func (sp *shardProcessor) getProcessedMetaBlocksFromMiniBlocks( usedMiniBlocks []*block.MiniBlock, usedMetaBlockHashes [][]byte, ) ([]data.HeaderHandler, error) { @@ -912,19 +892,28 @@ func (sp *shardProcessor) getProcessedMetaBlocks( } log.Debug(fmt.Sprintf("cross mini 
blocks in body: %d\n", len(miniBlockHashes))) + processedMetaBlocks, _, err := sp.getProcessedMetaBlocksFromMiniBlockHashes(miniBlockHashes, usedMetaBlockHashes) + + return processedMetaBlocks, err +} + +func (sp *shardProcessor) getProcessedMetaBlocksFromMiniBlockHashes( + miniBlockHashes map[int][]byte, + usedMetaBlockHashes [][]byte, +) ([]data.HeaderHandler, map[string]bool, error) { processedMetaHdrs := make([]data.HeaderHandler, 0) - for _, metaBlockKey := range usedMetaBlockHashes { - processedMBs := make(map[string]bool) + processedMBs := make(map[string]bool) + for _, metaBlockKey := range usedMetaBlockHashes { obj, _ := sp.dataPool.MetaBlocks().Peek(metaBlockKey) if obj == nil { - return nil, process.ErrNilMetaBlockHeader + return nil, nil, process.ErrNilMetaBlockHeader } metaBlock, ok := obj.(*block.MetaBlock) if !ok { - return nil, process.ErrWrongTypeAssertion + return nil, nil, process.ErrWrongTypeAssertion } // todo: change to debug after test log.Info(fmt.Sprintf("meta header nonce: %d\n", metaBlock.Nonce)) @@ -960,7 +949,7 @@ func (sp *shardProcessor) getProcessedMetaBlocks( } } - return processedMetaHdrs, nil + return processedMetaHdrs, processedMBs, nil } func (sp *shardProcessor) removeProcessedMetablocksFromPool(processedMetaHdrs []data.HeaderHandler) error { @@ -1469,7 +1458,7 @@ func (sp *shardProcessor) createMiniBlocks( log.Info(err.Error()) } - processedMetaHdrs, errNotCritical := sp.getProcessedMetaBlocks(destMeMiniBlocks, usedMetaHdrsHashes) + processedMetaHdrs, errNotCritical := sp.getProcessedMetaBlocksFromMiniBlocks(destMeMiniBlocks, usedMetaHdrsHashes) if errNotCritical != nil { log.Debug(errNotCritical.Error()) } From 63e47a5ba54d49751df8f29b818c9021312cffdc Mon Sep 17 00:00:00 2001 From: iulianpascalau Date: Thu, 26 Sep 2019 11:33:11 +0300 Subject: [PATCH 143/234] uncommented test code --- .../metablock/blocksDissemination_test.go | 101 +++++++++--------- 1 file changed, 51 insertions(+), 50 deletions(-) diff --git a/integrationTests/multiShard/metablock/blocksDissemination_test.go b/integrationTests/multiShard/metablock/blocksDissemination_test.go index a431f5673e9..babcacf45de 100644 --- a/integrationTests/multiShard/metablock/blocksDissemination_test.go +++ b/integrationTests/multiShard/metablock/blocksDissemination_test.go @@ -11,6 +11,7 @@ import ( "github.com/ElrondNetwork/elrond-go/dataRetriever/resolvers" "github.com/ElrondNetwork/elrond-go/integrationTests" "github.com/ElrondNetwork/elrond-go/process/factory" + "github.com/ElrondNetwork/elrond-go/sharding" "github.com/stretchr/testify/assert" ) @@ -130,54 +131,54 @@ func TestHeadersAreResolvedByMetachainAndShard(t *testing.T) { assert.Equal(t, int32(1), atomic.LoadInt32(&n.CounterHdrRecv)) } - //fmt.Println("Generating meta header, save it in meta datapools and shard 0 node requests it after its hash...") - //_, metaHdr, _ := nodes[1].ProposeBlock(1, 1) - //metaHeaderBytes, _ := integrationTests.TestMarshalizer.Marshal(metaHdr) - //metaHeaderHash := integrationTests.TestHasher.Compute(string(metaHeaderBytes)) - //for i := 0; i < numMetaNodes; i++ { - // nodes[i+1].MetaDataPool.MetaChainBlocks().HasOrAdd(metaHeaderHash, metaHdr) - //} - // - //for i := 0; i < maxNumRequests; i++ { - // resolver, err := nodes[0].ResolverFinder.MetaChainResolver(factory.MetachainBlocksTopic) - // assert.Nil(t, err) - // _ = resolver.RequestDataFromHash(metaHeaderHash) - // - // fmt.Println(integrationTests.MakeDisplayTable(nodes)) - // - // time.Sleep(time.Second) - //} - // - ////all node should have received the 
meta header - //for _, n := range nodes { - // assert.Equal(t, int32(1), atomic.LoadInt32(&n.CounterMetaRcv)) - //} - // - //fmt.Println("Generating meta header, save it in meta datapools and shard 0 node requests it after its nonce...") - //_, metaHdr2, _ := nodes[1].ProposeBlock(2, 2) - //metaHdr2.SetNonce(64) - //metaHeaderBytes2, _ := integrationTests.TestMarshalizer.Marshal(metaHdr2) - //metaHeaderHash2 := integrationTests.TestHasher.Compute(string(metaHeaderBytes2)) - //for i := 0; i < numMetaNodes; i++ { - // nodes[i+1].MetaDataPool.MetaChainBlocks().HasOrAdd(metaHeaderHash2, metaHdr2) - // - // syncMap := &dataPool.ShardIdHashSyncMap{} - // syncMap.Store(sharding.MetachainShardId, metaHeaderHash2) - // nodes[i+1].MetaDataPool.HeadersNonces().Merge(metaHdr2.GetNonce(), syncMap) - //} - // - //for i := 0; i < maxNumRequests; i++ { - // resolver, err := nodes[0].ResolverFinder.MetaChainResolver(factory.MetachainBlocksTopic) - // assert.Nil(t, err) - // _ = resolver.(*resolvers.HeaderResolver).RequestDataFromNonce(metaHdr2.GetNonce()) - // - // fmt.Println(integrationTests.MakeDisplayTable(nodes)) - // - // time.Sleep(time.Second) - //} - // - ////all node should have received the meta header - //for _, n := range nodes { - // assert.Equal(t, int32(2), atomic.LoadInt32(&n.CounterMetaRcv)) - //} + fmt.Println("Generating meta header, save it in meta datapools and shard 0 node requests it after its hash...") + _, metaHdr, _ := nodes[1].ProposeBlock(1, 1) + metaHeaderBytes, _ := integrationTests.TestMarshalizer.Marshal(metaHdr) + metaHeaderHash := integrationTests.TestHasher.Compute(string(metaHeaderBytes)) + for i := 0; i < numMetaNodes; i++ { + nodes[i+1].MetaDataPool.MetaChainBlocks().HasOrAdd(metaHeaderHash, metaHdr) + } + + for i := 0; i < maxNumRequests; i++ { + resolver, err := nodes[0].ResolverFinder.MetaChainResolver(factory.MetachainBlocksTopic) + assert.Nil(t, err) + _ = resolver.RequestDataFromHash(metaHeaderHash) + + fmt.Println(integrationTests.MakeDisplayTable(nodes)) + + time.Sleep(time.Second) + } + + //all node should have received the meta header + for _, n := range nodes { + assert.Equal(t, int32(1), atomic.LoadInt32(&n.CounterMetaRcv)) + } + + fmt.Println("Generating meta header, save it in meta datapools and shard 0 node requests it after its nonce...") + _, metaHdr2, _ := nodes[1].ProposeBlock(2, 2) + metaHdr2.SetNonce(64) + metaHeaderBytes2, _ := integrationTests.TestMarshalizer.Marshal(metaHdr2) + metaHeaderHash2 := integrationTests.TestHasher.Compute(string(metaHeaderBytes2)) + for i := 0; i < numMetaNodes; i++ { + nodes[i+1].MetaDataPool.MetaChainBlocks().HasOrAdd(metaHeaderHash2, metaHdr2) + + syncMap := &dataPool.ShardIdHashSyncMap{} + syncMap.Store(sharding.MetachainShardId, metaHeaderHash2) + nodes[i+1].MetaDataPool.HeadersNonces().Merge(metaHdr2.GetNonce(), syncMap) + } + + for i := 0; i < maxNumRequests; i++ { + resolver, err := nodes[0].ResolverFinder.MetaChainResolver(factory.MetachainBlocksTopic) + assert.Nil(t, err) + _ = resolver.(*resolvers.HeaderResolver).RequestDataFromNonce(metaHdr2.GetNonce()) + + fmt.Println(integrationTests.MakeDisplayTable(nodes)) + + time.Sleep(time.Second) + } + + //all node should have received the meta header + for _, n := range nodes { + assert.Equal(t, int32(2), atomic.LoadInt32(&n.CounterMetaRcv)) + } } From 0adf4c718ea774a9ef0030ffb17bd3f365a72096 Mon Sep 17 00:00:00 2001 From: iulianpascalau Date: Thu, 26 Sep 2019 11:34:38 +0300 Subject: [PATCH 144/234] reverted blocksDissemination_test --- 
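For context, a minimal sketch of the two request paths the blocksDissemination test exercises, built only from calls that appear in the test above; requestingNode, servingNode, metaHdr and metaHeaderHash are placeholders, and the by-nonce path only succeeds if the serving nodes have merged the nonce-to-hash mapping into their HeadersNonces cache beforehand:

// request a metachain header by hash
resolver, err := requestingNode.ResolverFinder.MetaChainResolver(factory.MetachainBlocksTopic)
if err != nil {
	fmt.Println(err)
}
_ = resolver.RequestDataFromHash(metaHeaderHash)

// request the same header by nonce; the serving side must already know the nonce -> hash mapping
syncMap := &dataPool.ShardIdHashSyncMap{}
syncMap.Store(sharding.MetachainShardId, metaHeaderHash)
servingNode.MetaDataPool.HeadersNonces().Merge(metaHdr.GetNonce(), syncMap)
_ = resolver.(*resolvers.HeaderResolver).RequestDataFromNonce(metaHdr.GetNonce())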
.../multiShard/metablock/blocksDissemination_test.go | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/integrationTests/multiShard/metablock/blocksDissemination_test.go b/integrationTests/multiShard/metablock/blocksDissemination_test.go index babcacf45de..23a9e90a2f3 100644 --- a/integrationTests/multiShard/metablock/blocksDissemination_test.go +++ b/integrationTests/multiShard/metablock/blocksDissemination_test.go @@ -12,7 +12,6 @@ import ( "github.com/ElrondNetwork/elrond-go/integrationTests" "github.com/ElrondNetwork/elrond-go/process/factory" "github.com/ElrondNetwork/elrond-go/sharding" - "github.com/stretchr/testify/assert" ) @@ -109,16 +108,13 @@ func TestHeadersAreResolvedByMetachainAndShard(t *testing.T) { shardHeaderBytes, _ := integrationTests.TestMarshalizer.Marshal(hdr) shardHeaderHash := integrationTests.TestHasher.Compute(string(shardHeaderBytes)) nodes[0].ShardDataPool.Headers().HasOrAdd(shardHeaderHash, hdr) - m := &dataPool.ShardIdHashSyncMap{} - m.Store(0, shardHeaderHash) - nodes[0].ShardDataPool.HeadersNonces().Merge(1, m) maxNumRequests := 5 for i := 0; i < maxNumRequests; i++ { for j := 0; j < numMetaNodes; j++ { resolver, err := nodes[j+1].ResolverFinder.CrossShardResolver(factory.ShardHeadersForMetachainTopic, senderShard) assert.Nil(t, err) - _ = resolver.(*resolvers.HeaderResolver).RequestDataFromNonce(1) + _ = resolver.RequestDataFromHash(shardHeaderHash) } fmt.Println(integrationTests.MakeDisplayTable(nodes)) From 7d1ddc4fea00bc45a722eb30e33c2a2c87ecf76b Mon Sep 17 00:00:00 2001 From: bogdan-rosianu Date: Thu, 26 Sep 2019 11:47:58 +0300 Subject: [PATCH 145/234] EN-4112: fixes after review --- statusHandler/view/termuic/termuiConsole.go | 6 +----- statusHandler/view/termuic/termuiRenders/widgetsRender.go | 4 ++-- 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/statusHandler/view/termuic/termuiConsole.go b/statusHandler/view/termuic/termuiConsole.go index bf738008009..c08a03b7b41 100644 --- a/statusHandler/view/termuic/termuiConsole.go +++ b/statusHandler/view/termuic/termuiConsole.go @@ -112,7 +112,7 @@ func (tc *TermuiConsole) processUiEvents(e ui.Event) { func (tc *TermuiConsole) doChanges(counter *uint32) { atomic.AddUint32(counter, 1) if atomic.LoadUint32(counter) > numOfTicksBeforeRedrawing { - tc.doFakeResize() + tc.doResize(ui.TerminalDimensions()) atomic.StoreUint32(counter, 0) } else { tc.refreshWindow() @@ -124,10 +124,6 @@ func (tc *TermuiConsole) doResizeEvent(e ui.Event) { tc.doResize(payload.Width, payload.Height) } -func (tc *TermuiConsole) doFakeResize() { - tc.doResize(ui.TerminalDimensions()) -} - func (tc *TermuiConsole) doResize(width int, height int) { tc.grid.SetRectangle(0, 0, width, height) tc.refreshWindow() diff --git a/statusHandler/view/termuic/termuiRenders/widgetsRender.go b/statusHandler/view/termuic/termuiRenders/widgetsRender.go index 6b4064365c0..07a561899fd 100644 --- a/statusHandler/view/termuic/termuiRenders/widgetsRender.go +++ b/statusHandler/view/termuic/termuiRenders/widgetsRender.go @@ -276,8 +276,8 @@ func (wr *WidgetsRender) prepareListWithLogsForDisplay() { func (wr *WidgetsRender) prepareLogLines(logData []string, maxSize int) []string { logDataLen := len(logData) maxSize = maxSize - 2 // decrease 2 units as the total maxSize of the log list includes also the header and the footer - if maxSize < 0 { - maxSize = 0 + if maxSize <= 0 { + return []string{} // there isn't place for any log line } if logDataLen > maxSize { From 8f9babfc99f3df80d7c06a6bf44b778abf6217e6 Mon Sep 17 00:00:00 
2001 From: miiu96 Date: Thu, 26 Sep 2019 12:29:55 +0300 Subject: [PATCH 146/234] EN-4105 : fix test to work all the time --- process/common_test.go | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/process/common_test.go b/process/common_test.go index 7ecdb90bae2..5e25f6c8230 100644 --- a/process/common_test.go +++ b/process/common_test.go @@ -4,6 +4,7 @@ import ( "bytes" "sync" "testing" + "time" "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/data/transaction" @@ -34,7 +35,7 @@ func TestEmptyChannelShouldWorkOnBufferedChannel(t *testing.T) { assert.Equal(t, 3, readsCnt) } -func TestEmptyChannelShouldWorkOnNotBufferdChannel(t *testing.T) { +func TestEmptyChannelShouldWorkOnNotBufferedChannel(t *testing.T) { ch := make(chan bool) assert.Equal(t, 0, len(ch)) @@ -43,19 +44,31 @@ func TestEmptyChannelShouldWorkOnNotBufferdChannel(t *testing.T) { assert.Equal(t, 0, readsCnt) wg := sync.WaitGroup{} + wgChanWasWritten := sync.WaitGroup{} numConcurrentWrites := 100 wg.Add(numConcurrentWrites) + wgChanWasWritten.Add(numConcurrentWrites) for i := 0; i < numConcurrentWrites; i++ { go func() { wg.Done() + time.Sleep(time.Millisecond) ch <- true + wgChanWasWritten.Done() }() } // wait for go routines to start wg.Wait() - readsCnt = process.EmptyChannel(ch) + go func() { + for readsCnt < numConcurrentWrites { + readsCnt += process.EmptyChannel(ch) + } + }() + + // wait for go routines to finish + wgChanWasWritten.Wait() + assert.Equal(t, 0, len(ch)) assert.Equal(t, numConcurrentWrites, readsCnt) } From 6e037f9929de3a411fae25c779f1b91add7ac687 Mon Sep 17 00:00:00 2001 From: miiu96 Date: Thu, 26 Sep 2019 12:43:07 +0300 Subject: [PATCH 147/234] EN-4105 : fix test to work all the time --- core/indexer/elasticsearch.go | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/core/indexer/elasticsearch.go b/core/indexer/elasticsearch.go index 35356167a14..8860dd720d2 100644 --- a/core/indexer/elasticsearch.go +++ b/core/indexer/elasticsearch.go @@ -238,11 +238,16 @@ func (ei *elasticIndexer) SaveBlock( } } -// SaveRoundInfo will save data about a round on elastic search sounds better +// SaveRoundInfo will save data about a round on elastic func (ei *elasticIndexer) SaveRoundInfo(round int64, shardId uint32, signersIndexes []uint64) { var buff bytes.Buffer - marshalizedSignersIndexes, err := ei.marshalizer.Marshal(RoundInfo{SignersIndexes: signersIndexes, ShardId: shardId}) + roundInfo := RoundInfo{ + SignersIndexes: signersIndexes, + ShardId: shardId, + } + + marshalizedSignersIndexes, err := ei.marshalizer.Marshal(roundInfo) if err != nil { ei.logger.Warn("could not marshal signers indexes") return From 1cefdcf38fbde616ef4f9b980879cd1e03307b15 Mon Sep 17 00:00:00 2001 From: miiu96 Date: Thu, 26 Sep 2019 12:43:07 +0300 Subject: [PATCH 148/234] EN-3981 : fix after review --- core/indexer/elasticsearch.go | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/core/indexer/elasticsearch.go b/core/indexer/elasticsearch.go index 35356167a14..8860dd720d2 100644 --- a/core/indexer/elasticsearch.go +++ b/core/indexer/elasticsearch.go @@ -238,11 +238,16 @@ func (ei *elasticIndexer) SaveBlock( } } -// SaveRoundInfo will save data about a round on elastic search sounds better +// SaveRoundInfo will save data about a round on elastic func (ei *elasticIndexer) SaveRoundInfo(round int64, shardId uint32, signersIndexes []uint64) { var buff bytes.Buffer - marshalizedSignersIndexes, err := 
ei.marshalizer.Marshal(RoundInfo{SignersIndexes: signersIndexes, ShardId: shardId}) + roundInfo := RoundInfo{ + SignersIndexes: signersIndexes, + ShardId: shardId, + } + + marshalizedSignersIndexes, err := ei.marshalizer.Marshal(roundInfo) if err != nil { ei.logger.Warn("could not marshal signers indexes") return From 941e2ed90fb2ae7ec71eb82b53f04bc3f2de9504 Mon Sep 17 00:00:00 2001 From: bogdan-rosianu Date: Thu, 26 Sep 2019 14:43:12 +0300 Subject: [PATCH 149/234] EN-4112: fixes after review --- statusHandler/view/termuic/termuiRenders/widgetsRender.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/statusHandler/view/termuic/termuiRenders/widgetsRender.go b/statusHandler/view/termuic/termuiRenders/widgetsRender.go index 07a561899fd..bc6133c4eaf 100644 --- a/statusHandler/view/termuic/termuiRenders/widgetsRender.go +++ b/statusHandler/view/termuic/termuiRenders/widgetsRender.go @@ -273,15 +273,15 @@ func (wr *WidgetsRender) prepareListWithLogsForDisplay() { wr.lLog.WrapText = true } -func (wr *WidgetsRender) prepareLogLines(logData []string, maxSize int) []string { +func (wr *WidgetsRender) prepareLogLines(logData []string, size int) []string { logDataLen := len(logData) - maxSize = maxSize - 2 // decrease 2 units as the total maxSize of the log list includes also the header and the footer + maxSize := size - 2 // decrease 2 units as the total size of the log list includes also the header and the footer if maxSize <= 0 { return []string{} // there isn't place for any log line } if logDataLen > maxSize { - return logData[(logDataLen - maxSize):logDataLen] + return logData[(logDataLen - maxSize):] } return logData From dd3e1e713e0f85f3e19082dd58b25d651389aa22 Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Thu, 26 Sep 2019 17:59:30 +0300 Subject: [PATCH 150/234] data: add unit tests and mocks --- data/address/specialAddresses_test.go | 306 +++++++++++++++++++++ data/mock/addressConverterMock.go | 66 +++++ data/mock/multipleShardsCoordinatorMock.go | 70 +++++ data/mock/nodesCoordinatorMock.go | 191 +++++++++++++ 4 files changed, 633 insertions(+) create mode 100644 data/address/specialAddresses_test.go create mode 100644 data/mock/addressConverterMock.go create mode 100644 data/mock/multipleShardsCoordinatorMock.go create mode 100644 data/mock/nodesCoordinatorMock.go diff --git a/data/address/specialAddresses_test.go b/data/address/specialAddresses_test.go new file mode 100644 index 00000000000..2c922a4c78b --- /dev/null +++ b/data/address/specialAddresses_test.go @@ -0,0 +1,306 @@ +package address + +import ( + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/mock" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/stretchr/testify/assert" + "testing" +) + +type Args struct { + ElrondCommunityAddress []byte + BurnAddress []byte + AddrConv state.AddressConverter + ShardCoordinator sharding.Coordinator + NodesCoordiator sharding.NodesCoordinator +} + +func initDefaultArgs() *Args { + args := &Args{ + ElrondCommunityAddress: []byte("community"), + BurnAddress: []byte("burn"), + AddrConv: &mock.AddressConverterMock{}, + ShardCoordinator: mock.NewMultiShardsCoordinatorMock(1), + NodesCoordiator: mock.NewNodesCoordinatorMock(), + } + + return args +} + +func createSpecialAddressFromArgs(args *Args) (process.SpecialAddressHandler, error) { + addr, err := NewSpecialAddressHolder( + args.ElrondCommunityAddress, + args.BurnAddress, + 
args.AddrConv, + args.ShardCoordinator, + args.NodesCoordiator, + ) + return addr, err +} + +func createDefaultSpecialAddress() process.SpecialAddressHandler { + args := initDefaultArgs() + addr, _ := createSpecialAddressFromArgs(args) + + return addr +} + +func TestNewSpecialAddressHolderNilCommunityAddressShouldErr(t *testing.T) { + t.Parallel() + + args := initDefaultArgs() + args.ElrondCommunityAddress = nil + addr, err := createSpecialAddressFromArgs(args) + + assert.Nil(t, addr) + assert.Equal(t, data.ErrNilElrondAddress, err) +} + +func TestNewSpecialAddressHolderNilBurnAddressShouldErr(t *testing.T) { + t.Parallel() + + args := initDefaultArgs() + args.BurnAddress = nil + addr, err := createSpecialAddressFromArgs(args) + + assert.Nil(t, addr) + assert.Equal(t, data.ErrNilBurnAddress, err) +} + +func TestNewSpecialAddressHolderNilAddressConverterShouldErr(t *testing.T) { + t.Parallel() + + args := initDefaultArgs() + args.AddrConv = nil + addr, err := createSpecialAddressFromArgs(args) + + assert.Nil(t, addr) + assert.Equal(t, data.ErrNilAddressConverter, err) +} + +func TestNewSpecialAddressHolderNilShardCoordinatorShouldErr(t *testing.T) { + t.Parallel() + + args := initDefaultArgs() + args.ShardCoordinator = nil + addr, err := createSpecialAddressFromArgs(args) + + assert.Nil(t, addr) + assert.Equal(t, data.ErrNilShardCoordinator, err) +} + +func TestNewSpecialAddressHolderNilNodesCoordinatorShouldErr(t *testing.T) { + t.Parallel() + + args := initDefaultArgs() + args.NodesCoordiator = nil + addr, err := createSpecialAddressFromArgs(args) + + assert.Nil(t, addr) + assert.Equal(t, data.ErrNilNodesCoordinator, err) +} + +func TestNewSpecialAddressHolderOK(t *testing.T) { + t.Parallel() + + args := initDefaultArgs() + addr, err := createSpecialAddressFromArgs(args) + + assert.NotNil(t, addr) + assert.Nil(t, err) +} + +func TestSpecialAddresses_ClearMetaConsensusDataOK(t *testing.T) { + t.Parallel() + + addr := createDefaultSpecialAddress() + + addr.ClearMetaConsensusData() + metaConsensusData := addr.ConsensusMetaRewardData() + + assert.Equal(t, 0, len(metaConsensusData)) +} + +func TestSpecialAddresses_SetMetaConsensusDataSettingOnceOK(t *testing.T) { + t.Parallel() + + addr := createDefaultSpecialAddress() + + err := addr.SetMetaConsensusData([]byte("randomness"), 0, 0) + assert.Nil(t, err) +} + +func TestSpecialAddresses_SetMetaConsensusDataSettingMultipleOK(t *testing.T) { + t.Parallel() + + addr := createDefaultSpecialAddress() + nConsensuses := 10 + + for i := 0; i < nConsensuses; i++ { + err := addr.SetMetaConsensusData([]byte("randomness"), uint64(i), 0) + assert.Nil(t, err) + } +} + +func TestSpecialAddresses_ConsensusMetaRewardDataNoConsensusData(t *testing.T) { + t.Parallel() + + addr := createDefaultSpecialAddress() + metaConsensusData := addr.ConsensusMetaRewardData() + + assert.Equal(t, 0, len(metaConsensusData)) +} + +func TestSpecialAddresses_ConsensusMetaRewardDataOneConsensusDataOK(t *testing.T) { + t.Parallel() + + addr := createDefaultSpecialAddress() + + _ = addr.SetMetaConsensusData([]byte("randomness"), 1, 2) + metaConsensusData := addr.ConsensusMetaRewardData() + + assert.Equal(t, 1, len(metaConsensusData)) + assert.Equal(t, uint64(1), metaConsensusData[0].Round) + assert.Equal(t, uint32(2), metaConsensusData[0].Epoch) +} + +func TestSpecialAddresses_ConsensusMetaRewardDataMultipleConsensusesDataOK(t *testing.T) { + t.Parallel() + + addr := createDefaultSpecialAddress() + nConsensuses := 10 + + for i := 0; i < nConsensuses; i++ { + _ = 
addr.SetMetaConsensusData([]byte("randomness"), uint64(i+1), uint32(i+2)) + } + + metaConsensusData := addr.ConsensusMetaRewardData() + assert.Equal(t, nConsensuses, len(metaConsensusData)) + + for i := 0; i < nConsensuses; i++ { + assert.Equal(t, uint64(i+1), metaConsensusData[i].Round) + assert.Equal(t, uint32(i+2), metaConsensusData[i].Epoch) + } +} + +func TestSpecialAddresses_ConsensusShardRewardDataNoData(t *testing.T) { + t.Parallel() + + addr := createDefaultSpecialAddress() + shardRewardData := addr.ConsensusShardRewardData() + + assert.Nil(t, shardRewardData) +} + +func TestSpecialAddresses_ConsensusShardRewardDataExistingData(t *testing.T) { + t.Parallel() + + addr := createDefaultSpecialAddress() + _ = addr.SetShardConsensusData([]byte("randomness"), 1, 2, 0) + shardRewardData := addr.ConsensusShardRewardData() + + assert.NotNil(t, shardRewardData) + assert.Equal(t, uint64(1), shardRewardData.Round) + assert.Equal(t, uint32(2), shardRewardData.Epoch) +} + +func TestSpecialAddresses_SetShardConsensusData(t *testing.T) { + t.Parallel() + + addr := createDefaultSpecialAddress() + err := addr.SetShardConsensusData([]byte("randomness"), 1, 2, 0) + + assert.Nil(t, err) +} + +func TestSpecialAddresses_BurnAddress(t *testing.T) { + t.Parallel() + + addr := createDefaultSpecialAddress() + burnAddr := addr.BurnAddress() + + assert.Equal(t, []byte("burn"), burnAddr) +} + +func TestSpecialAddresses_ElrondCommunityAddress(t *testing.T) { + t.Parallel() + + addr := createDefaultSpecialAddress() + communityAddr := addr.ElrondCommunityAddress() + + assert.Equal(t, []byte("community"), communityAddr) +} + +func TestSpecialAddresses_LeaderAddressNoSetShardConsensusData(t *testing.T) { + t.Parallel() + + addr := createDefaultSpecialAddress() + leaderAddress := addr.LeaderAddress() + + assert.Nil(t, leaderAddress) +} + +func TestSpecialAddresses_LeaderAddressSetShardConsensusData(t *testing.T) { + t.Parallel() + + addr := createDefaultSpecialAddress() + _ = addr.SetShardConsensusData([]byte("randomness"), 0, 0, 0) + leaderAddress := addr.LeaderAddress() + + assert.Equal(t, "address00", string(leaderAddress)) +} + +func TestSpecialAddresses_Round(t *testing.T) { + t.Parallel() + + addr := createDefaultSpecialAddress() + _ = addr.SetShardConsensusData([]byte("randomness"), 1, 2, 0) + round := addr.Round() + + assert.Equal(t, uint64(0), round) +} + +func TestSpecialAddresses_Epoch(t *testing.T) { + t.Parallel() + + addr := createDefaultSpecialAddress() + _ = addr.SetShardConsensusData([]byte("randomness"), 1, 2, 0) + epoch := addr.Epoch() + + assert.Equal(t, uint32(2), epoch) +} + +func TestSpecialAddresses_SetElrondCommunityAddress(t *testing.T) { + addr := createDefaultSpecialAddress() + communityAddress := addr.ElrondCommunityAddress() + + assert.Equal(t, []byte("community"), communityAddress) +} + +func TestSpecialAddresses_ShardIdForAddress(t *testing.T) { + args := initDefaultArgs() + args.ShardCoordinator = &mock.MultipleShardsCoordinatorMock{ + NoShards: 4, + ComputeIdCalled: func(address state.AddressContainer) uint32 { + return uint32(address.Bytes()[0]) + }, + CurrentShard: 0, + } + addr, _ := createSpecialAddressFromArgs(args) + shardId, err := addr.ShardIdForAddress([]byte{3}) + + assert.Nil(t, err) + assert.Equal(t, uint32(3), shardId) +} + +func TestSpecialAddresses_IsInterfaceNil(t *testing.T) { + addr := &specialAddresses{} + + addr = nil + isNil := addr.IsInterfaceNil() + + assert.True(t, isNil) +} diff --git a/data/mock/addressConverterMock.go b/data/mock/addressConverterMock.go 
new file mode 100644 index 00000000000..de5572b249e --- /dev/null +++ b/data/mock/addressConverterMock.go @@ -0,0 +1,66 @@ +package mock + +import ( + "bytes" + "encoding/hex" + "errors" + + "github.com/ElrondNetwork/elrond-go/data/state" +) + +var errFailure = errors.New("failure") + +type AddressConverterMock struct { + Fail bool + CreateAddressFromPublicKeyBytesRetErrForValue []byte +} + +func (acm *AddressConverterMock) CreateAddressFromPublicKeyBytes(pubKey []byte) (state.AddressContainer, error) { + if acm.Fail { + return nil, errFailure + } + + if acm.CreateAddressFromPublicKeyBytesRetErrForValue != nil { + if bytes.Equal(acm.CreateAddressFromPublicKeyBytesRetErrForValue, pubKey) { + return nil, errors.New("error required") + } + } + + return NewAddressMockFromBytes(pubKey), nil +} + +func (acm *AddressConverterMock) ConvertToHex(addressContainer state.AddressContainer) (string, error) { + if acm.Fail { + return "", errFailure + } + + return hex.EncodeToString(addressContainer.Bytes()), nil +} + +func (acm *AddressConverterMock) CreateAddressFromHex(hexAddress string) (state.AddressContainer, error) { + if acm.Fail { + return nil, errFailure + } + + panic("implement me") +} + +func (acm *AddressConverterMock) PrepareAddressBytes(addressBytes []byte) ([]byte, error) { + if acm.Fail { + return nil, errFailure + } + + panic("implement me") +} + +func (acm *AddressConverterMock) AddressLen() int { + return 32 +} + +// IsInterfaceNil returns true if there is no value under the interface +func (acm *AddressConverterMock) IsInterfaceNil() bool { + if acm == nil { + return true + } + return false +} diff --git a/data/mock/multipleShardsCoordinatorMock.go b/data/mock/multipleShardsCoordinatorMock.go new file mode 100644 index 00000000000..38a5ab1814e --- /dev/null +++ b/data/mock/multipleShardsCoordinatorMock.go @@ -0,0 +1,70 @@ +package mock + +import ( + "fmt" + + "github.com/ElrondNetwork/elrond-go/data/state" +) + +type MultipleShardsCoordinatorMock struct { + NoShards uint32 + ComputeIdCalled func(address state.AddressContainer) uint32 + CurrentShard uint32 +} + +func NewMultipleShardsCoordinatorMock() *MultipleShardsCoordinatorMock { + return &MultipleShardsCoordinatorMock{NoShards: 1} +} + +func NewMultiShardsCoordinatorMock(nrShard uint32) *MultipleShardsCoordinatorMock { + return &MultipleShardsCoordinatorMock{NoShards: nrShard} +} + +func (scm *MultipleShardsCoordinatorMock) NumberOfShards() uint32 { + return scm.NoShards +} + +func (scm *MultipleShardsCoordinatorMock) ComputeId(address state.AddressContainer) uint32 { + if scm.ComputeIdCalled == nil { + return scm.SelfId() + } + return scm.ComputeIdCalled(address) +} + +func (scm *MultipleShardsCoordinatorMock) SelfId() uint32 { + return scm.CurrentShard +} + +func (scm *MultipleShardsCoordinatorMock) SetSelfId(shardId uint32) error { + return nil +} + +func (scm *MultipleShardsCoordinatorMock) SameShard(firstAddress, secondAddress state.AddressContainer) bool { + return true +} + +func (scm *MultipleShardsCoordinatorMock) SetNoShards(noShards uint32) { + scm.NoShards = noShards +} + +// CommunicationIdentifier returns the identifier between current shard ID and destination shard ID +// identifier is generated such as the first shard from identifier is always smaller than the last +func (scm *MultipleShardsCoordinatorMock) CommunicationIdentifier(destShardID uint32) string { + if destShardID == scm.CurrentShard { + return fmt.Sprintf("_%d", scm.CurrentShard) + } + + if destShardID < scm.CurrentShard { + return 
fmt.Sprintf("_%d_%d", destShardID, scm.CurrentShard) + } + + return fmt.Sprintf("_%d_%d", scm.CurrentShard, destShardID) +} + +// IsInterfaceNil returns true if there is no value under the interface +func (scm *MultipleShardsCoordinatorMock) IsInterfaceNil() bool { + if scm == nil { + return true + } + return false +} diff --git a/data/mock/nodesCoordinatorMock.go b/data/mock/nodesCoordinatorMock.go new file mode 100644 index 00000000000..56143df1eac --- /dev/null +++ b/data/mock/nodesCoordinatorMock.go @@ -0,0 +1,191 @@ +package mock + +import ( + "bytes" + "fmt" + "math/big" + + "github.com/ElrondNetwork/elrond-go/sharding" +) + +// NodesCoordinator defines the behaviour of a struct able to do validator group selection +type NodesCoordinatorMock struct { + Validators map[uint32][]sharding.Validator + ShardConsensusSize uint32 + MetaConsensusSize uint32 + ShardId uint32 + NbShards uint32 + GetSelectedPublicKeysCalled func(selection []byte, shardId uint32) (publicKeys []string, err error) + GetValidatorsPublicKeysCalled func(randomness []byte, round uint64, shardId uint32) ([]string, error) + GetValidatorsRewardsAddressesCalled func(randomness []byte, round uint64, shardId uint32) ([]string, error) + LoadNodesPerShardsCalled func(nodes map[uint32][]sharding.Validator) error + ComputeValidatorsGroupCalled func(randomness []byte, round uint64, shardId uint32) (validatorsGroup []sharding.Validator, err error) + GetValidatorWithPublicKeyCalled func(publicKey []byte) (validator sharding.Validator, shardId uint32, err error) +} + +func NewNodesCoordinatorMock() *NodesCoordinatorMock { + nbShards := uint32(1) + nodesPerShard := 2 + validatorsMap := make(map[uint32][]sharding.Validator) + + shards := make([]uint32, nbShards+1) + for i := uint32(0); i < nbShards; i++ { + shards[i] = i + } + shards[nbShards] = sharding.MetachainShardId + + for _, sh := range shards { + validatorsList := make([]sharding.Validator, nodesPerShard) + for v := 0; v < nodesPerShard; v++ { + validatorsList[v], _ = sharding.NewValidator( + big.NewInt(10), + 1, + []byte(fmt.Sprintf("pubKey%d%d", sh, v)), + []byte(fmt.Sprintf("address%d%d", sh, v)), + ) + } + validatorsMap[sh] = validatorsList + } + + return &NodesCoordinatorMock{ + ShardConsensusSize: 1, + MetaConsensusSize: 1, + ShardId: 0, + NbShards: nbShards, + Validators: validatorsMap, + } +} + +func (ncm *NodesCoordinatorMock) GetSelectedPublicKeys(selection []byte, shardId uint32) (publicKeys []string, err error) { + if ncm.GetSelectedPublicKeysCalled != nil { + return ncm.GetSelectedPublicKeysCalled(selection, shardId) + } + + if len(ncm.Validators) == 0 { + return nil, sharding.ErrNilInputNodesMap + } + + pubKeys := make([]string, 0) + + for _, v := range ncm.Validators[shardId] { + pubKeys = append(pubKeys, string(v.PubKey())) + } + + return pubKeys, nil +} + +func (ncm *NodesCoordinatorMock) GetValidatorsPublicKeys( + randomness []byte, + round uint64, + shardId uint32, +) ([]string, error) { + if ncm.GetValidatorsPublicKeysCalled != nil { + return ncm.GetValidatorsPublicKeysCalled(randomness, round, shardId) + } + + validators, err := ncm.ComputeValidatorsGroup(randomness, round, shardId) + if err != nil { + return nil, err + } + + valGrStr := make([]string, 0) + + for _, v := range validators { + valGrStr = append(valGrStr, string(v.PubKey())) + } + + return valGrStr, nil +} + +func (ncm *NodesCoordinatorMock) GetValidatorsRewardsAddresses( + randomness []byte, + round uint64, + shardId uint32, +) ([]string, error) { + if ncm.GetValidatorsPublicKeysCalled != 
nil { + return ncm.GetValidatorsRewardsAddressesCalled(randomness, round, shardId) + } + + validators, err := ncm.ComputeValidatorsGroup(randomness, round, shardId) + if err != nil { + return nil, err + } + + addresses := make([]string, 0) + for _, v := range validators { + addresses = append(addresses, string(v.Address())) + } + + return addresses, nil +} + +func (ncm *NodesCoordinatorMock) SetNodesPerShards(nodes map[uint32][]sharding.Validator) error { + if ncm.LoadNodesPerShardsCalled != nil { + return ncm.LoadNodesPerShardsCalled(nodes) + } + + if nodes == nil { + return sharding.ErrNilInputNodesMap + } + + ncm.Validators = nodes + + return nil +} + +func (ncm *NodesCoordinatorMock) ComputeValidatorsGroup( + randomess []byte, + round uint64, + shardId uint32, +) ([]sharding.Validator, error) { + var consensusSize uint32 + + if ncm.ComputeValidatorsGroupCalled != nil { + return ncm.ComputeValidatorsGroupCalled(randomess, round, shardId) + } + + if ncm.ShardId == sharding.MetachainShardId { + consensusSize = ncm.MetaConsensusSize + } else { + consensusSize = ncm.ShardConsensusSize + } + + if randomess == nil { + return nil, sharding.ErrNilRandomness + } + + validatorsGroup := make([]sharding.Validator, 0) + + for i := uint32(0); i < consensusSize; i++ { + validatorsGroup = append(validatorsGroup, ncm.Validators[shardId][i]) + } + + return validatorsGroup, nil +} + +func (ncm *NodesCoordinatorMock) GetValidatorWithPublicKey(publicKey []byte) (sharding.Validator, uint32, error) { + if ncm.GetValidatorWithPublicKeyCalled != nil { + return ncm.GetValidatorWithPublicKeyCalled(publicKey) + } + + if publicKey == nil { + return nil, 0, sharding.ErrNilPubKey + } + + for shardId, shardEligible := range ncm.Validators { + for i := 0; i < len(shardEligible); i++ { + if bytes.Equal(publicKey, shardEligible[i].PubKey()) { + return shardEligible[i], shardId, nil + } + } + } + + return nil, 0, sharding.ErrValidatorNotFound +} + +func (ncm *NodesCoordinatorMock) IsInterfaceNil() bool { + if ncm == nil { + return true + } + return false +} From a0669bad7830a586c55fd8562fbdd03db97ae268 Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Thu, 26 Sep 2019 18:04:45 +0300 Subject: [PATCH 151/234] data: goimports --- data/address/specialAddresses_test.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/data/address/specialAddresses_test.go b/data/address/specialAddresses_test.go index 2c922a4c78b..24a6df5e5cb 100644 --- a/data/address/specialAddresses_test.go +++ b/data/address/specialAddresses_test.go @@ -1,13 +1,14 @@ package address import ( + "testing" + "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/mock" "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/sharding" "github.com/stretchr/testify/assert" - "testing" ) type Args struct { From df37430a3670812c3a58db588c649ed8b2a81f67 Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Thu, 26 Sep 2019 18:07:04 +0300 Subject: [PATCH 152/234] data: fix failing unit test --- data/address/specialAddresses_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data/address/specialAddresses_test.go b/data/address/specialAddresses_test.go index 24a6df5e5cb..6fbf64f894a 100644 --- a/data/address/specialAddresses_test.go +++ b/data/address/specialAddresses_test.go @@ -261,7 +261,7 @@ func TestSpecialAddresses_Round(t *testing.T) { _ = addr.SetShardConsensusData([]byte("randomness"), 1, 2, 0) round := addr.Round() - 
assert.Equal(t, uint64(0), round) + assert.Equal(t, uint64(1), round) } func TestSpecialAddresses_Epoch(t *testing.T) { From 3d5a281480b7a548737957adb81e28ec3ee3d342 Mon Sep 17 00:00:00 2001 From: Sebastian Marian <36901555+SebastianMarian@users.noreply.github.com> Date: Thu, 26 Sep 2019 20:11:27 +0300 Subject: [PATCH 153/234] EN-4155-Fix-bug-with-processed-mini-blocks-from-meta-blocks (#486) * Fixed bug --- core/computers.go | 15 +++++ data/block/block.go | 17 ----- data/block/metaBlock.go | 20 ------ data/interface.go | 2 - process/block/baseProcess.go | 8 +-- process/block/baseProcess_test.go | 1 + process/block/export_test.go | 12 +++- process/block/shardblock.go | 74 ++++++++++++++++++++-- process/block/shardblock_test.go | 45 +++++++------ process/constants.go | 1 + process/coordinator/process.go | 12 ++-- process/coordinator/process_test.go | 8 +-- process/interface.go | 2 +- process/mock/transactionCoordinatorMock.go | 6 +- process/sync/shardForkDetector.go | 4 +- 15 files changed, 143 insertions(+), 84 deletions(-) diff --git a/core/computers.go b/core/computers.go index d75351537cc..adb9b9aab6e 100644 --- a/core/computers.go +++ b/core/computers.go @@ -1,5 +1,9 @@ package core +import ( + "bytes" +) + // Max returns the maximum number between two given func Max(a uint32, b uint32) uint32 { if a > b { @@ -7,3 +11,14 @@ func Max(a uint32, b uint32) uint32 { } return b } + +// IsHashInList signals if the given hash exists in the given list of hashes +func IsHashInList(hash []byte, hashes [][]byte) bool { + for i := 0; i < len(hashes); i++ { + if bytes.Equal(hash, hashes[i]) { + return true + } + } + + return false +} diff --git a/data/block/block.go b/data/block/block.go index 9b1ad8403a0..5485ad58750 100644 --- a/data/block/block.go +++ b/data/block/block.go @@ -99,7 +99,6 @@ type Header struct { RootHash []byte `capid:"13"` MetaBlockHashes [][]byte `capid:"14"` TxCount uint32 `capid:"15"` - processedMBs map[string]bool } // Save saves the serialized data of a Block Header into a stream through Capnp protocol @@ -490,22 +489,6 @@ func (h *Header) MapMiniBlockHashesToShards() map[string]uint32 { return hashDst } -// GetMiniBlockProcessed verifies if miniblock from header was processed -func (h *Header) GetMiniBlockProcessed(hash []byte) bool { - if h.processedMBs == nil { - h.processedMBs = make(map[string]bool, 0) - } - return h.processedMBs[string(hash)] -} - -// SetMiniBlockProcessed set that miniblock with hash to processed or not processed -func (h *Header) SetMiniBlockProcessed(hash []byte, processed bool) { - if h.processedMBs == nil { - h.processedMBs = make(map[string]bool, 0) - } - h.processedMBs[string(hash)] = processed -} - // IntegrityAndValidity checks if data is valid func (b Body) IntegrityAndValidity() error { if b == nil || b.IsInterfaceNil() { diff --git a/data/block/metaBlock.go b/data/block/metaBlock.go index 80fb0afb2ac..8681272d9df 100644 --- a/data/block/metaBlock.go +++ b/data/block/metaBlock.go @@ -71,7 +71,6 @@ type MetaBlock struct { RandSeed []byte `capid:"10"` RootHash []byte `capid:"11"` TxCount uint32 `capid:"12"` - processedMBs map[string]bool } // MetaBlockBody hold the data for metablock body @@ -435,25 +434,6 @@ func (m *MetaBlock) GetMiniBlockHeadersWithDst(destId uint32) map[string]uint32 return hashDst } -// GetMiniBlockProcessed verifies if miniblock from header was processed -func (m *MetaBlock) GetMiniBlockProcessed(hash []byte) bool { - if m.processedMBs == nil { - m.processedMBs = make(map[string]bool, 0) - } - if 
m.processedMBs[string(hash)] { - return true - } - return false -} - -// SetMiniBlockProcessed set that miniblock with hash to processed or not processed -func (m *MetaBlock) SetMiniBlockProcessed(hash []byte, processed bool) { - if m.processedMBs == nil { - m.processedMBs = make(map[string]bool, 0) - } - m.processedMBs[string(hash)] = processed -} - // IntegrityAndValidity return true as block is nil for metablock. func (m *MetaBlockBody) IntegrityAndValidity() error { return nil diff --git a/data/interface.go b/data/interface.go index 322c1170abe..8cdc27729ff 100644 --- a/data/interface.go +++ b/data/interface.go @@ -32,8 +32,6 @@ type HeaderHandler interface { SetTxCount(txCount uint32) GetMiniBlockHeadersWithDst(destId uint32) map[string]uint32 - GetMiniBlockProcessed(hash []byte) bool - SetMiniBlockProcessed(hash []byte, processed bool) IsInterfaceNil() bool ItemsInBody() uint32 diff --git a/process/block/baseProcess.go b/process/block/baseProcess.go index 590651953e9..9b7b5c424df 100644 --- a/process/block/baseProcess.go +++ b/process/block/baseProcess.go @@ -210,13 +210,13 @@ func (bp *baseProcessor) isHdrConstructionValid(currHdr, prevHdr data.HeaderHand //TODO: add verification if rand seed was correctly computed add other verification //TODO: check here if the 2 header blocks were correctly signed and the consensus group was correctly elected if prevHdr.GetRound() >= currHdr.GetRound() { - log.Info(fmt.Sprintf("round does not match in shard %d: local block round is %d and node received block with round %d\n", + log.Debug(fmt.Sprintf("round does not match in shard %d: local block round is %d and node received block with round %d\n", currHdr.GetShardID(), prevHdr.GetRound(), currHdr.GetRound())) return process.ErrLowerRoundInBlock } if currHdr.GetNonce() != prevHdr.GetNonce()+1 { - log.Info(fmt.Sprintf("nonce does not match in shard %d: local block nonce is %d and node received block with nonce %d\n", + log.Debug(fmt.Sprintf("nonce does not match in shard %d: local block nonce is %d and node received block with nonce %d\n", currHdr.GetShardID(), prevHdr.GetNonce(), currHdr.GetNonce())) return process.ErrWrongNonceInBlock } @@ -227,13 +227,13 @@ func (bp *baseProcessor) isHdrConstructionValid(currHdr, prevHdr data.HeaderHand } if !bytes.Equal(currHdr.GetPrevHash(), prevHeaderHash) { - log.Info(fmt.Sprintf("block hash does not match in shard %d: local block hash is %s and node received block with previous hash %s\n", + log.Debug(fmt.Sprintf("block hash does not match in shard %d: local block hash is %s and node received block with previous hash %s\n", currHdr.GetShardID(), core.ToB64(prevHeaderHash), core.ToB64(currHdr.GetPrevHash()))) return process.ErrBlockHashDoesNotMatch } if !bytes.Equal(currHdr.GetPrevRandSeed(), prevHdr.GetRandSeed()) { - log.Info(fmt.Sprintf("random seed does not match in shard %d: local block random seed is %s and node received block with previous random seed %s\n", + log.Debug(fmt.Sprintf("random seed does not match in shard %d: local block random seed is %s and node received block with previous random seed %s\n", currHdr.GetShardID(), core.ToB64(prevHdr.GetRandSeed()), core.ToB64(currHdr.GetPrevRandSeed()))) return process.ErrRandSeedDoesNotMatch } diff --git a/process/block/baseProcess_test.go b/process/block/baseProcess_test.go index 01ef310073f..3ddb5d4a6c4 100644 --- a/process/block/baseProcess_test.go +++ b/process/block/baseProcess_test.go @@ -257,6 +257,7 @@ func createDummyMetaBlock(destShardId uint32, senderShardId uint32, miniBlockHas metaBlock 
:= &block.MetaBlock{ ShardInfo: []block.ShardData{ { + ShardId: senderShardId, ShardMiniBlockHeaders: make([]block.ShardMiniBlockHeader, len(miniBlockHashes)), }, }, diff --git a/process/block/export_test.go b/process/block/export_test.go index 210deaa5379..d95ad40d310 100644 --- a/process/block/export_test.go +++ b/process/block/export_test.go @@ -48,8 +48,8 @@ func (sp *shardProcessor) GetProcessedMetaBlocksFromHeader(header *block.Header) return sp.getProcessedMetaBlocksFromHeader(header) } -func (sp *shardProcessor) RemoveProcessedMetablocksFromPool(processedMetaHdrs []data.HeaderHandler) error { - return sp.removeProcessedMetablocksFromPool(processedMetaHdrs) +func (sp *shardProcessor) RemoveProcessedMetaBlocksFromPool(processedMetaHdrs []data.HeaderHandler) error { + return sp.removeProcessedMetaBlocksFromPool(processedMetaHdrs) } func NewShardProcessorEmptyWith3shards(tdp dataRetriever.PoolsHolder, genesisBlocks map[uint32]data.HeaderHandler) (*shardProcessor, error) { @@ -293,3 +293,11 @@ func (sp *shardProcessor) GetAllMiniBlockDstMeFromMeta( ) (map[string][]byte, error) { return sp.getAllMiniBlockDstMeFromMeta(round, metaHashes) } + +func (sp *shardProcessor) IsMiniBlockProcessed(metaBlockHash []byte, miniBlockHash []byte) bool { + return sp.isMiniBlockProcessed(metaBlockHash, miniBlockHash) +} + +func (sp *shardProcessor) AddProcessedMiniBlock(metaBlockHash []byte, miniBlockHash []byte) { + sp.addProcessedMiniBlock(metaBlockHash, miniBlockHash) +} diff --git a/process/block/shardblock.go b/process/block/shardblock.go index 5889aa8e551..8c4b0d66ae7 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -36,6 +36,9 @@ type shardProcessor struct { currHighestMetaHdrNonce uint64 allNeededMetaHdrsFound bool + processedMiniBlocks map[string]map[string]struct{} + mutProcessedMiniBlocks sync.RWMutex + core serviceContainer.Core txCoordinator process.TransactionCoordinator txCounter *transactionCounter @@ -115,6 +118,7 @@ func NewShardProcessor(arguments ArgShardProcessor) (*shardProcessor, error) { sp.requestedMetaHdrsHashes = make(map[string]bool) sp.usedMetaHdrsHashes = make(map[uint64][][]byte) + sp.processedMiniBlocks = make(map[string]map[string]struct{}) metaBlockPool := sp.dataPool.MetaBlocks() if metaBlockPool == nil { @@ -533,7 +537,7 @@ func (sp *shardProcessor) restoreMetaBlockIntoPool(miniBlockHashes map[string]ui processedMiniBlocks := metaBlock.GetMiniBlockHeadersWithDst(sp.shardCoordinator.SelfId()) for mbHash := range processedMiniBlocks { - metaBlock.SetMiniBlockProcessed([]byte(mbHash), true) + sp.addProcessedMiniBlock(metaBlockHash, []byte(mbHash)) } metaBlockPool.Put(metaBlockHash, &metaBlock) @@ -577,7 +581,7 @@ func (sp *shardProcessor) restoreMetaBlockIntoPool(miniBlockHashes map[string]ui continue } - hdr.SetMiniBlockProcessed([]byte(key), false) + sp.removeProcessedMiniBlock(metaBlockKey, []byte(key)) } } @@ -719,7 +723,7 @@ func (sp *shardProcessor) CommitBlock( log.Debug(errNotCritical.Error()) } - errNotCritical = sp.removeProcessedMetablocksFromPool(processedMetaHdrs) + errNotCritical = sp.removeProcessedMetaBlocksFromPool(processedMetaHdrs) if errNotCritical != nil { log.Debug(errNotCritical.Error()) } @@ -882,7 +886,7 @@ func (sp *shardProcessor) getProcessedMetaBlocksFromHeader(header *block.Header) continue } - metaBlock.SetMiniBlockProcessed(miniBlockHashes[key], true) + sp.addProcessedMiniBlock(metaBlockKey, miniBlockHashes[key]) delete(miniBlockHashes, key) } @@ -890,7 +894,7 @@ func (sp *shardProcessor) 
getProcessedMetaBlocksFromHeader(header *block.Header) processedAll := true for key := range crossMiniBlockHashes { - if !metaBlock.GetMiniBlockProcessed([]byte(key)) { + if !sp.isMiniBlockProcessed(metaBlockKey, []byte(key)) { processedAll = false break } @@ -904,7 +908,7 @@ func (sp *shardProcessor) getProcessedMetaBlocksFromHeader(header *block.Header) return processedMetaHdrs, nil } -func (sp *shardProcessor) removeProcessedMetablocksFromPool(processedMetaHdrs []data.HeaderHandler) error { +func (sp *shardProcessor) removeProcessedMetaBlocksFromPool(processedMetaHdrs []data.HeaderHandler) error { lastNotarizedMetaHdr, err := sp.getLastNotarizedHdr(sharding.MetachainShardId) if err != nil { return err @@ -947,6 +951,7 @@ func (sp *shardProcessor) removeProcessedMetablocksFromPool(processedMetaHdrs [] sp.dataPool.MetaBlocks().Remove(headerHash) sp.dataPool.HeadersNonces().Remove(hdr.GetNonce(), sharding.MetachainShardId) + sp.removeAllProcessedMiniBlocks(headerHash) log.Debug(fmt.Sprintf("metaBlock with round %d nonce %d and hash %s has been processed completely and removed from pool\n", hdr.GetRound(), @@ -1353,8 +1358,10 @@ func (sp *shardProcessor) createAndProcessCrossMiniBlocksDstMe( maxMbSpaceRemained := int32(maxItemsInBlock) - int32(itemsAddedInHeader) - 1 if maxTxSpaceRemained > 0 && maxMbSpaceRemained > 0 { + processedMiniBlocksHashes := sp.getProcessedMiniBlocksHashes(orderedMetaBlocks[i].hash) currMBProcessed, currTxsAdded, hdrProcessFinished := sp.txCoordinator.CreateMbsAndProcessCrossShardTransactionsDstMe( hdr, + processedMiniBlocksHashes, uint32(maxTxSpaceRemained), uint32(maxMbSpaceRemained), round, @@ -1588,3 +1595,58 @@ func (sp *shardProcessor) IsInterfaceNil() bool { } return false } + +func (sp *shardProcessor) addProcessedMiniBlock(metaBlockHash []byte, miniBlockHash []byte) { + sp.mutProcessedMiniBlocks.Lock() + miniBlocksProcessed, ok := sp.processedMiniBlocks[string(metaBlockHash)] + if !ok { + miniBlocksProcessed := make(map[string]struct{}) + miniBlocksProcessed[string(miniBlockHash)] = struct{}{} + sp.processedMiniBlocks[string(metaBlockHash)] = miniBlocksProcessed + sp.mutProcessedMiniBlocks.Unlock() + return + } + + miniBlocksProcessed[string(miniBlockHash)] = struct{}{} + sp.mutProcessedMiniBlocks.Unlock() +} + +func (sp *shardProcessor) removeProcessedMiniBlock(metaBlockHash []byte, miniBlockHash []byte) { + sp.mutProcessedMiniBlocks.Lock() + miniBlocksProcessed, ok := sp.processedMiniBlocks[string(metaBlockHash)] + if !ok { + sp.mutProcessedMiniBlocks.Unlock() + return + } + + delete(miniBlocksProcessed, string(miniBlockHash)) + sp.mutProcessedMiniBlocks.Unlock() +} + +func (sp *shardProcessor) removeAllProcessedMiniBlocks(metaBlockHash []byte) { + sp.mutProcessedMiniBlocks.Lock() + delete(sp.processedMiniBlocks, string(metaBlockHash)) + sp.mutProcessedMiniBlocks.Unlock() +} + +func (sp *shardProcessor) getProcessedMiniBlocksHashes(metaBlockHash []byte) map[string]struct{} { + sp.mutProcessedMiniBlocks.RLock() + processedMiniBlocksHashes := sp.processedMiniBlocks[string(metaBlockHash)] + sp.mutProcessedMiniBlocks.RUnlock() + + return processedMiniBlocksHashes +} + +func (sp *shardProcessor) isMiniBlockProcessed(metaBlockHash []byte, miniBlockHash []byte) bool { + sp.mutProcessedMiniBlocks.RLock() + miniBlocksProcessed, ok := sp.processedMiniBlocks[string(metaBlockHash)] + if !ok { + sp.mutProcessedMiniBlocks.RUnlock() + return false + } + + _, isProcessed := miniBlocksProcessed[string(miniBlockHash)] + sp.mutProcessedMiniBlocks.RUnlock() + + return 
isProcessed +} diff --git a/process/block/shardblock_test.go b/process/block/shardblock_test.go index 390ec1f7f78..e565c25d3f6 100644 --- a/process/block/shardblock_test.go +++ b/process/block/shardblock_test.go @@ -2931,13 +2931,11 @@ func TestShardProcessor_GetProcessedMetaBlockFromPoolShouldWork(t *testing.T) { assert.Nil(t, err) //check WasMiniBlockProcessed for remaining metablocks - metaBlock2Recov, _ := dataPool.MetaBlocks().Get(mb2Hash) - assert.True(t, (metaBlock2Recov.(data.HeaderHandler)).GetMiniBlockProcessed(miniblockHashes[2])) - assert.False(t, (metaBlock2Recov.(data.HeaderHandler)).GetMiniBlockProcessed(miniblockHashes[3])) + assert.True(t, bp.IsMiniBlockProcessed(mb2Hash, miniblockHashes[2])) + assert.False(t, bp.IsMiniBlockProcessed(mb2Hash, miniblockHashes[3])) - metaBlock3Recov, _ := dataPool.MetaBlocks().Get(mb3Hash) - assert.False(t, (metaBlock3Recov.(data.HeaderHandler)).GetMiniBlockProcessed(miniblockHashes[4])) - assert.False(t, (metaBlock3Recov.(data.HeaderHandler)).GetMiniBlockProcessed(miniblockHashes[5])) + assert.False(t, bp.IsMiniBlockProcessed(mb3Hash, miniblockHashes[4])) + assert.False(t, bp.IsMiniBlockProcessed(mb3Hash, miniblockHashes[5])) } func TestBlockProcessor_RestoreBlockIntoPoolsShouldErrNilBlockHeader(t *testing.T) { @@ -3041,22 +3039,32 @@ func TestShardProcessor_RestoreBlockIntoPoolsShouldWork(t *testing.T) { metablockHash := []byte("meta block hash 1") metablockHeader := createDummyMetaBlock(0, 1, miniblockHash) - metablockHeader.SetMiniBlockProcessed(metablockHash, true) dataPool.MetaBlocks().Put( metablockHash, metablockHeader, ) - err = sp.RestoreBlockIntoPools(&block.Header{}, body) + store.GetCalled = func(unitType dataRetriever.UnitType, key []byte) ([]byte, error) { + return marshalizerMock.Marshal(metablockHeader) + } + + miniBlockHeader := block.MiniBlockHeader{ + Hash: miniblockHash, + SenderShardID: miniblock.SenderShardID, + ReceiverShardID: miniblock.ReceiverShardID, + } + + metaBlockHashes := make([][]byte, 0) + metaBlockHashes = append(metaBlockHashes, metablockHash) + + err = sp.RestoreBlockIntoPools(&block.Header{MetaBlockHashes: [][]byte{metablockHash}, MiniBlockHeaders: []block.MiniBlockHeader{miniBlockHeader}}, body) miniblockFromPool, _ := dataPool.MiniBlocks().Get(miniblockHash) txFromPool, _ := dataPool.Transactions().SearchFirstData(txHash) - metablockFromPool, _ := dataPool.MetaBlocks().Get(metablockHash) - metablock := metablockFromPool.(*block.MetaBlock) assert.Nil(t, err) assert.Equal(t, &miniblock, miniblockFromPool) assert.Equal(t, &tx, txFromPool) - assert.Equal(t, false, metablock.GetMiniBlockProcessed(miniblockHash)) + assert.Equal(t, false, sp.IsMiniBlockProcessed(metablockHash, miniblockHash)) } func TestShardProcessor_DecodeBlockBody(t *testing.T) { @@ -3293,7 +3301,7 @@ func TestShardProcessor_RemoveAndSaveLastNotarizedMetaHdrNoDstMB(t *testing.T) { err = sp.SaveLastNotarizedHeader(sharding.MetachainShardId, processedMetaHdrs) assert.Nil(t, err) - err = sp.RemoveProcessedMetablocksFromPool(processedMetaHdrs) + err = sp.RemoveProcessedMetaBlocksFromPool(processedMetaHdrs) assert.Nil(t, err) assert.Equal(t, 0, putCalledNr) @@ -3314,7 +3322,7 @@ func TestShardProcessor_RemoveAndSaveLastNotarizedMetaHdrNoDstMB(t *testing.T) { err = sp.SaveLastNotarizedHeader(sharding.MetachainShardId, processedMetaHdrs) assert.Nil(t, err) - err = sp.RemoveProcessedMetablocksFromPool(processedMetaHdrs) + err = sp.RemoveProcessedMetaBlocksFromPool(processedMetaHdrs) assert.Nil(t, err) assert.Equal(t, 0, putCalledNr) @@ -3336,7 
+3344,7 @@ func TestShardProcessor_RemoveAndSaveLastNotarizedMetaHdrNoDstMB(t *testing.T) { err = sp.SaveLastNotarizedHeader(sharding.MetachainShardId, processedMetaHdrs) assert.Nil(t, err) - err = sp.RemoveProcessedMetablocksFromPool(processedMetaHdrs) + err = sp.RemoveProcessedMetaBlocksFromPool(processedMetaHdrs) assert.Nil(t, err) assert.Equal(t, 4, putCalledNr) @@ -3496,7 +3504,7 @@ func TestShardProcessor_RemoveAndSaveLastNotarizedMetaHdrNotAllMBFinished(t *tes err = sp.SaveLastNotarizedHeader(sharding.MetachainShardId, processedMetaHdrs) assert.Nil(t, err) - err = sp.RemoveProcessedMetablocksFromPool(processedMetaHdrs) + err = sp.RemoveProcessedMetaBlocksFromPool(processedMetaHdrs) assert.Nil(t, err) assert.Equal(t, 2, putCalledNr) @@ -3642,7 +3650,7 @@ func TestShardProcessor_RemoveAndSaveLastNotarizedMetaHdrAllMBFinished(t *testin err = sp.SaveLastNotarizedHeader(sharding.MetachainShardId, processedMetaHdrs) assert.Nil(t, err) - err = sp.RemoveProcessedMetablocksFromPool(processedMetaHdrs) + err = sp.RemoveProcessedMetaBlocksFromPool(processedMetaHdrs) assert.Nil(t, err) assert.Equal(t, 4, putCalledNr) @@ -4143,7 +4151,7 @@ func TestShardProcessor_RestoreMetaBlockIntoPoolVerifyMiniblocks(t *testing.T) { Nonce: 1, ShardInfo: shardInfos, } - meta.SetMiniBlockProcessed(testMBHash, true) + hasher := &mock.HasherStub{} metaBytes, _ := marshalizer.Marshal(meta) @@ -4151,6 +4159,7 @@ func TestShardProcessor_RestoreMetaBlockIntoPoolVerifyMiniblocks(t *testing.T) { return []byte("cool") } metaHash := hasher.Compute(string(metaBytes)) + sp.AddProcessedMiniBlock(metaHash, testMBHash) metablockHashes := make([][]byte, 0) metablockHashes = append(metablockHashes, metaHash) @@ -4176,5 +4185,5 @@ func TestShardProcessor_RestoreMetaBlockIntoPoolVerifyMiniblocks(t *testing.T) { assert.Equal(t, meta, metaBlockRestored) assert.Nil(t, err) - assert.True(t, meta.GetMiniBlockProcessed(testMBHash)) + assert.True(t, sp.IsMiniBlockProcessed(metaHash, testMBHash)) } diff --git a/process/constants.go b/process/constants.go index b7653e18c3f..ef837a3b01f 100644 --- a/process/constants.go +++ b/process/constants.go @@ -34,6 +34,7 @@ const MaxHeaderRequestsAllowed = 10 const MaxItemsInBlock = 15000 const MinItemsInBlock = 1000 const MaxNoncesDifference = 5 +const GenesisBlockNonce = 0 // TODO - calculate exactly in case of the VM, for every VM to have a similar constant, operations / seconds const MaxGasLimitPerMiniBlock = uint64(100000) diff --git a/process/coordinator/process.go b/process/coordinator/process.go index d29817d57f9..0f44376fd3a 100644 --- a/process/coordinator/process.go +++ b/process/coordinator/process.go @@ -358,6 +358,7 @@ func (tc *transactionCoordinator) ProcessBlockTransaction( // with destination of current shard func (tc *transactionCoordinator) CreateMbsAndProcessCrossShardTransactionsDstMe( hdr data.HeaderHandler, + processedMiniBlocksHashes map[string]struct{}, maxTxRemaining uint32, maxMbRemaining uint32, round uint64, @@ -365,7 +366,7 @@ func (tc *transactionCoordinator) CreateMbsAndProcessCrossShardTransactionsDstMe ) (block.MiniBlockSlice, uint32, bool) { miniBlocks := make(block.MiniBlockSlice, 0) nrTxAdded := uint32(0) - nrMBprocessed := 0 + nrMiniBlocksProcessed := 0 if hdr == nil || hdr.IsInterfaceNil() { return miniBlocks, nrTxAdded, true @@ -377,8 +378,9 @@ func (tc *transactionCoordinator) CreateMbsAndProcessCrossShardTransactionsDstMe break } - if hdr.GetMiniBlockProcessed([]byte(key)) { - nrMBprocessed++ + _, ok := processedMiniBlocksHashes[key] + if ok { + 
nrMiniBlocksProcessed++ continue } @@ -417,7 +419,7 @@ func (tc *transactionCoordinator) CreateMbsAndProcessCrossShardTransactionsDstMe // all txs processed, add to processed miniblocks miniBlocks = append(miniBlocks, miniBlock) nrTxAdded = nrTxAdded + uint32(len(miniBlock.TxHashes)) - nrMBprocessed++ + nrMiniBlocksProcessed++ mbOverFlow := uint32(len(miniBlocks)) >= maxMbRemaining if mbOverFlow { @@ -425,7 +427,7 @@ func (tc *transactionCoordinator) CreateMbsAndProcessCrossShardTransactionsDstMe } } - allMBsProcessed := nrMBprocessed == len(crossMiniBlockHashes) + allMBsProcessed := nrMiniBlocksProcessed == len(crossMiniBlockHashes) return miniBlocks, nrTxAdded, allMBsProcessed } diff --git a/process/coordinator/process_test.go b/process/coordinator/process_test.go index 34fce3950cd..f4f43b6bc2c 100644 --- a/process/coordinator/process_test.go +++ b/process/coordinator/process_test.go @@ -575,7 +575,7 @@ func TestTransactionCoordinator_CreateMbsAndProcessCrossShardTransactionsDstMeNi haveTime := func() bool { return true } - mbs, txs, finalized := tc.CreateMbsAndProcessCrossShardTransactionsDstMe(nil, maxTxRemaining, maxMbRemaining, 10, haveTime) + mbs, txs, finalized := tc.CreateMbsAndProcessCrossShardTransactionsDstMe(nil, nil, maxTxRemaining, maxMbRemaining, 10, haveTime) assert.Equal(t, 0, len(mbs)) assert.Equal(t, uint32(0), txs) @@ -623,7 +623,7 @@ func TestTransactionCoordinator_CreateMbsAndProcessCrossShardTransactionsDstMeNo haveTime := func() bool { return false } - mbs, txs, finalized := tc.CreateMbsAndProcessCrossShardTransactionsDstMe(createTestMetablock(), maxTxRemaining, maxMbRemaining, 10, haveTime) + mbs, txs, finalized := tc.CreateMbsAndProcessCrossShardTransactionsDstMe(createTestMetablock(), nil, maxTxRemaining, maxMbRemaining, 10, haveTime) assert.Equal(t, 0, len(mbs)) assert.Equal(t, uint32(0), txs) @@ -649,7 +649,7 @@ func TestTransactionCoordinator_CreateMbsAndProcessCrossShardTransactionsNothing haveTime := func() bool { return true } - mbs, txs, finalized := tc.CreateMbsAndProcessCrossShardTransactionsDstMe(createTestMetablock(), maxTxRemaining, maxMbRemaining, 10, haveTime) + mbs, txs, finalized := tc.CreateMbsAndProcessCrossShardTransactionsDstMe(createTestMetablock(), nil, maxTxRemaining, maxMbRemaining, 10, haveTime) assert.Equal(t, 0, len(mbs)) assert.Equal(t, uint32(0), txs) @@ -712,7 +712,7 @@ func TestTransactionCoordinator_CreateMbsAndProcessCrossShardTransactions(t *tes } } - mbs, txs, finalized := tc.CreateMbsAndProcessCrossShardTransactionsDstMe(metaHdr, maxTxRemaining, maxMbRemaining, 10, haveTime) + mbs, txs, finalized := tc.CreateMbsAndProcessCrossShardTransactionsDstMe(metaHdr, nil, maxTxRemaining, maxMbRemaining, 10, haveTime) assert.Equal(t, 1, len(mbs)) assert.Equal(t, uint32(1), txs) diff --git a/process/interface.go b/process/interface.go index 8786577153b..c4abb19cb6a 100644 --- a/process/interface.go +++ b/process/interface.go @@ -61,7 +61,7 @@ type TransactionCoordinator interface { ProcessBlockTransaction(body block.Body, round uint64, haveTime func() time.Duration) error CreateBlockStarted() - CreateMbsAndProcessCrossShardTransactionsDstMe(header data.HeaderHandler, maxTxSpaceRemained uint32, maxMbSpaceRemained uint32, round uint64, haveTime func() bool) (block.MiniBlockSlice, uint32, bool) + CreateMbsAndProcessCrossShardTransactionsDstMe(header data.HeaderHandler, processedMiniBlocksHashes map[string]struct{}, maxTxSpaceRemained uint32, maxMbSpaceRemained uint32, round uint64, haveTime func() bool) (block.MiniBlockSlice, uint32, bool) 
CreateMbsAndProcessTransactionsFromMe(maxTxSpaceRemained uint32, maxMbSpaceRemained uint32, round uint64, haveTime func() bool) block.MiniBlockSlice CreateMarshalizedData(body block.Body) (map[uint32]block.MiniBlockSlice, map[string][][]byte) diff --git a/process/mock/transactionCoordinatorMock.go b/process/mock/transactionCoordinatorMock.go index 34746504a93..a1b71e7ff8e 100644 --- a/process/mock/transactionCoordinatorMock.go +++ b/process/mock/transactionCoordinatorMock.go @@ -18,7 +18,7 @@ type TransactionCoordinatorMock struct { RemoveBlockDataFromPoolCalled func(body block.Body) error ProcessBlockTransactionCalled func(body block.Body, round uint64, haveTime func() time.Duration) error CreateBlockStartedCalled func() - CreateMbsAndProcessCrossShardTransactionsDstMeCalled func(header data.HeaderHandler, maxTxRemaining uint32, maxMbRemaining uint32, round uint64, haveTime func() bool) (block.MiniBlockSlice, uint32, bool) + CreateMbsAndProcessCrossShardTransactionsDstMeCalled func(header data.HeaderHandler, processedMiniBlocksHashes map[string]struct{}, maxTxRemaining uint32, maxMbRemaining uint32, round uint64, haveTime func() bool) (block.MiniBlockSlice, uint32, bool) CreateMbsAndProcessTransactionsFromMeCalled func(maxTxRemaining uint32, maxMbRemaining uint32, round uint64, haveTime func() bool) block.MiniBlockSlice CreateMarshalizedDataCalled func(body block.Body) (map[uint32]block.MiniBlockSlice, map[string][][]byte) GetAllCurrentUsedTxsCalled func(blockType block.Type) map[string]data.TransactionHandler @@ -97,12 +97,12 @@ func (tcm *TransactionCoordinatorMock) CreateBlockStarted() { tcm.CreateBlockStartedCalled() } -func (tcm *TransactionCoordinatorMock) CreateMbsAndProcessCrossShardTransactionsDstMe(header data.HeaderHandler, maxTxRemaining uint32, maxMbRemaining uint32, round uint64, haveTime func() bool) (block.MiniBlockSlice, uint32, bool) { +func (tcm *TransactionCoordinatorMock) CreateMbsAndProcessCrossShardTransactionsDstMe(header data.HeaderHandler, processedMiniBlocksHashes map[string]struct{}, maxTxRemaining uint32, maxMbRemaining uint32, round uint64, haveTime func() bool) (block.MiniBlockSlice, uint32, bool) { if tcm.CreateMbsAndProcessCrossShardTransactionsDstMeCalled == nil { return nil, 0, false } - return tcm.CreateMbsAndProcessCrossShardTransactionsDstMeCalled(header, maxTxRemaining, maxMbRemaining, round, haveTime) + return tcm.CreateMbsAndProcessCrossShardTransactionsDstMeCalled(header, processedMiniBlocksHashes, maxTxRemaining, maxMbRemaining, round, haveTime) } func (tcm *TransactionCoordinatorMock) CreateMbsAndProcessTransactionsFromMe(maxTxRemaining uint32, maxMbRemaining uint32, round uint64, haveTime func() bool) block.MiniBlockSlice { diff --git a/process/sync/shardForkDetector.go b/process/sync/shardForkDetector.go index 334f8d0fdae..7e3faa1cf7c 100644 --- a/process/sync/shardForkDetector.go +++ b/process/sync/shardForkDetector.go @@ -82,8 +82,8 @@ func (sfd *shardForkDetector) AddHeader( func (sfd *shardForkDetector) addFinalHeaders(finalHeaders []data.HeaderHandler, finalHeadersHashes [][]byte) { finalCheckpointWasSet := false for i := 0; i < len(finalHeaders); i++ { - isFinalHeaderNonceHigherThanCurrent := finalHeaders[i].GetNonce() > sfd.GetHighestFinalBlockNonce() - if isFinalHeaderNonceHigherThanCurrent { + isFinalHeaderNonceHigherThanGenesis := finalHeaders[i].GetNonce() > process.GenesisBlockNonce + if isFinalHeaderNonceHigherThanGenesis { if !finalCheckpointWasSet { sfd.setFinalCheckpoint(&checkpointInfo{nonce: finalHeaders[i].GetNonce(), round: 
finalHeaders[i].GetRound()}) finalCheckpointWasSet = true From ddb77380f5dadee8816e1ff83c96fc2d9cf35514 Mon Sep 17 00:00:00 2001 From: bogdan-rosianu Date: Fri, 27 Sep 2019 13:30:39 +0300 Subject: [PATCH 154/234] Trailing comma in cross block height string --- process/block/metablock.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/process/block/metablock.go b/process/block/metablock.go index c46dbeac945..1c0c321e31d 100644 --- a/process/block/metablock.go +++ b/process/block/metablock.go @@ -598,6 +598,9 @@ func (mp *metaProcessor) saveMetricCrossCheckBlockHeight() { crossCheckBlockHeight += fmt.Sprintf("%d: %d", i, valueStored) } + if len(crossCheckBlockHeight) > 0 { // append a trailing comma for better fetching of data + crossCheckBlockHeight += fmt.Sprintf(",") + } mp.appStatusHandler.SetStringValue(core.MetricCrossCheckBlockHeight, crossCheckBlockHeight) } From 0e82aa38ea1f55d77e1bc8ada415458520bf3bb4 Mon Sep 17 00:00:00 2001 From: bogdan-rosianu Date: Fri, 27 Sep 2019 14:38:51 +0300 Subject: [PATCH 155/234] refactoring --- process/block/metablock.go | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/process/block/metablock.go b/process/block/metablock.go index 1c0c321e31d..f3bcd8f448a 100644 --- a/process/block/metablock.go +++ b/process/block/metablock.go @@ -592,14 +592,7 @@ func (mp *metaProcessor) saveMetricCrossCheckBlockHeight() { continue } - if i > 0 { - crossCheckBlockHeight += ", " - } - - crossCheckBlockHeight += fmt.Sprintf("%d: %d", i, valueStored) - } - if len(crossCheckBlockHeight) > 0 { // append a trailing comma for better fetching of data - crossCheckBlockHeight += fmt.Sprintf(",") + crossCheckBlockHeight += fmt.Sprintf("%d: %d, ", i, valueStored) } mp.appStatusHandler.SetStringValue(core.MetricCrossCheckBlockHeight, crossCheckBlockHeight) From 56b8f49686b9fc44145fdb97c360b16801a35a8e Mon Sep 17 00:00:00 2001 From: bogdan-rosianu Date: Fri, 27 Sep 2019 15:40:48 +0300 Subject: [PATCH 156/234] EN-4178: StorageUnit's Put() method fix --- storage/storageUnit/storageunit.go | 7 +------ storage/storageUnit/storageunit_test.go | 11 +++++++---- 2 files changed, 8 insertions(+), 10 deletions(-) diff --git a/storage/storageUnit/storageunit.go b/storage/storageUnit/storageunit.go index 51d926d1c71..63c925d5ad9 100644 --- a/storage/storageUnit/storageunit.go +++ b/storage/storageUnit/storageunit.go @@ -97,12 +97,6 @@ func (s *Unit) Put(key, data []byte) error { s.lock.Lock() defer s.lock.Unlock() - // no need to add if already present in cache - has := s.cacher.Has(key) - if has { - return nil - } - s.cacher.Put(key, data) err := s.persister.Put(key, data) @@ -172,6 +166,7 @@ func (s *Unit) Has(key []byte) error { // it updates the cache either way // it returns if the value was originally found func (s *Unit) HasOrAdd(key []byte, value []byte) error { + // TODO : refactor this method as not all edge cases seem to be treated s.lock.Lock() defer s.lock.Unlock() diff --git a/storage/storageUnit/storageunit_test.go b/storage/storageUnit/storageunit_test.go index ceb63278697..f7e32aa7d6d 100644 --- a/storage/storageUnit/storageunit_test.go +++ b/storage/storageUnit/storageunit_test.go @@ -159,17 +159,20 @@ func TestPutNotPresentCacheWithNilBloomFilter(t *testing.T) { assert.Nil(t, err, "expected to find key %s, but not found", key) } -func TestPutPresent(t *testing.T) { +func TestPutPresentShouldOverwriteValue(t *testing.T) { key, val := []byte("key2"), []byte("value2") s := initStorageUnitWithBloomFilter(t, 10) err := s.Put(key, val) assert.Nil(t, 
err, "no error expected but got %s", err) - // put again same value, no error expected - err = s.Put(key, val) - + newVal := []byte("value5") + err = s.Put(key, newVal) assert.Nil(t, err, "no error expected but got %s", err) + + returnedVal, err := s.Get(key) + assert.Nil(t, err) + assert.Equal(t, newVal, returnedVal) } func TestPutPresentWithNilBloomFilter(t *testing.T) { From 2fc823b99b8ac69156fbf674f2d6fd6c83ab638d Mon Sep 17 00:00:00 2001 From: bogdan-rosianu Date: Fri, 27 Sep 2019 16:34:22 +0300 Subject: [PATCH 157/234] EN-4178: removed HasOrAdd for Storer --- data/mock/storerStub.go | 5 -- dataRetriever/mock/storerStub.go | 5 -- process/mock/storerStub.go | 5 -- storage/interface.go | 1 - storage/storageUnit/storageunit.go | 43 ---------------- storage/storageUnit/storageunit_test.go | 66 ------------------------- 6 files changed, 125 deletions(-) diff --git a/data/mock/storerStub.go b/data/mock/storerStub.go index d189606d753..af7d1b3ee16 100644 --- a/data/mock/storerStub.go +++ b/data/mock/storerStub.go @@ -4,7 +4,6 @@ type StorerStub struct { PutCalled func(key, data []byte) error GetCalled func(key []byte) ([]byte, error) HasCalled func(key []byte) error - HasOrAddCalled func(key []byte, value []byte) error RemoveCalled func(key []byte) error ClearCacheCalled func() DestroyUnitCalled func() error @@ -22,10 +21,6 @@ func (ss *StorerStub) Has(key []byte) error { return ss.HasCalled(key) } -func (ss *StorerStub) HasOrAdd(key []byte, value []byte) error { - return ss.HasOrAddCalled(key, value) -} - func (ss *StorerStub) Remove(key []byte) error { return ss.RemoveCalled(key) } diff --git a/dataRetriever/mock/storerStub.go b/dataRetriever/mock/storerStub.go index d189606d753..af7d1b3ee16 100644 --- a/dataRetriever/mock/storerStub.go +++ b/dataRetriever/mock/storerStub.go @@ -4,7 +4,6 @@ type StorerStub struct { PutCalled func(key, data []byte) error GetCalled func(key []byte) ([]byte, error) HasCalled func(key []byte) error - HasOrAddCalled func(key []byte, value []byte) error RemoveCalled func(key []byte) error ClearCacheCalled func() DestroyUnitCalled func() error @@ -22,10 +21,6 @@ func (ss *StorerStub) Has(key []byte) error { return ss.HasCalled(key) } -func (ss *StorerStub) HasOrAdd(key []byte, value []byte) error { - return ss.HasOrAddCalled(key, value) -} - func (ss *StorerStub) Remove(key []byte) error { return ss.RemoveCalled(key) } diff --git a/process/mock/storerStub.go b/process/mock/storerStub.go index d189606d753..af7d1b3ee16 100644 --- a/process/mock/storerStub.go +++ b/process/mock/storerStub.go @@ -4,7 +4,6 @@ type StorerStub struct { PutCalled func(key, data []byte) error GetCalled func(key []byte) ([]byte, error) HasCalled func(key []byte) error - HasOrAddCalled func(key []byte, value []byte) error RemoveCalled func(key []byte) error ClearCacheCalled func() DestroyUnitCalled func() error @@ -22,10 +21,6 @@ func (ss *StorerStub) Has(key []byte) error { return ss.HasCalled(key) } -func (ss *StorerStub) HasOrAdd(key []byte, value []byte) error { - return ss.HasOrAddCalled(key, value) -} - func (ss *StorerStub) Remove(key []byte) error { return ss.RemoveCalled(key) } diff --git a/storage/interface.go b/storage/interface.go index 6ae9a52618e..4fb8069d1b2 100644 --- a/storage/interface.go +++ b/storage/interface.go @@ -83,7 +83,6 @@ type Storer interface { Put(key, data []byte) error Get(key []byte) ([]byte, error) Has(key []byte) error - HasOrAdd(key []byte, value []byte) error Remove(key []byte) error ClearCache() DestroyUnit() error diff --git 
a/storage/storageUnit/storageunit.go b/storage/storageUnit/storageunit.go index 63c925d5ad9..571676e8d0c 100644 --- a/storage/storageUnit/storageunit.go +++ b/storage/storageUnit/storageunit.go @@ -162,49 +162,6 @@ func (s *Unit) Has(key []byte) error { return storage.ErrKeyNotFound } -// HasOrAdd checks if the key is present in the storage and if not adds it. -// it updates the cache either way -// it returns if the value was originally found -func (s *Unit) HasOrAdd(key []byte, value []byte) error { - // TODO : refactor this method as not all edge cases seem to be treated - s.lock.Lock() - defer s.lock.Unlock() - - has := s.cacher.Has(key) - if has { - return nil - } - - if s.bloomFilter == nil || s.bloomFilter.MayContain(key) == true { - err := s.persister.Has(key) - if err != nil { - //add it to the cache - s.cacher.Put(key, value) - - // add it also to the persistence unit - err = s.persister.Put(key, value) - if err != nil { - //revert adding to the cache - s.cacher.Remove(key) - } - } - - return err - } - - s.cacher.Put(key, value) - - err := s.persister.Put(key, value) - if err != nil { - s.cacher.Remove(key) - return err - } - - s.bloomFilter.Add(key) - - return nil -} - // Remove removes the data associated to the given key from both cache and persistence medium func (s *Unit) Remove(key []byte) error { s.lock.Lock() diff --git a/storage/storageUnit/storageunit_test.go b/storage/storageUnit/storageunit_test.go index f7e32aa7d6d..14b7a460041 100644 --- a/storage/storageUnit/storageunit_test.go +++ b/storage/storageUnit/storageunit_test.go @@ -330,72 +330,6 @@ func TestHasPresentWithNilBloomFilter(t *testing.T) { assert.Nil(t, err, "expected no error, but got %s", err) } -func TestHasOrAddNotPresent(t *testing.T) { - key, val := []byte("key9"), []byte("value9") - s := initStorageUnitWithBloomFilter(t, 10) - err := s.HasOrAdd(key, val) - - assert.Nil(t, err) - err = s.Has(key) - - assert.Nil(t, err, "expected no error, but got %s", err) -} - -func TestHasOrAddNotPresentWithNilBloomFilter(t *testing.T) { - key, val := []byte("key9"), []byte("value9") - s := initStorageUnitWithNilBloomFilter(t, 10) - err := s.HasOrAdd(key, val) - - assert.Nil(t, err) - err = s.Has(key) - - assert.Nil(t, err, "expected no error, but got %s", err) -} - -func TestHasOrAddNotPresentCache(t *testing.T) { - key, val := []byte("key10"), []byte("value10") - s := initStorageUnitWithBloomFilter(t, 10) - err := s.Put(key, val) - - s.ClearCache() - - err = s.HasOrAdd(key, val) - - assert.Nil(t, err, "expected no error, but got %s", err) -} - -func TestHasOrAddNotPresentCacheWithNilBloomFilter(t *testing.T) { - key, val := []byte("key10"), []byte("value10") - s := initStorageUnitWithNilBloomFilter(t, 10) - err := s.Put(key, val) - - s.ClearCache() - - err = s.HasOrAdd(key, val) - - assert.Nil(t, err, "expected no error, but got %s", err) -} - -func TestHasOrAddPresent(t *testing.T) { - key, val := []byte("key11"), []byte("value11") - s := initStorageUnitWithBloomFilter(t, 10) - _ = s.Put(key, val) - - err := s.HasOrAdd(key, val) - - assert.Nil(t, err, "expected no error, but got %s", err) -} - -func TestHasOrAddPresentWithNilBloomFilter(t *testing.T) { - key, val := []byte("key11"), []byte("value11") - s := initStorageUnitWithNilBloomFilter(t, 10) - _ = s.Put(key, val) - - err := s.HasOrAdd(key, val) - - assert.Nil(t, err, "expected no error, but got %s", err) -} - func TestDeleteNotPresent(t *testing.T) { key := []byte("key12") s := initStorageUnitWithBloomFilter(t, 10) From 
b0bbdc0a1976d50f07fd4ce3bd77c123c5d34562 Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Fri, 27 Sep 2019 18:21:46 +0300 Subject: [PATCH 158/234] process, integrationTests: fixes after merge --- integrationTests/sync/basicSync_test.go | 4 +- .../block/preprocess/rewardTxPreProcessor.go | 46 +++++++++---------- .../preprocess/rewardTxPreProcessor_test.go | 3 +- .../preprocess/smartContractResults_test.go | 5 +- process/block/shardblock.go | 7 ++- 5 files changed, 32 insertions(+), 33 deletions(-) diff --git a/integrationTests/sync/basicSync_test.go b/integrationTests/sync/basicSync_test.go index 8ed852a0439..18705efa9d8 100644 --- a/integrationTests/sync/basicSync_test.go +++ b/integrationTests/sync/basicSync_test.go @@ -149,8 +149,8 @@ func TestSyncWorksInShard_EmptyBlocksDoubleSign(t *testing.T) { time.Sleep(stepSync) - pubKeysVariant1 := []byte("1") - pubKeysVariant2 := []byte("2") + pubKeysVariant1 := []byte{3} + pubKeysVariant2 := []byte{1} proposeBlockWithPubKeyBitmap(nodes[idxProposerShard0], round, nonce, pubKeysVariant1) proposeBlockWithPubKeyBitmap(nodes[1], round, nonce, pubKeysVariant2) diff --git a/process/block/preprocess/rewardTxPreProcessor.go b/process/block/preprocess/rewardTxPreProcessor.go index 5855d33094b..a291f256b2d 100644 --- a/process/block/preprocess/rewardTxPreProcessor.go +++ b/process/block/preprocess/rewardTxPreProcessor.go @@ -132,13 +132,11 @@ func (rtp *rewardTxPreprocessor) RemoveTxBlockFromPools(body block.Body, miniBlo func (rtp *rewardTxPreprocessor) RestoreTxBlockIntoPools( body block.Body, miniBlockPool storage.Cacher, -) (int, map[int][]byte, error) { +) (int, error) { if miniBlockPool == nil { - return 0, nil, process.ErrNilMiniBlockPool + return 0, process.ErrNilMiniBlockPool } - miniBlockHashes := make(map[int][]byte) - rewardTxsRestored := 0 for i := 0; i < len(body); i++ { miniBlock := body[i] @@ -149,14 +147,14 @@ func (rtp *rewardTxPreprocessor) RestoreTxBlockIntoPools( strCache := process.ShardCacherIdentifier(miniBlock.SenderShardID, miniBlock.ReceiverShardID) rewardTxBuff, err := rtp.storage.GetAll(dataRetriever.RewardTransactionUnit, miniBlock.TxHashes) if err != nil { - return rewardTxsRestored, miniBlockHashes, err + return rewardTxsRestored, err } for txHash, txBuff := range rewardTxBuff { tx := rewardTx.RewardTx{} err = rtp.marshalizer.Unmarshal(&tx, txBuff) if err != nil { - return rewardTxsRestored, miniBlockHashes, err + return rewardTxsRestored, err } rtp.rewardTxPool.AddData([]byte(txHash), &tx, strCache) @@ -164,21 +162,19 @@ func (rtp *rewardTxPreprocessor) RestoreTxBlockIntoPools( miniBlockHash, err := core.CalculateHash(rtp.marshalizer, rtp.hasher, miniBlock) if err != nil { - return rewardTxsRestored, miniBlockHashes, err + return rewardTxsRestored, err } - restoredHash := rtp.restoreMiniBlock(miniBlock, miniBlockHash, miniBlockPool) + miniBlockPool.Put(miniBlockHash, miniBlock) err = rtp.storage.GetStorer(dataRetriever.MiniBlockUnit).Remove(miniBlockHash) if err != nil { - return rewardTxsRestored, miniBlockHashes, err + return rewardTxsRestored, err } - - miniBlockHashes[i] = restoredHash rewardTxsRestored += len(miniBlock.TxHashes) } - return rewardTxsRestored, miniBlockHashes, nil + return rewardTxsRestored, nil } // ProcessBlockTransactions processes all the reward transactions from the block.Body, updates the state @@ -204,13 +200,13 @@ func (rtp *rewardTxPreprocessor) ProcessBlockTransactions(body block.Body, round txHash := miniBlock.TxHashes[j] rtp.rewardTxsForBlock.mutTxsForBlock.RLock() - txInfo := 
rtp.rewardTxsForBlock.txHashAndInfo[string(txHash)] + txData := rtp.rewardTxsForBlock.txHashAndInfo[string(txHash)] rtp.rewardTxsForBlock.mutTxsForBlock.RUnlock() - if txInfo == nil || txInfo.tx == nil { + if txData == nil || txData.tx == nil { return process.ErrMissingTransaction } - rTx, ok := txInfo.tx.(*rewardTx.RewardTx) + rTx, ok := txData.tx.(*rewardTx.RewardTx) if !ok { return process.ErrWrongTypeAssertion } @@ -233,7 +229,7 @@ func (rtp *rewardTxPreprocessor) ProcessBlockTransactions(body block.Body, round // AddComputedRewardMiniBlocks adds to the local cache the reward transactions from the given miniblocks func (rtp *rewardTxPreprocessor) AddComputedRewardMiniBlocks(computedRewardMiniblocks block.MiniBlockSlice) { for _, rewardMb := range computedRewardMiniblocks { - txShardInfo := &txShardInfo{senderShardID: rewardMb.SenderShardID, receiverShardID: rewardMb.ReceiverShardID} + txShardData := &txShardInfo{senderShardID: rewardMb.SenderShardID, receiverShardID: rewardMb.ReceiverShardID} for _, txHash := range rewardMb.TxHashes { tx, ok := rtp.rewardTxPool.SearchFirstData(txHash) if !ok { @@ -248,7 +244,7 @@ func (rtp *rewardTxPreprocessor) AddComputedRewardMiniBlocks(computedRewardMinib rtp.rewardTxsForBlock.txHashAndInfo[string(txHash)] = &txInfo{ tx: rTx, - txShardInfo: txShardInfo, + txShardInfo: txShardData, } } } @@ -317,9 +313,9 @@ func (rtp *rewardTxPreprocessor) RequestBlockTransactions(body block.Body) int { } func (rtp *rewardTxPreprocessor) setMissingTxsForShard(senderShardID uint32, mbTxHashes *txsHashesInfo) { - txShardInfo := &txShardInfo{senderShardID: senderShardID, receiverShardID: mbTxHashes.receiverShardID} + txShardData := &txShardInfo{senderShardID: senderShardID, receiverShardID: mbTxHashes.receiverShardID} for _, txHash := range mbTxHashes.txHashes { - rtp.rewardTxsForBlock.txHashAndInfo[string(txHash)] = &txInfo{tx: nil, txShardInfo: txShardInfo} + rtp.rewardTxsForBlock.txHashAndInfo[string(txHash)] = &txInfo{tx: nil, txShardInfo: txShardData} } } @@ -363,9 +359,9 @@ func (rtp *rewardTxPreprocessor) processRewardTransaction( return err } - txShardInfo := &txShardInfo{senderShardID: sndShardId, receiverShardID: dstShardId} + txShardData := &txShardInfo{senderShardID: sndShardId, receiverShardID: dstShardId} rtp.rewardTxsForBlock.mutTxsForBlock.Lock() - rtp.rewardTxsForBlock.txHashAndInfo[string(rewardTxHash)] = &txInfo{tx: rewardTx, txShardInfo: txShardInfo} + rtp.rewardTxsForBlock.txHashAndInfo[string(rewardTxHash)] = &txInfo{tx: rewardTx, txShardInfo: txShardData} rtp.rewardTxsForBlock.mutTxsForBlock.Unlock() return nil @@ -506,11 +502,11 @@ func (rtp *rewardTxPreprocessor) ProcessMiniBlock(miniBlock *block.MiniBlock, ha } } - txShardInfo := &txShardInfo{senderShardID: miniBlock.SenderShardID, receiverShardID: miniBlock.ReceiverShardID} + txShardData := &txShardInfo{senderShardID: miniBlock.SenderShardID, receiverShardID: miniBlock.ReceiverShardID} rtp.rewardTxsForBlock.mutTxsForBlock.Lock() for index, txHash := range miniBlockTxHashes { - rtp.rewardTxsForBlock.txHashAndInfo[string(txHash)] = &txInfo{tx: miniBlockRewardTxs[index], txShardInfo: txShardInfo} + rtp.rewardTxsForBlock.txHashAndInfo[string(txHash)] = &txInfo{tx: miniBlockRewardTxs[index], txShardInfo: txShardData} } rtp.rewardTxsForBlock.mutTxsForBlock.Unlock() @@ -532,8 +528,8 @@ func (rtp *rewardTxPreprocessor) GetAllCurrentUsedTxs() map[string]data.Transact rewardTxPool := make(map[string]data.TransactionHandler) rtp.rewardTxsForBlock.mutTxsForBlock.RLock() - for txHash, txInfo := range 
rtp.rewardTxsForBlock.txHashAndInfo { - rewardTxPool[txHash] = txInfo.tx + for txHash, txData := range rtp.rewardTxsForBlock.txHashAndInfo { + rewardTxPool[txHash] = txData.tx } rtp.rewardTxsForBlock.mutTxsForBlock.RUnlock() diff --git a/process/block/preprocess/rewardTxPreProcessor_test.go b/process/block/preprocess/rewardTxPreProcessor_test.go index d245c3eab50..226b432cc05 100644 --- a/process/block/preprocess/rewardTxPreProcessor_test.go +++ b/process/block/preprocess/rewardTxPreProcessor_test.go @@ -599,9 +599,8 @@ func TestRewardTxPreprocessor_RestoreTxBlockIntoPools(t *testing.T) { blockBody = append(blockBody, &mb1) miniBlockPool := mock.NewCacherMock() - numRestoredTxs, resMap, err := rtp.RestoreTxBlockIntoPools(blockBody, miniBlockPool) + numRestoredTxs, err := rtp.RestoreTxBlockIntoPools(blockBody, miniBlockPool) assert.Equal(t, 1, numRestoredTxs) - assert.NotNil(t, resMap) assert.Nil(t, err) } diff --git a/process/block/preprocess/smartContractResults_test.go b/process/block/preprocess/smartContractResults_test.go index 3b8442ab0ae..22851d4d989 100644 --- a/process/block/preprocess/smartContractResults_test.go +++ b/process/block/preprocess/smartContractResults_test.go @@ -794,9 +794,8 @@ func TestScrsPreprocessor_RestoreTxBlockIntoPools(t *testing.T) { body = append(body, &miniblock) miniblockPool := mock.NewCacherMock() - scrRestored, miniBlockHashes, err := scr.RestoreTxBlockIntoPools(body, miniblockPool) + scrRestored, err := scr.RestoreTxBlockIntoPools(body, miniblockPool) - assert.Equal(t, miniBlockHashes[0], []byte(nil)) assert.Equal(t, scrRestored, 1) assert.Nil(t, err) } @@ -826,7 +825,7 @@ func TestScrsPreprocessor__RestoreTxBlockIntoPoolsNilMiniblockPoolShouldErr(t *t miniblockPool := storage.Cacher(nil) - _, _, err := scr.RestoreTxBlockIntoPools(body, miniblockPool) + _, err := scr.RestoreTxBlockIntoPools(body, miniblockPool) assert.NotNil(t, err) assert.Equal(t, err, process.ErrNilMiniBlockPool) diff --git a/process/block/shardblock.go b/process/block/shardblock.go index 00a25280dbb..6362e474934 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -928,6 +928,11 @@ func (sp *shardProcessor) getProcessedMetaBlocksFromHeader(header *block.Header) return nil, process.ErrNilMetaBlockHeader } + metaBlock, ok := obj.(*block.MetaBlock) + if !ok { + return nil, process.ErrWrongTypeAssertion + } + crossMiniBlockHashes := metaBlock.GetMiniBlockHeadersWithDst(sp.shardCoordinator.SelfId()) for key := range crossMiniBlockHashes { if usedMbs[key] { @@ -993,7 +998,7 @@ func (sp *shardProcessor) getProcessedMetaBlocksFromMiniBlockHashes( crossMiniBlockHashes := metaBlock.GetMiniBlockHeadersWithDst(sp.shardCoordinator.SelfId()) for hash := range crossMiniBlockHashes { - processedMBs[hash] = metaBlock.GetMiniBlockProcessed([]byte(hash)) + processedMBs[hash] = sp.isMiniBlockProcessed(metaBlockKey, []byte(hash)) } for key := range miniBlockHashes { From b2b54e0c710c2ae5f239b791eeb14e03db656fe3 Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Fri, 27 Sep 2019 18:25:08 +0300 Subject: [PATCH 159/234] storage: put also overwrites --- storage/storageUnit/storageunit.go | 6 ------ 1 file changed, 6 deletions(-) diff --git a/storage/storageUnit/storageunit.go b/storage/storageUnit/storageunit.go index 51d926d1c71..dcabdd450a6 100644 --- a/storage/storageUnit/storageunit.go +++ b/storage/storageUnit/storageunit.go @@ -97,12 +97,6 @@ func (s *Unit) Put(key, data []byte) error { s.lock.Lock() defer s.lock.Unlock() - // no need to add if already present in cache - has := 
s.cacher.Has(key) - if has { - return nil - } - s.cacher.Put(key, data) err := s.persister.Put(key, data) From c6dffe05a889bb8cf3dc21a6baed68de9cbf0d54 Mon Sep 17 00:00:00 2001 From: miiu96 Date: Mon, 30 Sep 2019 17:13:38 +0300 Subject: [PATCH 160/234] EN-3981 - modify data in elastic search --- consensus/mock/nodesCoordinatorMock.go | 4 ++ .../spos/commonSubround/subroundStartRound.go | 2 +- core/indexer/data.go | 35 ++++++++------ core/indexer/elasticsearch.go | 46 ++++++++++--------- core/indexer/elasticsearch_test.go | 6 ++- core/indexer/interface.go | 2 +- core/mock/indexerMock.go | 2 +- data/mock/nodesCoordinatorMock.go | 8 ++++ integrationTests/mock/nodesCoordinatorMock.go | 4 ++ node/mock/indexerMock.go | 2 +- node/mock/nodesCoordinatorMock.go | 4 ++ process/block/shardblock.go | 37 ++++++++++----- process/mock/indexerMock.go | 2 +- process/mock/nodesCoordinatorMock.go | 4 ++ sharding/indexHashedNodesCoordinator.go | 16 +++++++ sharding/interface.go | 1 + 16 files changed, 121 insertions(+), 54 deletions(-) diff --git a/consensus/mock/nodesCoordinatorMock.go b/consensus/mock/nodesCoordinatorMock.go index be134a61934..fa0ce8502ee 100644 --- a/consensus/mock/nodesCoordinatorMock.go +++ b/consensus/mock/nodesCoordinatorMock.go @@ -41,6 +41,10 @@ func (ncm *NodesCoordinatorMock) GetAllValidatorsPublicKeys() map[uint32][][]byt return nil } +func (ncm *NodesCoordinatorMock) GetValidatorsIndexes(publicKeys []string) []uint64 { + return nil +} + func (ncm *NodesCoordinatorMock) GetValidatorsPublicKeys(randomness []byte, round uint64, shardId uint32) ([]string, error) { if ncm.GetValidatorsPublicKeysCalled != nil { return ncm.GetValidatorsPublicKeysCalled(randomness, round, shardId) diff --git a/consensus/spos/commonSubround/subroundStartRound.go b/consensus/spos/commonSubround/subroundStartRound.go index 5094dbd6456..5979d107017 100644 --- a/consensus/spos/commonSubround/subroundStartRound.go +++ b/consensus/spos/commonSubround/subroundStartRound.go @@ -224,7 +224,7 @@ func (sr *SubroundStartRound) indexRoundIfNeeded(pubKeys []string) { } round := sr.Rounder().Index() - go sr.indexer.SaveRoundInfo(round, shardId, signersIndexes) + go sr.indexer.SaveRoundInfo(round, shardId, signersIndexes, false) } func (sr *SubroundStartRound) generateNextConsensusGroup(roundIndex int64) error { diff --git a/core/indexer/data.go b/core/indexer/data.go index 53dcc57f2db..b6bb7c0812c 100644 --- a/core/indexer/data.go +++ b/core/indexer/data.go @@ -31,25 +31,30 @@ type Transaction struct { // to be saved for a block. 
It has all the default fields // plus some extra information for ease of search and filter type Block struct { - Nonce uint64 `json:"nonce"` - Round uint64 `json:"round"` - ShardID uint32 `json:"shardId"` - Hash string `json:"hash"` - Proposer string `json:"proposer"` - Validators []string `json:"validators"` - PubKeyBitmap string `json:"pubKeyBitmap"` - Size int64 `json:"size"` - Timestamp time.Duration `json:"timestamp"` - TxCount uint32 `json:"txCount"` - StateRootHash string `json:"stateRootHash"` - PrevHash string `json:"prevHash"` - SignersIndexes []uint64 `json:"signersIndexes"` + Nonce uint64 `json:"nonce"` + Round uint64 `json:"round"` + ShardID uint32 `json:"shardId"` + Hash string `json:"hash"` + Proposer uint64 `json:"proposer"` + Validators []uint64 `json:"validators"` + PubKeyBitmap string `json:"pubKeyBitmap"` + Size int64 `json:"size"` + Timestamp time.Duration `json:"timestamp"` + TxCount uint32 `json:"txCount"` + StateRootHash string `json:"stateRootHash"` + PrevHash string `json:"prevHash"` +} + +//ValidatorsPublicKeys is a structure containing fields for validator public keys +type ValidatorsPublicKeys struct { + PublicKeys []string `json:"publicKeys"` } // RoundInfo is a structure containing block signers and shard id type RoundInfo struct { - SignersIndexes []uint64 `json:"signersIndexes"` - ShardId uint32 `json:"shardId"` + SignersIndexes []uint64 `json:"signersIndexes"` + BlockWasProposed bool `json:"blockWasProposed"` + ShardId uint32 `json:"shardId"` } // TPS is a structure containing all the fields that need to diff --git a/core/indexer/elasticsearch.go b/core/indexer/elasticsearch.go index 8860dd720d2..68d69781621 100644 --- a/core/indexer/elasticsearch.go +++ b/core/indexer/elasticsearch.go @@ -239,12 +239,13 @@ func (ei *elasticIndexer) SaveBlock( } // SaveRoundInfo will save data about a round on elastic -func (ei *elasticIndexer) SaveRoundInfo(round int64, shardId uint32, signersIndexes []uint64) { +func (ei *elasticIndexer) SaveRoundInfo(round int64, shardId uint32, signersIndexes []uint64, blockWasProposed bool) { var buff bytes.Buffer roundInfo := RoundInfo{ - SignersIndexes: signersIndexes, - ShardId: shardId, + SignersIndexes: signersIndexes, + BlockWasProposed: blockWasProposed, + ShardId: shardId, } marshalizedSignersIndexes, err := ei.marshalizer.Marshal(roundInfo) @@ -278,16 +279,21 @@ func (ei *elasticIndexer) SaveRoundInfo(round int64, shardId uint32, signersInde //SaveValidatorsPubKeys will send all validators public keys to elastic search func (ei *elasticIndexer) SaveValidatorsPubKeys(validatorsPubKeys map[uint32][][]byte) { - var buff bytes.Buffer - valPubKeys := make(map[uint32][]string, 0) for shardId, shardPubKeys := range validatorsPubKeys { for _, pubKey := range shardPubKeys { valPubKeys[shardId] = append(valPubKeys[shardId], hex.EncodeToString(pubKey)) } + + go ei.saveShardValidatorsPubKeys(shardId, valPubKeys[shardId]) } +} - marshalizedValidatorPubKeys, err := ei.marshalizer.Marshal(valPubKeys) +func (ei *elasticIndexer) saveShardValidatorsPubKeys(shardId uint32, shardValidatorsPubKeys []string) { + var buff bytes.Buffer + + shardValPubKeys := ValidatorsPublicKeys{PublicKeys: shardValidatorsPubKeys} + marshalizedValidatorPubKeys, err := ei.marshalizer.Marshal(shardValPubKeys) if err != nil { ei.logger.Warn("could not marshal validators public keys") return @@ -298,7 +304,7 @@ func (ei *elasticIndexer) SaveValidatorsPubKeys(validatorsPubKeys map[uint32][][ req := esapi.IndexRequest{ Index: validatorsIndex, - DocumentID: 
"validators_list", + DocumentID: strconv.FormatUint(uint64(shardId), 10), Body: bytes.NewReader(buff.Bytes()), Refresh: "true", } @@ -325,20 +331,18 @@ func (ei *elasticIndexer) getSerializedElasticBlockAndHeaderHash(header data.Hea headerHash := ei.hasher.Compute(string(h)) elasticBlock := Block{ - Nonce: header.GetNonce(), - Round: header.GetRound(), - ShardID: header.GetShardID(), - Hash: hex.EncodeToString(headerHash), - // TODO: We should add functionality for proposer and validators - Proposer: hex.EncodeToString([]byte("mock proposer")), - //Validators: "mock validators", - PubKeyBitmap: hex.EncodeToString(header.GetPubKeysBitmap()), - Size: int64(len(h)), - Timestamp: time.Duration(header.GetTimeStamp()), - TxCount: header.GetTxCount(), - StateRootHash: hex.EncodeToString(header.GetRootHash()), - PrevHash: hex.EncodeToString(header.GetPrevHash()), - SignersIndexes: signersIndexes, + Nonce: header.GetNonce(), + Round: header.GetRound(), + ShardID: header.GetShardID(), + Hash: hex.EncodeToString(headerHash), + Proposer: signersIndexes[0], + Validators: signersIndexes, + PubKeyBitmap: hex.EncodeToString(header.GetPubKeysBitmap()), + Size: int64(len(h)), + Timestamp: time.Duration(header.GetTimeStamp()), + TxCount: header.GetTxCount(), + StateRootHash: hex.EncodeToString(header.GetRootHash()), + PrevHash: hex.EncodeToString(header.GetPrevHash()), } serializedBlock, err := json.Marshal(elasticBlock) diff --git a/core/indexer/elasticsearch_test.go b/core/indexer/elasticsearch_test.go index 51fc51ab3ee..a16007906b8 100644 --- a/core/indexer/elasticsearch_test.go +++ b/core/indexer/elasticsearch_test.go @@ -267,8 +267,9 @@ func TestNewElasticIndexerIncorrectUrl(t *testing.T) { func TestElasticIndexer_getSerializedElasticBlockAndHeaderHash(t *testing.T) { ei := indexer.NewTestElasticIndexer(url, username, password, shardCoordinator, marshalizer, hasher, log, &indexer.Options{}) header := newTestBlockHeader() + signersIndexes := []uint64{0, 1, 2, 3} - serializedBlock, headerHash := ei.GetSerializedElasticBlockAndHeaderHash(header, nil) + serializedBlock, headerHash := ei.GetSerializedElasticBlockAndHeaderHash(header, signersIndexes) h, _ := marshalizer.Marshal(header) expectedHeaderHash := hasher.Compute(string(h)) @@ -279,7 +280,8 @@ func TestElasticIndexer_getSerializedElasticBlockAndHeaderHash(t *testing.T) { Round: header.Round, ShardID: header.ShardId, Hash: hex.EncodeToString(headerHash), - Proposer: hex.EncodeToString([]byte("mock proposer")), + Proposer: signersIndexes[0], + Validators: signersIndexes, PubKeyBitmap: hex.EncodeToString(header.PubKeysBitmap), Size: int64(len(h)), Timestamp: time.Duration(header.TimeStamp), diff --git a/core/indexer/interface.go b/core/indexer/interface.go index 311a4a9de6f..302510e53e4 100644 --- a/core/indexer/interface.go +++ b/core/indexer/interface.go @@ -9,7 +9,7 @@ import ( // This could be an elasticsearch index, a MySql database or any other external services. 
type Indexer interface { SaveBlock(body data.BodyHandler, header data.HeaderHandler, txPool map[string]data.TransactionHandler, signersIndexes []uint64) - SaveRoundInfo(round int64, shardId uint32, signersIndexes []uint64) + SaveRoundInfo(round int64, shardId uint32, signersIndexes []uint64, blockWasProposed bool) UpdateTPS(tpsBenchmark statistics.TPSBenchmark) SaveValidatorsPubKeys(validatorsPubKeys map[uint32][][]byte) IsInterfaceNil() bool diff --git a/core/mock/indexerMock.go b/core/mock/indexerMock.go index 5440361c6be..d7845cc88c9 100644 --- a/core/mock/indexerMock.go +++ b/core/mock/indexerMock.go @@ -19,7 +19,7 @@ func (im *IndexerMock) UpdateTPS(tpsBenchmark statistics.TPSBenchmark) { panic("implement me") } -func (im *IndexerMock) SaveRoundInfo(round int64, shardId uint32, signersIndexes []uint64) { +func (im *IndexerMock) SaveRoundInfo(round int64, shardId uint32, signersIndexes []uint64, blockWasProposed bool) { panic("implement me") } diff --git a/data/mock/nodesCoordinatorMock.go b/data/mock/nodesCoordinatorMock.go index 56143df1eac..c5f14b54528 100644 --- a/data/mock/nodesCoordinatorMock.go +++ b/data/mock/nodesCoordinatorMock.go @@ -56,6 +56,14 @@ func NewNodesCoordinatorMock() *NodesCoordinatorMock { } } +func (ncm *NodesCoordinatorMock) GetValidatorsIndexes(publicKeys []string) []uint64 { + return nil +} + +func (ncm *NodesCoordinatorMock) GetAllValidatorsPublicKeys() map[uint32][][]byte { + return nil +} + func (ncm *NodesCoordinatorMock) GetSelectedPublicKeys(selection []byte, shardId uint32) (publicKeys []string, err error) { if ncm.GetSelectedPublicKeysCalled != nil { return ncm.GetSelectedPublicKeysCalled(selection, shardId) diff --git a/integrationTests/mock/nodesCoordinatorMock.go b/integrationTests/mock/nodesCoordinatorMock.go index bd27c139d30..d591a93f6e1 100644 --- a/integrationTests/mock/nodesCoordinatorMock.go +++ b/integrationTests/mock/nodesCoordinatorMock.go @@ -14,6 +14,10 @@ func (ncm *NodesCoordinatorMock) GetAllValidatorsPublicKeys() map[uint32][][]byt return nil } +func (ncm *NodesCoordinatorMock) GetValidatorsIndexes(publicKeys []string) []uint64 { + return nil +} + func (ncm *NodesCoordinatorMock) ComputeValidatorsGroup( randomness []byte, round uint64, diff --git a/node/mock/indexerMock.go b/node/mock/indexerMock.go index 5440361c6be..d7845cc88c9 100644 --- a/node/mock/indexerMock.go +++ b/node/mock/indexerMock.go @@ -19,7 +19,7 @@ func (im *IndexerMock) UpdateTPS(tpsBenchmark statistics.TPSBenchmark) { panic("implement me") } -func (im *IndexerMock) SaveRoundInfo(round int64, shardId uint32, signersIndexes []uint64) { +func (im *IndexerMock) SaveRoundInfo(round int64, shardId uint32, signersIndexes []uint64, blockWasProposed bool) { panic("implement me") } diff --git a/node/mock/nodesCoordinatorMock.go b/node/mock/nodesCoordinatorMock.go index 74c7a73bb12..3388ae1da14 100644 --- a/node/mock/nodesCoordinatorMock.go +++ b/node/mock/nodesCoordinatorMock.go @@ -106,6 +106,10 @@ func (ncm *NodesCoordinatorMock) GetValidatorWithPublicKey(publicKey []byte) (sh panic("implement me") } +func (ncm *NodesCoordinatorMock) GetValidatorsIndexes(publicKeys []string) []uint64 { + panic("implement me") +} + // IsInterfaceNil returns true if there is no value under the interface func (ncm *NodesCoordinatorMock) IsInterfaceNil() bool { if ncm == nil { diff --git a/process/block/shardblock.go b/process/block/shardblock.go index e9bd23b386d..9bcb7059182 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -1,7 +1,6 @@ package block import ( - 
"bytes" "fmt" "sort" "sync" @@ -509,7 +508,10 @@ func (sp *shardProcessor) checkAndRequestIfMetaHeadersMissing(round uint64) { func (sp *shardProcessor) indexBlockIfNeeded( body data.BodyHandler, - header data.HeaderHandler) { + header data.HeaderHandler, + lastBlockBody data.BodyHandler, + lastBlockHeader data.HeaderHandler, +) { if sp.core == nil || sp.core.Indexer() == nil { return } @@ -530,18 +532,28 @@ func (sp *shardProcessor) indexBlockIfNeeded( if err != nil { return } - validatorsPubKeys := sp.nodesCoordinator.GetAllValidatorsPublicKeys() - signersIndexes := make([]uint64, 0) - for _, pubKey := range pubKeys { - for index, value := range validatorsPubKeys[shardId] { - if bytes.Equal([]byte(pubKey), value) { - signersIndexes = append(signersIndexes, uint64(index)) - } + signersIndexes := sp.nodesCoordinator.GetValidatorsIndexes(pubKeys) + + go sp.core.Indexer().SaveBlock(body, header, txPool, signersIndexes) + go sp.core.Indexer().SaveRoundInfo(int64(header.GetRound()), shardId, signersIndexes, true) + + lastBlockRound := lastBlockHeader.GetRound() + currentBlockRound := header.GetRound() + + if lastBlockRound > currentBlockRound-1 { + return + } + + for i := lastBlockRound + 1; i < currentBlockRound; i++ { + publicKeys, err := sp.nodesCoordinator.GetValidatorsPublicKeys(lastBlockHeader.GetRandSeed(), i, shardId) + if err != nil { + continue } + signersIndexes = sp.nodesCoordinator.GetValidatorsIndexes(publicKeys) + go sp.core.Indexer().SaveRoundInfo(int64(i), shardId, signersIndexes, true) } - go sp.core.Indexer().SaveBlock(body, header, txPool, signersIndexes) } // RestoreBlockIntoPools restores the TxBlock and MetaBlock into associated pools @@ -811,6 +823,9 @@ func (sp *shardProcessor) CommitBlock( hdrsToAttestPreviousFinal := uint32(header.Nonce-sp.forkDetector.GetHighestFinalBlockNonce()) + 1 sp.removeNotarizedHdrsBehindPreviousFinal(hdrsToAttestPreviousFinal) + lastBlockBody := chainHandler.GetCurrentBlockBody() + lastBlockHeader := chainHandler.GetCurrentBlockHeader() + err = chainHandler.SetCurrentBlockBody(body) if err != nil { return err @@ -822,7 +837,7 @@ func (sp *shardProcessor) CommitBlock( } chainHandler.SetCurrentBlockHeaderHash(headerHash) - sp.indexBlockIfNeeded(bodyHandler, headerHandler) + sp.indexBlockIfNeeded(bodyHandler, headerHandler, lastBlockBody, lastBlockHeader) go sp.cleanTxsPools() diff --git a/process/mock/indexerMock.go b/process/mock/indexerMock.go index e332c3c6f3d..fe1528bec1f 100644 --- a/process/mock/indexerMock.go +++ b/process/mock/indexerMock.go @@ -20,7 +20,7 @@ func (im *IndexerMock) UpdateTPS(tpsBenchmark statistics.TPSBenchmark) { panic("implement me") } -func (im *IndexerMock) SaveRoundInfo(round int64, shardId uint32, signersIndexes []uint64) { +func (im *IndexerMock) SaveRoundInfo(round int64, shardId uint32, signersIndexes []uint64, blockWasProposed bool) { panic("implement me") } diff --git a/process/mock/nodesCoordinatorMock.go b/process/mock/nodesCoordinatorMock.go index 05f8ebc1083..72ee4c72df3 100644 --- a/process/mock/nodesCoordinatorMock.go +++ b/process/mock/nodesCoordinatorMock.go @@ -54,6 +54,10 @@ func (ncm *NodesCoordinatorMock) GetAllValidatorsPublicKeys() map[uint32][][]byt return nil } +func (ncm *NodesCoordinatorMock) GetValidatorsIndexes(publicKeys []string) []uint64 { + return nil +} + func (ncm *NodesCoordinatorMock) GetSelectedPublicKeys(selection []byte, shardId uint32) (publicKeys []string, err error) { if ncm.GetSelectedPublicKeysCalled != nil { return ncm.GetSelectedPublicKeysCalled(selection, shardId) diff 
--git a/sharding/indexHashedNodesCoordinator.go b/sharding/indexHashedNodesCoordinator.go index 8c0f567655c..bb724bc5fa0 100644 --- a/sharding/indexHashedNodesCoordinator.go +++ b/sharding/indexHashedNodesCoordinator.go @@ -239,6 +239,22 @@ func (ihgs *indexHashedNodesCoordinator) GetAllValidatorsPublicKeys() map[uint32 return validatorsPubKeys } +// GetValidatorsIndexes will return validators indexes for a block +func (ihgs *indexHashedNodesCoordinator) GetValidatorsIndexes(publicKeys []string) []uint64 { + validatorsPubKeys := ihgs.GetAllValidatorsPublicKeys() + signersIndexes := make([]uint64, 0) + + for _, pubKey := range publicKeys { + for index, value := range validatorsPubKeys[ihgs.shardId] { + if bytes.Equal([]byte(pubKey), value) { + signersIndexes = append(signersIndexes, uint64(index)) + } + } + } + + return signersIndexes +} + func (ihgs *indexHashedNodesCoordinator) expandEligibleList(shardId uint32) []Validator { //TODO implement an expand eligible list variant return ihgs.nodesMap[shardId] diff --git a/sharding/interface.go b/sharding/interface.go index 84487325e99..b567d2490bb 100644 --- a/sharding/interface.go +++ b/sharding/interface.go @@ -39,6 +39,7 @@ type NodesCoordinator interface { // PublicKeysSelector allows retrieval of eligible validators public keys type PublicKeysSelector interface { + GetValidatorsIndexes(publicKeys []string) []uint64 GetAllValidatorsPublicKeys() map[uint32][][]byte GetSelectedPublicKeys(selection []byte, shardId uint32) (publicKeys []string, err error) GetValidatorsPublicKeys(randomness []byte, round uint64, shardId uint32) ([]string, error) From 56fc2ba0db7c5e0ae4df83002d4a23f2c15af07e Mon Sep 17 00:00:00 2001 From: miiu96 Date: Mon, 30 Sep 2019 17:16:35 +0300 Subject: [PATCH 161/234] EN-4170 : fix failing tests --- consensus/spos/commonSubround/subroundStartRound.go | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) diff --git a/consensus/spos/commonSubround/subroundStartRound.go b/consensus/spos/commonSubround/subroundStartRound.go index 5979d107017..5d0b0d1360f 100644 --- a/consensus/spos/commonSubround/subroundStartRound.go +++ b/consensus/spos/commonSubround/subroundStartRound.go @@ -1,7 +1,6 @@ package commonSubround import ( - "bytes" "encoding/hex" "fmt" "time" @@ -211,18 +210,8 @@ func (sr *SubroundStartRound) indexRoundIfNeeded(pubKeys []string) { return } - validatorsPubKeys := sr.NodesCoordinator().GetAllValidatorsPublicKeys() shardId := sr.ShardCoordinator().SelfId() - signersIndexes := make([]uint64, 0) - - for _, pubKey := range pubKeys { - for index, value := range validatorsPubKeys[shardId] { - if bytes.Equal([]byte(pubKey), value) { - signersIndexes = append(signersIndexes, uint64(index)) - } - } - } - + signersIndexes := sr.NodesCoordinator().GetValidatorsIndexes(pubKeys) round := sr.Rounder().Index() go sr.indexer.SaveRoundInfo(round, shardId, signersIndexes, false) } From f28bcf1c534e1e810eb7e9a9fd94780ff94af2da Mon Sep 17 00:00:00 2001 From: bogdan-rosianu Date: Tue, 1 Oct 2019 09:58:51 +0300 Subject: [PATCH 162/234] EN-1754: fixes after review --- .../block/executingRewardMiniblocks_test.go | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/integrationTests/multiShard/block/executingRewardMiniblocks_test.go b/integrationTests/multiShard/block/executingRewardMiniblocks_test.go index 9c4d16ce703..c0ab6298109 100644 --- a/integrationTests/multiShard/block/executingRewardMiniblocks_test.go +++ b/integrationTests/multiShard/block/executingRewardMiniblocks_test.go @@ -19,6 
+19,11 @@ import ( "github.com/stretchr/testify/assert" ) +func getRewardValue() uint32 { + //TODO: this should be read from protocol config + return uint32(1000) +} + func TestExecuteBlocksWithTransactionsAndCheckRewards(t *testing.T) { if testing.Short() { t.Skip("this is not a short test") @@ -319,9 +324,7 @@ func verifyRewardsForMetachain( mapRewardsForMeta map[string]uint32, nodes map[uint32][]*integrationTests.TestProcessorNode, ) { - - // TODO this should be read from protocol config - rewardValue := uint32(1000) + rewardValue := getRewardValue() for metaAddr, numOfTimesRewarded := range mapRewardsForMeta { addrContainer, _ := integrationTests.TestAddressConverter.CreateAddressFromPublicKeyBytes([]byte(metaAddr)) @@ -341,9 +344,8 @@ func verifyRewardsForShards( gasPrice uint64, gasLimit uint64, ) { - - // TODO: rewards and fee percentage should be read from protocol config - rewardValue := 1000 + rewardValue := getRewardValue() + // TODO: fee percentage should be read from protocol config feePerTxForLeader := gasPrice * gasLimit / 2 for address, nbRewards := range mapRewardsForAddress { From 7eba3d2f3b21e2b4c8dbadd100fa1abc400e4b21 Mon Sep 17 00:00:00 2001 From: miiu96 Date: Tue, 1 Oct 2019 11:19:16 +0300 Subject: [PATCH 163/234] EN-3981 : fix failing tests --- .../spos/commonSubround/subroundStartRound.go | 4 +- core/indexer/elasticsearch.go | 7 +++ core/indexer/export_test.go | 2 +- core/indexer/interface.go | 1 + core/indexer/nilIndexer.go | 47 +++++++++++++++++++ core/mock/indexerMock.go | 4 ++ node/mock/indexerMock.go | 4 ++ process/block/shardblock.go | 12 ++--- process/mock/indexerMock.go | 6 ++- 9 files changed, 76 insertions(+), 11 deletions(-) create mode 100644 core/indexer/nilIndexer.go diff --git a/consensus/spos/commonSubround/subroundStartRound.go b/consensus/spos/commonSubround/subroundStartRound.go index 5d0b0d1360f..f00fb343131 100644 --- a/consensus/spos/commonSubround/subroundStartRound.go +++ b/consensus/spos/commonSubround/subroundStartRound.go @@ -50,7 +50,7 @@ func NewSubroundStartRound( executeStoredMessages, broadcastUnnotarisedBlocks, statusHandler.NewNilStatusHandler(), - nil, + indexer.NewNilIndexer(), } srStartRound.Job = srStartRound.doStartRoundJob srStartRound.Check = srStartRound.doStartRoundConsensusCheck @@ -206,7 +206,7 @@ func (sr *SubroundStartRound) initCurrentRound() bool { } func (sr *SubroundStartRound) indexRoundIfNeeded(pubKeys []string) { - if sr.indexer == nil || sr.IsInterfaceNil() { + if sr.indexer == nil || sr.indexer.IsNilIndexer() { return } diff --git a/core/indexer/elasticsearch.go b/core/indexer/elasticsearch.go index 68d69781621..f1ece77a0dc 100644 --- a/core/indexer/elasticsearch.go +++ b/core/indexer/elasticsearch.go @@ -53,6 +53,7 @@ type elasticIndexer struct { hasher hashing.Hasher logger *logger.Logger options *Options + isNilIndexer bool } // NewElasticIndexer creates a new elasticIndexer where the server listens on the url, authentication for the server is @@ -96,6 +97,7 @@ func NewElasticIndexer( hasher, logger, options, + false, } err = indexer.checkAndCreateIndex(blockIndex, timestampMapping()) @@ -289,6 +291,11 @@ func (ei *elasticIndexer) SaveValidatorsPubKeys(validatorsPubKeys map[uint32][][ } } +// IsNilIndexer will return a bool value that signal if its a nil indexer +func (ei *elasticIndexer) IsNilIndexer() bool { + return ei.isNilIndexer +} + func (ei *elasticIndexer) saveShardValidatorsPubKeys(shardId uint32, shardValidatorsPubKeys []string) { var buff bytes.Buffer diff --git a/core/indexer/export_test.go 
b/core/indexer/export_test.go index ae0004c0a8d..89808723eac 100644 --- a/core/indexer/export_test.go +++ b/core/indexer/export_test.go @@ -37,7 +37,7 @@ func NewTestElasticIndexer( es, _ := elasticsearch.NewClient(cfg) indexer := elasticIndexer{es, shardCoordinator, - marshalizer, hasher, logger, options} + marshalizer, hasher, logger, options, false} return ElasticIndexer{indexer} } diff --git a/core/indexer/interface.go b/core/indexer/interface.go index 302510e53e4..9d9b327ec1d 100644 --- a/core/indexer/interface.go +++ b/core/indexer/interface.go @@ -13,4 +13,5 @@ type Indexer interface { UpdateTPS(tpsBenchmark statistics.TPSBenchmark) SaveValidatorsPubKeys(validatorsPubKeys map[uint32][][]byte) IsInterfaceNil() bool + IsNilIndexer() bool } diff --git a/core/indexer/nilIndexer.go b/core/indexer/nilIndexer.go new file mode 100644 index 00000000000..2ae4278beee --- /dev/null +++ b/core/indexer/nilIndexer.go @@ -0,0 +1,47 @@ +package indexer + +import ( + "github.com/ElrondNetwork/elrond-go/core/statistics" + "github.com/ElrondNetwork/elrond-go/data" +) + +type NilIndexer struct { +} + +// NewNilIndexer will return a Nil indexer +func NewNilIndexer() *NilIndexer { + return new(NilIndexer) +} + +// SaveBlock will do nothing +func (ni *NilIndexer) SaveBlock(body data.BodyHandler, header data.HeaderHandler, txPool map[string]data.TransactionHandler, signersIndexes []uint64) { + return +} + +// SaveRoundInfo will do nothing +func (ni *NilIndexer) SaveRoundInfo(round int64, shardId uint32, signersIndexes []uint64, blockWasProposed bool) { + return +} + +// UpdateTPS will do nothing +func (ni *NilIndexer) UpdateTPS(tpsBenchmark statistics.TPSBenchmark) { + return +} + +// SaveValidatorsPubKeys will do nothing +func (ni *NilIndexer) SaveValidatorsPubKeys(validatorsPubKeys map[uint32][][]byte) { + return +} + +// IsInterfaceNil returns true if there is no value under the interface +func (ni *NilIndexer) IsInterfaceNil() bool { + if ni == nil { + return true + } + return false +} + +// IsNilIndexer return if implementation of indexer is a nil implementation +func (ni *NilIndexer) IsNilIndexer() bool { + return true +} diff --git a/core/mock/indexerMock.go b/core/mock/indexerMock.go index d7845cc88c9..e1adb30ed97 100644 --- a/core/mock/indexerMock.go +++ b/core/mock/indexerMock.go @@ -34,3 +34,7 @@ func (im *IndexerMock) IsInterfaceNil() bool { } return false } + +func (im *IndexerMock) IsNilIndexer() bool { + return false +} diff --git a/node/mock/indexerMock.go b/node/mock/indexerMock.go index d7845cc88c9..e1adb30ed97 100644 --- a/node/mock/indexerMock.go +++ b/node/mock/indexerMock.go @@ -34,3 +34,7 @@ func (im *IndexerMock) IsInterfaceNil() bool { } return false } + +func (im *IndexerMock) IsNilIndexer() bool { + return false +} diff --git a/process/block/shardblock.go b/process/block/shardblock.go index 9bcb7059182..5945dcc7e56 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -509,7 +509,6 @@ func (sp *shardProcessor) checkAndRequestIfMetaHeadersMissing(round uint64) { func (sp *shardProcessor) indexBlockIfNeeded( body data.BodyHandler, header data.HeaderHandler, - lastBlockBody data.BodyHandler, lastBlockHeader data.HeaderHandler, ) { if sp.core == nil || sp.core.Indexer() == nil { @@ -538,13 +537,13 @@ func (sp *shardProcessor) indexBlockIfNeeded( go sp.core.Indexer().SaveBlock(body, header, txPool, signersIndexes) go sp.core.Indexer().SaveRoundInfo(int64(header.GetRound()), shardId, signersIndexes, true) - lastBlockRound := lastBlockHeader.GetRound() - 
currentBlockRound := header.GetRound() - - if lastBlockRound > currentBlockRound-1 { + if lastBlockHeader == nil { return } + lastBlockRound := lastBlockHeader.GetRound() + currentBlockRound := header.GetRound() + for i := lastBlockRound + 1; i < currentBlockRound; i++ { publicKeys, err := sp.nodesCoordinator.GetValidatorsPublicKeys(lastBlockHeader.GetRandSeed(), i, shardId) if err != nil { @@ -823,7 +822,6 @@ func (sp *shardProcessor) CommitBlock( hdrsToAttestPreviousFinal := uint32(header.Nonce-sp.forkDetector.GetHighestFinalBlockNonce()) + 1 sp.removeNotarizedHdrsBehindPreviousFinal(hdrsToAttestPreviousFinal) - lastBlockBody := chainHandler.GetCurrentBlockBody() lastBlockHeader := chainHandler.GetCurrentBlockHeader() err = chainHandler.SetCurrentBlockBody(body) @@ -837,7 +835,7 @@ func (sp *shardProcessor) CommitBlock( } chainHandler.SetCurrentBlockHeaderHash(headerHash) - sp.indexBlockIfNeeded(bodyHandler, headerHandler, lastBlockBody, lastBlockHeader) + sp.indexBlockIfNeeded(bodyHandler, headerHandler, lastBlockHeader) go sp.cleanTxsPools() diff --git a/process/mock/indexerMock.go b/process/mock/indexerMock.go index fe1528bec1f..5bb645a3d81 100644 --- a/process/mock/indexerMock.go +++ b/process/mock/indexerMock.go @@ -21,7 +21,7 @@ func (im *IndexerMock) UpdateTPS(tpsBenchmark statistics.TPSBenchmark) { } func (im *IndexerMock) SaveRoundInfo(round int64, shardId uint32, signersIndexes []uint64, blockWasProposed bool) { - panic("implement me") + return } func (im *IndexerMock) SaveValidatorsPubKeys(validatorsPubKeys map[uint32][][]byte) { @@ -35,3 +35,7 @@ func (im *IndexerMock) IsInterfaceNil() bool { } return false } + +func (im *IndexerMock) IsNilIndexer() bool { + return true +} From c3dc3a39cf26650d546418cc4870749b53b316ef Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Tue, 1 Oct 2019 11:45:08 +0300 Subject: [PATCH 164/234] process: fix race in test --- process/rewardTransaction/interceptor_test.go | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/process/rewardTransaction/interceptor_test.go b/process/rewardTransaction/interceptor_test.go index e2921faaaaa..5374d7cc68c 100644 --- a/process/rewardTransaction/interceptor_test.go +++ b/process/rewardTransaction/interceptor_test.go @@ -3,6 +3,7 @@ package rewardTransaction_test import ( "encoding/json" "math/big" + "sync/atomic" "testing" "time" @@ -157,12 +158,12 @@ func TestRewardTxInterceptor_ProcessReceivedMessageNilDataShouldErr(t *testing.T func TestRewardTxInterceptor_ProcessReceivedMessageIntraShardShouldWork(t *testing.T) { t.Parallel() - wasCalled := false + wasCalled := int32(0) rti, _ := rewardTransaction.NewRewardTxInterceptor( &mock.MarshalizerMock{}, &mock.ShardedDataStub{ AddDataCalled: func(key []byte, data interface{}, cacheId string) { - wasCalled = true + atomic.StoreInt32(&wasCalled, 1) }, }, &mock.StorerStub{}, @@ -198,14 +199,15 @@ func TestRewardTxInterceptor_ProcessReceivedMessageIntraShardShouldWork(t *testi err := rti.ProcessReceivedMessage(message) time.Sleep(20 * time.Millisecond) + assert.Nil(t, err) - assert.True(t, wasCalled) + assert.Equal(t, int32(1), atomic.LoadInt32(&wasCalled)) } func TestRewardTxInterceptor_ProcessReceivedMessageCrossShardShouldNotAdd(t *testing.T) { t.Parallel() - wasCalled := false + wasCalled := int32(0) shardCoord := mock.NewMultiShardsCoordinatorMock(3) shardCoord.ComputeIdCalled = func(address state.AddressContainer) uint32 { return uint32(1) @@ -214,7 +216,7 @@ func TestRewardTxInterceptor_ProcessReceivedMessageCrossShardShouldNotAdd(t *tes 
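// Annotation added for clarity, not present in the original diff: the test flag
// changes from bool to int32 so that the callback invoked asynchronously during
// ProcessReceivedMessage and the asserting test goroutine synchronise through
// sync/atomic instead of racing on a plain variable. In isolation the pattern
// looks like this (hypothetical standalone sketch):
//
//	var wasCalled int32
//	go func() { atomic.StoreInt32(&wasCalled, 1) }()
//	time.Sleep(20 * time.Millisecond)
//	assert.Equal(t, int32(1), atomic.LoadInt32(&wasCalled))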
&mock.MarshalizerMock{}, &mock.ShardedDataStub{ AddDataCalled: func(key []byte, data interface{}, cacheId string) { - wasCalled = true + atomic.StoreInt32(&wasCalled, 1) }, }, &mock.StorerStub{}, @@ -252,7 +254,7 @@ func TestRewardTxInterceptor_ProcessReceivedMessageCrossShardShouldNotAdd(t *tes time.Sleep(20 * time.Millisecond) assert.Nil(t, err) // check that AddData was not called, as tx is cross shard - assert.False(t, wasCalled) + assert.Equal(t, int32(0), atomic.LoadInt32(&wasCalled)) } func TestRewardTxInterceptor_SetBroadcastCallback(t *testing.T) { From 77d09db35ab29d888ed94f851bb80f2a9b8ef677 Mon Sep 17 00:00:00 2001 From: iulianpascalau Date: Tue, 1 Oct 2019 12:39:16 +0300 Subject: [PATCH 165/234] minor code changes in integration test (added/removed empty lines) changed the sync wrappers to call ForkChoice rather than rollback --- integrationTests/interface.go | 4 ++-- integrationTests/sync/common.go | 20 +++++++------------- integrationTests/sync/edgeCases_test.go | 2 +- process/sync/testMetaBootstrap.go | 16 ++++++++++------ process/sync/testShardBootstrap.go | 16 ++++++++++------ 5 files changed, 30 insertions(+), 28 deletions(-) diff --git a/integrationTests/interface.go b/integrationTests/interface.go index 455fccfa8be..54768aa04c3 100644 --- a/integrationTests/interface.go +++ b/integrationTests/interface.go @@ -2,10 +2,10 @@ package integrationTests import "github.com/ElrondNetwork/elrond-go/process" -// TestBootstrapper extends the Bootstrapper interface with some funcs intended to be used only in tests +// TestBootstrapper extends the Bootstrapper interface with some functions intended to be used only in tests // as it simplifies the reproduction of edge cases type TestBootstrapper interface { process.Bootstrapper - ManualRollback() error + ForkChoice(revertUsingForkNonce bool) error SetProbableHighestNonce(nonce uint64) } diff --git a/integrationTests/sync/common.go b/integrationTests/sync/common.go index 7fe0c676791..076639237fc 100644 --- a/integrationTests/sync/common.go +++ b/integrationTests/sync/common.go @@ -18,7 +18,8 @@ var stepSync = time.Second * 2 func setupSyncNodesOneShardAndMeta( numNodesPerShard int, - numNodesMeta int) ([]*integrationTests.TestProcessorNode, p2p.Messenger, []int) { + numNodesMeta int, +) ([]*integrationTests.TestProcessorNode, p2p.Messenger, []int) { maxShards := uint32(1) shardId := uint32(0) @@ -48,7 +49,7 @@ func setupSyncNodesOneShardAndMeta( ) nodes = append(nodes, metaNode) } - idxProposerMeta := numNodesPerShard + idxProposerMeta := len(nodes) - 1 idxProposers := []int{idxProposerShard0, idxProposerMeta} @@ -110,23 +111,18 @@ func proposeBlocks( } } -func manualRollback(nodes []*integrationTests.TestProcessorNode, shardId uint32, targetNonce uint64) { +func forkChoiceOneBlock(nodes []*integrationTests.TestProcessorNode, shardId uint32) { for idx, n := range nodes { if n.ShardCoordinator.SelfId() != shardId { continue } - - if n.BlockChain.GetCurrentBlockHeader().GetNonce() != targetNonce { - continue - } - - oldNonce := n.BlockChain.GetCurrentBlockHeader().GetNonce() - err := n.Bootstrapper.ManualRollback() + err := n.Bootstrapper.ForkChoice(false) if err != nil { fmt.Println(err) } + newNonce := n.BlockChain.GetCurrentBlockHeader().GetNonce() - fmt.Printf("Node's id %d had nonce %d, now is %d\n", idx, oldNonce, newNonce) + fmt.Printf("Node's id %d is at block height %d\n", idx, newNonce) } } @@ -144,7 +140,6 @@ func emptyNodeDataPool(node *integrationTests.TestProcessorNode) { if node.ShardDataPool != nil { 
emptyShardDataPool(node.ShardDataPool) } - if node.MetaDataPool != nil { emptyMetaDataPool(node.MetaDataPool) } @@ -172,7 +167,6 @@ func resetHighestProbableNonce(nodes []*integrationTests.TestProcessorNode, shar if n.ShardCoordinator.SelfId() != shardId { continue } - if n.BlockChain.GetCurrentBlockHeader().GetNonce() != targetNonce { continue } diff --git a/integrationTests/sync/edgeCases_test.go b/integrationTests/sync/edgeCases_test.go index e703862b948..42537b2919e 100644 --- a/integrationTests/sync/edgeCases_test.go +++ b/integrationTests/sync/edgeCases_test.go @@ -49,7 +49,7 @@ func TestSyncMetaNodeIsSyncingReceivedHigherRoundBlockFromShard(t *testing.T) { ) shardIdToRollbackLastBlock := uint32(0) - manualRollback(nodes, shardIdToRollbackLastBlock, 3) + forkChoiceOneBlock(nodes, shardIdToRollbackLastBlock) resetHighestProbableNonce(nodes, shardIdToRollbackLastBlock, 2) emptyDataPools(nodes, shardIdToRollbackLastBlock) diff --git a/process/sync/testMetaBootstrap.go b/process/sync/testMetaBootstrap.go index d035eb9c779..3cd493ae6e4 100644 --- a/process/sync/testMetaBootstrap.go +++ b/process/sync/testMetaBootstrap.go @@ -1,7 +1,5 @@ package sync -import "github.com/ElrondNetwork/elrond-go/data/block" - // TestMetaBootstrap extends MetaBootstrap and is used in integration tests as it exposes some funcs // that are not supposed to be used in production code // Exported funcs simplify the reproduction of edge cases @@ -9,12 +7,18 @@ type TestMetaBootstrap struct { *MetaBootstrap } -// ManualRollback calls the rollback on the current block from the blockchain structure -func (tmb *TestMetaBootstrap) ManualRollback() error { - return tmb.rollback(tmb.blkc.GetCurrentBlockHeader().(*block.MetaBlock)) +// ForkChoice decides to call (or not) the rollback on the current block from the blockchain structure +func (tmb *TestMetaBootstrap) ForkChoice(revertUsingForkNonce bool) error { + return tmb.forkChoice(revertUsingForkNonce) } // SetProbableHighestNonce sets the probable highest nonce in the contained fork detector func (tmb *TestMetaBootstrap) SetProbableHighestNonce(nonce uint64) { - tmb.forkDetector.(*metaForkDetector).setProbableHighestNonce(nonce) + forkDetector, ok := tmb.forkDetector.(*metaForkDetector) + if !ok { + log.Error("inner forkdetector impl is not of type metaForkDetector") + return + } + + forkDetector.setProbableHighestNonce(nonce) } diff --git a/process/sync/testShardBootstrap.go b/process/sync/testShardBootstrap.go index 43de789d2b4..f19556b8df6 100644 --- a/process/sync/testShardBootstrap.go +++ b/process/sync/testShardBootstrap.go @@ -1,7 +1,5 @@ package sync -import "github.com/ElrondNetwork/elrond-go/data/block" - // TestShardBootstrap extends ShardBootstrap and is used in integration tests as it exposes some funcs // that are not supposed to be used in production code // Exported funcs simplify the reproduction of edge cases @@ -9,12 +7,18 @@ type TestShardBootstrap struct { *ShardBootstrap } -// ManualRollback calls the rollback on the current block from the blockchain structure -func (tsb *TestShardBootstrap) ManualRollback() error { - return tsb.rollback(tsb.blkc.GetCurrentBlockHeader().(*block.Header)) +// ForkChoice decides to call (or not) the rollback on the current block from the blockchain structure +func (tsb *TestShardBootstrap) ForkChoice(revertUsingForkNonce bool) error { + return tsb.forkChoice(revertUsingForkNonce) } // SetProbableHighestNonce sets the probable highest nonce in the contained fork detector func (tsb *TestShardBootstrap) 
SetProbableHighestNonce(nonce uint64) { - tsb.forkDetector.(*shardForkDetector).setProbableHighestNonce(nonce) + forkDetector, ok := tsb.forkDetector.(*shardForkDetector) + if !ok { + log.Error("inner forkdetector impl is not of type shardForkDetector") + return + } + + forkDetector.setProbableHighestNonce(nonce) } From ae531d3e33924a74c79f7f44fdc61693f93cfff9 Mon Sep 17 00:00:00 2001 From: miiu96 Date: Tue, 1 Oct 2019 13:30:28 +0300 Subject: [PATCH 166/234] EN-4207 : fix tests that failing with race condition --- p2p/libp2p/issues_test.go | 18 +++++++++++------- process/common_test.go | 17 +++++++++-------- 2 files changed, 20 insertions(+), 15 deletions(-) diff --git a/p2p/libp2p/issues_test.go b/p2p/libp2p/issues_test.go index 4ab076a1638..861c7e79955 100644 --- a/p2p/libp2p/issues_test.go +++ b/p2p/libp2p/issues_test.go @@ -48,15 +48,19 @@ func createMessenger(port int) p2p.Messenger { // Next message that the sender tries to send will cause a new error to be logged and no data to be sent // The fix consists in the full stream closing when an error occurs during writing. func TestIssueEN898_StreamResetError(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + mes1 := createMessenger(23100) mes2 := createMessenger(23101) defer func() { - mes1.Close() - mes2.Close() + _ = mes1.Close() + _ = mes2.Close() }() - mes1.ConnectToPeer(getConnectableAddress(mes2)) + _ = mes1.ConnectToPeer(getConnectableAddress(mes2)) topic := "test topic" @@ -74,8 +78,8 @@ func TestIssueEN898_StreamResetError(t *testing.T) { smallPacketReceived := &atomic.Value{} smallPacketReceived.Store(false) - mes2.CreateTopic(topic, false) - mes2.RegisterMessageProcessor(topic, &mock.MessageProcessorStub{ + _ = mes2.CreateTopic(topic, false) + _ = mes2.RegisterMessageProcessor(topic, &mock.MessageProcessorStub{ ProcessMessageCalled: func(message p2p.MessageP2P) error { if bytes.Equal(message.Data(), largePacket) { largePacketReceived.Store(true) @@ -90,12 +94,12 @@ func TestIssueEN898_StreamResetError(t *testing.T) { }) fmt.Println("sending the large packet...") - mes1.SendToConnectedPeer(topic, largePacket, mes2.ID()) + _ = mes1.SendToConnectedPeer(topic, largePacket, mes2.ID()) time.Sleep(time.Second) fmt.Println("sending the small packet...") - mes1.SendToConnectedPeer(topic, smallPacket, mes2.ID()) + _ = mes1.SendToConnectedPeer(topic, smallPacket, mes2.ID()) time.Sleep(time.Second) diff --git a/process/common_test.go b/process/common_test.go index 5e25f6c8230..6362884b379 100644 --- a/process/common_test.go +++ b/process/common_test.go @@ -3,6 +3,7 @@ package process_test import ( "bytes" "sync" + "sync/atomic" "testing" "time" @@ -39,16 +40,16 @@ func TestEmptyChannelShouldWorkOnNotBufferedChannel(t *testing.T) { ch := make(chan bool) assert.Equal(t, 0, len(ch)) - readsCnt := process.EmptyChannel(ch) + readsCnt := int32(process.EmptyChannel(ch)) assert.Equal(t, 0, len(ch)) - assert.Equal(t, 0, readsCnt) + assert.Equal(t, int32(0), readsCnt) wg := sync.WaitGroup{} wgChanWasWritten := sync.WaitGroup{} - numConcurrentWrites := 100 - wg.Add(numConcurrentWrites) - wgChanWasWritten.Add(numConcurrentWrites) - for i := 0; i < numConcurrentWrites; i++ { + numConcurrentWrites := int32(100) + wg.Add(int(numConcurrentWrites)) + wgChanWasWritten.Add(int(numConcurrentWrites)) + for i := int32(0); i < numConcurrentWrites; i++ { go func() { wg.Done() time.Sleep(time.Millisecond) @@ -62,7 +63,7 @@ func TestEmptyChannelShouldWorkOnNotBufferedChannel(t *testing.T) { go func() { for readsCnt < 
numConcurrentWrites { - readsCnt += process.EmptyChannel(ch) + atomic.AddInt32(&readsCnt, int32(process.EmptyChannel(ch))) } }() @@ -70,7 +71,7 @@ func TestEmptyChannelShouldWorkOnNotBufferedChannel(t *testing.T) { wgChanWasWritten.Wait() assert.Equal(t, 0, len(ch)) - assert.Equal(t, numConcurrentWrites, readsCnt) + assert.Equal(t, numConcurrentWrites, atomic.LoadInt32(&readsCnt)) } func TestGetShardHeaderShouldErrNilCacher(t *testing.T) { From 9e35f2d3e94ab44a08329aaaa6f61d95229feb63 Mon Sep 17 00:00:00 2001 From: Radu Chis Date: Tue, 1 Oct 2019 14:34:18 +0300 Subject: [PATCH 167/234] Feat/economics (#375) * consensus: consensusGroupSelection subcomponent becomes nodesCoordinator Add entire nodes to shard mapping for management instead of only the nodes for shard Add functionality for getting the signers for a speciffic randomness and bitmap * consensus: remove bitmap from consensus group public key selection * consensus, sharding: move validator structure and nodes coordinator from consensus to sharding * sharding: move validator back to consensus * started fee implementation * Update capnp schema * started fee implementation * fixing unit tests and integration tests. * feeTxHandler implementation * fixing tests * added tests. * finishing details * process, node, integrationTests: adapt interceptors to validate header signatures on receive Adapt unit tests and integration tests * pushing unit tests. * tests: printfs should end in newline for tests * process, integrationTests: remove obsolete chronology validator * modified capnproto schema for feeTx * fixing capnproto. * consensus, process: add signature verification on metachain headers interceptors * process: fix meta block header interceptor - copy the header not intercepted object fix race on isSynchronized * merge with master and fix tests. * cmd, node, process: Enable interceptor signature verification * started implementation of metachain state * fixes after merge, cleanup. * peer account update * fix after review. * peer account and journalize entries * added fees to burn * peer account factory * fix after review * sharding, consensus, process: Fix review findings Move validator from consensus to sharding Signature verification for metachain blocks should be done on block hash Adapt unit and integration tests * sharding: rename LoadNodesPerShards to SetNodesPerShards * integrationTests: fix unhandled error warnings * fix after review * added comment * fix after review * node, main, process, sharding, integrationTests: add nodesCoordinator on the node Move checks for nodes list on the nodesCoordinator creation Adapt unit tests and integration tests. 
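The bullet above adds the nodesCoordinator to the node and moves the nodes-list checks into its constructor. As a rough, illustrative sketch of that wiring (not part of the patch), the per-shard validator map can be assembled and handed to the index-hashed coordinator as below; the nodeInfo interface and the group-size parameters are assumptions standing in for the entries returned by nodesConfig.InitialNodesInfo() and the sizes read from nodesSetup.json, mirroring the createNodesCoordinator helper added in cmd/node/main.go later in this patch:

    // Illustrative sketch only; assumes the constructor signatures introduced by this patch.
    package example

    import (
        "math/big"

        "github.com/ElrondNetwork/elrond-go/hashing"
        "github.com/ElrondNetwork/elrond-go/sharding"
    )

    // nodeInfo is an assumed view over an initial node entry (public key + reward address),
    // matching what nodesConfig.InitialNodesInfo() exposes in this patch.
    type nodeInfo interface {
        PubKey() []byte
        Address() []byte
    }

    // buildNodesCoordinator maps every shard to its validators and creates the coordinator.
    func buildNodesCoordinator(
        shardGroupSize int,
        metaGroupSize int,
        hasher hashing.Hasher,
        selfShardId uint32,
        nbShards uint32,
        initNodes map[uint32][]nodeInfo,
    ) (sharding.NodesCoordinator, error) {
        initValidators := make(map[uint32][]sharding.Validator)
        for shardId, nodes := range initNodes {
            validators := make([]sharding.Validator, 0, len(nodes))
            for _, n := range nodes {
                // stake and rating start at zero, as in createNodesCoordinator
                v, err := sharding.NewValidator(big.NewInt(0), 0, n.PubKey(), n.Address())
                if err != nil {
                    return nil, err
                }
                validators = append(validators, v)
            }
            initValidators[shardId] = validators
        }

        nc, err := sharding.NewIndexHashedNodesCoordinator(
            shardGroupSize,
            metaGroupSize,
            hasher,
            selfShardId,
            nbShards,
            initValidators,
        )
        if err != nil {
            return nil, err
        }

        return nc, nil
    }

The real helper additionally resolves the shard id for observer nodes from the destination-shard setting before building this map.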
* data, process, integrationTests: fix merge issues Adapt to integration tests deltas
* process: fix goimports
* node, sharding, integrationTests: format go imports
* integrationTests: adaptations after merge
* integrationTests: adapt refactored integrationTests to nodesCoordinator
* integrationTests: create nodes coordinator mock
* integrationTests: sort imports
* data: rewards transaction format
* Feat/en 2674 add account pk in nodesSetup.json (#354)
* Implementation done
* Refactored after code review
* cmd, consensus, process, sharding, node, integrationTests: fix block signature verifications for other chains several fixes for fee transactions
* process: update fee transaction verified errors in unit test
* consensus, sharding: fix review comments - rename variables, sort imports
* Merged Economics and Development
* integrationTests: add integration tests for signature verification on interceptors
* integrationTests: Fix integration test
* process, sharding, consensus, node, integrationTests, config, dataRetriever: fees for proposer and rewards for consensus group members Add rewardTransactions pool Adapt integration tests & config
* cmd, process, dataRetriever, integrationTests: reward transaction preprocessor, processor, requester & resolver Adapted unit tests and integration tests
* cmd, process, config: reward community and burn addresses from config
* dataRetriever, process, integrationTests: add rewards transaction interceptors, fixes on resolvers
* process: interceptors for rewards and some fixes
* process, integrationTests: fixes rewards preprocessor/processor/intermediate handler Integration tests and unit tests adaptations
* process, integrationTests: remove unused interface methods, reward tx from fees needs to be processed last
* process: do not request reward transactions from fees that can be generated
* consensus, process, data, sharding, integrationTests: fix after merge
* process: cache generated reward txs for reuse
* process: fixes after merge
* cmd: missed config parameter for rewards storage
* process, consensus, integrationTests: fix cross shard rewards
* process: fix after merge
* process, core: elastic indexing rewards
* core: indexer make sender address for reward transactions user friendly
* core: no need for 32 byte long address for reward transaction sender
* process, data, consensus, config, cmd: cleanup and fix review comments reward transactions
* process: fixes after merge & review findings
* process: add nil checks rewardsHandler
* process: fix rewardsHandler unit tests
* process: fix after merge
* format code
* format spaces to tabs
* merge development to economics
* consensus, process, integrationTests: goimports and rename
* process, integrationTests: refactor transaction coordinator and rewards preprocessor
* process: refactor rewards management in transaction coordinator
* process: fix - revert state on root state mismatch
* process: missing comments on exported methods
* process: add unit test for indexed reward transactions
* integrationTests: protocol rewards and transaction fee rewards integration test
* integrationTests: remove warnings
* integrationTests: refactor rewards integration test
* integrationTests: add test for only protocol rewards
* data, process, integrationTests: add rewards for metachain
* process, integrationTests: fix metachain rewards
* integrationTests: remove warnings
* EN-4110: unit tests
* process, dataRetriever, integrationTests: Fixes after merge
* process, integrationTests: fixes after merge
* EN-4110: fixes after
review * EN-4120: unit tests for rewards * EN-4110: fix test after merge * EN-4120: fixes after review * process: fix do not flag used miniblocks before commit * process: iterate over all miniblocks from metablock * EN-3754: added tests for metachain rewards * EN-4120: fixes after review * process, consensus, integrationTests: fix review findings * process: remove duplicated code * data: add unit tests and mocks * data: goimports * data: fix failing unit test * process, integrationTests: fixes after merge * storage: put also overwrites * EN-1754: fixes after review * process: fix race in test --- cmd/node/config/config.toml | 19 + cmd/node/config/nodesSetup.json | 65 +- cmd/node/factory/structs.go | 260 +++++-- cmd/node/main.go | 120 ++- config/config.go | 11 +- config/tomlConfig_test.go | 11 + .../broadcast/shardChainMessenger_test.go | 2 +- consensus/interface.go | 24 - consensus/mock/blockProcessorMock.go | 5 +- consensus/mock/consensusDataContainerMock.go | 6 +- consensus/mock/consensusStateMock.go | 34 +- consensus/mock/mockTestInitializer.go | 2 +- consensus/mock/nodesCoordinatorMock.go | 107 +++ consensus/mock/validatorGroupSelectorMock.go | 55 -- consensus/mock/validatorMock.go | 15 +- consensus/spos/bls/export_test.go | 4 +- consensus/spos/bn/export_test.go | 4 +- .../spos/commonSubround/subroundStartRound.go | 17 +- .../commonSubround/subroundStartRound_test.go | 32 +- consensus/spos/consensusCore.go | 41 +- consensus/spos/consensusCoreValidator.go | 2 +- consensus/spos/consensusCoreValidator_test.go | 34 +- consensus/spos/consensusCore_test.go | 58 +- consensus/spos/consensusState.go | 26 +- consensus/spos/consensusState_test.go | 45 +- consensus/spos/interface.go | 4 +- consensus/validators/errors.go | 14 - consensus/validators/groupSelectors/errors.go | 29 - .../validators/groupSelectors/export_test.go | 9 - .../groupSelectors/indexHashedGroup.go | 186 ----- .../groupSelectors/indexHashedGroup_test.go | 363 --------- consensus/validators/validator_test.go | 61 -- core/indexer/data.go | 1 + core/indexer/elasticsearch.go | 39 + data/address/specialAddresses.go | 168 +++++ data/address/specialAddresses_test.go | 307 ++++++++ data/block/block.go | 6 +- data/{CapnpHelper.go => capnpHelper.go} | 0 data/consensusRewardData.go | 8 + data/errors.go | 15 + data/mock/addressConverterMock.go | 66 ++ data/mock/multipleShardsCoordinatorMock.go | 70 ++ data/mock/nodesCoordinatorMock.go | 191 +++++ data/mock/txTypeHandlerMock.go | 18 + data/mock/unsignedTxHandlerMock.go | 53 ++ data/rewardTx/capnp/schema.capnp | 19 + data/rewardTx/capnp/schema.capnp.go | 271 +++++++ data/rewardTx/rewardTx.go | 119 +++ data/rewardTx/rewardTx_test.go | 68 ++ data/state/errors.go | 12 + data/state/factory/accountCreatorFactory.go | 32 +- .../factory/accountCreatorFactory_test.go | 44 +- data/state/factory/accountCreator_test.go | 18 +- data/state/factory/metaAccountCreator_test.go | 19 +- data/state/factory/peerAccountCreator.go | 30 + data/state/factory/peerAccountCreator_test.go | 55 ++ data/state/peerAccount.go | 369 ++++++++++ data/state/peerAccount_test.go | 592 +++++++++++++++ data/state/peerAccountsDB.go | 6 + data/state/peerJournalEntries.go | 386 ++++++++++ data/state/peerJournalEntries_test.go | 360 +++++++++ dataRetriever/dataPool/shardDataPool.go | 11 + dataRetriever/dataPool/shardDataPool_test.go | 27 + dataRetriever/errors.go | 6 + .../shard/resolversContainerFactory.go | 20 +- .../shard/resolversContainerFactory_test.go | 6 +- dataRetriever/interface.go | 5 +- dataRetriever/mock/poolsHolderStub.go 
| 5 + .../requestHandlers/requestHandler.go | 39 +- .../requestHandlers/requestHandler_test.go | 94 ++- go.sum | 2 - integrationTests/consensus/consensus_test.go | 42 +- integrationTests/consensus/testInitializer.go | 123 +++- .../frontend/wallet/txInterception_test.go | 7 +- integrationTests/mock/blockProcessorMock.go | 7 + .../mock/chronologyValidatorMock.go | 16 - integrationTests/mock/hasherSpongeMock.go | 33 + integrationTests/mock/keyMock.go | 88 +++ integrationTests/mock/multiSigMock.go | 14 +- integrationTests/mock/nodesCoordinatorMock.go | 91 +++ .../mock/specialAddressHandlerMock.go | 151 ++++ integrationTests/mock/txTypeHandlerMock.go | 25 + .../mock/unsignedTxHandlerMock.go | 61 ++ .../block/executingMiniblocksSc_test.go | 44 +- .../block/executingMiniblocks_test.go | 35 +- .../block/executingRewardMiniblocks_test.go | 365 +++++++++ .../interceptedHeadersSigVerification_test.go | 158 ++++ .../smartContract/executingSCCalls_test.go | 65 +- .../smartContract/testInitilalizer.go | 294 ++++++-- integrationTests/node/getAccount_test.go | 4 +- .../block/executingMiniblocksSc_test.go | 7 +- .../block/interceptedRequestHdr_test.go | 2 +- .../transaction/interceptedResolvedTx_test.go | 81 ++ integrationTests/state/genesisState_test.go | 2 +- .../state/stateExecTransaction_test.go | 50 +- integrationTests/state/stateTrie_test.go | 24 +- integrationTests/sync/basicSync_test.go | 4 +- integrationTests/testInitializer.go | 336 +++++++-- integrationTests/testProcessorNode.go | 126 +++- .../testProcessorNodeWithMultisigner.go | 292 ++++++++ integrationTests/testSyncNode.go | 43 +- integrationTests/vm/testInitializer.go | 37 +- node/defineOptions.go | 11 + node/defineOptions_test.go | 26 + node/errors.go | 3 + node/heartbeat/hearbeatMessageInfo_test.go | 1 + node/mock/blockProcessorStub.go | 4 + node/mock/nodesCoordinatorMock.go | 111 +++ node/mock/poolsHolderStub.go | 5 + node/mock/validatorMock.go | 32 + node/node.go | 42 +- node/nodeTesting.go | 12 +- ntp/syncTime.go | 2 + process/block/argProcessor.go | 22 +- process/block/baseProcess.go | 26 +- process/block/baseProcess_test.go | 141 ++-- process/block/export_test.go | 32 +- process/block/interceptedBlockHeader.go | 81 +- process/block/interceptedBlockHeader_test.go | 29 +- process/block/interceptedMetaBlockHeader.go | 85 ++- .../block/interceptedMetaBlockHeader_test.go | 27 +- .../block/interceptors/headerInterceptor.go | 42 +- .../interceptors/headerInterceptor_test.go | 157 ++-- .../metachainHeaderInterceptor.go | 17 +- .../metachainHeaderInterceptor_test.go | 90 ++- process/block/metablock.go | 11 + process/block/metablock_test.go | 84 +++ .../block/preprocess/rewardTxPreProcessor.go | 545 ++++++++++++++ .../preprocess/rewardTxPreProcessor_test.go | 695 ++++++++++++++++++ process/block/preprocess/rewardsHandler.go | 469 ++++++++++++ .../block/preprocess/rewardsHandler_test.go | 576 +++++++++++++++ .../block/preprocess/smartContractResults.go | 15 +- .../preprocess/smartContractResults_test.go | 10 +- process/block/preprocess/transactions.go | 50 +- process/block/preprocess/transactions_test.go | 37 +- process/block/shardblock.go | 159 +++- process/block/shardblock_test.go | 179 +++-- process/constants.go | 2 + process/coordinator/process.go | 122 +-- process/coordinator/process_test.go | 178 +++-- process/coordinator/transactionType.go | 6 + process/coordinator/transactionType_test.go | 268 +++++++ process/errors.go | 54 +- process/factory/factory.go | 2 + .../metachain/interceptorsContainerFactory.go | 46 +- 
.../interceptorsContainerFactory_test.go | 53 +- .../shard/interceptorsContainerFactory.go | 119 ++- .../interceptorsContainerFactory_test.go | 85 ++- .../intermediateProcessorsContainerFactory.go | 56 +- ...rmediateProcessorsContainerFactory_test.go | 40 +- .../shard/preProcessorsContainerFactory.go | 82 ++- .../preProcessorsContainerFactory_test.go | 83 ++- process/interface.go | 59 +- process/mock/blockProcessorMock.go | 4 + process/mock/chronologyValidatorStub.go | 17 - process/mock/multiSigMock.go | 69 +- process/mock/nodesCoordinatorMock.go | 185 +++++ ...{poolsHolderFake.go => poolsHolderMock.go} | 58 +- process/mock/poolsHolderStub.go | 5 + process/mock/preprocessorMock.go | 19 +- process/mock/requestHandlerMock.go | 8 + process/mock/rewardTxProcessorMock.go | 24 + process/mock/specialAddressHandlerMock.go | 148 ++++ process/mock/txTypeHandlerMock.go | 26 + process/mock/unsignedTxHandlerMock.go | 61 ++ process/rewardTransaction/export_test.go | 21 + .../interceptedRewardTransaction.go | 149 ++++ .../interceptedRewardTransaction_test.go | 149 ++++ process/rewardTransaction/interceptor.go | 151 ++++ process/rewardTransaction/interceptor_test.go | 280 +++++++ process/rewardTransaction/process.go | 115 +++ process/rewardTransaction/process_test.go | 287 ++++++++ process/smartContract/export_test.go | 14 +- process/smartContract/process.go | 67 +- process/smartContract/process_test.go | 370 +++++++--- process/sync/baseSync.go | 4 + process/transaction/export_test.go | 12 + process/transaction/process.go | 64 +- process/transaction/process_test.go | 199 +++-- sharding/errors.go | 45 ++ sharding/export_test.go | 8 +- sharding/indexHashedNodesCoordinator.go | 296 ++++++++ sharding/indexHashedNodesCoordinator_test.go | 573 +++++++++++++++ sharding/interface.go | 45 ++ sharding/mock/hasherMock.go | 37 + sharding/mock/hasherStub.go | 30 + sharding/mock/invalidNodesSetupMock.json | 6 +- sharding/mock/nodesCoordinatorMock.go | 71 ++ sharding/mock/nodesSetupMock.json | 15 +- sharding/mock/validatorMock.go | 32 + sharding/nodesSetup.go | 89 ++- sharding/nodesSetup_test.go | 238 +++--- sharding/sharding.go | 18 - .../validators => sharding}/validator.go | 27 +- sharding/validator_test.go | 78 ++ 195 files changed, 14537 insertions(+), 2509 deletions(-) create mode 100644 consensus/mock/nodesCoordinatorMock.go delete mode 100644 consensus/mock/validatorGroupSelectorMock.go delete mode 100644 consensus/validators/errors.go delete mode 100644 consensus/validators/groupSelectors/errors.go delete mode 100644 consensus/validators/groupSelectors/export_test.go delete mode 100644 consensus/validators/groupSelectors/indexHashedGroup.go delete mode 100644 consensus/validators/groupSelectors/indexHashedGroup_test.go delete mode 100644 consensus/validators/validator_test.go create mode 100644 data/address/specialAddresses.go create mode 100644 data/address/specialAddresses_test.go rename data/{CapnpHelper.go => capnpHelper.go} (100%) create mode 100644 data/consensusRewardData.go create mode 100644 data/mock/addressConverterMock.go create mode 100644 data/mock/multipleShardsCoordinatorMock.go create mode 100644 data/mock/nodesCoordinatorMock.go create mode 100644 data/mock/txTypeHandlerMock.go create mode 100644 data/mock/unsignedTxHandlerMock.go create mode 100644 data/rewardTx/capnp/schema.capnp create mode 100644 data/rewardTx/capnp/schema.capnp.go create mode 100644 data/rewardTx/rewardTx.go create mode 100644 data/rewardTx/rewardTx_test.go create mode 100644 data/state/factory/peerAccountCreator.go 
create mode 100644 data/state/factory/peerAccountCreator_test.go create mode 100644 data/state/peerAccount.go create mode 100644 data/state/peerAccount_test.go create mode 100644 data/state/peerAccountsDB.go create mode 100644 data/state/peerJournalEntries.go create mode 100644 data/state/peerJournalEntries_test.go delete mode 100644 integrationTests/mock/chronologyValidatorMock.go create mode 100644 integrationTests/mock/hasherSpongeMock.go create mode 100644 integrationTests/mock/keyMock.go create mode 100644 integrationTests/mock/nodesCoordinatorMock.go create mode 100644 integrationTests/mock/specialAddressHandlerMock.go create mode 100644 integrationTests/mock/txTypeHandlerMock.go create mode 100644 integrationTests/mock/unsignedTxHandlerMock.go create mode 100644 integrationTests/multiShard/block/executingRewardMiniblocks_test.go create mode 100644 integrationTests/multiShard/block/interceptedHeadersSigVerification_test.go create mode 100644 integrationTests/testProcessorNodeWithMultisigner.go create mode 100644 node/mock/nodesCoordinatorMock.go create mode 100644 node/mock/validatorMock.go create mode 100644 process/block/preprocess/rewardTxPreProcessor.go create mode 100644 process/block/preprocess/rewardTxPreProcessor_test.go create mode 100644 process/block/preprocess/rewardsHandler.go create mode 100644 process/block/preprocess/rewardsHandler_test.go create mode 100644 process/coordinator/transactionType_test.go delete mode 100644 process/mock/chronologyValidatorStub.go create mode 100644 process/mock/nodesCoordinatorMock.go rename process/mock/{poolsHolderFake.go => poolsHolderMock.go} (56%) create mode 100644 process/mock/rewardTxProcessorMock.go create mode 100644 process/mock/specialAddressHandlerMock.go create mode 100644 process/mock/txTypeHandlerMock.go create mode 100644 process/mock/unsignedTxHandlerMock.go create mode 100644 process/rewardTransaction/export_test.go create mode 100644 process/rewardTransaction/interceptedRewardTransaction.go create mode 100644 process/rewardTransaction/interceptedRewardTransaction_test.go create mode 100644 process/rewardTransaction/interceptor.go create mode 100644 process/rewardTransaction/interceptor_test.go create mode 100644 process/rewardTransaction/process.go create mode 100644 process/rewardTransaction/process_test.go create mode 100644 sharding/indexHashedNodesCoordinator.go create mode 100644 sharding/indexHashedNodesCoordinator_test.go create mode 100644 sharding/interface.go create mode 100644 sharding/mock/hasherMock.go create mode 100644 sharding/mock/hasherStub.go create mode 100644 sharding/mock/nodesCoordinatorMock.go create mode 100644 sharding/mock/validatorMock.go delete mode 100644 sharding/sharding.go rename {consensus/validators => sharding}/validator.go (64%) create mode 100644 sharding/validator_test.go diff --git a/cmd/node/config/config.toml b/cmd/node/config/config.toml index 2c3ae91382c..f06c2570611 100644 --- a/cmd/node/config/config.toml +++ b/cmd/node/config/config.toml @@ -105,6 +105,17 @@ MaxBatchSize = 45000 MaxOpenFiles = 10 +[RewardTxStorage] + [RewardTxStorage.Cache] + Size = 10000 + Type = "LRU" + [RewardTxStorage.DB] + FilePath = "RewardTransactions" + Type = "LvlDBSerial" + BatchDelaySeconds = 15 + MaxBatchSize = 500 + MaxOpenFiles = 10 + [ShardHdrNonceHashStorage] [ShardHdrNonceHashStorage.Cache] Size = 1000 @@ -175,6 +186,10 @@ Size = 100000 Type = "LRU" +[RewardTransactionDataPool] + Size = 5000 + Type = "LRU" + [ShardHeadersDataPool] Size = 1000 Type = "LRU" @@ -229,3 +244,7 @@ Port = 123 
Timeout = 0 # Setting 0 means 'use default value' Version = 0 # Setting 0 means 'use default value' + +[EconomicsConfig] + CommunityAddress = "1bedf9f1db526aa98eb61f251e6eb29df64c0a4d96261b6fe9d4df1bc2cf5420" + BurnAddress = "deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef" diff --git a/cmd/node/config/nodesSetup.json b/cmd/node/config/nodesSetup.json index 1f0ee05446a..a655f12fdfb 100644 --- a/cmd/node/config/nodesSetup.json +++ b/cmd/node/config/nodesSetup.json @@ -8,67 +8,88 @@ "metaChainMinNodes": 1, "initialNodes": [ { - "pubkey": "5e91c426c5c8f5f805f86de1e0653e2ec33853772e583b88e9f0f201089d03d8570759c3c3ab610ce573493c33ba0adf954c8939dba5d5ef7f2be4e87145d8153fc5b4fb91cecb8d9b1f62e080743fbf69c8c3096bf07980bb82cb450ba9b902673373d5b671ea73620cc5bc4d36f7a0f5ca3684d4c8aa5c1b425ab2a8673140" + "pubkey": "5e91c426c5c8f5f805f86de1e0653e2ec33853772e583b88e9f0f201089d03d8570759c3c3ab610ce573493c33ba0adf954c8939dba5d5ef7f2be4e87145d8153fc5b4fb91cecb8d9b1f62e080743fbf69c8c3096bf07980bb82cb450ba9b902673373d5b671ea73620cc5bc4d36f7a0f5ca3684d4c8aa5c1b425ab2a8673140", + "address": "d4105de8e44aee9d4be670401cec546e5df381028e805012386a05acf76518d9" }, { - "pubkey": "73972bf46dca59fba211c58f11b530f8e9d6392c499655ce760abc6458fd9c6b54b9676ee4b95aa32f6c254c9aad2f63a6195cd65d837a4320d7b8e915ba3a7123c8f4983b201035573c0752bb54e9021eb383b40d302447b62ea7a3790c89c47f5ab81d183f414e87611a31ff635ad22e969495356d5bc44eec7917aaad4c5e" + "pubkey": "73972bf46dca59fba211c58f11b530f8e9d6392c499655ce760abc6458fd9c6b54b9676ee4b95aa32f6c254c9aad2f63a6195cd65d837a4320d7b8e915ba3a7123c8f4983b201035573c0752bb54e9021eb383b40d302447b62ea7a3790c89c47f5ab81d183f414e87611a31ff635ad22e969495356d5bc44eec7917aaad4c5e", + "address": "d11e60011ffc1b7ebb1fd4c92c2821ecef8bed5c518d76a24640153a462cdc1e" }, { - "pubkey": "7391ccce066ab5674304b10220643bc64829afa626a165f1e7a6618e260fa68f8e79018ac5964f7a1b8dd419645049042e34ebe7f2772def71e6176ce9daf50a57c17ee2a7445b908fe47e8f978380fcc2654a19925bf73db2402b09dde515148081f8ca7c331fbedec689de1b7bfce6bf106e4433557c29752c12d0a009f47a" + "pubkey": "7391ccce066ab5674304b10220643bc64829afa626a165f1e7a6618e260fa68f8e79018ac5964f7a1b8dd419645049042e34ebe7f2772def71e6176ce9daf50a57c17ee2a7445b908fe47e8f978380fcc2654a19925bf73db2402b09dde515148081f8ca7c331fbedec689de1b7bfce6bf106e4433557c29752c12d0a009f47a", + "address": "0f36a982b79d3c1fda9b82a646a2b423cb3e7223cffbae73a4e3d2c1ea62ee5e" }, { - "pubkey": "3efb714c90dd9442c939429687311a7d24e57005d2c6c80782092175b31786994b12f30e4689231e146647dc85be3f80dd458df813d602f11785793f4a8cd40901b48a64b8ebfb204496e48cadc48ad3aa422e511d8c9e6359f60d7067e55bfb134a658fad6d5a5d8fe051d770d74d82e11edcd7cc48b696e41f7244305b8895" + "pubkey": "3efb714c90dd9442c939429687311a7d24e57005d2c6c80782092175b31786994b12f30e4689231e146647dc85be3f80dd458df813d602f11785793f4a8cd40901b48a64b8ebfb204496e48cadc48ad3aa422e511d8c9e6359f60d7067e55bfb134a658fad6d5a5d8fe051d770d74d82e11edcd7cc48b696e41f7244305b8895", + "address": "8c93db70abe14a6aa8c4ca7b722b67f4342b4251c0f3731b12b5f75885a9b9b6" }, { - "pubkey": "5498d09d5cc1ef68e07b4fbd059ef3309ddfdaf26470514f80fd02cb9789a5772db6515e014efc9f49c8350be25b28c2938155e01e2270071265fef242574da512ef326d66a3113c6b697891e1390c18678bc2af7398863e18d002dab69fdd77819adca791e9528ae272466cd9f09d048fbac16ddb492ca30da9dc69662b1a58" + "pubkey": 
"5498d09d5cc1ef68e07b4fbd059ef3309ddfdaf26470514f80fd02cb9789a5772db6515e014efc9f49c8350be25b28c2938155e01e2270071265fef242574da512ef326d66a3113c6b697891e1390c18678bc2af7398863e18d002dab69fdd77819adca791e9528ae272466cd9f09d048fbac16ddb492ca30da9dc69662b1a58", + "address": "afb051dc3a1dfb029866730243c2cbc51d8b8ef15951e4da3929f9c8391f307a" }, { - "pubkey": "671a8df542bf8e3e6ddaa9a8ace6bf34b55f86aab4887fde28a2eb0b3dea53cf3b290fe9d5689c8c3dd99b91ce2da0df0636208022816d23f766756ea81cb46b5907f93c5b3071fec8fc88553dfd732f560537c66fc8507f750890abcf23e9900326939a163f4ffdaf1ee6109b7e86babee510613478857211149e80f33bd338" + "pubkey": "671a8df542bf8e3e6ddaa9a8ace6bf34b55f86aab4887fde28a2eb0b3dea53cf3b290fe9d5689c8c3dd99b91ce2da0df0636208022816d23f766756ea81cb46b5907f93c5b3071fec8fc88553dfd732f560537c66fc8507f750890abcf23e9900326939a163f4ffdaf1ee6109b7e86babee510613478857211149e80f33bd338", + "address": "86fe0a4a9bf7dbed6784b8cfbfd5a80d927be30b4debff67e60e1fd05cd2359b" }, { - "pubkey": "7feee0aa8ee11a61f4e91b71481928db7998a8e58deef181ffb013fa3e3c51a7375155c36deb9d09e97edd61dac26b1239c53c2adb50fe2608d467e8669fed9946465500e093442d399b30c74ebb38d1e979d435a5a2226b33e08f5050cc73b4799722a258dcf7e9d7a838014e06dc98ea691f976c0d319d7206b47e30549a37" + "pubkey": "7feee0aa8ee11a61f4e91b71481928db7998a8e58deef181ffb013fa3e3c51a7375155c36deb9d09e97edd61dac26b1239c53c2adb50fe2608d467e8669fed9946465500e093442d399b30c74ebb38d1e979d435a5a2226b33e08f5050cc73b4799722a258dcf7e9d7a838014e06dc98ea691f976c0d319d7206b47e30549a37", + "address": "fdc635bc2bf1477609bea5ba90365a99d4bbb023b2eaffb5c20642a2f2458dfa" }, { - "pubkey": "47cac956e48e385bd811fcfdb1a06bcf26bc09d4f4b4fbb2c64391c2bb6ab32975b6b7b4eb508c925ad6febff7031bd5ffbb3d7e7e02db94f25cbf50af4aee2201a42a404947f4ad6628b1482afadb4fbce34116961cd8e0edf0cdb017d37a7516177059bab03e70ce0ad445554c2f02cd00b183d4c2d4d37793441a0d36f867" + "pubkey": "47cac956e48e385bd811fcfdb1a06bcf26bc09d4f4b4fbb2c64391c2bb6ab32975b6b7b4eb508c925ad6febff7031bd5ffbb3d7e7e02db94f25cbf50af4aee2201a42a404947f4ad6628b1482afadb4fbce34116961cd8e0edf0cdb017d37a7516177059bab03e70ce0ad445554c2f02cd00b183d4c2d4d37793441a0d36f867", + "address": "5bdf4c81489bea69ba29cd3eea2670c1bb6cb5d922fa8cb6e17bca71dfdd49f0" }, { - "pubkey": "13fef1141f6f5c94b03b8597fecbaf800dc4d6128a5ffaa4465ee5036b4e471a292cbc3eea42ceeb1fe5be0e473c1d250a09451200610960564f464a11e3da1a75fdc13a3b108a0a30917726f99832bfe13874e07c5ea82d5a4b23249812b0e22dd81e29600d19a80e933123df3ac8d750192e136e007e80ac7a7a92c953f673" + "pubkey": "13fef1141f6f5c94b03b8597fecbaf800dc4d6128a5ffaa4465ee5036b4e471a292cbc3eea42ceeb1fe5be0e473c1d250a09451200610960564f464a11e3da1a75fdc13a3b108a0a30917726f99832bfe13874e07c5ea82d5a4b23249812b0e22dd81e29600d19a80e933123df3ac8d750192e136e007e80ac7a7a92c953f673", + "address": "22c2e3721a6256a5891ba612ad55343dceb6655388176f981ab2885ed756d6fd" }, { - "pubkey": "1e04f75417887f43a05b5cd2da0d31c0e451931cd2d145f80a08e9c85e3736ea499fa27ece987013a403e6a2595ef12d6d3c6634b6c72e438f96850b7336941c65642820c8dfa38fa8aa1813954832d4fdc42f87622bc5e1f9c51cbc45259cd84af3e89ec7452b38804cfa5260f7d7b97dbbc63e6c3b820d8768e01876af0846" + "pubkey": "1e04f75417887f43a05b5cd2da0d31c0e451931cd2d145f80a08e9c85e3736ea499fa27ece987013a403e6a2595ef12d6d3c6634b6c72e438f96850b7336941c65642820c8dfa38fa8aa1813954832d4fdc42f87622bc5e1f9c51cbc45259cd84af3e89ec7452b38804cfa5260f7d7b97dbbc63e6c3b820d8768e01876af0846", + "address": "f9c28a8369df5ff3f8589a0aaad93d2d8f94f5ad70d898d422c964fdd6a87d0b" }, { - "pubkey": 
"5466c7ed09d157bdd8b17389d84ca9fd1423eb347e40126840b5736bd3fb0aa52c2452cf7fa9f2f7b9cc53d414c482227036c056452fb8829bb78dd9849a0ed845e875412cba5f044d969ed819a186aa9841e77dae2f7a1c6c25bf73942bf0cd58e3d2d4f2b9117974e3d6b0743c1565d72c41b69ebbfce47bbcf8d642651d8d" + "pubkey": "5466c7ed09d157bdd8b17389d84ca9fd1423eb347e40126840b5736bd3fb0aa52c2452cf7fa9f2f7b9cc53d414c482227036c056452fb8829bb78dd9849a0ed845e875412cba5f044d969ed819a186aa9841e77dae2f7a1c6c25bf73942bf0cd58e3d2d4f2b9117974e3d6b0743c1565d72c41b69ebbfce47bbcf8d642651d8d", + "address": "69e34e6a9e6aeb051f46e15cae1fe7d0f8641b6bcd9ff23ab228c78b1e4418af" }, { - "pubkey": "713a6438056175e7b274e5dd8bffd34f5a266cd1554b837678552557940a7de46cc90d4139bb55d80f81adc1039b0bc723eed51eb3bc225b4cfcd5a91ccbbc373eba65495a57702293ac999bb7a4b6ca0135f67378b69a723e23cf9c45513b0387f6cb286d6e6d0ffaf2bdfcf0e6a28e3559402d830f70a2ed835304261b4321" + "pubkey": "713a6438056175e7b274e5dd8bffd34f5a266cd1554b837678552557940a7de46cc90d4139bb55d80f81adc1039b0bc723eed51eb3bc225b4cfcd5a91ccbbc373eba65495a57702293ac999bb7a4b6ca0135f67378b69a723e23cf9c45513b0387f6cb286d6e6d0ffaf2bdfcf0e6a28e3559402d830f70a2ed835304261b4321", + "address": "d453e66ea50b05ec3c102cdaabbcee172136f53db82ba434ca170a53483d4ad1" }, { - "pubkey": "1f4d1c336ca9758e08311a0b136f6ee6ad20bc8d9e276e508931892343ff8a0e056a96d598aff2f335b4cd98e1ba0902a22f36b86f8d104c0815a96a301df7c606e1c44413f019e0f175f4c6721587ddf620c98713927a7695b002d8bf36b7c04466c51ad43dd170e468bb7edd20b601cf13c1b53cc5384c07f9c61bf220910e" + "pubkey": "1f4d1c336ca9758e08311a0b136f6ee6ad20bc8d9e276e508931892343ff8a0e056a96d598aff2f335b4cd98e1ba0902a22f36b86f8d104c0815a96a301df7c606e1c44413f019e0f175f4c6721587ddf620c98713927a7695b002d8bf36b7c04466c51ad43dd170e468bb7edd20b601cf13c1b53cc5384c07f9c61bf220910e", + "address": "04e61f7bf892ca638451f6efeccf069d7fb5a5c82303aa27e6d28725da8ae1df" }, { - "pubkey": "2112a7a4468403b38d9d352fcf9fc1d1a20ddfbe4c1190a59a526a9460e6791f201589d5714adf4c390e156e204d21b2f2327d64255f4b94ff7dbe1acee47fe5352cece033a9e6e339a15ba094e73e0fbb2da49b29416b1017d61bd52884e0b22aab88a70047c64849d134c6af9fba69bbb2950a8fae3225aa7f462984efad3f" + "pubkey": "2112a7a4468403b38d9d352fcf9fc1d1a20ddfbe4c1190a59a526a9460e6791f201589d5714adf4c390e156e204d21b2f2327d64255f4b94ff7dbe1acee47fe5352cece033a9e6e339a15ba094e73e0fbb2da49b29416b1017d61bd52884e0b22aab88a70047c64849d134c6af9fba69bbb2950a8fae3225aa7f462984efad3f", + "address": "97d0f43b88e104aa9b0cc98c5cea96f5468a59d3986d2d187b19319a5911b7ff" }, { - "pubkey": "484f2fa2dab11d0f6276467090d5b33c077d13b61ee57834f481feec52423c3e8d83f4957153cad0e3baea68e6eb6e2cb26da69751c43024818cd4f0778219ac6637ddcb08f07528f9670e6f6da4ced010d7b3a2d3fdcf28b3455ef5644a7b7b170b5ebfc6b6d66d9e37fd58a7ecce98b047c01212fd7547bd4fb9f1f99372f4" + "pubkey": "484f2fa2dab11d0f6276467090d5b33c077d13b61ee57834f481feec52423c3e8d83f4957153cad0e3baea68e6eb6e2cb26da69751c43024818cd4f0778219ac6637ddcb08f07528f9670e6f6da4ced010d7b3a2d3fdcf28b3455ef5644a7b7b170b5ebfc6b6d66d9e37fd58a7ecce98b047c01212fd7547bd4fb9f1f99372f4", + "address": "8e660d69a8d99e9cb15323c0c8db36f1f432231a1b9a74da8ffa44a2b9abc7fe" }, { - "pubkey": "3bd6d27ae320fc07e19efb93b890fd8c869429fa891f97f93cdcb581fc3a085d162522eb79e6ae19f838d2cbabc3a497751c952e618976cfb763b807d3877036028ccc52f506b6ae2b92a82cf07de343af79790de61568e4f80eaa1934a67faa07dc140b0f02b39f510be929c2a7d097a7e0d0e828a5ed7d0e18a91d42543beb" + "pubkey": 
"3bd6d27ae320fc07e19efb93b890fd8c869429fa891f97f93cdcb581fc3a085d162522eb79e6ae19f838d2cbabc3a497751c952e618976cfb763b807d3877036028ccc52f506b6ae2b92a82cf07de343af79790de61568e4f80eaa1934a67faa07dc140b0f02b39f510be929c2a7d097a7e0d0e828a5ed7d0e18a91d42543beb", + "address": "a901ae67ca50d4af01f813da27613f124137be835a5d6902697ec719b2df704f" }, { - "pubkey": "7a2e2aabf1c030677921ce3d31fbeaa9eb4fdddfb97bd5714e351165f10d76b775ec01908e934711c4a2ab6c39be450fb5dd4390c30695563b6e679fa8a0e360561840c2dc3e39281077b5be7b1946806b92041cc0259be754ecd9e6a12a44bd301e1d380c3ae096acfae70e479b2d33b9be2cc993d03bb5517cd74584db3fca" + "pubkey": "7a2e2aabf1c030677921ce3d31fbeaa9eb4fdddfb97bd5714e351165f10d76b775ec01908e934711c4a2ab6c39be450fb5dd4390c30695563b6e679fa8a0e360561840c2dc3e39281077b5be7b1946806b92041cc0259be754ecd9e6a12a44bd301e1d380c3ae096acfae70e479b2d33b9be2cc993d03bb5517cd74584db3fca", + "address": "6b0dcc478115c270f2a6c6a9809c04b61eff8a5877b837d86810396fdb50feda" }, { - "pubkey": "306d6a4e09b88e5147fb475361db2f7b27ce4f2cae78a2dc7ced564a75043e5f84a9830eaa23137ac01ef8e4763fb6870bb62cf184596df8f15f41c535b2f6430a78957c29a9934533bf5df6014961879df399044d1cab57442ef36ef743ee02571495cc7a8f1dd9d573721131677759c532e62f946c9c969b5668862e817db6" + "pubkey": "306d6a4e09b88e5147fb475361db2f7b27ce4f2cae78a2dc7ced564a75043e5f84a9830eaa23137ac01ef8e4763fb6870bb62cf184596df8f15f41c535b2f6430a78957c29a9934533bf5df6014961879df399044d1cab57442ef36ef743ee02571495cc7a8f1dd9d573721131677759c532e62f946c9c969b5668862e817db6", + "address": "c53b7e4463091a999e002c75ed55c79e1f4c64e91ca8ba1b72d984dea9c0e477" }, { - "pubkey": "34404c84cf05c649a6f9c2bb3af33753ef0d186ba2363d5ed2892a4cf39f3f361f563dc66e5623a27a54c24edd417fa20c0f6361016652159b3a22d7c1ff5ef511ed0b04ee3ed101b2627ef64c5e6ee8b17c8a2db95ded5a9f7edf33520612c5269795ba1aec09bd178d185fe7e4d4360fdb3e51b484114fcb2cd9499fbc84a2" + "pubkey": "34404c84cf05c649a6f9c2bb3af33753ef0d186ba2363d5ed2892a4cf39f3f361f563dc66e5623a27a54c24edd417fa20c0f6361016652159b3a22d7c1ff5ef511ed0b04ee3ed101b2627ef64c5e6ee8b17c8a2db95ded5a9f7edf33520612c5269795ba1aec09bd178d185fe7e4d4360fdb3e51b484114fcb2cd9499fbc84a2", + "address": "18e6af48dad7fd4902991efb019e741e0f2a7a192c8678b1da3f4cf42c164519" }, { - "pubkey": "4bc468602245263f7366d7745c0d064aa311fbeb569751796e0d01878fc8723f45a67bfd1070fc8f90bc6ebb9f4e0c5024fda12e97ccaa52ea9f4e82673f29aa45e569a63ea929b4eb80cf421cb4e2b6f6a3b5d5216de2644bd6dcba4fa8a5cf7ab3ebadaeafcd6db8fc77f4168f2fa158f394916a9204dbc5760471ea8085bb" + "pubkey": "4bc468602245263f7366d7745c0d064aa311fbeb569751796e0d01878fc8723f45a67bfd1070fc8f90bc6ebb9f4e0c5024fda12e97ccaa52ea9f4e82673f29aa45e569a63ea929b4eb80cf421cb4e2b6f6a3b5d5216de2644bd6dcba4fa8a5cf7ab3ebadaeafcd6db8fc77f4168f2fa158f394916a9204dbc5760471ea8085bb", + "address": "95fe2d76c72ada51156aed96d083c993d637d7a772fb48efeb8bc3f3cedc7237" }, { - "pubkey": "85aa805512065ca85706a6ffe6e21ef635cb22ab862ab19a02a9572e6d14ad85794b2952a6e00cd87f43c657f006dc1dde45e04cddab85b2b5f20e70cb11f2045e7f94fe901353f8b75c0577f92e00b25e72a4790c7b391f33c0066fb38b2e66586706c06e159d342ecebd7f9bdfe83f3d3c7f395a7879096514d74c5d4e88aa" + "pubkey": "85aa805512065ca85706a6ffe6e21ef635cb22ab862ab19a02a9572e6d14ad85794b2952a6e00cd87f43c657f006dc1dde45e04cddab85b2b5f20e70cb11f2045e7f94fe901353f8b75c0577f92e00b25e72a4790c7b391f33c0066fb38b2e66586706c06e159d342ecebd7f9bdfe83f3d3c7f395a7879096514d74c5d4e88aa", + "address": "d6ad6476141dd798dc7b009b92b8c2d50a8caff8452a459548aa5ccb6c11b6c3" } ] -} \ No newline at end of file +} diff --git 
a/cmd/node/factory/structs.go b/cmd/node/factory/structs.go index d3176d14e5f..7badfc223af 100644 --- a/cmd/node/factory/structs.go +++ b/cmd/node/factory/structs.go @@ -27,6 +27,7 @@ import ( "github.com/ElrondNetwork/elrond-go/crypto/signing/kyber/singlesig" "github.com/ElrondNetwork/elrond-go/crypto/signing/multisig" "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/address" dataBlock "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/data/blockchain" "github.com/ElrondNetwork/elrond-go/data/state" @@ -58,6 +59,7 @@ import ( "github.com/ElrondNetwork/elrond-go/process/factory" "github.com/ElrondNetwork/elrond-go/process/factory/metachain" "github.com/ElrondNetwork/elrond-go/process/factory/shard" + "github.com/ElrondNetwork/elrond-go/process/rewardTransaction" "github.com/ElrondNetwork/elrond-go/process/smartContract" processSync "github.com/ElrondNetwork/elrond-go/process/sync" "github.com/ElrondNetwork/elrond-go/process/track" @@ -208,12 +210,16 @@ func NewStateComponentsFactoryArgs( // StateComponentsFactory creates the state components func StateComponentsFactory(args *stateComponentsFactoryArgs) (*State, error) { - addressConverter, err := addressConverters.NewPlainAddressConverter(args.config.Address.Length, args.config.Address.Prefix) + addressConverter, err := addressConverters.NewPlainAddressConverter( + args.config.Address.Length, + args.config.Address.Prefix, + ) + if err != nil { return nil, errors.New("could not create address converter: " + err.Error()) } - accountFactory, err := factoryState.NewAccountFactoryCreator(args.shardCoordinator) + accountFactory, err := factoryState.NewAccountFactoryCreator(factoryState.UserAccount) if err != nil { return nil, errors.New("could not create account factory: " + err.Error()) } @@ -243,7 +249,12 @@ type dataComponentsFactoryArgs struct { } // NewDataComponentsFactoryArgs initializes the arguments necessary for creating the data components -func NewDataComponentsFactoryArgs(config *config.Config, shardCoordinator sharding.Coordinator, core *Core, uniqueID string) *dataComponentsFactoryArgs { +func NewDataComponentsFactoryArgs( + config *config.Config, + shardCoordinator sharding.Coordinator, + core *Core, + uniqueID string, +) *dataComponentsFactoryArgs { return &dataComponentsFactoryArgs{ config: config, shardCoordinator: shardCoordinator, @@ -344,12 +355,12 @@ func CryptoComponentsFactory(args *cryptoComponentsFactoryArgs) (*Crypto, error) return nil, errors.New("could not create multisig hasher: " + err.Error()) } - currentShardPubKeys, err := args.nodesConfig.InitialNodesPubKeysForShard(args.shardCoordinator.SelfId()) + currentShardNodesPubKeys, err := args.nodesConfig.InitialNodesPubKeysForShard(args.shardCoordinator.SelfId()) if err != nil { return nil, errors.New("could not start creation of multiSigner: " + err.Error()) } - multiSigner, err := createMultiSigner(args.config, multisigHasher, currentShardPubKeys, args.privKey, args.keyGen) + multiSigner, err := createMultiSigner(args.config, multisigHasher, currentShardNodesPubKeys, args.privKey, args.keyGen) if err != nil { return nil, err } @@ -399,9 +410,11 @@ func NetworkComponentsFactory(p2pConfig *config.P2PConfig, log *logger.Logger, c type processComponentsFactoryArgs struct { genesisConfig *sharding.Genesis + economicsConfig *config.EconomicsConfig nodesConfig *sharding.NodesSetup syncer ntp.SyncTimer shardCoordinator sharding.Coordinator + nodesCoordinator sharding.NodesCoordinator data *Data core 
*Core crypto *Crypto @@ -413,9 +426,11 @@ type processComponentsFactoryArgs struct { // NewProcessComponentsFactoryArgs initializes the arguments necessary for creating the process components func NewProcessComponentsFactoryArgs( genesisConfig *sharding.Genesis, + economicsConfig *config.EconomicsConfig, nodesConfig *sharding.NodesSetup, syncer ntp.SyncTimer, shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, data *Data, core *Core, crypto *Crypto, @@ -425,9 +440,11 @@ func NewProcessComponentsFactoryArgs( ) *processComponentsFactoryArgs { return &processComponentsFactoryArgs{ genesisConfig: genesisConfig, + economicsConfig: economicsConfig, nodesConfig: nodesConfig, syncer: syncer, shardCoordinator: shardCoordinator, + nodesCoordinator: nodesCoordinator, data: data, core: core, crypto: crypto, @@ -440,7 +457,7 @@ func NewProcessComponentsFactoryArgs( // ProcessComponentsFactory creates the process components func ProcessComponentsFactory(args *processComponentsFactoryArgs) (*Process, error) { interceptorContainerFactory, resolversContainerFactory, err := newInterceptorAndResolverContainerFactory( - args.shardCoordinator, args.data, args.core, args.crypto, args.state, args.network) + args.shardCoordinator, args.nodesCoordinator, args.data, args.core, args.crypto, args.state, args.network) if err != nil { return nil, err } @@ -494,13 +511,17 @@ func ProcessComponentsFactory(args *processComponentsFactoryArgs) (*Process, err blockProcessor, blockTracker, err := newBlockProcessorAndTracker( resolversFinder, args.shardCoordinator, + args.nodesCoordinator, + args.economicsConfig, args.data, args.core, args.state, forkDetector, shardsGenesisBlocks, + args.nodesConfig, args.coreServiceContainer, ) + if err != nil { return nil, err } @@ -584,25 +605,6 @@ func (srr *seedRandReader) Read(p []byte) (n int, err error) { return len(p), nil } -type nullChronologyValidator struct { -} - -// ValidateReceivedBlock should validate if parameters to be checked are valid -// In this implementation it just returns nil -func (*nullChronologyValidator) ValidateReceivedBlock(shardID uint32, epoch uint32, nonce uint64, round uint64) error { - //TODO when implementing a workable variant take into account to receive headers "from future" (nonce or round > current round) - // as this might happen when clocks are slightly de-synchronized - return nil -} - -// IsInterfaceNil returns true if there is no value under the interface -func (ncv *nullChronologyValidator) IsInterfaceNil() bool { - if ncv == nil { - return true - } - return false -} - // CreateStatusHandlerPresenter will return an instance of PresenterStatusHandler func CreateStatusHandlerPresenter() view.Presenter { presenterStatusHandlerFactory := factoryViews.NewPresenterFactory() @@ -652,7 +654,13 @@ func getMarshalizerFromConfig(cfg *config.Config) (marshal.Marshalizer, error) { return nil, errors.New("no marshalizer provided in config file") } -func getTrie(cfg config.StorageConfig, marshalizer marshal.Marshalizer, hasher hashing.Hasher, uniqueID string) (data.Trie, error) { +func getTrie( + cfg config.StorageConfig, + marshalizer marshal.Marshalizer, + hasher hashing.Hasher, + uniqueID string, +) (data.Trie, error) { + accountsTrieStorage, err := storageUnit.NewStorageUnitFromConf( getCacherFromConfig(cfg.Cache), getDBFromConfig(cfg.DB, uniqueID), @@ -726,7 +734,16 @@ func createShardDataStoreFromConfig( shardCoordinator sharding.Coordinator, uniqueID string, ) (dataRetriever.StorageService, error) { - var headerUnit, 
peerBlockUnit, miniBlockUnit, txUnit, metachainHeaderUnit, unsignedTxUnit, metaHdrHashNonceUnit, shardHdrHashNonceUnit *storageUnit.Unit + + var headerUnit *storageUnit.Unit + var peerBlockUnit *storageUnit.Unit + var miniBlockUnit *storageUnit.Unit + var txUnit *storageUnit.Unit + var metachainHeaderUnit *storageUnit.Unit + var unsignedTxUnit *storageUnit.Unit + var rewardTxUnit *storageUnit.Unit + var metaHdrHashNonceUnit *storageUnit.Unit + var shardHdrHashNonceUnit *storageUnit.Unit var err error defer func() { @@ -747,6 +764,9 @@ func createShardDataStoreFromConfig( if unsignedTxUnit != nil { _ = unsignedTxUnit.DestroyUnit() } + if rewardTxUnit != nil { + _ = rewardTxUnit.DestroyUnit() + } if metachainHeaderUnit != nil { _ = metachainHeaderUnit.DestroyUnit() } @@ -775,6 +795,14 @@ func createShardDataStoreFromConfig( return nil, err } + rewardTxUnit, err = storageUnit.NewStorageUnitFromConf( + getCacherFromConfig(config.RewardTxStorage.Cache), + getDBFromConfig(config.RewardTxStorage.DB, uniqueID), + getBloomFromConfig(config.RewardTxStorage.Bloom)) + if err != nil { + return nil, err + } + miniBlockUnit, err = storageUnit.NewStorageUnitFromConf( getCacherFromConfig(config.MiniBlocksStorage.Cache), getDBFromConfig(config.MiniBlocksStorage.DB, uniqueID), @@ -833,6 +861,7 @@ func createShardDataStoreFromConfig( store.AddStorer(dataRetriever.BlockHeaderUnit, headerUnit) store.AddStorer(dataRetriever.MetaBlockUnit, metachainHeaderUnit) store.AddStorer(dataRetriever.UnsignedTransactionUnit, unsignedTxUnit) + store.AddStorer(dataRetriever.RewardTransactionUnit, rewardTxUnit) store.AddStorer(dataRetriever.MetaHdrNonceHashDataUnit, metaHdrHashNonceUnit) hdrNonceHashDataUnit := dataRetriever.ShardHdrNonceHashDataUnit + dataRetriever.UnitType(shardCoordinator.SelfId()) store.AddStorer(hdrNonceHashDataUnit, shardHdrHashNonceUnit) @@ -958,7 +987,13 @@ func createShardDataPoolFromConfig( uTxPool, err := shardedData.NewShardedData(getCacherFromConfig(config.UnsignedTransactionDataPool)) if err != nil { - log.Info("error creating smart contract result") + log.Info("error creating smart contract result pool") + return nil, err + } + + rewardTxPool, err := shardedData.NewShardedData(getCacherFromConfig(config.RewardTransactionDataPool)) + if err != nil { + log.Info("error creating reward transaction pool") return nil, err } @@ -1005,6 +1040,7 @@ func createShardDataPoolFromConfig( return dataPool.NewShardedDataPool( txPool, uTxPool, + rewardTxPool, hdrPool, hdrNonces, txBlockBody, @@ -1139,6 +1175,7 @@ func createNetMessenger( func newInterceptorAndResolverContainerFactory( shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, data *Data, core *Core, crypto *Crypto, @@ -1146,10 +1183,25 @@ func newInterceptorAndResolverContainerFactory( network *Network, ) (process.InterceptorsContainerFactory, dataRetriever.ResolversContainerFactory, error) { if shardCoordinator.SelfId() < shardCoordinator.NumberOfShards() { - return newShardInterceptorAndResolverContainerFactory(shardCoordinator, data, core, crypto, state, network) + return newShardInterceptorAndResolverContainerFactory( + shardCoordinator, + nodesCoordinator, + data, + core, + crypto, + state, + network, + ) } if shardCoordinator.SelfId() == sharding.MetachainShardId { - return newMetaInterceptorAndResolverContainerFactory(shardCoordinator, data, core, crypto, network) + return newMetaInterceptorAndResolverContainerFactory( + shardCoordinator, + nodesCoordinator, + data, + core, + crypto, + network, + ) } return nil, 
nil, errors.New("could not create interceptor and resolver container factory") @@ -1157,6 +1209,7 @@ func newInterceptorAndResolverContainerFactory( func newShardInterceptorAndResolverContainerFactory( shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, data *Data, core *Core, crypto *Crypto, @@ -1167,6 +1220,7 @@ func newShardInterceptorAndResolverContainerFactory( interceptorContainerFactory, err := shard.NewInterceptorsContainerFactory( state.AccountsAdapter, shardCoordinator, + nodesCoordinator, network.NetMessenger, data.Store, core.Marshalizer, @@ -1176,7 +1230,6 @@ func newShardInterceptorAndResolverContainerFactory( crypto.MultiSigner, data.Datapool, state.AddressConverter, - &nullChronologyValidator{}, ) if err != nil { return nil, nil, err @@ -1205,6 +1258,7 @@ func newShardInterceptorAndResolverContainerFactory( func newMetaInterceptorAndResolverContainerFactory( shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, data *Data, core *Core, crypto *Crypto, @@ -1213,13 +1267,13 @@ func newMetaInterceptorAndResolverContainerFactory( //TODO add a real chronology validator and remove null chronology validator interceptorContainerFactory, err := metachain.NewInterceptorsContainerFactory( shardCoordinator, + nodesCoordinator, network.NetMessenger, data.Store, core.Marshalizer, core.Hasher, crypto.MultiSigner, data.MetaDatapool, - &nullChronologyValidator{}, ) if err != nil { return nil, nil, err @@ -1302,7 +1356,11 @@ func generateGenesisHeadersAndApplyInitialBalances( shardsGenesisBlocks[shardCoordinator.SelfId()] = genesisBlockForCurrentShard - genesisBlock, err := genesis.CreateMetaGenesisBlock(uint64(nodesSetup.StartTime), nodesSetup.InitialNodesPubKeys()) + genesisBlock, err := genesis.CreateMetaGenesisBlock( + uint64(nodesSetup.StartTime), + nodesSetup.InitialNodesPubKeys(), + ) + if err != nil { return nil, err } @@ -1345,7 +1403,7 @@ func createInMemoryShardCoordinatorAndAccount( return nil, nil, err } - accountFactory, err := factoryState.NewAccountFactoryCreator(newShardCoordinator) + accountFactory, err := factoryState.NewAccountFactoryCreator(factoryState.UserAccount) if err != nil { return nil, nil, err } @@ -1376,18 +1434,71 @@ func newForkDetector( func newBlockProcessorAndTracker( resolversFinder dataRetriever.ResolversFinder, shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, + economicsConfig *config.EconomicsConfig, data *Data, core *Core, state *State, forkDetector process.ForkDetector, shardsGenesisBlocks map[uint32]data.HeaderHandler, + nodesConfig *sharding.NodesSetup, coreServiceContainer serviceContainer.Core, ) (process.BlockProcessor, process.BlocksTracker, error) { + + if economicsConfig.CommunityAddress == "" || economicsConfig.BurnAddress == "" { + return nil, nil, errors.New("rewards configuration missing") + } + + communityAddress, err := hex.DecodeString(economicsConfig.CommunityAddress) + if err != nil { + return nil, nil, err + } + + burnAddress, err := hex.DecodeString(economicsConfig.BurnAddress) + if err != nil { + return nil, nil, err + } + + specialAddressHolder, err := address.NewSpecialAddressHolder( + communityAddress, + burnAddress, + state.AddressConverter, + shardCoordinator, + nodesCoordinator, + ) + if err != nil { + return nil, nil, err + } + + // TODO: remove nodesConfig as no longer needed with nodes coordinator available if shardCoordinator.SelfId() < shardCoordinator.NumberOfShards() { - return 
newShardBlockProcessorAndTracker(resolversFinder, shardCoordinator, data, core, state, forkDetector, shardsGenesisBlocks, coreServiceContainer) + return newShardBlockProcessorAndTracker( + resolversFinder, + shardCoordinator, + nodesCoordinator, + specialAddressHolder, + data, + core, + state, + forkDetector, + shardsGenesisBlocks, + nodesConfig, + coreServiceContainer, + ) } if shardCoordinator.SelfId() == sharding.MetachainShardId { - return newMetaBlockProcessorAndTracker(resolversFinder, shardCoordinator, data, core, state, forkDetector, shardsGenesisBlocks, coreServiceContainer) + return newMetaBlockProcessorAndTracker( + resolversFinder, + shardCoordinator, + nodesCoordinator, + specialAddressHolder, + data, + core, + state, + forkDetector, + shardsGenesisBlocks, + coreServiceContainer, + ) } return nil, nil, errors.New("could not create block processor and tracker") @@ -1396,11 +1507,14 @@ func newBlockProcessorAndTracker( func newShardBlockProcessorAndTracker( resolversFinder dataRetriever.ResolversFinder, shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, + specialAddressHandler process.SpecialAddressHandler, data *Data, core *Core, state *State, forkDetector process.ForkDetector, shardsGenesisBlocks map[uint32]data.HeaderHandler, + nodesConfig *sharding.NodesSetup, coreServiceContainer serviceContainer.Core, ) (process.BlockProcessor, process.BlocksTracker, error) { argsParser, err := smartContract.NewAtArgumentParser() @@ -1423,7 +1537,9 @@ func newShardBlockProcessorAndTracker( core.Marshalizer, core.Hasher, state.AddressConverter, + specialAddressHandler, data.Store, + data.Datapool, ) if err != nil { return nil, nil, err @@ -1439,6 +1555,21 @@ func newShardBlockProcessorAndTracker( return nil, nil, err } + rewardsTxInterim, err := interimProcContainer.Get(dataBlock.RewardsBlock) + if err != nil { + return nil, nil, err + } + + rewardsTxHandler, ok := rewardsTxInterim.(process.TransactionFeeHandler) + if !ok { + return nil, nil, process.ErrWrongTypeAssertion + } + + internalTransactionProducer, ok := rewardsTxInterim.(process.InternalTransactionProducer) + if !ok { + return nil, nil, process.ErrWrongTypeAssertion + } + scProcessor, err := smartContract.NewSmartContractProcessor( vmContainer, argsParser, @@ -1449,6 +1580,7 @@ func newShardBlockProcessorAndTracker( state.AddressConverter, shardCoordinator, scForwarder, + rewardsTxHandler, ) if err != nil { return nil, nil, err @@ -1458,6 +1590,7 @@ func newShardBlockProcessorAndTracker( resolversFinder, factory.TransactionTopic, factory.UnsignedTransactionTopic, + factory.RewardsTransactionTopic, factory.MiniBlocksTopic, factory.HeadersTopic, factory.MetachainBlocksTopic, @@ -1467,6 +1600,21 @@ func newShardBlockProcessorAndTracker( return nil, nil, err } + rewardsTxProcessor, err := rewardTransaction.NewRewardTxProcessor( + state.AccountsAdapter, + state.AddressConverter, + shardCoordinator, + rewardsTxInterim, + ) + if err != nil { + return nil, nil, err + } + + txTypeHandler, err := coordinator.NewTxTypeHandler(state.AddressConverter, shardCoordinator, state.AccountsAdapter) + if err != nil { + return nil, nil, err + } + transactionProcessor, err := transaction.NewTxProcessor( state.AccountsAdapter, core.Hasher, @@ -1474,6 +1622,8 @@ func newShardBlockProcessorAndTracker( core.Marshalizer, shardCoordinator, scProcessor, + rewardsTxHandler, + txTypeHandler, ) if err != nil { return nil, nil, errors.New("could not create transaction processor: " + err.Error()) @@ -1501,6 +1651,8 @@ func 
newShardBlockProcessorAndTracker( transactionProcessor, scProcessor, scProcessor, + rewardsTxProcessor, + internalTransactionProducer, ) if err != nil { return nil, nil, err @@ -1534,16 +1686,18 @@ func newShardBlockProcessorAndTracker( } argumentsBaseProcessor := block.ArgBaseProcessor{ - Accounts: state.AccountsAdapter, - ForkDetector: forkDetector, - Hasher: core.Hasher, - Marshalizer: core.Marshalizer, - Store: data.Store, - ShardCoordinator: shardCoordinator, - Uint64Converter: core.Uint64ByteSliceConverter, - StartHeaders: shardsGenesisBlocks, - RequestHandler: requestHandler, - Core: coreServiceContainer, + Accounts: state.AccountsAdapter, + ForkDetector: forkDetector, + Hasher: core.Hasher, + Marshalizer: core.Marshalizer, + Store: data.Store, + ShardCoordinator: shardCoordinator, + NodesCoordinator: nodesCoordinator, + SpecialAddressHandler: specialAddressHandler, + Uint64Converter: core.Uint64ByteSliceConverter, + StartHeaders: shardsGenesisBlocks, + RequestHandler: requestHandler, + Core: coreServiceContainer, } arguments := block.ArgShardProcessor{ ArgBaseProcessor: &argumentsBaseProcessor, @@ -1569,6 +1723,8 @@ func newShardBlockProcessorAndTracker( func newMetaBlockProcessorAndTracker( resolversFinder dataRetriever.ResolversFinder, shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, + specialAddressHandler process.SpecialAddressHandler, data *Data, core *Core, state *State, @@ -1595,6 +1751,8 @@ func newMetaBlockProcessorAndTracker( data.MetaDatapool, forkDetector, shardCoordinator, + nodesCoordinator, + specialAddressHandler, core.Hasher, core.Marshalizer, data.Store, @@ -1613,6 +1771,7 @@ func newMetaBlockProcessorAndTracker( return metaProcessor, blockTracker, nil } + func getCacherFromConfig(cfg config.CacheConfig) storageUnit.CacheConfig { return storageUnit.CacheConfig{ Size: cfg.Size, @@ -1710,7 +1869,14 @@ func decodeAddress(address string) ([]byte, error) { return hex.DecodeString(address) } -func getSk(ctx *cli.Context, log *logger.Logger, skName string, skIndexName string, skPemFileName string) ([]byte, error) { +func getSk( + ctx *cli.Context, + log *logger.Logger, + skName string, + skIndexName string, + skPemFileName string, +) ([]byte, error) { + //if flag is defined, it shall overwrite what was read from pem file if ctx.GlobalIsSet(skName) { encodedSk := []byte(ctx.GlobalString(skName)) diff --git a/cmd/node/main.go b/cmd/node/main.go index 21543bd60b1..ff6809d38f3 100644 --- a/cmd/node/main.go +++ b/cmd/node/main.go @@ -7,6 +7,7 @@ import ( "io" "io/ioutil" "math" + "math/big" "net/http" "os" "os/signal" @@ -482,6 +483,15 @@ func startNode(ctx *cli.Context, log *logger.Logger, version string) error { return err } + nodesCoordinator, err := createNodesCoordinator( + nodesConfig, + generalConfig.GeneralSettings, + pubKey, + coreComponents.Hasher) + if err != nil { + return err + } + stateArgs := factory.NewStateComponentsFactoryArgs(generalConfig, genesisConfig, shardCoordinator, coreComponents) stateComponents, err := factory.StateComponentsFactory(stateArgs) if err != nil { @@ -551,8 +561,18 @@ func startNode(ctx *cli.Context, log *logger.Logger, version string) error { return err } - cryptoArgs := factory.NewCryptoComponentsFactoryArgs(ctx, generalConfig, nodesConfig, shardCoordinator, keyGen, - privKey, log, initialBalancesSkPemFile.Name, txSignSk.Name, txSignSkIndex.Name) + cryptoArgs := factory.NewCryptoComponentsFactoryArgs( + ctx, + generalConfig, + nodesConfig, + shardCoordinator, + keyGen, + privKey, + log, + 
initialBalancesSkPemFile.Name, + txSignSk.Name, + txSignSkIndex.Name, + ) cryptoComponents, err := factory.CryptoComponentsFactory(cryptoArgs) if err != nil { return err @@ -611,8 +631,22 @@ func startNode(ctx *cli.Context, log *logger.Logger, version string) error { } } - processArgs := factory.NewProcessComponentsFactoryArgs(genesisConfig, nodesConfig, syncer, shardCoordinator, - dataComponents, coreComponents, cryptoComponents, stateComponents, networkComponents, coreServiceContainer) + economicsConfig := &generalConfig.EconomicsConfig + + processArgs := factory.NewProcessComponentsFactoryArgs( + genesisConfig, + economicsConfig, + nodesConfig, + syncer, + shardCoordinator, + nodesCoordinator, + dataComponents, + coreComponents, + cryptoComponents, + stateComponents, + networkComponents, + coreServiceContainer, + ) processComponents, err := factory.ProcessComponentsFactory(processArgs) if err != nil { return err @@ -626,6 +660,7 @@ func startNode(ctx *cli.Context, log *logger.Logger, version string) error { privKey, pubKey, shardCoordinator, + nodesCoordinator, coreComponents, stateComponents, dataComponents, @@ -949,23 +984,31 @@ func loadMainConfig(filepath string, log *logger.Logger) (*config.Config, error) return cfg, nil } -func createShardCoordinator( - nodesConfig *sharding.NodesSetup, - pubKey crypto.PublicKey, - settingsConfig config.GeneralSettingsConfig, - log *logger.Logger, -) (sharding.Coordinator, core.NodeType, error) { - +func getShardIdFromNodePubKey(pubKey crypto.PublicKey, nodesConfig *sharding.NodesSetup) (uint32, error) { if pubKey == nil { - return nil, "", errors.New("nil public key, could not create shard coordinator") + return 0, errors.New("nil public key") } publicKey, err := pubKey.ToByteArray() if err != nil { - return nil, "", err + return 0, err } selfShardId, err := nodesConfig.GetShardIDForPubKey(publicKey) + if err != nil { + return 0, err + } + + return selfShardId, err +} + +func createShardCoordinator( + nodesConfig *sharding.NodesSetup, + pubKey crypto.PublicKey, + settingsConfig config.GeneralSettingsConfig, + log *logger.Logger, +) (sharding.Coordinator, core.NodeType, error) { + selfShardId, err := getShardIdFromNodePubKey(pubKey, nodesConfig) nodeType := core.NodeTypeValidator if err == sharding.ErrPublicKeyNotFoundInGenesis { nodeType = core.NodeTypeObserver @@ -993,6 +1036,55 @@ func createShardCoordinator( return shardCoordinator, nodeType, nil } +func createNodesCoordinator( + nodesConfig *sharding.NodesSetup, + settingsConfig config.GeneralSettingsConfig, + pubKey crypto.PublicKey, + hasher hashing.Hasher, +) (sharding.NodesCoordinator, error) { + + shardId, err := getShardIdFromNodePubKey(pubKey, nodesConfig) + if err == sharding.ErrPublicKeyNotFoundInGenesis { + shardId, err = processDestinationShardAsObserver(settingsConfig) + } + if err != nil { + return nil, err + } + + nbShards := nodesConfig.NumberOfShards() + shardConsensusGroupSize := int(nodesConfig.MetaChainConsensusGroupSize) + metaConsensusGroupSize := int(nodesConfig.ConsensusGroupSize) + initNodesInfo := nodesConfig.InitialNodesInfo() + initValidators := make(map[uint32][]sharding.Validator) + + for shardId, nodeInfoList := range initNodesInfo { + validators := make([]sharding.Validator, 0) + for _, nodeInfo := range nodeInfoList { + validator, err := sharding.NewValidator(big.NewInt(0), 0, nodeInfo.PubKey(), nodeInfo.Address()) + if err != nil { + return nil, err + } + + validators = append(validators, validator) + } + initValidators[shardId] = validators + } + + 
nodesCoordinator, err := sharding.NewIndexHashedNodesCoordinator( + shardConsensusGroupSize, + metaConsensusGroupSize, + hasher, + shardId, + nbShards, + initValidators, + ) + if err != nil { + return nil, err + } + + return nodesCoordinator, nil +} + func processDestinationShardAsObserver(settingsConfig config.GeneralSettingsConfig) (uint32, error) { destShard := strings.ToLower(settingsConfig.DestinationShardAsObserver) if len(destShard) == 0 { @@ -1061,6 +1153,7 @@ func createNode( privKey crypto.PrivateKey, pubKey crypto.PublicKey, shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, core *factory.Core, state *factory.State, data *factory.Data, @@ -1092,6 +1185,7 @@ func createNode( node.WithGenesisTime(time.Unix(nodesConfig.StartTime, 0)), node.WithRounder(process.Rounder), node.WithShardCoordinator(shardCoordinator), + node.WithNodesCoordinator(nodesCoordinator), node.WithUint64ByteSliceConverter(core.Uint64ByteSliceConverter), node.WithSingleSigner(crypto.SingleSigner), node.WithMultiSigner(crypto.MultiSigner), diff --git a/config/config.go b/config/config.go index 876a45998c4..4c3dccc0447 100644 --- a/config/config.go +++ b/config/config.go @@ -56,6 +56,12 @@ type NTPConfig struct { Version int } +// EconomicsConfig will hold the reward configuration +type EconomicsConfig struct { + CommunityAddress string + BurnAddress string +} + // Config will hold the entire application configuration parameters type Config struct { MiniBlocksStorage StorageConfig @@ -63,6 +69,7 @@ type Config struct { BlockHeaderStorage StorageConfig TxStorage StorageConfig UnsignedTransactionStorage StorageConfig + RewardTxStorage StorageConfig ShardHdrNonceHashStorage StorageConfig MetaHdrNonceHashStorage StorageConfig @@ -80,6 +87,7 @@ type Config struct { BlockHeaderNoncesDataPool CacheConfig TxDataPool CacheConfig UnsignedTransactionDataPool CacheConfig + RewardTransactionDataPool CacheConfig MetaBlockBodyDataPool CacheConfig MiniBlockHeaderHashesDataPool CacheConfig @@ -98,7 +106,8 @@ type Config struct { Consensus TypeConfig Explorer ExplorerConfig - NTPConfig NTPConfig + NTPConfig NTPConfig + EconomicsConfig EconomicsConfig } // NodeConfig will hold basic p2p settings diff --git a/config/tomlConfig_test.go b/config/tomlConfig_test.go index 460a36f97e2..5773ddc42b5 100644 --- a/config/tomlConfig_test.go +++ b/config/tomlConfig_test.go @@ -32,6 +32,9 @@ func TestTomlParser(t *testing.T) { consensusType := "bn" + communityAddress := "community" + burnAddress := "burnAddress" + cfgExpected := Config{ MiniBlocksStorage: StorageConfig{ Cache: CacheConfig{ @@ -71,6 +74,10 @@ func TestTomlParser(t *testing.T) { Consensus: TypeConfig{ Type: consensusType, }, + EconomicsConfig: EconomicsConfig{ + CommunityAddress: communityAddress, + BurnAddress: burnAddress, + }, } testString := ` @@ -107,6 +114,10 @@ func TestTomlParser(t *testing.T) { [Consensus] Type = "` + consensusType + `" + +[EconomicsConfig] + CommunityAddress = "` + communityAddress + `" + BurnAddress = "` + burnAddress + `" ` cfg := Config{} diff --git a/consensus/broadcast/shardChainMessenger_test.go b/consensus/broadcast/shardChainMessenger_test.go index 7f9a959ba58..ab21755bd59 100644 --- a/consensus/broadcast/shardChainMessenger_test.go +++ b/consensus/broadcast/shardChainMessenger_test.go @@ -1,7 +1,6 @@ package broadcast_test import ( - "github.com/ElrondNetwork/elrond-go/process/factory" "testing" "time" @@ -9,6 +8,7 @@ import ( "github.com/ElrondNetwork/elrond-go/consensus/mock" 
"github.com/ElrondNetwork/elrond-go/consensus/spos" "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/process/factory" "github.com/stretchr/testify/assert" ) diff --git a/consensus/interface.go b/consensus/interface.go index 55c7637db9c..94c29bac2ef 100644 --- a/consensus/interface.go +++ b/consensus/interface.go @@ -1,7 +1,6 @@ package consensus import ( - "math/big" "time" "github.com/ElrondNetwork/elrond-go/data" @@ -53,29 +52,6 @@ type SposFactory interface { IsInterfaceNil() bool } -// Validator defines what a consensus validator implementation should do. -type Validator interface { - Stake() *big.Int - Rating() int32 - PubKey() []byte - IsInterfaceNil() bool -} - -// ValidatorGroupSelector defines the behaviour of a struct able to do validator group selection -type ValidatorGroupSelector interface { - PublicKeysSelector - LoadEligibleList(eligibleList []Validator) error - ComputeValidatorsGroup(randomness []byte) (validatorsGroup []Validator, err error) - ConsensusGroupSize() int - SetConsensusGroupSize(int) error -} - -// PublicKeysSelector allows retrieval of eligible validators public keys selected by a bitmap -type PublicKeysSelector interface { - GetSelectedPublicKeys(selection []byte) (publicKeys []string, err error) - IsInterfaceNil() bool -} - // BroadcastMessenger defines the behaviour of the broadcast messages by the consensus group type BroadcastMessenger interface { BroadcastBlock(data.BodyHandler, data.HeaderHandler) error diff --git a/consensus/mock/blockProcessorMock.go b/consensus/mock/blockProcessorMock.go index 8129a3e29b7..d77ac805df7 100644 --- a/consensus/mock/blockProcessorMock.go +++ b/consensus/mock/blockProcessorMock.go @@ -38,7 +38,7 @@ func (blProcMock *BlockProcessorMock) RevertAccountState() { blProcMock.RevertAccountStateCalled() } -// CreateTxBlockBody mocks the creation of a transaction block body +// CreateBlockBody mocks the creation of a transaction block body func (blProcMock *BlockProcessorMock) CreateBlockBody(round uint64, haveTime func() bool) (data.BodyHandler, error) { return blProcMock.CreateBlockCalled(round, haveTime) } @@ -67,6 +67,9 @@ func (blProcMock BlockProcessorMock) AddLastNotarizedHdr(shardId uint32, process blProcMock.AddLastNotarizedHdrCalled(shardId, processedHdr) } +func (blProcMock BlockProcessorMock) SetConsensusData(randomness []byte, round uint64, epoch uint32, shardId uint32) { +} + // IsInterfaceNil returns true if there is no value under the interface func (blProcMock *BlockProcessorMock) IsInterfaceNil() bool { if blProcMock == nil { diff --git a/consensus/mock/consensusDataContainerMock.go b/consensus/mock/consensusDataContainerMock.go index 05f97e29f2f..d360818ea99 100644 --- a/consensus/mock/consensusDataContainerMock.go +++ b/consensus/mock/consensusDataContainerMock.go @@ -26,7 +26,7 @@ type ConsensusCoreMock struct { rounder consensus.Rounder shardCoordinator sharding.Coordinator syncTimer ntp.SyncTimer - validatorGroupSelector consensus.ValidatorGroupSelector + validatorGroupSelector sharding.NodesCoordinator } func (cdc *ConsensusCoreMock) Blockchain() data.ChainHandler { @@ -77,7 +77,7 @@ func (cdc *ConsensusCoreMock) SyncTimer() ntp.SyncTimer { return cdc.syncTimer } -func (cdc *ConsensusCoreMock) ValidatorGroupSelector() consensus.ValidatorGroupSelector { +func (cdc *ConsensusCoreMock) NodesCoordinator() sharding.NodesCoordinator { return cdc.validatorGroupSelector } @@ -124,7 +124,7 @@ func (cdc *ConsensusCoreMock) SetSyncTimer(syncTimer ntp.SyncTimer) { cdc.syncTimer = 
syncTimer } -func (cdc *ConsensusCoreMock) SetValidatorGroupSelector(validatorGroupSelector consensus.ValidatorGroupSelector) { +func (cdc *ConsensusCoreMock) SetValidatorGroupSelector(validatorGroupSelector sharding.NodesCoordinator) { cdc.validatorGroupSelector = validatorGroupSelector } diff --git a/consensus/mock/consensusStateMock.go b/consensus/mock/consensusStateMock.go index 84e17dee6ee..ae0a2562ed9 100644 --- a/consensus/mock/consensusStateMock.go +++ b/consensus/mock/consensusStateMock.go @@ -1,13 +1,16 @@ package mock -import "github.com/ElrondNetwork/elrond-go/consensus" +import ( + "github.com/ElrondNetwork/elrond-go/consensus" + "github.com/ElrondNetwork/elrond-go/sharding" +) type ConsensusStateMock struct { ResetConsensusStateCalled func() IsNodeLeaderInCurrentRoundCalled func(node string) bool IsSelfLeaderInCurrentRoundCalled func() bool GetLeaderCalled func() (string, error) - GetNextConsensusGroupCalled func(randomSource string, vgs consensus.ValidatorGroupSelector) ([]string, error) + GetNextConsensusGroupCalled func(randomSource string, vgs sharding.NodesCoordinator) ([]string, error) IsConsensusDataSetCalled func() bool IsConsensusDataEqualCalled func(data []byte) bool IsJobDoneCalled func(node string, currentSubroundId int) bool @@ -17,13 +20,12 @@ type ConsensusStateMock struct { IsBlockBodyAlreadyReceivedCalled func() bool IsHeaderAlreadyReceivedCalled func() bool CanDoSubroundJobCalled func(currentSubroundId int) bool - CanProcessReceivedMessageCalled func(cnsDta consensus.Message, currentRoundIndex int32, - currentSubroundId int) bool - GenerateBitmapCalled func(subroundId int) []byte - ProcessingBlockCalled func() bool - SetProcessingBlockCalled func(processingBlock bool) - ConsensusGroupSizeCalled func() int - SetThresholdCalled func(subroundId int, threshold int) + CanProcessReceivedMessageCalled func(cnsDta consensus.Message, currentRoundIndex int32, currentSubroundId int) bool + GenerateBitmapCalled func(subroundId int) []byte + ProcessingBlockCalled func() bool + SetProcessingBlockCalled func(processingBlock bool) + ConsensusGroupSizeCalled func() int + SetThresholdCalled func(subroundId int, threshold int) } func (cnsm *ConsensusStateMock) ResetConsensusState() { @@ -42,9 +44,10 @@ func (cnsm *ConsensusStateMock) GetLeader() (string, error) { return cnsm.GetLeaderCalled() } -func (cnsm *ConsensusStateMock) GetNextConsensusGroup(randomSource string, - vgs consensus.ValidatorGroupSelector) ([]string, - error) { +func (cnsm *ConsensusStateMock) GetNextConsensusGroup( + randomSource string, + vgs sharding.NodesCoordinator, +) ([]string, error) { return cnsm.GetNextConsensusGroupCalled(randomSource, vgs) } @@ -84,8 +87,11 @@ func (cnsm *ConsensusStateMock) CanDoSubroundJob(currentSubroundId int) bool { return cnsm.CanDoSubroundJobCalled(currentSubroundId) } -func (cnsm *ConsensusStateMock) CanProcessReceivedMessage(cnsDta consensus.Message, currentRoundIndex int32, - currentSubroundId int) bool { +func (cnsm *ConsensusStateMock) CanProcessReceivedMessage( + cnsDta consensus.Message, + currentRoundIndex int32, + currentSubroundId int, +) bool { return cnsm.CanProcessReceivedMessageCalled(cnsDta, currentRoundIndex, currentSubroundId) } diff --git a/consensus/mock/mockTestInitializer.go b/consensus/mock/mockTestInitializer.go index d355fae6784..e45adea2759 100644 --- a/consensus/mock/mockTestInitializer.go +++ b/consensus/mock/mockTestInitializer.go @@ -122,7 +122,7 @@ func InitConsensusCore() *ConsensusCoreMock { rounderMock := &RounderMock{} shardCoordinatorMock 
:= ShardCoordinatorMock{} syncTimerMock := &SyncTimerMock{} - validatorGroupSelector := &ValidatorGroupSelectorMock{} + validatorGroupSelector := &NodesCoordinatorMock{} container := &ConsensusCoreMock{ blockChain, diff --git a/consensus/mock/nodesCoordinatorMock.go b/consensus/mock/nodesCoordinatorMock.go new file mode 100644 index 00000000000..56d1b14bb22 --- /dev/null +++ b/consensus/mock/nodesCoordinatorMock.go @@ -0,0 +1,107 @@ +package mock + +import ( + "math/big" + + "github.com/ElrondNetwork/elrond-go/sharding" +) + +type NodesCoordinatorMock struct { + ComputeValidatorsGroupCalled func(randomness []byte, round uint64, shardId uint32) ([]sharding.Validator, error) + GetValidatorsPublicKeysCalled func(randomness []byte, round uint64, shardId uint32) ([]string, error) + GetValidatorsRewardsAddressesCalled func(randomness []byte, round uint64, shardId uint32) ([]string, error) +} + +func (ncm *NodesCoordinatorMock) ComputeValidatorsGroup( + randomness []byte, + round uint64, + shardId uint32, +) (validatorsGroup []sharding.Validator, err error) { + + if ncm.ComputeValidatorsGroupCalled != nil { + return ncm.ComputeValidatorsGroupCalled(randomness, round, shardId) + } + + list := []sharding.Validator{ + NewValidatorMock(big.NewInt(0), 0, []byte("A"), []byte("AA")), + NewValidatorMock(big.NewInt(0), 0, []byte("B"), []byte("BB")), + NewValidatorMock(big.NewInt(0), 0, []byte("C"), []byte("CC")), + NewValidatorMock(big.NewInt(0), 0, []byte("D"), []byte("DD")), + NewValidatorMock(big.NewInt(0), 0, []byte("E"), []byte("EE")), + NewValidatorMock(big.NewInt(0), 0, []byte("F"), []byte("FF")), + NewValidatorMock(big.NewInt(0), 0, []byte("G"), []byte("GG")), + NewValidatorMock(big.NewInt(0), 0, []byte("H"), []byte("HH")), + NewValidatorMock(big.NewInt(0), 0, []byte("I"), []byte("II")), + } + + return list, nil +} + +func (ncm *NodesCoordinatorMock) GetValidatorsPublicKeys(randomness []byte, round uint64, shardId uint32) ([]string, error) { + if ncm.GetValidatorsPublicKeysCalled != nil { + return ncm.GetValidatorsPublicKeysCalled(randomness, round, shardId) + } + + validators, err := ncm.ComputeValidatorsGroup(randomness, round, shardId) + if err != nil { + return nil, err + } + + pubKeys := make([]string, 0) + + for _, v := range validators { + pubKeys = append(pubKeys, string(v.PubKey())) + } + + return pubKeys, nil +} + +func (ncm *NodesCoordinatorMock) GetValidatorsRewardsAddresses( + randomness []byte, + round uint64, + shardId uint32, +) ([]string, error) { + if ncm.GetValidatorsPublicKeysCalled != nil { + return ncm.GetValidatorsRewardsAddressesCalled(randomness, round, shardId) + } + + validators, err := ncm.ComputeValidatorsGroup(randomness, round, shardId) + if err != nil { + return nil, err + } + + addresses := make([]string, 0) + for _, v := range validators { + addresses = append(addresses, string(v.Address())) + } + + return addresses, nil +} + +func (ncm *NodesCoordinatorMock) ConsensusGroupSize(shardId uint32) int { + panic("implement me") +} + +func (ncm *NodesCoordinatorMock) SetNodesPerShards(map[uint32][]sharding.Validator) error { + return nil +} + +func (ncm *NodesCoordinatorMock) SetConsensusGroupSize(int) error { + panic("implement me") +} + +func (ncm *NodesCoordinatorMock) GetSelectedPublicKeys(selection []byte, shardId uint32) (publicKeys []string, err error) { + panic("implement me") +} + +func (ncm *NodesCoordinatorMock) GetValidatorWithPublicKey(publicKey []byte) (sharding.Validator, uint32, error) { + panic("implement me") +} + +// IsInterfaceNil returns true if 
there is no value under the interface +func (ncm *NodesCoordinatorMock) IsInterfaceNil() bool { + if ncm == nil { + return true + } + return false +} diff --git a/consensus/mock/validatorGroupSelectorMock.go b/consensus/mock/validatorGroupSelectorMock.go deleted file mode 100644 index 31ff0a70fd6..00000000000 --- a/consensus/mock/validatorGroupSelectorMock.go +++ /dev/null @@ -1,55 +0,0 @@ -package mock - -import ( - "math/big" - - "github.com/ElrondNetwork/elrond-go/consensus" -) - -type ValidatorGroupSelectorMock struct { - ComputeValidatorsGroupCalled func([]byte) ([]consensus.Validator, error) -} - -func (vgsm ValidatorGroupSelectorMock) ComputeValidatorsGroup(randomness []byte) (validatorsGroup []consensus.Validator, err error) { - if vgsm.ComputeValidatorsGroupCalled != nil { - return vgsm.ComputeValidatorsGroupCalled(randomness) - } - - list := []consensus.Validator{ - NewValidatorMock(big.NewInt(0), 0, []byte("A")), - NewValidatorMock(big.NewInt(0), 0, []byte("B")), - NewValidatorMock(big.NewInt(0), 0, []byte("C")), - NewValidatorMock(big.NewInt(0), 0, []byte("D")), - NewValidatorMock(big.NewInt(0), 0, []byte("E")), - NewValidatorMock(big.NewInt(0), 0, []byte("F")), - NewValidatorMock(big.NewInt(0), 0, []byte("G")), - NewValidatorMock(big.NewInt(0), 0, []byte("H")), - NewValidatorMock(big.NewInt(0), 0, []byte("I")), - } - - return list, nil -} - -func (vgsm ValidatorGroupSelectorMock) ConsensusGroupSize() int { - panic("implement me") -} - -func (vgsm ValidatorGroupSelectorMock) LoadEligibleList(eligibleList []consensus.Validator) error { - return nil -} - -func (vgsm ValidatorGroupSelectorMock) SetConsensusGroupSize(int) error { - panic("implement me") -} - -func (vgsm ValidatorGroupSelectorMock) GetSelectedPublicKeys(selection []byte) (publicKeys []string, err error) { - panic("implement me") -} - -// IsInterfaceNil returns true if there is no value under the interface -func (vgsm *ValidatorGroupSelectorMock) IsInterfaceNil() bool { - if vgsm == nil { - return true - } - return false -} diff --git a/consensus/mock/validatorMock.go b/consensus/mock/validatorMock.go index 517c49e6dcb..56621342a59 100644 --- a/consensus/mock/validatorMock.go +++ b/consensus/mock/validatorMock.go @@ -5,13 +5,14 @@ import ( ) type ValidatorMock struct { - stake *big.Int - rating int32 - pubKey []byte + stake *big.Int + rating int32 + pubKey []byte + address []byte } -func NewValidatorMock(stake *big.Int, rating int32, pubKey []byte) *ValidatorMock { - return &ValidatorMock{stake: stake, rating: rating, pubKey: pubKey} +func NewValidatorMock(stake *big.Int, rating int32, pubKey []byte, address []byte) *ValidatorMock { + return &ValidatorMock{stake: stake, rating: rating, pubKey: pubKey, address: address} } func (vm *ValidatorMock) Stake() *big.Int { @@ -26,6 +27,10 @@ func (vm *ValidatorMock) PubKey() []byte { return vm.pubKey } +func (vm *ValidatorMock) Address() []byte { + return vm.address +} + // IsInterfaceNil returns true if there is no value under the interface func (vm *ValidatorMock) IsInterfaceNil() bool { if vm == nil { diff --git a/consensus/spos/bls/export_test.go b/consensus/spos/bls/export_test.go index de65b8dd80f..41c9fcbb471 100644 --- a/consensus/spos/bls/export_test.go +++ b/consensus/spos/bls/export_test.go @@ -60,8 +60,8 @@ func (fct *factory) SyncTimer() ntp.SyncTimer { return fct.consensusCore.SyncTimer() } -func (fct *factory) ValidatorGroupSelector() consensus.ValidatorGroupSelector { - return fct.consensusCore.ValidatorGroupSelector() +func (fct *factory) NodesCoordinator() 
sharding.NodesCoordinator { + return fct.consensusCore.NodesCoordinator() } func (fct *factory) Worker() spos.WorkerHandler { diff --git a/consensus/spos/bn/export_test.go b/consensus/spos/bn/export_test.go index af3016b28f5..45e79808b5c 100644 --- a/consensus/spos/bn/export_test.go +++ b/consensus/spos/bn/export_test.go @@ -60,8 +60,8 @@ func (fct *factory) SyncTimer() ntp.SyncTimer { return fct.consensusCore.SyncTimer() } -func (fct *factory) ValidatorGroupSelector() consensus.ValidatorGroupSelector { - return fct.consensusCore.ValidatorGroupSelector() +func (fct *factory) NodesCoordinator() sharding.NodesCoordinator { + return fct.consensusCore.NodesCoordinator() } func (fct *factory) Worker() spos.WorkerHandler { diff --git a/consensus/spos/commonSubround/subroundStartRound.go b/consensus/spos/commonSubround/subroundStartRound.go index 8f294fd50aa..c4a164fcfbd 100644 --- a/consensus/spos/commonSubround/subroundStartRound.go +++ b/consensus/spos/commonSubround/subroundStartRound.go @@ -204,11 +204,20 @@ func (sr *SubroundStartRound) generateNextConsensusGroup(roundIndex int64) error } } - randomSource := fmt.Sprintf("%d-%s", roundIndex, core.ToB64(currentHeader.GetRandSeed())) + randomSeed := currentHeader.GetRandSeed() - log.Info(fmt.Sprintf("random source used to determine the next consensus group is: %s\n", randomSource)) + log.Info(fmt.Sprintf("random source used to determine the next consensus group is: %s\n", + core.ToB64(randomSeed)), + ) + + shardId := sr.ShardCoordinator().SelfId() - nextConsensusGroup, err := sr.GetNextConsensusGroup(randomSource, sr.ValidatorGroupSelector()) + nextConsensusGroup, _, err := sr.GetNextConsensusGroup( + randomSeed, + uint64(sr.RoundIndex), + shardId, + sr.NodesCoordinator(), + ) if err != nil { return err } @@ -224,5 +233,7 @@ func (sr *SubroundStartRound) generateNextConsensusGroup(roundIndex int64) error sr.SetConsensusGroup(nextConsensusGroup) + sr.BlockProcessor().SetConsensusData(randomSeed, uint64(sr.RoundIndex), currentHeader.GetEpoch(), shardId) + return nil } diff --git a/consensus/spos/commonSubround/subroundStartRound_test.go b/consensus/spos/commonSubround/subroundStartRound_test.go index 22b8163861d..77ecb1d5729 100644 --- a/consensus/spos/commonSubround/subroundStartRound_test.go +++ b/consensus/spos/commonSubround/subroundStartRound_test.go @@ -5,10 +5,10 @@ import ( "testing" "time" - "github.com/ElrondNetwork/elrond-go/consensus" "github.com/ElrondNetwork/elrond-go/consensus/mock" "github.com/ElrondNetwork/elrond-go/consensus/spos" "github.com/ElrondNetwork/elrond-go/consensus/spos/commonSubround" + "github.com/ElrondNetwork/elrond-go/sharding" "github.com/stretchr/testify/assert" ) @@ -25,8 +25,12 @@ func defaultSubroundStartRoundFromSubround(sr *spos.Subround) (*commonSubround.S return startRound, err } -func defaultSubround(consensusState *spos.ConsensusState, ch chan bool, container spos.ConsensusCoreHandler) (*spos.Subround, - error) { +func defaultSubround( + consensusState *spos.ConsensusState, + ch chan bool, + container spos.ConsensusCoreHandler, +) (*spos.Subround, error) { + return spos.NewSubround( -1, int(SrStartRound), @@ -319,9 +323,9 @@ func TestSubroundStartRound_InitCurrentRoundShouldReturnFalseWhenShouldSyncRetur func TestSubroundStartRound_InitCurrentRoundShouldReturnFalseWhenGenerateNextConsensusGroupErr(t *testing.T) { t.Parallel() - validatorGroupSelector := &mock.ValidatorGroupSelectorMock{} + validatorGroupSelector := &mock.NodesCoordinatorMock{} err := errors.New("error") - 
validatorGroupSelector.ComputeValidatorsGroupCalled = func(bytes []byte) ([]consensus.Validator, error) { + validatorGroupSelector.ComputeValidatorsGroupCalled = func(bytes []byte, round uint64, shardId uint32) ([]sharding.Validator, error) { return nil, err } container := mock.InitConsensusCore() @@ -336,9 +340,13 @@ func TestSubroundStartRound_InitCurrentRoundShouldReturnFalseWhenGenerateNextCon func TestSubroundStartRound_InitCurrentRoundShouldReturnFalseWhenGetLeaderErr(t *testing.T) { t.Parallel() - validatorGroupSelector := &mock.ValidatorGroupSelectorMock{} - validatorGroupSelector.ComputeValidatorsGroupCalled = func(bytes []byte) ([]consensus.Validator, error) { - return make([]consensus.Validator, 0), nil + validatorGroupSelector := &mock.NodesCoordinatorMock{} + validatorGroupSelector.ComputeValidatorsGroupCalled = func( + bytes []byte, + round uint64, + shardId uint32, + ) ([]sharding.Validator, error) { + return make([]sharding.Validator, 0), nil } container := mock.InitConsensusCore() @@ -423,10 +431,14 @@ func TestSubroundStartRound_InitCurrentRoundShouldReturnTrue(t *testing.T) { func TestSubroundStartRound_GenerateNextConsensusGroupShouldReturnErr(t *testing.T) { t.Parallel() - validatorGroupSelector := &mock.ValidatorGroupSelectorMock{} + validatorGroupSelector := &mock.NodesCoordinatorMock{} err := errors.New("error") - validatorGroupSelector.ComputeValidatorsGroupCalled = func(bytes []byte) ([]consensus.Validator, error) { + validatorGroupSelector.ComputeValidatorsGroupCalled = func( + bytes []byte, + round uint64, + shardId uint32, + ) ([]sharding.Validator, error) { return nil, err } container := mock.InitConsensusCore() diff --git a/consensus/spos/consensusCore.go b/consensus/spos/consensusCore.go index 35f9989cf97..88aef489bb8 100644 --- a/consensus/spos/consensusCore.go +++ b/consensus/spos/consensusCore.go @@ -14,21 +14,21 @@ import ( // ConsensusCore implements ConsensusCoreHandler and provides access to common functionalities // for the rest of the consensus structures type ConsensusCore struct { - blockChain data.ChainHandler - blockProcessor process.BlockProcessor - blocksTracker process.BlocksTracker - bootstrapper process.Bootstrapper - broadcastMessenger consensus.BroadcastMessenger - chronologyHandler consensus.ChronologyHandler - hasher hashing.Hasher - marshalizer marshal.Marshalizer - blsPrivateKey crypto.PrivateKey - blsSingleSigner crypto.SingleSigner - multiSigner crypto.MultiSigner - rounder consensus.Rounder - shardCoordinator sharding.Coordinator - syncTimer ntp.SyncTimer - validatorGroupSelector consensus.ValidatorGroupSelector + blockChain data.ChainHandler + blockProcessor process.BlockProcessor + blocksTracker process.BlocksTracker + bootstrapper process.Bootstrapper + broadcastMessenger consensus.BroadcastMessenger + chronologyHandler consensus.ChronologyHandler + hasher hashing.Hasher + marshalizer marshal.Marshalizer + blsPrivateKey crypto.PrivateKey + blsSingleSigner crypto.SingleSigner + multiSigner crypto.MultiSigner + rounder consensus.Rounder + shardCoordinator sharding.Coordinator + nodesCoordinator sharding.NodesCoordinator + syncTimer ntp.SyncTimer } // NewConsensusCore creates a new ConsensusCore instance @@ -46,8 +46,9 @@ func NewConsensusCore( multiSigner crypto.MultiSigner, rounder consensus.Rounder, shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, syncTimer ntp.SyncTimer, - validatorGroupSelector consensus.ValidatorGroupSelector) (*ConsensusCore, error) { +) (*ConsensusCore, error) { 
consensusCore := &ConsensusCore{ blockChain, @@ -63,8 +64,8 @@ func NewConsensusCore( multiSigner, rounder, shardCoordinator, + nodesCoordinator, syncTimer, - validatorGroupSelector, } err := ValidateConsensusCore(consensusCore) @@ -135,9 +136,9 @@ func (cc *ConsensusCore) SyncTimer() ntp.SyncTimer { return cc.syncTimer } -// ValidatorGroupSelector gets the ValidatorGroupSelector stored in the ConsensusCore -func (cc *ConsensusCore) ValidatorGroupSelector() consensus.ValidatorGroupSelector { - return cc.validatorGroupSelector +// NodesCoordinator gets the NodesCoordinator stored in the ConsensusCore +func (cc *ConsensusCore) NodesCoordinator() sharding.NodesCoordinator { + return cc.nodesCoordinator } // RandomnessPrivateKey returns the BLS private key stored in the ConsensusStore diff --git a/consensus/spos/consensusCoreValidator.go b/consensus/spos/consensusCoreValidator.go index cda289a634f..50ca27e1a36 100644 --- a/consensus/spos/consensusCoreValidator.go +++ b/consensus/spos/consensusCoreValidator.go @@ -41,7 +41,7 @@ func ValidateConsensusCore(container ConsensusCoreHandler) error { if container.SyncTimer() == nil || container.SyncTimer().IsInterfaceNil() { return ErrNilSyncTimer } - if container.ValidatorGroupSelector() == nil || container.ValidatorGroupSelector().IsInterfaceNil() { + if container.NodesCoordinator() == nil || container.NodesCoordinator().IsInterfaceNil() { return ErrNilValidatorGroupSelector } if container.RandomnessPrivateKey() == nil || container.RandomnessPrivateKey().IsInterfaceNil() { diff --git a/consensus/spos/consensusCoreValidator_test.go b/consensus/spos/consensusCoreValidator_test.go index b09cf6f0fdc..d55fac01683 100644 --- a/consensus/spos/consensusCoreValidator_test.go +++ b/consensus/spos/consensusCoreValidator_test.go @@ -22,24 +22,24 @@ func initConsensusDataContainer() *ConsensusCore { rounderMock := &mock.RounderMock{} shardCoordinatorMock := mock.ShardCoordinatorMock{} syncTimerMock := &mock.SyncTimerMock{} - validatorGroupSelector := &mock.ValidatorGroupSelectorMock{} + validatorGroupSelector := &mock.NodesCoordinatorMock{} return &ConsensusCore{ - blockChain: blockChain, - blockProcessor: blockProcessorMock, - blocksTracker: blocksTrackerMock, - bootstrapper: bootstrapperMock, - broadcastMessenger: broadcastMessengerMock, - chronologyHandler: chronologyHandlerMock, - hasher: hasherMock, - marshalizer: marshalizerMock, - blsPrivateKey: blsPrivateKeyMock, - blsSingleSigner: blsSingleSignerMock, - multiSigner: multiSignerMock, - rounder: rounderMock, - shardCoordinator: shardCoordinatorMock, - syncTimer: syncTimerMock, - validatorGroupSelector: validatorGroupSelector, + blockChain: blockChain, + blockProcessor: blockProcessorMock, + blocksTracker: blocksTrackerMock, + bootstrapper: bootstrapperMock, + broadcastMessenger: broadcastMessengerMock, + chronologyHandler: chronologyHandlerMock, + hasher: hasherMock, + marshalizer: marshalizerMock, + blsPrivateKey: blsPrivateKeyMock, + blsSingleSigner: blsSingleSignerMock, + multiSigner: multiSignerMock, + rounder: rounderMock, + shardCoordinator: shardCoordinatorMock, + syncTimer: syncTimerMock, + nodesCoordinator: validatorGroupSelector, } } @@ -157,7 +157,7 @@ func TestConsensusContainerValidator_ValidateNilValidatorGroupSelectorShouldFail t.Parallel() container := initConsensusDataContainer() - container.validatorGroupSelector = nil + container.nodesCoordinator = nil err := ValidateConsensusCore(container) diff --git a/consensus/spos/consensusCore_test.go b/consensus/spos/consensusCore_test.go index 
21df3eaa9ff..11cd925ffdd 100644 --- a/consensus/spos/consensusCore_test.go +++ b/consensus/spos/consensusCore_test.go @@ -27,8 +27,9 @@ func TestConsensusCore_WithNilBlockchainShouldFail(t *testing.T) { consensusCoreMock.MultiSigner(), consensusCoreMock.Rounder(), consensusCoreMock.ShardCoordinator(), + consensusCoreMock.NodesCoordinator(), consensusCoreMock.SyncTimer(), - consensusCoreMock.ValidatorGroupSelector()) + ) assert.Nil(t, consensusCore) assert.Equal(t, spos.ErrNilBlockChain, err) @@ -53,8 +54,9 @@ func TestConsensusCore_WithNilBlockProcessorShouldFail(t *testing.T) { consensusCoreMock.MultiSigner(), consensusCoreMock.Rounder(), consensusCoreMock.ShardCoordinator(), + consensusCoreMock.NodesCoordinator(), consensusCoreMock.SyncTimer(), - consensusCoreMock.ValidatorGroupSelector()) + ) assert.Nil(t, consensusCore) assert.Equal(t, spos.ErrNilBlockProcessor, err) @@ -79,8 +81,9 @@ func TestConsensusCore_WithNilBlocksTrackerShouldFail(t *testing.T) { consensusCoreMock.MultiSigner(), consensusCoreMock.Rounder(), consensusCoreMock.ShardCoordinator(), + consensusCoreMock.NodesCoordinator(), consensusCoreMock.SyncTimer(), - consensusCoreMock.ValidatorGroupSelector()) + ) assert.Nil(t, consensusCore) assert.Equal(t, spos.ErrNilBlocksTracker, err) @@ -105,8 +108,9 @@ func TestConsensusCore_WithNilBootstrapperShouldFail(t *testing.T) { consensusCoreMock.MultiSigner(), consensusCoreMock.Rounder(), consensusCoreMock.ShardCoordinator(), + consensusCoreMock.NodesCoordinator(), consensusCoreMock.SyncTimer(), - consensusCoreMock.ValidatorGroupSelector()) + ) assert.Nil(t, consensusCore) assert.Equal(t, spos.ErrNilBootstrapper, err) @@ -131,8 +135,9 @@ func TestConsensusCore_WithNilBroadcastMessengerShouldFail(t *testing.T) { consensusCoreMock.MultiSigner(), consensusCoreMock.Rounder(), consensusCoreMock.ShardCoordinator(), + consensusCoreMock.NodesCoordinator(), consensusCoreMock.SyncTimer(), - consensusCoreMock.ValidatorGroupSelector()) + ) assert.Nil(t, consensusCore) assert.Equal(t, spos.ErrNilBroadcastMessenger, err) @@ -157,8 +162,9 @@ func TestConsensusCore_WithNilChronologyShouldFail(t *testing.T) { consensusCoreMock.MultiSigner(), consensusCoreMock.Rounder(), consensusCoreMock.ShardCoordinator(), + consensusCoreMock.NodesCoordinator(), consensusCoreMock.SyncTimer(), - consensusCoreMock.ValidatorGroupSelector()) + ) assert.Nil(t, consensusCore) assert.Equal(t, spos.ErrNilChronologyHandler, err) @@ -183,8 +189,9 @@ func TestConsensusCore_WithNilHasherShouldFail(t *testing.T) { consensusCoreMock.MultiSigner(), consensusCoreMock.Rounder(), consensusCoreMock.ShardCoordinator(), + consensusCoreMock.NodesCoordinator(), consensusCoreMock.SyncTimer(), - consensusCoreMock.ValidatorGroupSelector()) + ) assert.Nil(t, consensusCore) assert.Equal(t, spos.ErrNilHasher, err) @@ -209,8 +216,9 @@ func TestConsensusCore_WithNilMarshalizerShouldFail(t *testing.T) { consensusCoreMock.MultiSigner(), consensusCoreMock.Rounder(), consensusCoreMock.ShardCoordinator(), + consensusCoreMock.NodesCoordinator(), consensusCoreMock.SyncTimer(), - consensusCoreMock.ValidatorGroupSelector()) + ) assert.Nil(t, consensusCore) assert.Equal(t, spos.ErrNilMarshalizer, err) @@ -235,8 +243,9 @@ func TestConsensusCore_WithNilBlsPrivateKeyShouldFail(t *testing.T) { consensusCoreMock.MultiSigner(), consensusCoreMock.Rounder(), consensusCoreMock.ShardCoordinator(), + consensusCoreMock.NodesCoordinator(), consensusCoreMock.SyncTimer(), - consensusCoreMock.ValidatorGroupSelector()) + ) assert.Nil(t, consensusCore) assert.Equal(t, 
spos.ErrNilBlsPrivateKey, err) @@ -261,8 +270,9 @@ func TestConsensusCore_WithNilBlsSingleSignerShouldFail(t *testing.T) { consensusCoreMock.MultiSigner(), consensusCoreMock.Rounder(), consensusCoreMock.ShardCoordinator(), + consensusCoreMock.NodesCoordinator(), consensusCoreMock.SyncTimer(), - consensusCoreMock.ValidatorGroupSelector()) + ) assert.Nil(t, consensusCore) assert.Equal(t, spos.ErrNilBlsSingleSigner, err) @@ -287,8 +297,9 @@ func TestConsensusCore_WithNilMultiSignerShouldFail(t *testing.T) { nil, consensusCoreMock.Rounder(), consensusCoreMock.ShardCoordinator(), + consensusCoreMock.NodesCoordinator(), consensusCoreMock.SyncTimer(), - consensusCoreMock.ValidatorGroupSelector()) + ) assert.Nil(t, consensusCore) assert.Equal(t, spos.ErrNilMultiSigner, err) @@ -313,8 +324,9 @@ func TestConsensusCore_WithNilRounderShouldFail(t *testing.T) { consensusCoreMock.MultiSigner(), nil, consensusCoreMock.ShardCoordinator(), + consensusCoreMock.NodesCoordinator(), consensusCoreMock.SyncTimer(), - consensusCoreMock.ValidatorGroupSelector()) + ) assert.Nil(t, consensusCore) assert.Equal(t, spos.ErrNilRounder, err) @@ -339,14 +351,15 @@ func TestConsensusCore_WithNilShardCoordinatorShouldFail(t *testing.T) { consensusCoreMock.MultiSigner(), consensusCoreMock.Rounder(), nil, + consensusCoreMock.NodesCoordinator(), consensusCoreMock.SyncTimer(), - consensusCoreMock.ValidatorGroupSelector()) + ) assert.Nil(t, consensusCore) assert.Equal(t, spos.ErrNilShardCoordinator, err) } -func TestConsensusCore_WithNilSyncTimerShouldFail(t *testing.T) { +func TestConsensusCore_WithNilValidatorGroupSelectorShouldFail(t *testing.T) { t.Parallel() consensusCoreMock := mock.InitConsensusCore() @@ -366,13 +379,14 @@ func TestConsensusCore_WithNilSyncTimerShouldFail(t *testing.T) { consensusCoreMock.Rounder(), consensusCoreMock.ShardCoordinator(), nil, - consensusCoreMock.ValidatorGroupSelector()) + consensusCoreMock.SyncTimer(), + ) assert.Nil(t, consensusCore) - assert.Equal(t, spos.ErrNilSyncTimer, err) + assert.Equal(t, spos.ErrNilValidatorGroupSelector, err) } -func TestConsensusCore_WithNilValidatorGroupSelectorShouldFail(t *testing.T) { +func TestConsensusCore_WithNilSyncTimerShouldFail(t *testing.T) { t.Parallel() consensusCoreMock := mock.InitConsensusCore() @@ -391,11 +405,12 @@ func TestConsensusCore_WithNilValidatorGroupSelectorShouldFail(t *testing.T) { consensusCoreMock.MultiSigner(), consensusCoreMock.Rounder(), consensusCoreMock.ShardCoordinator(), - consensusCoreMock.SyncTimer(), - nil) + consensusCoreMock.NodesCoordinator(), + nil, + ) assert.Nil(t, consensusCore) - assert.Equal(t, spos.ErrNilValidatorGroupSelector, err) + assert.Equal(t, spos.ErrNilSyncTimer, err) } func TestConsensusCore_CreateConsensusCoreShouldWork(t *testing.T) { @@ -417,8 +432,9 @@ func TestConsensusCore_CreateConsensusCoreShouldWork(t *testing.T) { consensusCoreMock.MultiSigner(), consensusCoreMock.Rounder(), consensusCoreMock.ShardCoordinator(), + consensusCoreMock.NodesCoordinator(), consensusCoreMock.SyncTimer(), - consensusCoreMock.ValidatorGroupSelector()) + ) assert.NotNil(t, consensusCore) assert.Nil(t, err) diff --git a/consensus/spos/consensusState.go b/consensus/spos/consensusState.go index 25607578f8c..cb34e1b472a 100644 --- a/consensus/spos/consensusState.go +++ b/consensus/spos/consensusState.go @@ -8,6 +8,7 @@ import ( "github.com/ElrondNetwork/elrond-go/consensus" "github.com/ElrondNetwork/elrond-go/core/logger" "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/sharding" ) var log 
= logger.DefaultLogger() @@ -94,21 +95,28 @@ func (cns *ConsensusState) GetLeader() (string, error) { // GetNextConsensusGroup gets the new consensus group for the current round based on current eligible list and a random // source for the new selection -func (cns *ConsensusState) GetNextConsensusGroup(randomSource string, vgs consensus.ValidatorGroupSelector) ([]string, - error) { - validatorsGroup, err := vgs.ComputeValidatorsGroup([]byte(randomSource)) - +func (cns *ConsensusState) GetNextConsensusGroup( + randomSource []byte, + round uint64, + shardId uint32, + nodesCoordinator sharding.NodesCoordinator, +) ([]string, []string, error) { + + validatorsGroup, err := nodesCoordinator.ComputeValidatorsGroup(randomSource, round, shardId) if err != nil { - return nil, err + return nil, nil, err } - newConsensusGroup := make([]string, 0) + consensusSize := len(validatorsGroup) + newConsensusGroup := make([]string, consensusSize) + consensusRewardAddresses := make([]string, consensusSize) - for i := 0; i < len(validatorsGroup); i++ { - newConsensusGroup = append(newConsensusGroup, string(validatorsGroup[i].PubKey())) + for i := 0; i < consensusSize; i++ { + newConsensusGroup[i] = string(validatorsGroup[i].PubKey()) + consensusRewardAddresses[i] = string(validatorsGroup[i].Address()) } - return newConsensusGroup, nil + return newConsensusGroup, consensusRewardAddresses, nil } // IsConsensusDataSet method returns true if the consensus data for the current round is set and false otherwise diff --git a/consensus/spos/consensusState_test.go b/consensus/spos/consensusState_test.go index 97d186f73d8..eb67fc567a2 100644 --- a/consensus/spos/consensusState_test.go +++ b/consensus/spos/consensusState_test.go @@ -5,11 +5,11 @@ import ( "testing" "github.com/ElrondNetwork/elrond-go/consensus" - "github.com/ElrondNetwork/elrond-go/consensus/spos" - "github.com/ElrondNetwork/elrond-go/consensus/mock" + "github.com/ElrondNetwork/elrond-go/consensus/spos" "github.com/ElrondNetwork/elrond-go/consensus/spos/bn" "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/sharding" "github.com/stretchr/testify/assert" ) @@ -138,13 +138,17 @@ func TestConsensusState_GetNextConsensusGroupShouldFailWhenComputeValidatorsGrou cns := internalInitConsensusState() - vgs := &mock.ValidatorGroupSelectorMock{} + nodesCoordinator := &mock.NodesCoordinatorMock{} err := errors.New("error") - vgs.ComputeValidatorsGroupCalled = func(randomness []byte) ([]consensus.Validator, error) { + nodesCoordinator.ComputeValidatorsGroupCalled = func( + randomness []byte, + round uint64, + shardId uint32, + ) ([]sharding.Validator, error) { return nil, err } - _, err2 := cns.GetNextConsensusGroup("", vgs) + _, _, err2 := cns.GetNextConsensusGroup([]byte(""), 0, 0, nodesCoordinator) assert.Equal(t, err, err2) } @@ -153,11 +157,12 @@ func TestConsensusState_GetNextConsensusGroupShouldWork(t *testing.T) { cns := internalInitConsensusState() - vgs := &mock.ValidatorGroupSelectorMock{} + nodesCoordinator := &mock.NodesCoordinatorMock{} - nextConsensusGroup, err := cns.GetNextConsensusGroup("", vgs) + nextConsensusGroup, rewardAddresses, err := cns.GetNextConsensusGroup(nil, 0, 0, nodesCoordinator) assert.Nil(t, err) assert.NotNil(t, nextConsensusGroup) + assert.NotNil(t, rewardAddresses) } func TestConsensusState_IsConsensusDataSetShouldReturnTrue(t *testing.T) { @@ -209,13 +214,13 @@ func TestConsensusState_IsJobDoneShouldReturnFalse(t *testing.T) { cns := internalInitConsensusState() - cns.SetJobDone("1", bn.SrBlock, 
false) + _ = cns.SetJobDone("1", bn.SrBlock, false) assert.False(t, cns.IsJobDone("1", bn.SrBlock)) - cns.SetJobDone("1", bn.SrCommitment, true) + _ = cns.SetJobDone("1", bn.SrCommitment, true) assert.False(t, cns.IsJobDone("1", bn.SrBlock)) - cns.SetJobDone("2", bn.SrBlock, true) + _ = cns.SetJobDone("2", bn.SrBlock, true) assert.False(t, cns.IsJobDone("1", bn.SrBlock)) } @@ -224,7 +229,7 @@ func TestConsensusState_IsJobDoneShouldReturnTrue(t *testing.T) { cns := internalInitConsensusState() - cns.SetJobDone("1", bn.SrBlock, true) + _ = cns.SetJobDone("1", bn.SrBlock, true) assert.True(t, cns.IsJobDone("1", bn.SrBlock)) } @@ -234,13 +239,13 @@ func TestConsensusState_IsSelfJobDoneShouldReturnFalse(t *testing.T) { cns := internalInitConsensusState() - cns.SetJobDone(cns.SelfPubKey(), bn.SrBlock, false) + _ = cns.SetJobDone(cns.SelfPubKey(), bn.SrBlock, false) assert.False(t, cns.IsSelfJobDone(bn.SrBlock)) - cns.SetJobDone(cns.SelfPubKey(), bn.SrCommitment, true) + _ = cns.SetJobDone(cns.SelfPubKey(), bn.SrCommitment, true) assert.False(t, cns.IsSelfJobDone(bn.SrBlock)) - cns.SetJobDone(cns.SelfPubKey()+"X", bn.SrBlock, true) + _ = cns.SetJobDone(cns.SelfPubKey()+"X", bn.SrBlock, true) assert.False(t, cns.IsSelfJobDone(bn.SrBlock)) } @@ -249,7 +254,7 @@ func TestConsensusState_IsSelfJobDoneShouldReturnTrue(t *testing.T) { cns := internalInitConsensusState() - cns.SetJobDone(cns.SelfPubKey(), bn.SrBlock, true) + _ = cns.SetJobDone(cns.SelfPubKey(), bn.SrBlock, true) assert.True(t, cns.IsSelfJobDone(bn.SrBlock)) } @@ -348,7 +353,7 @@ func TestConsensusState_CanDoSubroundJobShouldReturnFalseWhenSelfJobIsDone(t *te cns := internalInitConsensusState() cns.Data = make([]byte, 0) - cns.SetJobDone(cns.SelfPubKey(), bn.SrBlock, true) + _ = cns.SetJobDone(cns.SelfPubKey(), bn.SrBlock, true) assert.False(t, cns.CanDoSubroundJob(bn.SrBlock)) } @@ -359,7 +364,7 @@ func TestConsensusState_CanDoSubroundJobShouldReturnFalseWhenCurrentRoundIsFinis cns := internalInitConsensusState() cns.Data = make([]byte, 0) - cns.SetJobDone(cns.SelfPubKey(), bn.SrBlock, false) + _ = cns.SetJobDone(cns.SelfPubKey(), bn.SrBlock, false) cns.SetStatus(bn.SrBlock, spos.SsFinished) assert.False(t, cns.CanDoSubroundJob(bn.SrBlock)) @@ -371,7 +376,7 @@ func TestConsensusState_CanDoSubroundJobShouldReturnTrue(t *testing.T) { cns := internalInitConsensusState() cns.Data = make([]byte, 0) - cns.SetJobDone(cns.SelfPubKey(), bn.SrBlock, false) + _ = cns.SetJobDone(cns.SelfPubKey(), bn.SrBlock, false) cns.SetStatus(bn.SrBlock, spos.SsNotFinished) assert.True(t, cns.CanDoSubroundJob(bn.SrBlock)) @@ -413,7 +418,7 @@ func TestConsensusState_CanProcessReceivedMessageShouldReturnFalseWhenJobIsDone( PubKey: []byte("1"), } - cns.SetJobDone("1", bn.SrBlock, true) + _ = cns.SetJobDone("1", bn.SrBlock, true) assert.False(t, cns.CanProcessReceivedMessage(cnsDta, 0, bn.SrBlock)) } @@ -455,7 +460,7 @@ func TestConsensusState_GenerateBitmapShouldWork(t *testing.T) { selfIndexInConsensusGroup, _ := cns.SelfConsensusGroupIndex() bitmapExpected[selfIndexInConsensusGroup/8] |= 1 << (uint16(selfIndexInConsensusGroup) % 8) - cns.SetJobDone(cns.SelfPubKey(), bn.SrBlock, true) + _ = cns.SetJobDone(cns.SelfPubKey(), bn.SrBlock, true) bitmap := cns.GenerateBitmap(bn.SrBlock) assert.Equal(t, bitmapExpected, bitmap) diff --git a/consensus/spos/interface.go b/consensus/spos/interface.go index 3443c22a40d..02346c8657b 100644 --- a/consensus/spos/interface.go +++ b/consensus/spos/interface.go @@ -38,8 +38,8 @@ type ConsensusCoreHandler interface { 
ShardCoordinator() sharding.Coordinator // SyncTimer gets the SyncTimer stored in the ConsensusCore SyncTimer() ntp.SyncTimer - // ValidatorGroupSelector gets the ValidatorGroupSelector stored in the ConsensusCore - ValidatorGroupSelector() consensus.ValidatorGroupSelector + // NodesCoordinator gets the NodesCoordinator stored in the ConsensusCore + NodesCoordinator() sharding.NodesCoordinator // RandomnessPrivateKey returns the private key stored in the ConsensusStore used for randomness generation RandomnessPrivateKey() crypto.PrivateKey // RandomnessSingleSigner returns the single signer stored in the ConsensusStore used for randomness generation diff --git a/consensus/validators/errors.go b/consensus/validators/errors.go deleted file mode 100644 index 9276c1ca7f4..00000000000 --- a/consensus/validators/errors.go +++ /dev/null @@ -1,14 +0,0 @@ -package validators - -import ( - "errors" -) - -// ErrNilStake signals that a nil stake structure has been provided -var ErrNilStake = errors.New("nil stake") - -// ErrNegativeStake signals that the stake is negative -var ErrNegativeStake = errors.New("negative stake") - -// ErrNilPubKey signals that the public key is nil -var ErrNilPubKey = errors.New("nil public key") diff --git a/consensus/validators/groupSelectors/errors.go b/consensus/validators/groupSelectors/errors.go deleted file mode 100644 index 986f109ddea..00000000000 --- a/consensus/validators/groupSelectors/errors.go +++ /dev/null @@ -1,29 +0,0 @@ -package groupSelectors - -import ( - "errors" -) - -// ErrNilInputSlice signals that a nil slice has been provided -var ErrNilInputSlice = errors.New("nil input slice") - -// ErrSmallEligibleListSize signals that the eligible validators list's size is less than the consensus size -var ErrSmallEligibleListSize = errors.New("small eligible list size") - -// ErrInvalidConsensusGroupSize signals that the consensus size is invalid (e.g. 
value is negative) -var ErrInvalidConsensusGroupSize = errors.New("invalid consensus group size") - -// ErrEligibleSelectionMismatch signals a mismatch between the eligible list and the group selection bitmap -var ErrEligibleSelectionMismatch = errors.New("invalid eligible validator selection") - -// ErrEligibleTooManySelections signals an invalid selection for consensus group -var ErrEligibleTooManySelections = errors.New("too many selections for consensus group") - -// ErrEligibleTooFewSelections signals an invalid selection for consensus group -var ErrEligibleTooFewSelections = errors.New("too few selections for consensus group") - -// ErrNilRandomness signals that a nil randomness source has been provided -var ErrNilRandomness = errors.New("nil randomness source") - -// ErrNilHasher signals that a nil hasher has been provided -var ErrNilHasher = errors.New("nil hasher") diff --git a/consensus/validators/groupSelectors/export_test.go b/consensus/validators/groupSelectors/export_test.go deleted file mode 100644 index ba4f0f77611..00000000000 --- a/consensus/validators/groupSelectors/export_test.go +++ /dev/null @@ -1,9 +0,0 @@ -package groupSelectors - -import ( - "github.com/ElrondNetwork/elrond-go/consensus" -) - -func (ihgs *indexHashedGroupSelector) EligibleList() []consensus.Validator { - return ihgs.eligibleList -} diff --git a/consensus/validators/groupSelectors/indexHashedGroup.go b/consensus/validators/groupSelectors/indexHashedGroup.go deleted file mode 100644 index b947a9ba7ca..00000000000 --- a/consensus/validators/groupSelectors/indexHashedGroup.go +++ /dev/null @@ -1,186 +0,0 @@ -package groupSelectors - -import ( - "bytes" - "encoding/binary" - "math/big" - - "github.com/ElrondNetwork/elrond-go/consensus" - "github.com/ElrondNetwork/elrond-go/hashing" -) - -type indexHashedGroupSelector struct { - hasher hashing.Hasher - eligibleList []consensus.Validator - expandedEligibleList []consensus.Validator - consensusGroupSize int -} - -// NewIndexHashedGroupSelector creates a new index hashed group selector -func NewIndexHashedGroupSelector(consensusGroupSize int, hasher hashing.Hasher) (*indexHashedGroupSelector, error) { - if hasher == nil || hasher.IsInterfaceNil() { - return nil, ErrNilHasher - } - - ihgs := &indexHashedGroupSelector{ - hasher: hasher, - eligibleList: make([]consensus.Validator, 0), - expandedEligibleList: make([]consensus.Validator, 0), - } - - err := ihgs.SetConsensusGroupSize(consensusGroupSize) - if err != nil { - return nil, err - } - - return ihgs, nil -} - -// LoadEligibleList loads the eligible list -func (ihgs *indexHashedGroupSelector) LoadEligibleList(eligibleList []consensus.Validator) error { - if eligibleList == nil { - return ErrNilInputSlice - } - - ihgs.eligibleList = make([]consensus.Validator, len(eligibleList)) - copy(ihgs.eligibleList, eligibleList) - return nil -} - -// ComputeValidatorsGroup will generate a list of validators based on the the eligible list, -// consensus group size and a randomness source -// Steps: -// 1. generate expanded eligible list by multiplying entries from eligible list according to stake and rating -> TODO -// 2. for each value in [0, consensusGroupSize), compute proposedindex = Hash( [index as string] CONCAT randomness) % len(eligible list) -// 3. 
if proposed index is already in the temp validator list, then proposedIndex++ (and then % len(eligible list) as to not -// exceed the maximum index value permitted by the validator list), and then recheck against temp validator list until -// the item at the new proposed index is not found in the list. This new proposed index will be called checked index -// 4. the item at the checked index is appended in the temp validator list -func (ihgs *indexHashedGroupSelector) ComputeValidatorsGroup(randomness []byte) (validatorsGroup []consensus.Validator, err error) { - if len(ihgs.eligibleList) < ihgs.consensusGroupSize { - return nil, ErrSmallEligibleListSize - } - - if randomness == nil { - return nil, ErrNilRandomness - } - - ihgs.expandedEligibleList = ihgs.expandEligibleList() - - tempList := make([]consensus.Validator, 0) - - for startIdx := 0; startIdx < ihgs.consensusGroupSize; startIdx++ { - proposedIndex := ihgs.computeListIndex(startIdx, string(randomness)) - - checkedIndex := ihgs.checkIndex(proposedIndex, tempList) - tempList = append(tempList, ihgs.expandedEligibleList[checkedIndex]) - } - - return tempList, nil -} - -// GetSelectedPublicKeys returns the stringified public keys of the marked validators in the selection bitmap -// TODO: This function needs to be revised when the requirements are clarified -func (ihgs *indexHashedGroupSelector) GetSelectedPublicKeys(selection []byte) (publicKeys []string, err error) { - selectionLen := uint16(len(selection) * 8) // 8 selection bits in each byte - shardEligibleLen := uint16(len(ihgs.eligibleList)) - invalidSelection := selectionLen < shardEligibleLen - - if invalidSelection { - return nil, ErrEligibleSelectionMismatch - } - - publicKeys = make([]string, ihgs.consensusGroupSize) - cnt := 0 - - for i := uint16(0); i < shardEligibleLen; i++ { - isSelected := (selection[i/8] & (1 << (i % 8))) != 0 - - if !isSelected { - continue - } - - publicKeys[cnt] = string(ihgs.eligibleList[i].PubKey()) - cnt++ - - if cnt > ihgs.consensusGroupSize { - return nil, ErrEligibleTooManySelections - } - } - - if cnt < ihgs.consensusGroupSize { - return nil, ErrEligibleTooFewSelections - } - - return publicKeys, nil -} - -func (ihgs *indexHashedGroupSelector) expandEligibleList() []consensus.Validator { - //TODO implement an expand eligible list variant - return ihgs.eligibleList -} - -// computeListIndex computes a proposed index from expanded eligible list -func (ihgs *indexHashedGroupSelector) computeListIndex(currentIndex int, randomSource string) int { - buffCurrentIndex := make([]byte, 8) - binary.BigEndian.PutUint64(buffCurrentIndex, uint64(currentIndex)) - - indexHash := ihgs.hasher.Compute(string(buffCurrentIndex) + randomSource) - - computedLargeIndex := big.NewInt(0) - computedLargeIndex.SetBytes(indexHash) - - // computedListIndex = computedLargeIndex % len(expandedEligibleList) - computedListIndex := big.NewInt(0).Mod(computedLargeIndex, big.NewInt(int64(len(ihgs.expandedEligibleList)))).Int64() - return int(computedListIndex) -} - -// checkIndex returns a checked index starting from a proposed index -func (ihgs *indexHashedGroupSelector) checkIndex(proposedIndex int, selectedList []consensus.Validator) int { - - for { - v := ihgs.expandedEligibleList[proposedIndex] - - if ihgs.validatorIsInList(v, selectedList) { - proposedIndex++ - proposedIndex = proposedIndex % len(ihgs.expandedEligibleList) - continue - } - - return proposedIndex - } -} - -// validatorIsInList returns true if a validator has been found in provided list -func (ihgs 
*indexHashedGroupSelector) validatorIsInList(v consensus.Validator, list []consensus.Validator) bool { - for i := 0; i < len(list); i++ { - if bytes.Equal(v.PubKey(), list[i].PubKey()) { - return true - } - } - - return false -} - -// ConsensusGroupSize returns the consensus group size -func (ihgs *indexHashedGroupSelector) ConsensusGroupSize() int { - return ihgs.consensusGroupSize -} - -// SetConsensusGroupSize sets the consensus group size -func (ihgs *indexHashedGroupSelector) SetConsensusGroupSize(consensusGroupSize int) error { - if consensusGroupSize < 1 { - return ErrInvalidConsensusGroupSize - } - - ihgs.consensusGroupSize = consensusGroupSize - return nil -} - -// IsInterfaceNil returns true if there is no value under the interface -func (ihgs *indexHashedGroupSelector) IsInterfaceNil() bool { - if ihgs == nil { - return true - } - return false -} diff --git a/consensus/validators/groupSelectors/indexHashedGroup_test.go b/consensus/validators/groupSelectors/indexHashedGroup_test.go deleted file mode 100644 index dbc8d0fee89..00000000000 --- a/consensus/validators/groupSelectors/indexHashedGroup_test.go +++ /dev/null @@ -1,363 +0,0 @@ -package groupSelectors_test - -import ( - "encoding/binary" - "math/big" - "strconv" - "testing" - - "github.com/ElrondNetwork/elrond-go/consensus" - "github.com/ElrondNetwork/elrond-go/consensus/mock" - "github.com/ElrondNetwork/elrond-go/consensus/validators/groupSelectors" - "github.com/stretchr/testify/assert" -) - -func convertBigIntToBytes(value *big.Int) []byte { - return value.Bytes() -} - -func uint64ToBytes(value uint64) []byte { - buff := make([]byte, 8) - - binary.BigEndian.PutUint64(buff, value) - return buff -} - -//------- NewIndexHashedGroupSelector - -func TestNewIndexHashedGroupSelector_NilHasherShouldErr(t *testing.T) { - t.Parallel() - - ihgs, err := groupSelectors.NewIndexHashedGroupSelector(1, nil) - - assert.Nil(t, ihgs) - assert.Equal(t, groupSelectors.ErrNilHasher, err) -} - -func TestNewIndexHashedGroupSelector_InvalidConsensusGroupSizeShouldErr(t *testing.T) { - t.Parallel() - - ihgs, err := groupSelectors.NewIndexHashedGroupSelector(0, mock.HasherMock{}) - - assert.Nil(t, ihgs) - assert.Equal(t, groupSelectors.ErrInvalidConsensusGroupSize, err) -} - -func TestNewIndexHashedGroupSelector_OkValsShouldWork(t *testing.T) { - t.Parallel() - - ihgs, err := groupSelectors.NewIndexHashedGroupSelector(1, mock.HasherMock{}) - - assert.NotNil(t, ihgs) - assert.Nil(t, err) -} - -//------- LoadEligibleList - -func TestIndexHashedGroupSelector_LoadEligibleListNilListShouldErr(t *testing.T) { - t.Parallel() - - ihgs, _ := groupSelectors.NewIndexHashedGroupSelector(10, mock.HasherMock{}) - - assert.Equal(t, groupSelectors.ErrNilInputSlice, ihgs.LoadEligibleList(nil)) -} - -func TestIndexHashedGroupSelector_OkValShouldWork(t *testing.T) { - t.Parallel() - - ihgs, _ := groupSelectors.NewIndexHashedGroupSelector(10, mock.HasherMock{}) - - list := []consensus.Validator{ - mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0")), - mock.NewValidatorMock(big.NewInt(2), 3, []byte("pk1")), - } - - err := ihgs.LoadEligibleList(list) - assert.Nil(t, err) - assert.Equal(t, list, ihgs.EligibleList()) -} - -//------- ComputeValidatorsGroup - -func TestIndexHashedGroupSelector_ComputeValidatorsGroup0SizeShouldErr(t *testing.T) { - t.Parallel() - - ihgs, _ := groupSelectors.NewIndexHashedGroupSelector(1, mock.HasherMock{}) - - list := make([]consensus.Validator, 0) - - list, err := ihgs.ComputeValidatorsGroup([]byte("randomness")) - - assert.Nil(t, list) 
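The ComputeValidatorsGroup implementation removed above documents its selection rule in the "Steps" comment: for each group slot, compute Hash(indexAsBytes ++ randomness) modulo the list length, then advance past indices that were already picked. A simplified, self-contained sketch of that rule, using plain public-key strings and sha256 purely for illustration (the real code uses the injected hasher and Validator values), assuming len(pubKeys) >= groupSize and the encoding/binary, crypto/sha256 and math/big imports:

	func selectGroup(pubKeys []string, randomness []byte, groupSize int) []string {
		picked := make(map[int]bool)
		group := make([]string, 0, groupSize)
		for i := 0; i < groupSize; i++ {
			// proposedIndex = Hash(index bytes ++ randomness) % len(pubKeys)
			buff := make([]byte, 8)
			binary.BigEndian.PutUint64(buff, uint64(i))
			h := sha256.Sum256(append(buff, randomness...))
			hashAsInt := new(big.Int).SetBytes(h[:])
			idx := int(new(big.Int).Mod(hashAsInt, big.NewInt(int64(len(pubKeys)))).Int64())
			// linear probing: skip indices already selected for the group
			for picked[idx] {
				idx = (idx + 1) % len(pubKeys)
			}
			picked[idx] = true
			group = append(group, pubKeys[idx])
		}
		return group
	}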
- assert.Equal(t, groupSelectors.ErrSmallEligibleListSize, err) -} - -func TestIndexHashedGroupSelector_ComputeValidatorsGroupWrongSizeShouldErr(t *testing.T) { - t.Parallel() - - ihgs, _ := groupSelectors.NewIndexHashedGroupSelector(10, mock.HasherMock{}) - - list := []consensus.Validator{ - mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0")), - mock.NewValidatorMock(big.NewInt(2), 3, []byte("pk1")), - } - - _ = ihgs.LoadEligibleList(list) - - list, err := ihgs.ComputeValidatorsGroup([]byte("randomness")) - - assert.Nil(t, list) - assert.Equal(t, groupSelectors.ErrSmallEligibleListSize, err) -} - -func TestIndexHashedGroupSelector_ComputeValidatorsGroupNilRandomnessShouldErr(t *testing.T) { - t.Parallel() - - ihgs, _ := groupSelectors.NewIndexHashedGroupSelector(2, mock.HasherMock{}) - - list := []consensus.Validator{ - mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0")), - mock.NewValidatorMock(big.NewInt(2), 3, []byte("pk1")), - } - - _ = ihgs.LoadEligibleList(list) - - list2, err := ihgs.ComputeValidatorsGroup(nil) - - assert.Nil(t, list2) - assert.Equal(t, groupSelectors.ErrNilRandomness, err) -} - -//------- functionality tests - -func TestIndexHashedGroupSelector_ComputeValidatorsGroup1ValidatorShouldReturnSame(t *testing.T) { - t.Parallel() - - ihgs, _ := groupSelectors.NewIndexHashedGroupSelector(1, mock.HasherMock{}) - - list := []consensus.Validator{ - mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0")), - } - - _ = ihgs.LoadEligibleList(list) - - list2, err := ihgs.ComputeValidatorsGroup([]byte("randomness")) - - assert.Nil(t, err) - assert.Equal(t, list, list2) -} - -func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest2Validators(t *testing.T) { - t.Parallel() - - hasher := &mock.HasherStub{} - - randomness := "randomness" - - //this will return the list in order: - //element 0 will be first element - //element 1 will be the second - hasher.ComputeCalled = func(s string) []byte { - if string(uint64ToBytes(0))+randomness == s { - return convertBigIntToBytes(big.NewInt(0)) - } - - if string(uint64ToBytes(1))+randomness == s { - return convertBigIntToBytes(big.NewInt(1)) - } - - return nil - } - - ihgs, _ := groupSelectors.NewIndexHashedGroupSelector(2, hasher) - - list := []consensus.Validator{ - mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0")), - mock.NewValidatorMock(big.NewInt(2), 3, []byte("pk1")), - } - - _ = ihgs.LoadEligibleList(list) - - list2, err := ihgs.ComputeValidatorsGroup([]byte(randomness)) - - assert.Nil(t, err) - assert.Equal(t, list, list2) -} - -func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest2ValidatorsRevertOrder(t *testing.T) { - t.Parallel() - - hasher := &mock.HasherStub{} - - randomness := "randomness" - - //this will return the list in reverse order: - //element 0 will be the second - //element 1 will be the first - hasher.ComputeCalled = func(s string) []byte { - if string(uint64ToBytes(0))+randomness == s { - return convertBigIntToBytes(big.NewInt(1)) - } - - if string(uint64ToBytes(1))+randomness == s { - return convertBigIntToBytes(big.NewInt(0)) - } - - return nil - } - - ihgs, _ := groupSelectors.NewIndexHashedGroupSelector(2, hasher) - - validator0 := mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0")) - validator1 := mock.NewValidatorMock(big.NewInt(2), 3, []byte("pk1")) - - list := []consensus.Validator{ - validator0, - validator1, - } - - _ = ihgs.LoadEligibleList(list) - - list2, err := ihgs.ComputeValidatorsGroup([]byte(randomness)) - - assert.Nil(t, err) - assert.Equal(t, validator0, list2[1]) - 
assert.Equal(t, validator1, list2[0]) -} - -func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest2ValidatorsSameIndex(t *testing.T) { - t.Parallel() - - hasher := &mock.HasherStub{} - - randomness := "randomness" - - //this will return the list in order: - //element 0 will be the first - //element 1 will be the second as the same index is being returned and 0 is already in list - hasher.ComputeCalled = func(s string) []byte { - if string(uint64ToBytes(0))+randomness == s { - return convertBigIntToBytes(big.NewInt(0)) - } - - if string(uint64ToBytes(1))+randomness == s { - return convertBigIntToBytes(big.NewInt(0)) - } - - return nil - } - - ihgs, _ := groupSelectors.NewIndexHashedGroupSelector(2, hasher) - - list := []consensus.Validator{ - mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0")), - mock.NewValidatorMock(big.NewInt(2), 3, []byte("pk1")), - } - - _ = ihgs.LoadEligibleList(list) - - list2, err := ihgs.ComputeValidatorsGroup([]byte(randomness)) - - assert.Nil(t, err) - assert.Equal(t, list, list2) -} - -func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest6From10ValidatorsShouldWork(t *testing.T) { - t.Parallel() - - hasher := &mock.HasherStub{} - - randomness := "randomness" - - //script: - // for index 0, hasher will return 11 which will translate to 1, so 1 is the first element - // for index 1, hasher will return 1 which will translate to 1, 1 is already picked, try the next, 2 is the second element - // for index 2, hasher will return 9 which will translate to 9, 9 is the 3-rd element - // for index 3, hasher will return 9 which will translate to 9, 9 is already picked, try the next one, 0 is the 4-th element - // for index 4, hasher will return 0 which will translate to 0, 0 is already picked, 1 is already picked, 2 is already picked, - // 3 is the 4-th element - // for index 5, hasher will return 9 which will translate to 9, so 9, 0, 1, 2, 3 are already picked, 4 is the 5-th element - - script := make(map[string]*big.Int) - script[string(uint64ToBytes(0))+randomness] = big.NewInt(11) //will translate to 1, add 1 - script[string(uint64ToBytes(1))+randomness] = big.NewInt(1) //will translate to 1, add 2 - script[string(uint64ToBytes(2))+randomness] = big.NewInt(9) //will translate to 9, add 9 - script[string(uint64ToBytes(3))+randomness] = big.NewInt(9) //will translate to 9, add 0 - script[string(uint64ToBytes(4))+randomness] = big.NewInt(0) //will translate to 0, add 3 - script[string(uint64ToBytes(5))+randomness] = big.NewInt(9) //will translate to 9, add 4 - - hasher.ComputeCalled = func(s string) []byte { - val, ok := script[s] - - if !ok { - assert.Fail(t, "should have not got here") - } - - return convertBigIntToBytes(val) - } - - ihgs, _ := groupSelectors.NewIndexHashedGroupSelector(6, hasher) - - validator0 := mock.NewValidatorMock(big.NewInt(1), 1, []byte("pk0")) - validator1 := mock.NewValidatorMock(big.NewInt(2), 2, []byte("pk1")) - validator2 := mock.NewValidatorMock(big.NewInt(3), 3, []byte("pk2")) - validator3 := mock.NewValidatorMock(big.NewInt(4), 4, []byte("pk3")) - validator4 := mock.NewValidatorMock(big.NewInt(5), 5, []byte("pk4")) - validator5 := mock.NewValidatorMock(big.NewInt(6), 6, []byte("pk5")) - validator6 := mock.NewValidatorMock(big.NewInt(7), 7, []byte("pk6")) - validator7 := mock.NewValidatorMock(big.NewInt(8), 8, []byte("pk7")) - validator8 := mock.NewValidatorMock(big.NewInt(9), 9, []byte("pk8")) - validator9 := mock.NewValidatorMock(big.NewInt(10), 10, []byte("pk9")) - - list := []consensus.Validator{ - validator0, - 
validator1, - validator2, - validator3, - validator4, - validator5, - validator6, - validator7, - validator8, - validator9, - } - - _ = ihgs.LoadEligibleList(list) - - list2, err := ihgs.ComputeValidatorsGroup([]byte(randomness)) - - assert.Nil(t, err) - assert.Equal(t, 6, len(list2)) - //check order as described in script - assert.Equal(t, validator1, list2[0]) - assert.Equal(t, validator2, list2[1]) - assert.Equal(t, validator9, list2[2]) - assert.Equal(t, validator0, list2[3]) - assert.Equal(t, validator3, list2[4]) - assert.Equal(t, validator4, list2[5]) - -} - -func BenchmarkIndexHashedGroupSelector_ComputeValidatorsGroup21of400(b *testing.B) { - consensusGroupSize := 21 - - ihgs, _ := groupSelectors.NewIndexHashedGroupSelector(consensusGroupSize, mock.HasherMock{}) - - list := make([]consensus.Validator, 0) - - //generate 400 validators - for i := 0; i < 400; i++ { - list = append(list, mock.NewValidatorMock(big.NewInt(0), 0, []byte("pk"+strconv.Itoa(i)))) - } - _ = ihgs.LoadEligibleList(list) - - b.ResetTimer() - - for i := 0; i < b.N; i++ { - randomness := strconv.Itoa(i) - - list2, _ := ihgs.ComputeValidatorsGroup([]byte(randomness)) - - assert.Equal(b, consensusGroupSize, len(list2)) - } -} diff --git a/consensus/validators/validator_test.go b/consensus/validators/validator_test.go deleted file mode 100644 index 9f0cefe817c..00000000000 --- a/consensus/validators/validator_test.go +++ /dev/null @@ -1,61 +0,0 @@ -package validators_test - -import ( - "math/big" - "testing" - - "github.com/ElrondNetwork/elrond-go/consensus/validators" - "github.com/stretchr/testify/assert" -) - -func TestValidator_NewValidatorShouldFailOnNilStake(t *testing.T) { - t.Parallel() - - validator, err := validators.NewValidator(nil, 0, []byte("pk1")) - - assert.Nil(t, validator) - assert.Equal(t, validators.ErrNilStake, err) -} - -func TestValidator_NewValidatorShouldFailOnNegativeStake(t *testing.T) { - t.Parallel() - - validator, err := validators.NewValidator(big.NewInt(-1), 0, []byte("pk1")) - - assert.Nil(t, validator) - assert.Equal(t, validators.ErrNegativeStake, err) -} - -func TestValidator_NewValidatorShouldFailOnNilPublickKey(t *testing.T) { - t.Parallel() - - validator, err := validators.NewValidator(big.NewInt(0), 0, nil) - - assert.Nil(t, validator) - assert.Equal(t, validators.ErrNilPubKey, err) -} - -func TestValidator_NewValidatorShouldWork(t *testing.T) { - t.Parallel() - - validator, err := validators.NewValidator(big.NewInt(0), 0, []byte("pk1")) - - assert.NotNil(t, validator) - assert.Nil(t, err) -} - -func TestValidator_StakeShouldWork(t *testing.T) { - t.Parallel() - - validator, _ := validators.NewValidator(big.NewInt(1), 0, []byte("pk1")) - - assert.Equal(t, big.NewInt(1), validator.Stake()) -} - -func TestValidator_PubKeyShouldWork(t *testing.T) { - t.Parallel() - - validator, _ := validators.NewValidator(big.NewInt(0), 0, []byte("pk1")) - - assert.Equal(t, []byte("pk1"), validator.PubKey()) -} diff --git a/core/indexer/data.go b/core/indexer/data.go index 21917298663..a3ac6d48bbe 100644 --- a/core/indexer/data.go +++ b/core/indexer/data.go @@ -13,6 +13,7 @@ type Transaction struct { MBHash string `json:"miniBlockHash"` BlockHash string `json:"blockHash"` Nonce uint64 `json:"nonce"` + Round uint64 `json:"round"` Value *big.Int `json:"value"` Receiver string `json:"receiver"` Sender string `json:"sender"` diff --git a/core/indexer/elasticsearch.go b/core/indexer/elasticsearch.go index a6c126cb2cb..168280060a4 100644 --- a/core/indexer/elasticsearch.go +++ 
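For orientation on the indexer change above: the hunk adds a Round field to the indexer's Transaction structure, and the builders below populate it from header.GetRound(). A minimal sketch of an entry carrying the new field, written as if it sat alongside data.go in the same package; every value is a placeholder and it needs the encoding/json and math/big imports:

// exampleIndexedTxWithRound shows the new "round" JSON field being serialized
// next to the existing ones; all values below are illustrative placeholders.
func exampleIndexedTxWithRound() ([]byte, error) {
	entry := Transaction{
		MBHash:    "6d696e69626c6f636b68617368",
		BlockHash: "626c6f636b68617368",
		Nonce:     7,
		Round:     42, // populated from header.GetRound() by the builders below
		Value:     big.NewInt(1000),
		Receiver:  "7263764164647230",
		Sender:    "736e644164647230",
	}

	return json.Marshal(&entry)
}
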
b/core/indexer/elasticsearch.go @@ -16,6 +16,7 @@ import ( "github.com/ElrondNetwork/elrond-go/core/statistics" "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" "github.com/ElrondNetwork/elrond-go/data/smartContractResult" "github.com/ElrondNetwork/elrond-go/data/transaction" "github.com/ElrondNetwork/elrond-go/hashing" @@ -505,6 +506,11 @@ func getTransactionByType( return buildSmartContractResult(currentSc, txHash, mbHash, blockHash, mb, header) } + currentReward, ok := tx.(*rewardTx.RewardTx) + if ok && currentReward != nil { + return buildRewardTransaction(currentReward, txHash, mbHash, blockHash, mb, header) + } + return nil } @@ -522,6 +528,7 @@ func buildTransaction( MBHash: hex.EncodeToString(mbHash), BlockHash: hex.EncodeToString(blockHash), Nonce: tx.Nonce, + Round: header.GetRound(), Value: tx.Value, Receiver: hex.EncodeToString(tx.RcvAddr), Sender: hex.EncodeToString(tx.SndAddr), @@ -549,6 +556,7 @@ func buildSmartContractResult( MBHash: hex.EncodeToString(mbHash), BlockHash: hex.EncodeToString(blockHash), Nonce: scr.Nonce, + Round: header.GetRound(), Value: scr.Value, Receiver: hex.EncodeToString(scr.RcvAddr), Sender: hex.EncodeToString(scr.SndAddr), @@ -562,3 +570,34 @@ func buildSmartContractResult( Status: "Success", } } + +func buildRewardTransaction( + rTx *rewardTx.RewardTx, + txHash []byte, + mbHash []byte, + blockHash []byte, + mb *block.MiniBlock, + header data.HeaderHandler, +) *Transaction { + + shardIdStr := fmt.Sprintf("Shard%d", rTx.ShardId) + + return &Transaction{ + Hash: hex.EncodeToString(txHash), + MBHash: hex.EncodeToString(mbHash), + BlockHash: hex.EncodeToString(blockHash), + Nonce: 0, + Round: rTx.Round, + Value: rTx.Value, + Receiver: hex.EncodeToString(rTx.RcvAddr), + Sender: shardIdStr, + ReceiverShard: mb.ReceiverShardID, + SenderShard: mb.SenderShardID, + GasPrice: 0, + GasLimit: 0, + Data: "", + Signature: "", + Timestamp: time.Duration(header.GetTimeStamp()), + Status: "Success", + } +} diff --git a/data/address/specialAddresses.go b/data/address/specialAddresses.go new file mode 100644 index 00000000000..db5df6a5a28 --- /dev/null +++ b/data/address/specialAddresses.go @@ -0,0 +1,168 @@ +package address + +import ( + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/sharding" +) + +type specialAddresses struct { + shardConsensusData *data.ConsensusRewardData + metaConsensusData []*data.ConsensusRewardData + elrondAddress []byte + burnAddress []byte + + adrConv state.AddressConverter + shardCoordinator sharding.Coordinator + nodesCoordinator sharding.NodesCoordinator +} + +// NewSpecialAddressHolder creates a special address holder +func NewSpecialAddressHolder( + elrondAddress []byte, + burnAddress []byte, + adrConv state.AddressConverter, + shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, +) (*specialAddresses, error) { + if elrondAddress == nil { + return nil, data.ErrNilElrondAddress + } + if burnAddress == nil { + return nil, data.ErrNilBurnAddress + } + if adrConv == nil || adrConv.IsInterfaceNil() { + return nil, data.ErrNilAddressConverter + } + if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { + return nil, data.ErrNilShardCoordinator + } + if nodesCoordinator == nil || nodesCoordinator.IsInterfaceNil() { + return nil, data.ErrNilNodesCoordinator + } + + sp := &specialAddresses{ + elrondAddress: elrondAddress, + 
burnAddress: burnAddress, + adrConv: adrConv, + shardCoordinator: shardCoordinator, + nodesCoordinator: nodesCoordinator, + metaConsensusData: make([]*data.ConsensusRewardData, 0), + } + + return sp, nil +} + +// SetShardConsensusData - sets the reward addresses for the current consensus group +func (sp *specialAddresses) SetShardConsensusData(randomness []byte, round uint64, epoch uint32, shardID uint32) error { + // give transaction coordinator the consensus group validators addresses where to send the rewards. + consensusAddresses, err := sp.nodesCoordinator.GetValidatorsRewardsAddresses( + randomness, round, shardID, + ) + if err != nil { + return err + } + + sp.shardConsensusData = &data.ConsensusRewardData{ + Round: round, + Epoch: epoch, + Addresses: consensusAddresses, + } + + return nil +} + +// SetElrondCommunityAddress sets elrond address +func (sp *specialAddresses) SetElrondCommunityAddress(elrond []byte) { + sp.elrondAddress = elrond +} + +// ElrondCommunityAddress provides elrond address +func (sp *specialAddresses) ElrondCommunityAddress() []byte { + return sp.elrondAddress +} + +// BurnAddress provides burn address +func (sp *specialAddresses) BurnAddress() []byte { + return sp.burnAddress +} + +// ConsensusShardRewardData provides the consensus data required for generating the rewards for shard nodes +func (sp *specialAddresses) ConsensusShardRewardData() *data.ConsensusRewardData { + return sp.shardConsensusData +} + +// SetMetaConsensusData sets the rewards addresses for the metachain nodes +func (sp *specialAddresses) SetMetaConsensusData(randomness []byte, round uint64, epoch uint32) error { + rewardAddresses, err := sp.nodesCoordinator.GetValidatorsRewardsAddresses( + randomness, + round, + sharding.MetachainShardId, + ) + if err != nil { + return err + } + + sp.metaConsensusData = append(sp.metaConsensusData, &data.ConsensusRewardData{ + Round: round, + Epoch: epoch, + Addresses: rewardAddresses, + }) + + return nil +} + +// ClearMetaConsensusData clears the previously set addresses for rewarding metachain nodes +func (sp *specialAddresses) ClearMetaConsensusData() { + sp.metaConsensusData = make([]*data.ConsensusRewardData, 0) +} + +// ConsensusMetaRewardData provides the consensus data required for generating the rewards for metachain nodes +func (sp *specialAddresses) ConsensusMetaRewardData() []*data.ConsensusRewardData { + return sp.metaConsensusData +} + +// LeaderAddress provides leader address +func (sp *specialAddresses) LeaderAddress() []byte { + if sp.shardConsensusData == nil || len(sp.shardConsensusData.Addresses) == 0 { + return nil + } + + return []byte(sp.shardConsensusData.Addresses[0]) +} + +// Round returns the round for the current block +func (sp *specialAddresses) Round() uint64 { + if sp.shardConsensusData == nil { + return 0 + } + + return sp.shardConsensusData.Round +} + +// Epoch returns the epoch for the current block +func (sp *specialAddresses) Epoch() uint32 { + if sp.shardConsensusData == nil { + return 0 + } + + return sp.shardConsensusData.Epoch +} + +// ShardIdForAddress calculates shard id for address +func (sp *specialAddresses) ShardIdForAddress(pubKey []byte) (uint32, error) { + convAdr, err := sp.adrConv.CreateAddressFromPublicKeyBytes(pubKey) + if err != nil { + return 0, err + } + + return sp.shardCoordinator.ComputeId(convAdr), nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (sp *specialAddresses) IsInterfaceNil() bool { + if sp == nil { + return true + } + return false +} diff --git 
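A minimal usage sketch of the specialAddresses holder introduced above, mirroring the setup used by its tests: the test doubles come from data/mock, the import paths assume the repository layout of this patch, and the printed "address00" is simply what NewNodesCoordinatorMock generates for shard 0, validator 0.

package main

import (
	"fmt"

	"github.com/ElrondNetwork/elrond-go/data/address"
	"github.com/ElrondNetwork/elrond-go/data/mock"
)

func main() {
	// Wire the holder with the same mocks the unit tests use.
	sp, err := address.NewSpecialAddressHolder(
		[]byte("community"),
		[]byte("burn"),
		&mock.AddressConverterMock{},
		mock.NewMultiShardsCoordinatorMock(1),
		mock.NewNodesCoordinatorMock(),
	)
	if err != nil {
		panic(err)
	}

	// Resolve the consensus group for shard 0 at round 1, epoch 2; the nodes
	// coordinator returns the reward addresses of that group and the leader
	// is the first address in the list.
	if err := sp.SetShardConsensusData([]byte("randomness"), 1, 2, 0); err != nil {
		panic(err)
	}

	fmt.Println(string(sp.LeaderAddress()), sp.Round(), sp.Epoch())
	// with the mock nodes coordinator this prints: address00 1 2
}
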
a/data/address/specialAddresses_test.go b/data/address/specialAddresses_test.go new file mode 100644 index 00000000000..6fbf64f894a --- /dev/null +++ b/data/address/specialAddresses_test.go @@ -0,0 +1,307 @@ +package address + +import ( + "testing" + + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/mock" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/stretchr/testify/assert" +) + +type Args struct { + ElrondCommunityAddress []byte + BurnAddress []byte + AddrConv state.AddressConverter + ShardCoordinator sharding.Coordinator + NodesCoordiator sharding.NodesCoordinator +} + +func initDefaultArgs() *Args { + args := &Args{ + ElrondCommunityAddress: []byte("community"), + BurnAddress: []byte("burn"), + AddrConv: &mock.AddressConverterMock{}, + ShardCoordinator: mock.NewMultiShardsCoordinatorMock(1), + NodesCoordiator: mock.NewNodesCoordinatorMock(), + } + + return args +} + +func createSpecialAddressFromArgs(args *Args) (process.SpecialAddressHandler, error) { + addr, err := NewSpecialAddressHolder( + args.ElrondCommunityAddress, + args.BurnAddress, + args.AddrConv, + args.ShardCoordinator, + args.NodesCoordiator, + ) + return addr, err +} + +func createDefaultSpecialAddress() process.SpecialAddressHandler { + args := initDefaultArgs() + addr, _ := createSpecialAddressFromArgs(args) + + return addr +} + +func TestNewSpecialAddressHolderNilCommunityAddressShouldErr(t *testing.T) { + t.Parallel() + + args := initDefaultArgs() + args.ElrondCommunityAddress = nil + addr, err := createSpecialAddressFromArgs(args) + + assert.Nil(t, addr) + assert.Equal(t, data.ErrNilElrondAddress, err) +} + +func TestNewSpecialAddressHolderNilBurnAddressShouldErr(t *testing.T) { + t.Parallel() + + args := initDefaultArgs() + args.BurnAddress = nil + addr, err := createSpecialAddressFromArgs(args) + + assert.Nil(t, addr) + assert.Equal(t, data.ErrNilBurnAddress, err) +} + +func TestNewSpecialAddressHolderNilAddressConverterShouldErr(t *testing.T) { + t.Parallel() + + args := initDefaultArgs() + args.AddrConv = nil + addr, err := createSpecialAddressFromArgs(args) + + assert.Nil(t, addr) + assert.Equal(t, data.ErrNilAddressConverter, err) +} + +func TestNewSpecialAddressHolderNilShardCoordinatorShouldErr(t *testing.T) { + t.Parallel() + + args := initDefaultArgs() + args.ShardCoordinator = nil + addr, err := createSpecialAddressFromArgs(args) + + assert.Nil(t, addr) + assert.Equal(t, data.ErrNilShardCoordinator, err) +} + +func TestNewSpecialAddressHolderNilNodesCoordinatorShouldErr(t *testing.T) { + t.Parallel() + + args := initDefaultArgs() + args.NodesCoordiator = nil + addr, err := createSpecialAddressFromArgs(args) + + assert.Nil(t, addr) + assert.Equal(t, data.ErrNilNodesCoordinator, err) +} + +func TestNewSpecialAddressHolderOK(t *testing.T) { + t.Parallel() + + args := initDefaultArgs() + addr, err := createSpecialAddressFromArgs(args) + + assert.NotNil(t, addr) + assert.Nil(t, err) +} + +func TestSpecialAddresses_ClearMetaConsensusDataOK(t *testing.T) { + t.Parallel() + + addr := createDefaultSpecialAddress() + + addr.ClearMetaConsensusData() + metaConsensusData := addr.ConsensusMetaRewardData() + + assert.Equal(t, 0, len(metaConsensusData)) +} + +func TestSpecialAddresses_SetMetaConsensusDataSettingOnceOK(t *testing.T) { + t.Parallel() + + addr := createDefaultSpecialAddress() + + err := addr.SetMetaConsensusData([]byte("randomness"), 0, 0) + assert.Nil(t, err) 
+} + +func TestSpecialAddresses_SetMetaConsensusDataSettingMultipleOK(t *testing.T) { + t.Parallel() + + addr := createDefaultSpecialAddress() + nConsensuses := 10 + + for i := 0; i < nConsensuses; i++ { + err := addr.SetMetaConsensusData([]byte("randomness"), uint64(i), 0) + assert.Nil(t, err) + } +} + +func TestSpecialAddresses_ConsensusMetaRewardDataNoConsensusData(t *testing.T) { + t.Parallel() + + addr := createDefaultSpecialAddress() + metaConsensusData := addr.ConsensusMetaRewardData() + + assert.Equal(t, 0, len(metaConsensusData)) +} + +func TestSpecialAddresses_ConsensusMetaRewardDataOneConsensusDataOK(t *testing.T) { + t.Parallel() + + addr := createDefaultSpecialAddress() + + _ = addr.SetMetaConsensusData([]byte("randomness"), 1, 2) + metaConsensusData := addr.ConsensusMetaRewardData() + + assert.Equal(t, 1, len(metaConsensusData)) + assert.Equal(t, uint64(1), metaConsensusData[0].Round) + assert.Equal(t, uint32(2), metaConsensusData[0].Epoch) +} + +func TestSpecialAddresses_ConsensusMetaRewardDataMultipleConsensusesDataOK(t *testing.T) { + t.Parallel() + + addr := createDefaultSpecialAddress() + nConsensuses := 10 + + for i := 0; i < nConsensuses; i++ { + _ = addr.SetMetaConsensusData([]byte("randomness"), uint64(i+1), uint32(i+2)) + } + + metaConsensusData := addr.ConsensusMetaRewardData() + assert.Equal(t, nConsensuses, len(metaConsensusData)) + + for i := 0; i < nConsensuses; i++ { + assert.Equal(t, uint64(i+1), metaConsensusData[i].Round) + assert.Equal(t, uint32(i+2), metaConsensusData[i].Epoch) + } +} + +func TestSpecialAddresses_ConsensusShardRewardDataNoData(t *testing.T) { + t.Parallel() + + addr := createDefaultSpecialAddress() + shardRewardData := addr.ConsensusShardRewardData() + + assert.Nil(t, shardRewardData) +} + +func TestSpecialAddresses_ConsensusShardRewardDataExistingData(t *testing.T) { + t.Parallel() + + addr := createDefaultSpecialAddress() + _ = addr.SetShardConsensusData([]byte("randomness"), 1, 2, 0) + shardRewardData := addr.ConsensusShardRewardData() + + assert.NotNil(t, shardRewardData) + assert.Equal(t, uint64(1), shardRewardData.Round) + assert.Equal(t, uint32(2), shardRewardData.Epoch) +} + +func TestSpecialAddresses_SetShardConsensusData(t *testing.T) { + t.Parallel() + + addr := createDefaultSpecialAddress() + err := addr.SetShardConsensusData([]byte("randomness"), 1, 2, 0) + + assert.Nil(t, err) +} + +func TestSpecialAddresses_BurnAddress(t *testing.T) { + t.Parallel() + + addr := createDefaultSpecialAddress() + burnAddr := addr.BurnAddress() + + assert.Equal(t, []byte("burn"), burnAddr) +} + +func TestSpecialAddresses_ElrondCommunityAddress(t *testing.T) { + t.Parallel() + + addr := createDefaultSpecialAddress() + communityAddr := addr.ElrondCommunityAddress() + + assert.Equal(t, []byte("community"), communityAddr) +} + +func TestSpecialAddresses_LeaderAddressNoSetShardConsensusData(t *testing.T) { + t.Parallel() + + addr := createDefaultSpecialAddress() + leaderAddress := addr.LeaderAddress() + + assert.Nil(t, leaderAddress) +} + +func TestSpecialAddresses_LeaderAddressSetShardConsensusData(t *testing.T) { + t.Parallel() + + addr := createDefaultSpecialAddress() + _ = addr.SetShardConsensusData([]byte("randomness"), 0, 0, 0) + leaderAddress := addr.LeaderAddress() + + assert.Equal(t, "address00", string(leaderAddress)) +} + +func TestSpecialAddresses_Round(t *testing.T) { + t.Parallel() + + addr := createDefaultSpecialAddress() + _ = addr.SetShardConsensusData([]byte("randomness"), 1, 2, 0) + round := addr.Round() + + assert.Equal(t, 
uint64(1), round) +} + +func TestSpecialAddresses_Epoch(t *testing.T) { + t.Parallel() + + addr := createDefaultSpecialAddress() + _ = addr.SetShardConsensusData([]byte("randomness"), 1, 2, 0) + epoch := addr.Epoch() + + assert.Equal(t, uint32(2), epoch) +} + +func TestSpecialAddresses_SetElrondCommunityAddress(t *testing.T) { + addr := createDefaultSpecialAddress() + communityAddress := addr.ElrondCommunityAddress() + + assert.Equal(t, []byte("community"), communityAddress) +} + +func TestSpecialAddresses_ShardIdForAddress(t *testing.T) { + args := initDefaultArgs() + args.ShardCoordinator = &mock.MultipleShardsCoordinatorMock{ + NoShards: 4, + ComputeIdCalled: func(address state.AddressContainer) uint32 { + return uint32(address.Bytes()[0]) + }, + CurrentShard: 0, + } + addr, _ := createSpecialAddressFromArgs(args) + shardId, err := addr.ShardIdForAddress([]byte{3}) + + assert.Nil(t, err) + assert.Equal(t, uint32(3), shardId) +} + +func TestSpecialAddresses_IsInterfaceNil(t *testing.T) { + addr := &specialAddresses{} + + addr = nil + isNil := addr.IsInterfaceNil() + + assert.True(t, isNil) +} diff --git a/data/block/block.go b/data/block/block.go index 5485ad58750..123822c2e22 100644 --- a/data/block/block.go +++ b/data/block/block.go @@ -34,8 +34,10 @@ const ( PeerBlock Type = 2 // SmartContractResultBlock identifies a miniblock holding smartcontractresults SmartContractResultBlock Type = 3 + // RewardsBlock identifies a miniblock holding accumulated rewards, both system generated and from tx fees + RewardsBlock Type = 4 // InvalidBlock identifies identifies an invalid miniblock - InvalidBlock Type = 4 + InvalidBlock Type = 5 ) // String returns the string representation of the Type @@ -49,6 +51,8 @@ func (bType Type) String() string { return "PeerBody" case SmartContractResultBlock: return "SmartContractResultBody" + case RewardsBlock: + return "RewardsBody" case InvalidBlock: return "InvalidBlock" default: diff --git a/data/CapnpHelper.go b/data/capnpHelper.go similarity index 100% rename from data/CapnpHelper.go rename to data/capnpHelper.go diff --git a/data/consensusRewardData.go b/data/consensusRewardData.go new file mode 100644 index 00000000000..731838d5322 --- /dev/null +++ b/data/consensusRewardData.go @@ -0,0 +1,8 @@ +package data + +// ConsensusRewardData holds the required data for rewarding validators in a specific round and epoch +type ConsensusRewardData struct { + Round uint64 + Epoch uint32 + Addresses []string +} diff --git a/data/errors.go b/data/errors.go index 200f5c2d76e..57509288aeb 100644 --- a/data/errors.go +++ b/data/errors.go @@ -27,3 +27,18 @@ var ErrMiniBlockEmpty = errors.New("mini block is empty") // ErrWrongTypeAssertion signals that wrong type was provided var ErrWrongTypeAssertion = errors.New("wrong type assertion") + +// ErrNilElrondAddress signals that nil elrond address was provided +var ErrNilElrondAddress = errors.New("nil elrond address") + +// ErrNilBurnAddress signals that nil burn address was provided +var ErrNilBurnAddress = errors.New("nil burn address") + +// ErrNilAddressConverter signals that nil address converter was provided +var ErrNilAddressConverter = errors.New("nil address converter") + +// ErrNilShardCoordinator signals that nil shard coordinator was provided +var ErrNilShardCoordinator = errors.New("nil shard coordinator") + +// ErrNilNodesCoordinator signals that nil shard coordinator was provided +var ErrNilNodesCoordinator = errors.New("nil nodes coordinator") diff --git a/data/mock/addressConverterMock.go 
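A small sketch tying together two of the additions above: the ConsensusRewardData carrier and the new RewardsBlock miniblock type. The values are illustrative; the String() output follows the switch added to block.go, and the import paths assume the repository layout of this patch.

package main

import (
	"fmt"

	"github.com/ElrondNetwork/elrond-go/data"
	"github.com/ElrondNetwork/elrond-go/data/block"
)

func main() {
	// Reward data for one consensus group, in the shape produced by the
	// special addresses holder (round, epoch, reward addresses).
	reward := data.ConsensusRewardData{
		Round:     1,
		Epoch:     2,
		Addresses: []string{"address00", "address01"},
	}

	// Reward miniblocks carry the new type; note InvalidBlock moved from 4 to 5.
	fmt.Println(block.RewardsBlock.String(), len(reward.Addresses))
	// prints: RewardsBody 2
}
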
b/data/mock/addressConverterMock.go new file mode 100644 index 00000000000..de5572b249e --- /dev/null +++ b/data/mock/addressConverterMock.go @@ -0,0 +1,66 @@ +package mock + +import ( + "bytes" + "encoding/hex" + "errors" + + "github.com/ElrondNetwork/elrond-go/data/state" +) + +var errFailure = errors.New("failure") + +type AddressConverterMock struct { + Fail bool + CreateAddressFromPublicKeyBytesRetErrForValue []byte +} + +func (acm *AddressConverterMock) CreateAddressFromPublicKeyBytes(pubKey []byte) (state.AddressContainer, error) { + if acm.Fail { + return nil, errFailure + } + + if acm.CreateAddressFromPublicKeyBytesRetErrForValue != nil { + if bytes.Equal(acm.CreateAddressFromPublicKeyBytesRetErrForValue, pubKey) { + return nil, errors.New("error required") + } + } + + return NewAddressMockFromBytes(pubKey), nil +} + +func (acm *AddressConverterMock) ConvertToHex(addressContainer state.AddressContainer) (string, error) { + if acm.Fail { + return "", errFailure + } + + return hex.EncodeToString(addressContainer.Bytes()), nil +} + +func (acm *AddressConverterMock) CreateAddressFromHex(hexAddress string) (state.AddressContainer, error) { + if acm.Fail { + return nil, errFailure + } + + panic("implement me") +} + +func (acm *AddressConverterMock) PrepareAddressBytes(addressBytes []byte) ([]byte, error) { + if acm.Fail { + return nil, errFailure + } + + panic("implement me") +} + +func (acm *AddressConverterMock) AddressLen() int { + return 32 +} + +// IsInterfaceNil returns true if there is no value under the interface +func (acm *AddressConverterMock) IsInterfaceNil() bool { + if acm == nil { + return true + } + return false +} diff --git a/data/mock/multipleShardsCoordinatorMock.go b/data/mock/multipleShardsCoordinatorMock.go new file mode 100644 index 00000000000..38a5ab1814e --- /dev/null +++ b/data/mock/multipleShardsCoordinatorMock.go @@ -0,0 +1,70 @@ +package mock + +import ( + "fmt" + + "github.com/ElrondNetwork/elrond-go/data/state" +) + +type MultipleShardsCoordinatorMock struct { + NoShards uint32 + ComputeIdCalled func(address state.AddressContainer) uint32 + CurrentShard uint32 +} + +func NewMultipleShardsCoordinatorMock() *MultipleShardsCoordinatorMock { + return &MultipleShardsCoordinatorMock{NoShards: 1} +} + +func NewMultiShardsCoordinatorMock(nrShard uint32) *MultipleShardsCoordinatorMock { + return &MultipleShardsCoordinatorMock{NoShards: nrShard} +} + +func (scm *MultipleShardsCoordinatorMock) NumberOfShards() uint32 { + return scm.NoShards +} + +func (scm *MultipleShardsCoordinatorMock) ComputeId(address state.AddressContainer) uint32 { + if scm.ComputeIdCalled == nil { + return scm.SelfId() + } + return scm.ComputeIdCalled(address) +} + +func (scm *MultipleShardsCoordinatorMock) SelfId() uint32 { + return scm.CurrentShard +} + +func (scm *MultipleShardsCoordinatorMock) SetSelfId(shardId uint32) error { + return nil +} + +func (scm *MultipleShardsCoordinatorMock) SameShard(firstAddress, secondAddress state.AddressContainer) bool { + return true +} + +func (scm *MultipleShardsCoordinatorMock) SetNoShards(noShards uint32) { + scm.NoShards = noShards +} + +// CommunicationIdentifier returns the identifier between current shard ID and destination shard ID +// identifier is generated such as the first shard from identifier is always smaller than the last +func (scm *MultipleShardsCoordinatorMock) CommunicationIdentifier(destShardID uint32) string { + if destShardID == scm.CurrentShard { + return fmt.Sprintf("_%d", scm.CurrentShard) + } + + if destShardID < 
scm.CurrentShard { + return fmt.Sprintf("_%d_%d", destShardID, scm.CurrentShard) + } + + return fmt.Sprintf("_%d_%d", scm.CurrentShard, destShardID) +} + +// IsInterfaceNil returns true if there is no value under the interface +func (scm *MultipleShardsCoordinatorMock) IsInterfaceNil() bool { + if scm == nil { + return true + } + return false +} diff --git a/data/mock/nodesCoordinatorMock.go b/data/mock/nodesCoordinatorMock.go new file mode 100644 index 00000000000..56143df1eac --- /dev/null +++ b/data/mock/nodesCoordinatorMock.go @@ -0,0 +1,191 @@ +package mock + +import ( + "bytes" + "fmt" + "math/big" + + "github.com/ElrondNetwork/elrond-go/sharding" +) + +// NodesCoordinator defines the behaviour of a struct able to do validator group selection +type NodesCoordinatorMock struct { + Validators map[uint32][]sharding.Validator + ShardConsensusSize uint32 + MetaConsensusSize uint32 + ShardId uint32 + NbShards uint32 + GetSelectedPublicKeysCalled func(selection []byte, shardId uint32) (publicKeys []string, err error) + GetValidatorsPublicKeysCalled func(randomness []byte, round uint64, shardId uint32) ([]string, error) + GetValidatorsRewardsAddressesCalled func(randomness []byte, round uint64, shardId uint32) ([]string, error) + LoadNodesPerShardsCalled func(nodes map[uint32][]sharding.Validator) error + ComputeValidatorsGroupCalled func(randomness []byte, round uint64, shardId uint32) (validatorsGroup []sharding.Validator, err error) + GetValidatorWithPublicKeyCalled func(publicKey []byte) (validator sharding.Validator, shardId uint32, err error) +} + +func NewNodesCoordinatorMock() *NodesCoordinatorMock { + nbShards := uint32(1) + nodesPerShard := 2 + validatorsMap := make(map[uint32][]sharding.Validator) + + shards := make([]uint32, nbShards+1) + for i := uint32(0); i < nbShards; i++ { + shards[i] = i + } + shards[nbShards] = sharding.MetachainShardId + + for _, sh := range shards { + validatorsList := make([]sharding.Validator, nodesPerShard) + for v := 0; v < nodesPerShard; v++ { + validatorsList[v], _ = sharding.NewValidator( + big.NewInt(10), + 1, + []byte(fmt.Sprintf("pubKey%d%d", sh, v)), + []byte(fmt.Sprintf("address%d%d", sh, v)), + ) + } + validatorsMap[sh] = validatorsList + } + + return &NodesCoordinatorMock{ + ShardConsensusSize: 1, + MetaConsensusSize: 1, + ShardId: 0, + NbShards: nbShards, + Validators: validatorsMap, + } +} + +func (ncm *NodesCoordinatorMock) GetSelectedPublicKeys(selection []byte, shardId uint32) (publicKeys []string, err error) { + if ncm.GetSelectedPublicKeysCalled != nil { + return ncm.GetSelectedPublicKeysCalled(selection, shardId) + } + + if len(ncm.Validators) == 0 { + return nil, sharding.ErrNilInputNodesMap + } + + pubKeys := make([]string, 0) + + for _, v := range ncm.Validators[shardId] { + pubKeys = append(pubKeys, string(v.PubKey())) + } + + return pubKeys, nil +} + +func (ncm *NodesCoordinatorMock) GetValidatorsPublicKeys( + randomness []byte, + round uint64, + shardId uint32, +) ([]string, error) { + if ncm.GetValidatorsPublicKeysCalled != nil { + return ncm.GetValidatorsPublicKeysCalled(randomness, round, shardId) + } + + validators, err := ncm.ComputeValidatorsGroup(randomness, round, shardId) + if err != nil { + return nil, err + } + + valGrStr := make([]string, 0) + + for _, v := range validators { + valGrStr = append(valGrStr, string(v.PubKey())) + } + + return valGrStr, nil +} + +func (ncm *NodesCoordinatorMock) GetValidatorsRewardsAddresses( + randomness []byte, + round uint64, + shardId uint32, +) ([]string, error) { + if 
ncm.GetValidatorsPublicKeysCalled != nil { + return ncm.GetValidatorsRewardsAddressesCalled(randomness, round, shardId) + } + + validators, err := ncm.ComputeValidatorsGroup(randomness, round, shardId) + if err != nil { + return nil, err + } + + addresses := make([]string, 0) + for _, v := range validators { + addresses = append(addresses, string(v.Address())) + } + + return addresses, nil +} + +func (ncm *NodesCoordinatorMock) SetNodesPerShards(nodes map[uint32][]sharding.Validator) error { + if ncm.LoadNodesPerShardsCalled != nil { + return ncm.LoadNodesPerShardsCalled(nodes) + } + + if nodes == nil { + return sharding.ErrNilInputNodesMap + } + + ncm.Validators = nodes + + return nil +} + +func (ncm *NodesCoordinatorMock) ComputeValidatorsGroup( + randomess []byte, + round uint64, + shardId uint32, +) ([]sharding.Validator, error) { + var consensusSize uint32 + + if ncm.ComputeValidatorsGroupCalled != nil { + return ncm.ComputeValidatorsGroupCalled(randomess, round, shardId) + } + + if ncm.ShardId == sharding.MetachainShardId { + consensusSize = ncm.MetaConsensusSize + } else { + consensusSize = ncm.ShardConsensusSize + } + + if randomess == nil { + return nil, sharding.ErrNilRandomness + } + + validatorsGroup := make([]sharding.Validator, 0) + + for i := uint32(0); i < consensusSize; i++ { + validatorsGroup = append(validatorsGroup, ncm.Validators[shardId][i]) + } + + return validatorsGroup, nil +} + +func (ncm *NodesCoordinatorMock) GetValidatorWithPublicKey(publicKey []byte) (sharding.Validator, uint32, error) { + if ncm.GetValidatorWithPublicKeyCalled != nil { + return ncm.GetValidatorWithPublicKeyCalled(publicKey) + } + + if publicKey == nil { + return nil, 0, sharding.ErrNilPubKey + } + + for shardId, shardEligible := range ncm.Validators { + for i := 0; i < len(shardEligible); i++ { + if bytes.Equal(publicKey, shardEligible[i].PubKey()) { + return shardEligible[i], shardId, nil + } + } + } + + return nil, 0, sharding.ErrValidatorNotFound +} + +func (ncm *NodesCoordinatorMock) IsInterfaceNil() bool { + if ncm == nil { + return true + } + return false +} diff --git a/data/mock/txTypeHandlerMock.go b/data/mock/txTypeHandlerMock.go new file mode 100644 index 00000000000..2fcaeaf25d3 --- /dev/null +++ b/data/mock/txTypeHandlerMock.go @@ -0,0 +1,18 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/process" +) + +type TxTypeHandlerMock struct { + ComputeTransactionTypeCalled func(tx data.TransactionHandler) (process.TransactionType, error) +} + +func (th *TxTypeHandlerMock) ComputeTransactionType(tx data.TransactionHandler) (process.TransactionType, error) { + if th.ComputeTransactionTypeCalled == nil { + return process.MoveBalance, nil + } + + return th.ComputeTransactionTypeCalled(tx) +} diff --git a/data/mock/unsignedTxHandlerMock.go b/data/mock/unsignedTxHandlerMock.go new file mode 100644 index 00000000000..7097c4a31e8 --- /dev/null +++ b/data/mock/unsignedTxHandlerMock.go @@ -0,0 +1,53 @@ +package mock + +import ( + "math/big" + + "github.com/ElrondNetwork/elrond-go/data" +) + +type UnsignedTxHandlerMock struct { + CleanProcessedUtxsCalled func() + ProcessTransactionFeeCalled func(cost *big.Int) + CreateAllUTxsCalled func() []data.TransactionHandler + VerifyCreatedUTxsCalled func() error + AddTxFeeFromBlockCalled func(tx data.TransactionHandler) +} + +func (ut *UnsignedTxHandlerMock) AddRewardTxFromBlock(tx data.TransactionHandler) { + if ut.AddTxFeeFromBlockCalled == nil { + return + } + + ut.AddTxFeeFromBlockCalled(tx) 
+} + +func (ut *UnsignedTxHandlerMock) CleanProcessedUTxs() { + if ut.CleanProcessedUtxsCalled == nil { + return + } + + ut.CleanProcessedUtxsCalled() +} + +func (ut *UnsignedTxHandlerMock) ProcessTransactionFee(cost *big.Int) { + if ut.ProcessTransactionFeeCalled == nil { + return + } + + ut.ProcessTransactionFeeCalled(cost) +} + +func (ut *UnsignedTxHandlerMock) CreateAllUTxs() []data.TransactionHandler { + if ut.CreateAllUTxsCalled == nil { + return nil + } + return ut.CreateAllUTxsCalled() +} + +func (ut *UnsignedTxHandlerMock) VerifyCreatedUTxs() error { + if ut.VerifyCreatedUTxsCalled == nil { + return nil + } + return ut.VerifyCreatedUTxsCalled() +} diff --git a/data/rewardTx/capnp/schema.capnp b/data/rewardTx/capnp/schema.capnp new file mode 100644 index 00000000000..8b963360616 --- /dev/null +++ b/data/rewardTx/capnp/schema.capnp @@ -0,0 +1,19 @@ +@0xa6e50837d4563fc2; +using Go = import "/go.capnp"; +$Go.package("capnp"); +$Go.import("_"); + +struct RewardTxCapn { + round @0: UInt64; + epoch @1: UInt32; + value @2: Data; + rcvAddr @3: Data; + shardId @4: UInt32; +} + +##compile with: + +## +## +## capnpc -I$GOPATH/src/github.com/glycerine/go-capnproto -ogo $GOPATH/src/github.com/ElrondNetwork/elrond-go/data/rewardTx/capnp/schema.capnp + diff --git a/data/rewardTx/capnp/schema.capnp.go b/data/rewardTx/capnp/schema.capnp.go new file mode 100644 index 00000000000..f9e5247b348 --- /dev/null +++ b/data/rewardTx/capnp/schema.capnp.go @@ -0,0 +1,271 @@ +package capnp + +// AUTO GENERATED - DO NOT EDIT + +import ( + "bufio" + "bytes" + "encoding/json" + C "github.com/glycerine/go-capnproto" + "io" +) + +type RewardTxCapn C.Struct + +func NewRewardTxCapn(s *C.Segment) RewardTxCapn { return RewardTxCapn(s.NewStruct(16, 2)) } +func NewRootRewardTxCapn(s *C.Segment) RewardTxCapn { return RewardTxCapn(s.NewRootStruct(16, 2)) } +func AutoNewRewardTxCapn(s *C.Segment) RewardTxCapn { return RewardTxCapn(s.NewStructAR(16, 2)) } +func ReadRootRewardTxCapn(s *C.Segment) RewardTxCapn { return RewardTxCapn(s.Root(0).ToStruct()) } +func (s RewardTxCapn) Round() uint64 { return C.Struct(s).Get64(0) } +func (s RewardTxCapn) SetRound(v uint64) { C.Struct(s).Set64(0, v) } +func (s RewardTxCapn) Epoch() uint32 { return C.Struct(s).Get32(8) } +func (s RewardTxCapn) SetEpoch(v uint32) { C.Struct(s).Set32(8, v) } +func (s RewardTxCapn) Value() []byte { return C.Struct(s).GetObject(0).ToData() } +func (s RewardTxCapn) SetValue(v []byte) { C.Struct(s).SetObject(0, s.Segment.NewData(v)) } +func (s RewardTxCapn) RcvAddr() []byte { return C.Struct(s).GetObject(1).ToData() } +func (s RewardTxCapn) SetRcvAddr(v []byte) { C.Struct(s).SetObject(1, s.Segment.NewData(v)) } +func (s RewardTxCapn) ShardId() uint32 { return C.Struct(s).Get32(12) } +func (s RewardTxCapn) SetShardId(v uint32) { C.Struct(s).Set32(12, v) } +func (s RewardTxCapn) WriteJSON(w io.Writer) error { + b := bufio.NewWriter(w) + var err error + var buf []byte + _ = buf + err = b.WriteByte('{') + if err != nil { + return err + } + _, err = b.WriteString("\"round\":") + if err != nil { + return err + } + { + s := s.Round() + buf, err = json.Marshal(s) + if err != nil { + return err + } + _, err = b.Write(buf) + if err != nil { + return err + } + } + err = b.WriteByte(',') + if err != nil { + return err + } + _, err = b.WriteString("\"epoch\":") + if err != nil { + return err + } + { + s := s.Epoch() + buf, err = json.Marshal(s) + if err != nil { + return err + } + _, err = b.Write(buf) + if err != nil { + return err + } + } + err = b.WriteByte(',') + if 
err != nil { + return err + } + _, err = b.WriteString("\"value\":") + if err != nil { + return err + } + { + s := s.Value() + buf, err = json.Marshal(s) + if err != nil { + return err + } + _, err = b.Write(buf) + if err != nil { + return err + } + } + err = b.WriteByte(',') + if err != nil { + return err + } + _, err = b.WriteString("\"rcvAddr\":") + if err != nil { + return err + } + { + s := s.RcvAddr() + buf, err = json.Marshal(s) + if err != nil { + return err + } + _, err = b.Write(buf) + if err != nil { + return err + } + } + err = b.WriteByte(',') + if err != nil { + return err + } + _, err = b.WriteString("\"shardId\":") + if err != nil { + return err + } + { + s := s.ShardId() + buf, err = json.Marshal(s) + if err != nil { + return err + } + _, err = b.Write(buf) + if err != nil { + return err + } + } + err = b.WriteByte('}') + if err != nil { + return err + } + err = b.Flush() + return err +} +func (s RewardTxCapn) MarshalJSON() ([]byte, error) { + b := bytes.Buffer{} + err := s.WriteJSON(&b) + return b.Bytes(), err +} +func (s RewardTxCapn) WriteCapLit(w io.Writer) error { + b := bufio.NewWriter(w) + var err error + var buf []byte + _ = buf + err = b.WriteByte('(') + if err != nil { + return err + } + _, err = b.WriteString("round = ") + if err != nil { + return err + } + { + s := s.Round() + buf, err = json.Marshal(s) + if err != nil { + return err + } + _, err = b.Write(buf) + if err != nil { + return err + } + } + _, err = b.WriteString(", ") + if err != nil { + return err + } + _, err = b.WriteString("epoch = ") + if err != nil { + return err + } + { + s := s.Epoch() + buf, err = json.Marshal(s) + if err != nil { + return err + } + _, err = b.Write(buf) + if err != nil { + return err + } + } + _, err = b.WriteString(", ") + if err != nil { + return err + } + _, err = b.WriteString("value = ") + if err != nil { + return err + } + { + s := s.Value() + buf, err = json.Marshal(s) + if err != nil { + return err + } + _, err = b.Write(buf) + if err != nil { + return err + } + } + _, err = b.WriteString(", ") + if err != nil { + return err + } + _, err = b.WriteString("rcvAddr = ") + if err != nil { + return err + } + { + s := s.RcvAddr() + buf, err = json.Marshal(s) + if err != nil { + return err + } + _, err = b.Write(buf) + if err != nil { + return err + } + } + _, err = b.WriteString(", ") + if err != nil { + return err + } + _, err = b.WriteString("shardId = ") + if err != nil { + return err + } + { + s := s.ShardId() + buf, err = json.Marshal(s) + if err != nil { + return err + } + _, err = b.Write(buf) + if err != nil { + return err + } + } + err = b.WriteByte(')') + if err != nil { + return err + } + err = b.Flush() + return err +} +func (s RewardTxCapn) MarshalCapLit() ([]byte, error) { + b := bytes.Buffer{} + err := s.WriteCapLit(&b) + return b.Bytes(), err +} + +type RewardTxCapn_List C.PointerList + +func NewRewardTxCapnList(s *C.Segment, sz int) RewardTxCapn_List { + return RewardTxCapn_List(s.NewCompositeList(16, 2, sz)) +} +func (s RewardTxCapn_List) Len() int { return C.PointerList(s).Len() } +func (s RewardTxCapn_List) At(i int) RewardTxCapn { + return RewardTxCapn(C.PointerList(s).At(i).ToStruct()) +} +func (s RewardTxCapn_List) ToArray() []RewardTxCapn { + n := s.Len() + a := make([]RewardTxCapn, n) + for i := 0; i < n; i++ { + a[i] = s.At(i) + } + return a +} +func (s RewardTxCapn_List) Set(i int, item RewardTxCapn) { C.PointerList(s).Set(i, C.Object(item)) } diff --git a/data/rewardTx/rewardTx.go b/data/rewardTx/rewardTx.go new file mode 100644 index 
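For orientation, a minimal round-trip over the generated RewardTxCapn accessors above; it assumes the glycerine/go-capnproto dependency is available, and the Save/Load methods of RewardTx below wrap exactly this kind of segment handling. Value is omitted here because rewardTx.go stores a gob-encoded big.Int in that field.

package main

import (
	"fmt"

	"github.com/ElrondNetwork/elrond-go/data/rewardTx/capnp"
	capn "github.com/glycerine/go-capnproto"
)

func main() {
	// Allocate a segment, create a root RewardTxCapn on it, then exercise the
	// setters/getters generated from schema.capnp.
	seg := capn.NewBuffer(nil)
	msg := capnp.NewRootRewardTxCapn(seg)

	msg.SetRound(1)
	msg.SetEpoch(2)
	msg.SetShardId(10)
	msg.SetRcvAddr([]byte("receiver_address"))

	fmt.Println(msg.Round(), msg.Epoch(), msg.ShardId(), string(msg.RcvAddr()))
	// prints: 1 2 10 receiver_address
}
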
00000000000..0bee4c200c1 --- /dev/null +++ b/data/rewardTx/rewardTx.go @@ -0,0 +1,119 @@ +package rewardTx + +import ( + "io" + "math/big" + + "github.com/ElrondNetwork/elrond-go/data/rewardTx/capnp" + "github.com/glycerine/go-capnproto" +) + +// RewardTx holds the data for a reward transaction +type RewardTx struct { + Round uint64 `capid:"1" json:"round"` + Epoch uint32 `capid:"2" json:"epoch"` + Value *big.Int `capid:"3" json:"value"` + RcvAddr []byte `capid:"4" json:"receiver"` + ShardId uint32 `capid:"5" json:"shardId"` +} + +// Save saves the serialized data of a RewardTx into a stream through Capnp protocol +func (scr *RewardTx) Save(w io.Writer) error { + seg := capn.NewBuffer(nil) + RewardTxGoToCapn(seg, scr) + _, err := seg.WriteTo(w) + return err +} + +// Load loads the data from the stream into a RewardTx object through Capnp protocol +func (scr *RewardTx) Load(r io.Reader) error { + capMsg, err := capn.ReadFromStream(r, nil) + if err != nil { + return err + } + + z := capnp.ReadRootRewardTxCapn(capMsg) + RewardTxCapnToGo(z, scr) + return nil +} + +// RewardTxCapnToGo is a helper function to copy fields from a RewardTxCapn object to a RewardTx object +func RewardTxCapnToGo(src capnp.RewardTxCapn, dest *RewardTx) *RewardTx { + if dest == nil { + dest = &RewardTx{} + } + + if dest.Value == nil { + dest.Value = big.NewInt(0) + } + + dest.Epoch = src.Epoch() + dest.Round = src.Round() + err := dest.Value.GobDecode(src.Value()) + + if err != nil { + return nil + } + + dest.RcvAddr = src.RcvAddr() + dest.ShardId = src.ShardId() + + return dest +} + +// RewardTxGoToCapn is a helper function to copy fields from a RewardTx object to a RewardTxCapn object +func RewardTxGoToCapn(seg *capn.Segment, src *RewardTx) capnp.RewardTxCapn { + dest := capnp.AutoNewRewardTxCapn(seg) + + value, _ := src.Value.GobEncode() + dest.SetEpoch(src.Epoch) + dest.SetRound(src.Round) + dest.SetValue(value) + dest.SetRcvAddr(src.RcvAddr) + dest.SetShardId(src.ShardId) + + return dest +} + +// IsInterfaceNil verifies if underlying object is nil +func (scr *RewardTx) IsInterfaceNil() bool { + return scr == nil +} + +// GetValue returns the value of the reward transaction +func (scr *RewardTx) GetValue() *big.Int { + return scr.Value +} + +// GetData returns the data of the reward transaction +func (scr *RewardTx) GetData() string { + return "" +} + +// GetRecvAddress returns the receiver address from the reward transaction +func (scr *RewardTx) GetRecvAddress() []byte { + return scr.RcvAddr +} + +// GetSndAddress returns the sender address from the reward transaction +func (scr *RewardTx) GetSndAddress() []byte { + return nil +} + +// SetValue sets the value of the reward transaction +func (scr *RewardTx) SetValue(value *big.Int) { + scr.Value = value +} + +// SetData sets the data of the reward transaction +func (scr *RewardTx) SetData(data string) { +} + +// SetRecvAddress sets the receiver address of the reward transaction +func (scr *RewardTx) SetRecvAddress(addr []byte) { + scr.RcvAddr = addr +} + +// SetSndAddress sets the sender address of the reward transaction +func (scr *RewardTx) SetSndAddress(addr []byte) { +} diff --git a/data/rewardTx/rewardTx_test.go b/data/rewardTx/rewardTx_test.go new file mode 100644 index 00000000000..80930abac26 --- /dev/null +++ b/data/rewardTx/rewardTx_test.go @@ -0,0 +1,68 @@ +package rewardTx_test + +import ( + "bytes" + "math/big" + "testing" + + "github.com/ElrondNetwork/elrond-go/data/rewardTx" + "github.com/stretchr/testify/assert" +) + +func TestRewardTx_SaveLoad(t 
*testing.T) { + smrS := rewardTx.RewardTx{ + Round: uint64(1), + Epoch: uint32(1), + Value: big.NewInt(1), + RcvAddr: []byte("receiver_address"), + ShardId: 10, + } + + var b bytes.Buffer + err := smrS.Save(&b) + assert.Nil(t, err) + + loadSMR := rewardTx.RewardTx{} + err = loadSMR.Load(&b) + assert.Nil(t, err) + + assert.Equal(t, smrS, loadSMR) +} + +func TestRewardTx_GetRecvAddr(t *testing.T) { + t.Parallel() + + data := []byte("data") + scr := &rewardTx.RewardTx{RcvAddr: data} + + assert.Equal(t, data, scr.RcvAddr) +} + +func TestRewardTx_GetValue(t *testing.T) { + t.Parallel() + + value := big.NewInt(10) + scr := &rewardTx.RewardTx{Value: value} + + assert.Equal(t, value, scr.Value) +} + +func TestRewardTx_SetRecvAddr(t *testing.T) { + t.Parallel() + + data := []byte("data") + scr := &rewardTx.RewardTx{} + scr.SetRecvAddress(data) + + assert.Equal(t, data, scr.RcvAddr) +} + +func TestRewardTx_SetValue(t *testing.T) { + t.Parallel() + + value := big.NewInt(10) + scr := &rewardTx.RewardTx{} + scr.SetValue(value) + + assert.Equal(t, value, scr.Value) +} diff --git a/data/state/errors.go b/data/state/errors.go index c801162cc21..2d7196f5353 100644 --- a/data/state/errors.go +++ b/data/state/errors.go @@ -130,3 +130,15 @@ var ErrBech32ConvertError = errors.New("can't convert bech32 string") // ErrBech32WrongAddr signals that the string provided might not be in bech32 format var ErrBech32WrongAddr = errors.New("wrong bech32 string") + +// ErrNilStake signals that the provided stake is nil +var ErrNilStake = errors.New("stake is nil") + +// ErrNilSchnorrPublicKey signals that the provided schnorr public is nil +var ErrNilSchnorrPublicKey = errors.New("schnorr public key is nil") + +// ErrNilBLSPublicKey signals that the provided BLS public key is nil +var ErrNilBLSPublicKey = errors.New("bls public key is nil") + +// ErrUnknownAccountType signals that the provided account type is unknown +var ErrUnknownAccountType = errors.New("account type is unknown") diff --git a/data/state/factory/accountCreatorFactory.go b/data/state/factory/accountCreatorFactory.go index 8aa90bc777f..70e297b53b0 100644 --- a/data/state/factory/accountCreatorFactory.go +++ b/data/state/factory/accountCreatorFactory.go @@ -2,22 +2,30 @@ package factory import ( "github.com/ElrondNetwork/elrond-go/data/state" - "github.com/ElrondNetwork/elrond-go/sharding" ) -// NewAccountFactoryCreator returns an account factory depending on shard coordinator self id -func NewAccountFactoryCreator(coordinator sharding.Coordinator) (state.AccountFactory, error) { - if coordinator == nil { - return nil, state.ErrNilShardCoordinator - } +// Type defines account types to save in accounts trie +type Type uint8 - if coordinator.SelfId() < coordinator.NumberOfShards() { - return NewAccountCreator(), nil - } +const ( + // UserAccount identifies an account holding balance, storage updates, code + UserAccount Type = 0 + // ShardStatistics identifies a shard, keeps the statistics + ShardStatistics Type = 1 + // ValidatorAccount identifies an account holding stake, crypto public keys, assigned shard, rating + ValidatorAccount Type = 2 +) - if coordinator.SelfId() == sharding.MetachainShardId { +// NewAccountFactoryCreator returns an account factory depending on shard coordinator self id +func NewAccountFactoryCreator(accountType Type) (state.AccountFactory, error) { + switch accountType { + case UserAccount: + return NewAccountCreator(), nil + case ShardStatistics: return NewMetaAccountCreator(), nil + case ValidatorAccount: + return 
NewPeerAccountCreator(), nil + default: + return nil, state.ErrUnknownAccountType } - - return nil, state.ErrUnknownShardId } diff --git a/data/state/factory/accountCreatorFactory_test.go b/data/state/factory/accountCreatorFactory_test.go index f0c77ea8b05..d2852ff5a7c 100644 --- a/data/state/factory/accountCreatorFactory_test.go +++ b/data/state/factory/accountCreatorFactory_test.go @@ -6,63 +6,55 @@ import ( "github.com/ElrondNetwork/elrond-go/data/mock" "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/data/state/factory" - "github.com/ElrondNetwork/elrond-go/sharding" "github.com/stretchr/testify/assert" ) -func TestNewAccountFactoryCreator_NilShardCoordinator(t *testing.T) { +func TestNewAccountFactoryCreator_NormalAccount(t *testing.T) { t.Parallel() - accF, err := factory.NewAccountFactoryCreator(nil) + accF, err := factory.NewAccountFactoryCreator(factory.UserAccount) + assert.Nil(t, err) - assert.Equal(t, err, state.ErrNilShardCoordinator) - assert.Nil(t, accF) + accWrp, err := accF.CreateAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) + _, ok := accWrp.(*state.Account) + assert.Equal(t, true, ok) + + assert.Nil(t, err) + assert.NotNil(t, accF) } -func TestNewAccountFactoryCreator_NormalAccount(t *testing.T) { +func TestNewAccountFactoryCreator_MetaAccount(t *testing.T) { t.Parallel() - shardC := &mock.ShardCoordinatorMock{ - SelfID: 0, - NrOfShards: 1, - } - accF, err := factory.NewAccountFactoryCreator(shardC) + accF, err := factory.NewAccountFactoryCreator(factory.ShardStatistics) assert.Nil(t, err) accWrp, err := accF.CreateAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) - _, ok := accWrp.(*state.Account) + _, ok := accWrp.(*state.MetaAccount) assert.Equal(t, true, ok) assert.Nil(t, err) assert.NotNil(t, accF) } -func TestNewAccountFactoryCreator_MetaAccount(t *testing.T) { +func TestNewAccountFactoryCreator_PeerAccount(t *testing.T) { t.Parallel() - shardC := &mock.ShardCoordinatorMock{ - SelfID: sharding.MetachainShardId, - NrOfShards: 1, - } - accF, err := factory.NewAccountFactoryCreator(shardC) + accF, err := factory.NewAccountFactoryCreator(factory.ValidatorAccount) assert.Nil(t, err) accWrp, err := accF.CreateAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) - _, ok := accWrp.(*state.MetaAccount) + _, ok := accWrp.(*state.PeerAccount) assert.Equal(t, true, ok) assert.Nil(t, err) assert.NotNil(t, accF) } -func TestNewAccountFactoryCreator_BadShardID(t *testing.T) { +func TestNewAccountFactoryCreator_UnknownType(t *testing.T) { t.Parallel() - shardC := &mock.ShardCoordinatorMock{ - SelfID: 10, - NrOfShards: 5, - } - accF, err := factory.NewAccountFactoryCreator(shardC) + accF, err := factory.NewAccountFactoryCreator(10) assert.Nil(t, accF) - assert.Equal(t, state.ErrUnknownShardId, err) + assert.Equal(t, state.ErrUnknownAccountType, err) } diff --git a/data/state/factory/accountCreator_test.go b/data/state/factory/accountCreator_test.go index cf63e6219a0..1ffc6d27a7e 100644 --- a/data/state/factory/accountCreator_test.go +++ b/data/state/factory/accountCreator_test.go @@ -12,11 +12,7 @@ import ( func TestAccountCreator_CreateAccountNilAddress(t *testing.T) { t.Parallel() - shardC := &mock.ShardCoordinatorMock{ - SelfID: 0, - NrOfShards: 1, - } - accF, err := factory.NewAccountFactoryCreator(shardC) + accF, err := factory.NewAccountFactoryCreator(factory.UserAccount) assert.Nil(t, err) _, ok := accF.(*factory.AccountCreator) @@ -31,11 +27,7 @@ func TestAccountCreator_CreateAccountNilAddress(t *testing.T) { 
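A minimal sketch of the factory now being selected by account type rather than by shard coordinator id, reusing the same mocks the tests above rely on; the import paths assume the repository layout of this patch, and a real caller would pass its own address container and account tracker.

package main

import (
	"fmt"

	"github.com/ElrondNetwork/elrond-go/data/mock"
	"github.com/ElrondNetwork/elrond-go/data/state"
	"github.com/ElrondNetwork/elrond-go/data/state/factory"
)

func main() {
	// ValidatorAccount maps to the new PeerAccountCreator.
	accF, err := factory.NewAccountFactoryCreator(factory.ValidatorAccount)
	if err != nil {
		panic(err)
	}

	acc, err := accF.CreateAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{})
	if err != nil {
		panic(err)
	}

	_, isPeer := acc.(*state.PeerAccount)
	fmt.Println(isPeer) // true
}
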
func TestAccountCreator_CreateAccountNilAccountTraccer(t *testing.T) { t.Parallel() - shardC := &mock.ShardCoordinatorMock{ - SelfID: 0, - NrOfShards: 1, - } - accF, err := factory.NewAccountFactoryCreator(shardC) + accF, err := factory.NewAccountFactoryCreator(factory.UserAccount) assert.Nil(t, err) _, ok := accF.(*factory.AccountCreator) @@ -50,11 +42,7 @@ func TestAccountCreator_CreateAccountNilAccountTraccer(t *testing.T) { func TestAccountCreator_CreateAccountOk(t *testing.T) { t.Parallel() - shardC := &mock.ShardCoordinatorMock{ - SelfID: 0, - NrOfShards: 1, - } - accF, err := factory.NewAccountFactoryCreator(shardC) + accF, err := factory.NewAccountFactoryCreator(factory.UserAccount) assert.Nil(t, err) _, ok := accF.(*factory.AccountCreator) diff --git a/data/state/factory/metaAccountCreator_test.go b/data/state/factory/metaAccountCreator_test.go index 836c3d88ef8..326ba3e719c 100644 --- a/data/state/factory/metaAccountCreator_test.go +++ b/data/state/factory/metaAccountCreator_test.go @@ -6,18 +6,13 @@ import ( "github.com/ElrondNetwork/elrond-go/data/mock" "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/data/state/factory" - "github.com/ElrondNetwork/elrond-go/sharding" "github.com/stretchr/testify/assert" ) func TestMetaAccountCreator_CreateAccountNilAddress(t *testing.T) { t.Parallel() - shardC := &mock.ShardCoordinatorMock{ - SelfID: sharding.MetachainShardId, - NrOfShards: 1, - } - accF, err := factory.NewAccountFactoryCreator(shardC) + accF, err := factory.NewAccountFactoryCreator(factory.ShardStatistics) assert.Nil(t, err) _, ok := accF.(*factory.MetaAccountCreator) @@ -32,11 +27,7 @@ func TestMetaAccountCreator_CreateAccountNilAddress(t *testing.T) { func TestMetaAccountCreator_CreateAccountNilAccountTraccer(t *testing.T) { t.Parallel() - shardC := &mock.ShardCoordinatorMock{ - SelfID: sharding.MetachainShardId, - NrOfShards: 1, - } - accF, err := factory.NewAccountFactoryCreator(shardC) + accF, err := factory.NewAccountFactoryCreator(factory.ShardStatistics) assert.Nil(t, err) _, ok := accF.(*factory.MetaAccountCreator) @@ -51,11 +42,7 @@ func TestMetaAccountCreator_CreateAccountNilAccountTraccer(t *testing.T) { func TestMetaAccountCreator_CreateAccountOk(t *testing.T) { t.Parallel() - shardC := &mock.ShardCoordinatorMock{ - SelfID: sharding.MetachainShardId, - NrOfShards: 1, - } - accF, err := factory.NewAccountFactoryCreator(shardC) + accF, err := factory.NewAccountFactoryCreator(factory.ShardStatistics) assert.Nil(t, err) _, ok := accF.(*factory.MetaAccountCreator) diff --git a/data/state/factory/peerAccountCreator.go b/data/state/factory/peerAccountCreator.go new file mode 100644 index 00000000000..a1edba4e880 --- /dev/null +++ b/data/state/factory/peerAccountCreator.go @@ -0,0 +1,30 @@ +package factory + +import "github.com/ElrondNetwork/elrond-go/data/state" + +// PeerAccountCreator has a method to create a new peer account +type PeerAccountCreator struct { +} + +// NewPeerAccountCreator creates a peer account creator +func NewPeerAccountCreator() state.AccountFactory { + return &PeerAccountCreator{} +} + +// CreateAccount calls the new Account creator and returns the result +func (c *PeerAccountCreator) CreateAccount(address state.AddressContainer, tracker state.AccountTracker) (state.AccountHandler, error) { + account, err := state.NewPeerAccount(address, tracker) + if err != nil { + return nil, err + } + + return account, nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (c 
*PeerAccountCreator) IsInterfaceNil() bool { + if c == nil { + return true + } + return false +} diff --git a/data/state/factory/peerAccountCreator_test.go b/data/state/factory/peerAccountCreator_test.go new file mode 100644 index 00000000000..4496bcdae3e --- /dev/null +++ b/data/state/factory/peerAccountCreator_test.go @@ -0,0 +1,55 @@ +package factory_test + +import ( + "testing" + + "github.com/ElrondNetwork/elrond-go/data/mock" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/data/state/factory" + "github.com/stretchr/testify/assert" +) + +func TestPeerAccountCreator_CreateAccountNilAddress(t *testing.T) { + t.Parallel() + + accF, err := factory.NewAccountFactoryCreator(factory.ValidatorAccount) + assert.Nil(t, err) + + _, ok := accF.(*factory.PeerAccountCreator) + assert.Equal(t, true, ok) + + acc, err := accF.CreateAccount(nil, &mock.AccountTrackerStub{}) + + assert.Nil(t, acc) + assert.Equal(t, err, state.ErrNilAddressContainer) +} + +func TestPeerAccountCreator_CreateAccountNilAccountTraccer(t *testing.T) { + t.Parallel() + + accF, err := factory.NewAccountFactoryCreator(factory.ValidatorAccount) + assert.Nil(t, err) + + _, ok := accF.(*factory.PeerAccountCreator) + assert.Equal(t, true, ok) + + acc, err := accF.CreateAccount(&mock.AddressMock{}, nil) + + assert.Nil(t, acc) + assert.Equal(t, err, state.ErrNilAccountTracker) +} + +func TestPeerAccountCreator_CreateAccountOk(t *testing.T) { + t.Parallel() + + accF, err := factory.NewAccountFactoryCreator(factory.ValidatorAccount) + assert.Nil(t, err) + + _, ok := accF.(*factory.PeerAccountCreator) + assert.Equal(t, true, ok) + + acc, err := accF.CreateAccount(&mock.AddressMock{}, &mock.AccountTrackerStub{}) + + assert.NotNil(t, acc) + assert.Nil(t, err) +} diff --git a/data/state/peerAccount.go b/data/state/peerAccount.go new file mode 100644 index 00000000000..976149dbe79 --- /dev/null +++ b/data/state/peerAccount.go @@ -0,0 +1,369 @@ +package state + +import ( + "math/big" + + "github.com/ElrondNetwork/elrond-go/data" +) + +// TimeStamp is a moment defined by epoch and round +type TimeStamp struct { + Epoch uint64 + Round uint64 +} + +// TimePeriod holds start and end time +type TimePeriod struct { + StartTime TimeStamp + EndTime TimeStamp +} + +// SignRate is used to keep the number of success and failed signings +type SignRate struct { + NrSuccess uint32 + NrFailure uint32 +} + +// PeerAccount is the struct used in serialization/deserialization +type PeerAccount struct { + BLSPublicKey []byte + SchnorrPublicKey []byte + Address []byte + Stake *big.Int + + JailTime TimePeriod + PastJailTimes []TimePeriod + + CurrentShardId uint32 + NextShardId uint32 + NodeInWaitingList bool + + ValidatorSuccessRate SignRate + LeaderSuccessRate SignRate + + CodeHash []byte + + Rating uint32 + RootHash []byte + Nonce uint64 + + addressContainer AddressContainer + code []byte + accountTracker AccountTracker + dataTrieTracker DataTrieTracker +} + +// NewPeerAccount creates new simple account wrapper for an PeerAccountContainer (that has just been initialized) +func NewPeerAccount( + addressContainer AddressContainer, + tracker AccountTracker, +) (*PeerAccount, error) { + if addressContainer == nil { + return nil, ErrNilAddressContainer + } + if tracker == nil { + return nil, ErrNilAccountTracker + } + + return &PeerAccount{ + addressContainer: addressContainer, + accountTracker: tracker, + dataTrieTracker: NewTrackableDataTrie(nil), + }, nil +} + +// IsInterfaceNil return if there is no value under the interface 
+func (a *PeerAccount) IsInterfaceNil() bool { + if a == nil { + return true + } + return false +} + +// AddressContainer returns the address associated with the account +func (a *PeerAccount) AddressContainer() AddressContainer { + return a.addressContainer +} + +// SetNonceWithJournal sets the account's nonce, saving the old nonce before changing +func (a *PeerAccount) SetNonceWithJournal(nonce uint64) error { + entry, err := NewBaseJournalEntryNonce(a, a.Nonce) + if err != nil { + return err + } + + a.accountTracker.Journalize(entry) + a.Nonce = nonce + + return a.accountTracker.SaveAccount(a) +} + +//SetNonce saves the nonce to the account +func (a *PeerAccount) SetNonce(nonce uint64) { + a.Nonce = nonce +} + +// GetNonce gets the nonce of the account +func (a *PeerAccount) GetNonce() uint64 { + return a.Nonce +} + +// GetCodeHash returns the code hash associated with this account +func (a *PeerAccount) GetCodeHash() []byte { + return a.CodeHash +} + +// SetCodeHash sets the code hash associated with the account +func (a *PeerAccount) SetCodeHash(codeHash []byte) { + a.CodeHash = codeHash +} + +// SetCodeHashWithJournal sets the account's code hash, saving the old code hash before changing +func (a *PeerAccount) SetCodeHashWithJournal(codeHash []byte) error { + entry, err := NewBaseJournalEntryCodeHash(a, a.CodeHash) + if err != nil { + return err + } + + a.accountTracker.Journalize(entry) + a.CodeHash = codeHash + + return a.accountTracker.SaveAccount(a) +} + +// GetCode gets the actual code that needs to be run in the VM +func (a *PeerAccount) GetCode() []byte { + return a.code +} + +// SetCode sets the actual code that needs to be run in the VM +func (a *PeerAccount) SetCode(code []byte) { + a.code = code +} + +// GetRootHash returns the root hash associated with this account +func (a *PeerAccount) GetRootHash() []byte { + return a.RootHash +} + +// SetRootHash sets the root hash associated with the account +func (a *PeerAccount) SetRootHash(roothash []byte) { + a.RootHash = roothash +} + +// SetRootHashWithJournal sets the account's root hash, saving the old root hash before changing +func (a *PeerAccount) SetRootHashWithJournal(rootHash []byte) error { + entry, err := NewBaseJournalEntryRootHash(a, a.RootHash, a.DataTrie()) + if err != nil { + return err + } + + a.accountTracker.Journalize(entry) + a.RootHash = rootHash + + return a.accountTracker.SaveAccount(a) +} + +// DataTrie returns the trie that holds the current account's data +func (a *PeerAccount) DataTrie() data.Trie { + return a.dataTrieTracker.DataTrie() +} + +// SetDataTrie sets the trie that holds the current account's data +func (a *PeerAccount) SetDataTrie(trie data.Trie) { + a.dataTrieTracker.SetDataTrie(trie) +} + +// DataTrieTracker returns the trie wrapper used in managing the SC data +func (a *PeerAccount) DataTrieTracker() DataTrieTracker { + return a.dataTrieTracker +} + +// SetAddressWithJournal sets the account's address, saving the old address before changing +func (a *PeerAccount) SetAddressWithJournal(address []byte) error { + if len(address) < 1 { + return ErrEmptyAddress + } + + entry, err := NewPeerJournalEntryAddress(a, a.Address) + if err != nil { + return err + } + + a.accountTracker.Journalize(entry) + a.Address = address + + return a.accountTracker.SaveAccount(a) +} + +// SetSchnorrPublicKeyWithJournal sets the account's public key, saving the old key before changing +func (a *PeerAccount) SetSchnorrPublicKeyWithJournal(pubKey []byte) error { + if len(pubKey) < 1 { + return 
ErrNilSchnorrPublicKey + } + + entry, err := NewPeerJournalEntrySchnorrPublicKey(a, a.SchnorrPublicKey) + if err != nil { + return err + } + + a.accountTracker.Journalize(entry) + a.SchnorrPublicKey = pubKey + + return a.accountTracker.SaveAccount(a) +} + +// SetBLSPublicKeyWithJournal sets the account's bls public key, saving the old key before changing +func (a *PeerAccount) SetBLSPublicKeyWithJournal(pubKey []byte) error { + if len(pubKey) < 1 { + return ErrNilBLSPublicKey + } + + entry, err := NewPeerJournalEntryBLSPublicKey(a, a.BLSPublicKey) + if err != nil { + return err + } + + a.accountTracker.Journalize(entry) + a.BLSPublicKey = pubKey + + return a.accountTracker.SaveAccount(a) +} + +// SetStakeWithJournal sets the account's stake, saving the old stake before changing +func (a *PeerAccount) SetStakeWithJournal(stake *big.Int) error { + if stake == nil { + return ErrNilStake + } + + entry, err := NewPeerJournalEntryStake(a, a.Stake) + if err != nil { + return err + } + + a.accountTracker.Journalize(entry) + a.Stake = stake + + return a.accountTracker.SaveAccount(a) +} + +// SetJailTimeWithJournal sets the account's jail time, saving the old state before changing +func (a *PeerAccount) SetJailTimeWithJournal(jailTime TimePeriod) error { + entry, err := NewPeerJournalEntryJailTime(a, a.JailTime) + if err != nil { + return err + } + + a.accountTracker.Journalize(entry) + a.JailTime = jailTime + + return a.accountTracker.SaveAccount(a) +} + +// SetCurrentShardIdWithJournal sets the account's shard id, saving the old state before changing +func (a *PeerAccount) SetCurrentShardIdWithJournal(shId uint32) error { + entry, err := NewPeerJournalEntryCurrentShardId(a, a.CurrentShardId) + if err != nil { + return err + } + + a.accountTracker.Journalize(entry) + a.CurrentShardId = shId + + return a.accountTracker.SaveAccount(a) +} + +// SetNextShardIdWithJournal sets the account's shard id, saving the old state before changing +func (a *PeerAccount) SetNextShardIdWithJournal(shId uint32) error { + entry, err := NewPeerJournalEntryNextShardId(a, a.NextShardId) + if err != nil { + return err + } + + a.accountTracker.Journalize(entry) + a.NextShardId = shId + + return a.accountTracker.SaveAccount(a) +} + +// SetNodeInWaitingListWithJournal sets the account's nodes status whether in waiting list, saving the old state before +func (a *PeerAccount) SetNodeInWaitingListWithJournal(nodeInWaitingList bool) error { + entry, err := NewPeerJournalEntryInWaitingList(a, a.NodeInWaitingList) + if err != nil { + return err + } + + a.accountTracker.Journalize(entry) + a.NodeInWaitingList = nodeInWaitingList + + return a.accountTracker.SaveAccount(a) +} + +// IncreaseValidatorSuccessRateWithJournal increases the account's number of successful signing, +// saving the old state before changing +func (a *PeerAccount) IncreaseValidatorSuccessRateWithJournal() error { + entry, err := NewPeerJournalEntryValidatorSuccessRate(a, a.ValidatorSuccessRate) + if err != nil { + return err + } + + a.accountTracker.Journalize(entry) + a.ValidatorSuccessRate.NrSuccess++ + + return a.accountTracker.SaveAccount(a) +} + +// DecreaseValidatorSuccessRateWithJournal increases the account's number of missed signing, +// saving the old state before changing +func (a *PeerAccount) DecreaseValidatorSuccessRateWithJournal() error { + entry, err := NewPeerJournalEntryValidatorSuccessRate(a, a.ValidatorSuccessRate) + if err != nil { + return err + } + + a.accountTracker.Journalize(entry) + a.ValidatorSuccessRate.NrFailure++ + + return 
a.accountTracker.SaveAccount(a)
+}
+
+// IncreaseLeaderSuccessRateWithJournal increases the account's number of successful signings,
+// saving the old state before changing
+func (a *PeerAccount) IncreaseLeaderSuccessRateWithJournal() error {
+	entry, err := NewPeerJournalEntryLeaderSuccessRate(a, a.LeaderSuccessRate)
+	if err != nil {
+		return err
+	}
+
+	a.accountTracker.Journalize(entry)
+	a.LeaderSuccessRate.NrSuccess++
+
+	return a.accountTracker.SaveAccount(a)
+}
+
+// DecreaseLeaderSuccessRateWithJournal increases the account's number of missed signings,
+// saving the old state before changing
+func (a *PeerAccount) DecreaseLeaderSuccessRateWithJournal() error {
+	entry, err := NewPeerJournalEntryLeaderSuccessRate(a, a.LeaderSuccessRate)
+	if err != nil {
+		return err
+	}
+
+	a.accountTracker.Journalize(entry)
+	a.LeaderSuccessRate.NrFailure++
+
+	return a.accountTracker.SaveAccount(a)
+}
+
+// SetRatingWithJournal sets the account's rating, saving the old state before changing
+func (a *PeerAccount) SetRatingWithJournal(rating uint32) error {
+	entry, err := NewPeerJournalEntryRating(a, a.Rating)
+	if err != nil {
+		return err
+	}
+
+	a.accountTracker.Journalize(entry)
+	a.Rating = rating
+
+	return a.accountTracker.SaveAccount(a)
+}
diff --git a/data/state/peerAccount_test.go b/data/state/peerAccount_test.go
new file mode 100644
index 00000000000..8c762964d6d
--- /dev/null
+++ b/data/state/peerAccount_test.go
@@ -0,0 +1,592 @@
+package state_test
+
+import (
+	"math/big"
+	"testing"
+
+	"github.com/ElrondNetwork/elrond-go/data/mock"
+	"github.com/ElrondNetwork/elrond-go/data/state"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestPeerAccount_MarshalUnmarshal_ShouldWork(t *testing.T) {
+	t.Parallel()
+
+	addr := &mock.AddressMock{}
+	addrTr := &mock.AccountTrackerStub{}
+	acnt, _ := state.NewPeerAccount(addr, addrTr)
+
+	marshalizer := mock.MarshalizerMock{}
+	buff, _ := marshalizer.Marshal(&acnt)
+
+	acntRecovered, _ := state.NewPeerAccount(addr, addrTr)
+	_ = marshalizer.Unmarshal(acntRecovered, buff)
+
+	assert.Equal(t, acnt, acntRecovered)
+}
+
+func TestPeerAccount_NewAccountNilAddress(t *testing.T) {
+	t.Parallel()
+
+	acc, err := state.NewPeerAccount(nil, &mock.AccountTrackerStub{})
+
+	assert.Nil(t, acc)
+	assert.Equal(t, err, state.ErrNilAddressContainer)
+}
+
+func TestPeerAccount_NewPeerAccountNilAccountTracker(t *testing.T) {
+	t.Parallel()
+
+	acc, err := state.NewPeerAccount(&mock.AddressMock{}, nil)
+
+	assert.Nil(t, acc)
+	assert.Equal(t, err, state.ErrNilAccountTracker)
+}
+
+func TestPeerAccount_NewPeerAccountOk(t *testing.T) {
+	t.Parallel()
+
+	acc, err := state.NewPeerAccount(&mock.AddressMock{}, &mock.AccountTrackerStub{})
+
+	assert.NotNil(t, acc)
+	assert.Nil(t, err)
+}
+
+func TestPeerAccount_AddressContainer(t *testing.T) {
+	t.Parallel()
+
+	addr := &mock.AddressMock{}
+	acc, err := state.NewPeerAccount(addr, &mock.AccountTrackerStub{})
+
+	assert.NotNil(t, acc)
+	assert.Nil(t, err)
+	assert.Equal(t, addr, acc.AddressContainer())
+}
+
+func TestPeerAccount_GetCode(t *testing.T) {
+	t.Parallel()
+
+	acc, err := state.NewPeerAccount(&mock.AddressMock{}, &mock.AccountTrackerStub{})
+	assert.Nil(t, err)
+
+	code := []byte("code")
+	acc.SetCode(code)
+
+	assert.NotNil(t, acc)
+	assert.Equal(t, code, acc.GetCode())
+}
+
+func TestPeerAccount_GetCodeHash(t *testing.T) {
+	t.Parallel()
+
+	acc, err := state.NewPeerAccount(&mock.AddressMock{}, &mock.AccountTrackerStub{})
+	assert.Nil(t, err)
+
+	code := []byte("code")
+	acc.CodeHash = code
+
+	
assert.NotNil(t, acc) + assert.Equal(t, code, acc.GetCodeHash()) +} + +func TestPeerAccount_SetCodeHash(t *testing.T) { + t.Parallel() + + acc, err := state.NewPeerAccount(&mock.AddressMock{}, &mock.AccountTrackerStub{}) + assert.Nil(t, err) + + code := []byte("code") + acc.SetCodeHash(code) + + assert.NotNil(t, acc) + assert.Equal(t, code, acc.GetCodeHash()) +} + +func TestPeerAccount_GetRootHash(t *testing.T) { + t.Parallel() + + acc, err := state.NewPeerAccount(&mock.AddressMock{}, &mock.AccountTrackerStub{}) + assert.Nil(t, err) + + root := []byte("root") + acc.RootHash = root + + assert.NotNil(t, acc) + assert.Equal(t, root, acc.GetRootHash()) +} + +func TestPeerAccount_SetRootHash(t *testing.T) { + t.Parallel() + + acc, err := state.NewPeerAccount(&mock.AddressMock{}, &mock.AccountTrackerStub{}) + assert.Nil(t, err) + + root := []byte("root") + acc.SetRootHash(root) + + assert.NotNil(t, acc) + assert.Equal(t, root, acc.GetRootHash()) +} + +func TestPeerAccount_DataTrie(t *testing.T) { + t.Parallel() + + acc, err := state.NewPeerAccount(&mock.AddressMock{}, &mock.AccountTrackerStub{}) + assert.Nil(t, err) + + trie := &mock.TrieStub{} + acc.SetDataTrie(trie) + + assert.NotNil(t, acc) + assert.Equal(t, trie, acc.DataTrie()) +} + +func TestPeerAccount_SetNonceWithJournal(t *testing.T) { + t.Parallel() + + journalizeCalled := 0 + saveAccountCalled := 0 + tracker := &mock.AccountTrackerStub{ + JournalizeCalled: func(entry state.JournalEntry) { + journalizeCalled++ + }, + SaveAccountCalled: func(accountHandler state.AccountHandler) error { + saveAccountCalled++ + return nil + }, + } + + acc, err := state.NewPeerAccount(&mock.AddressMock{}, tracker) + assert.Nil(t, err) + + nonce := uint64(0) + err = acc.SetNonceWithJournal(nonce) + + assert.NotNil(t, acc) + assert.Nil(t, err) + assert.Equal(t, nonce, acc.Nonce) + assert.Equal(t, 1, journalizeCalled) + assert.Equal(t, 1, saveAccountCalled) +} + +func TestPeerAccount_SetCodeHashWithJournal(t *testing.T) { + t.Parallel() + + journalizeCalled := 0 + saveAccountCalled := 0 + tracker := &mock.AccountTrackerStub{ + JournalizeCalled: func(entry state.JournalEntry) { + journalizeCalled++ + }, + SaveAccountCalled: func(accountHandler state.AccountHandler) error { + saveAccountCalled++ + return nil + }, + } + + acc, err := state.NewPeerAccount(&mock.AddressMock{}, tracker) + assert.Nil(t, err) + + codeHash := []byte("codehash") + err = acc.SetCodeHashWithJournal(codeHash) + + assert.NotNil(t, acc) + assert.Nil(t, err) + assert.Equal(t, codeHash, acc.CodeHash) + assert.Equal(t, 1, journalizeCalled) + assert.Equal(t, 1, saveAccountCalled) +} + +func TestPeerAccount_SetRootHashWithJournal(t *testing.T) { + t.Parallel() + + journalizeCalled := 0 + saveAccountCalled := 0 + tracker := &mock.AccountTrackerStub{ + JournalizeCalled: func(entry state.JournalEntry) { + journalizeCalled++ + }, + SaveAccountCalled: func(accountHandler state.AccountHandler) error { + saveAccountCalled++ + return nil + }, + } + + acc, err := state.NewPeerAccount(&mock.AddressMock{}, tracker) + assert.Nil(t, err) + + rootHash := []byte("roothash") + err = acc.SetRootHashWithJournal(rootHash) + + assert.NotNil(t, acc) + assert.Nil(t, err) + assert.Equal(t, rootHash, acc.RootHash) + assert.Equal(t, 1, journalizeCalled) + assert.Equal(t, 1, saveAccountCalled) +} + +func TestPeerAccount_SetAddressWithJournal(t *testing.T) { + t.Parallel() + + journalizeCalled := 0 + saveAccountCalled := 0 + tracker := &mock.AccountTrackerStub{ + JournalizeCalled: func(entry state.JournalEntry) { + 
journalizeCalled++ + }, + SaveAccountCalled: func(accountHandler state.AccountHandler) error { + saveAccountCalled++ + return nil + }, + } + + acc, err := state.NewPeerAccount(&mock.AddressMock{}, tracker) + assert.Nil(t, err) + + address := []byte("address") + err = acc.SetAddressWithJournal(address) + + assert.NotNil(t, acc) + assert.Nil(t, err) + assert.Equal(t, address, acc.Address) + assert.Equal(t, 1, journalizeCalled) + assert.Equal(t, 1, saveAccountCalled) +} + +func TestPeerAccount_SetSchnorrPublicKeyWithJournalWithJournal(t *testing.T) { + t.Parallel() + + journalizeCalled := 0 + saveAccountCalled := 0 + tracker := &mock.AccountTrackerStub{ + JournalizeCalled: func(entry state.JournalEntry) { + journalizeCalled++ + }, + SaveAccountCalled: func(accountHandler state.AccountHandler) error { + saveAccountCalled++ + return nil + }, + } + + acc, err := state.NewPeerAccount(&mock.AddressMock{}, tracker) + assert.Nil(t, err) + + pubKey := []byte("pubkey") + err = acc.SetSchnorrPublicKeyWithJournal(pubKey) + + assert.NotNil(t, acc) + assert.Nil(t, err) + assert.Equal(t, pubKey, acc.SchnorrPublicKey) + assert.Equal(t, 1, journalizeCalled) + assert.Equal(t, 1, saveAccountCalled) +} + +func TestPeerAccount_SetBLSPublicKeyWithJournalWithJournal(t *testing.T) { + t.Parallel() + + journalizeCalled := 0 + saveAccountCalled := 0 + tracker := &mock.AccountTrackerStub{ + JournalizeCalled: func(entry state.JournalEntry) { + journalizeCalled++ + }, + SaveAccountCalled: func(accountHandler state.AccountHandler) error { + saveAccountCalled++ + return nil + }, + } + + acc, err := state.NewPeerAccount(&mock.AddressMock{}, tracker) + assert.Nil(t, err) + + pubKey := []byte("pubkey") + err = acc.SetBLSPublicKeyWithJournal(pubKey) + + assert.NotNil(t, acc) + assert.Nil(t, err) + assert.Equal(t, pubKey, acc.BLSPublicKey) + assert.Equal(t, 1, journalizeCalled) + assert.Equal(t, 1, saveAccountCalled) +} + +func TestPeerAccount_SetStakeWithJournal(t *testing.T) { + t.Parallel() + + journalizeCalled := 0 + saveAccountCalled := 0 + tracker := &mock.AccountTrackerStub{ + JournalizeCalled: func(entry state.JournalEntry) { + journalizeCalled++ + }, + SaveAccountCalled: func(accountHandler state.AccountHandler) error { + saveAccountCalled++ + return nil + }, + } + + acc, err := state.NewPeerAccount(&mock.AddressMock{}, tracker) + assert.Nil(t, err) + + stake := big.NewInt(250000) + err = acc.SetStakeWithJournal(stake) + + assert.NotNil(t, acc) + assert.Nil(t, err) + assert.Equal(t, stake.Uint64(), acc.Stake.Uint64()) + assert.Equal(t, 1, journalizeCalled) + assert.Equal(t, 1, saveAccountCalled) +} + +func TestPeerAccount_SetCurrentShardIdWithJournal(t *testing.T) { + t.Parallel() + + journalizeCalled := 0 + saveAccountCalled := 0 + tracker := &mock.AccountTrackerStub{ + JournalizeCalled: func(entry state.JournalEntry) { + journalizeCalled++ + }, + SaveAccountCalled: func(accountHandler state.AccountHandler) error { + saveAccountCalled++ + return nil + }, + } + + acc, err := state.NewPeerAccount(&mock.AddressMock{}, tracker) + assert.Nil(t, err) + + shId := uint32(10) + err = acc.SetCurrentShardIdWithJournal(shId) + + assert.NotNil(t, acc) + assert.Nil(t, err) + assert.Equal(t, shId, acc.CurrentShardId) + assert.Equal(t, 1, journalizeCalled) + assert.Equal(t, 1, saveAccountCalled) +} + +func TestPeerAccount_SetNextShardIdWithJournal(t *testing.T) { + t.Parallel() + + journalizeCalled := 0 + saveAccountCalled := 0 + tracker := &mock.AccountTrackerStub{ + JournalizeCalled: func(entry state.JournalEntry) { + 
journalizeCalled++ + }, + SaveAccountCalled: func(accountHandler state.AccountHandler) error { + saveAccountCalled++ + return nil + }, + } + + acc, err := state.NewPeerAccount(&mock.AddressMock{}, tracker) + assert.Nil(t, err) + + shId := uint32(10) + err = acc.SetNextShardIdWithJournal(shId) + + assert.NotNil(t, acc) + assert.Nil(t, err) + assert.Equal(t, shId, acc.NextShardId) + assert.Equal(t, 1, journalizeCalled) + assert.Equal(t, 1, saveAccountCalled) +} + +func TestPeerAccount_SetNodeInWaitingListWithJournal(t *testing.T) { + t.Parallel() + + journalizeCalled := 0 + saveAccountCalled := 0 + tracker := &mock.AccountTrackerStub{ + JournalizeCalled: func(entry state.JournalEntry) { + journalizeCalled++ + }, + SaveAccountCalled: func(accountHandler state.AccountHandler) error { + saveAccountCalled++ + return nil + }, + } + + acc, err := state.NewPeerAccount(&mock.AddressMock{}, tracker) + assert.Nil(t, err) + + err = acc.SetNodeInWaitingListWithJournal(true) + + assert.NotNil(t, acc) + assert.Nil(t, err) + assert.Equal(t, true, acc.NodeInWaitingList) + assert.Equal(t, 1, journalizeCalled) + assert.Equal(t, 1, saveAccountCalled) +} + +func TestPeerAccount_SetRatingWithJournal(t *testing.T) { + t.Parallel() + + journalizeCalled := 0 + saveAccountCalled := 0 + tracker := &mock.AccountTrackerStub{ + JournalizeCalled: func(entry state.JournalEntry) { + journalizeCalled++ + }, + SaveAccountCalled: func(accountHandler state.AccountHandler) error { + saveAccountCalled++ + return nil + }, + } + + acc, err := state.NewPeerAccount(&mock.AddressMock{}, tracker) + assert.Nil(t, err) + + rating := uint32(10) + err = acc.SetRatingWithJournal(rating) + + assert.NotNil(t, acc) + assert.Nil(t, err) + assert.Equal(t, rating, acc.Rating) + assert.Equal(t, 1, journalizeCalled) + assert.Equal(t, 1, saveAccountCalled) +} + +func TestPeerAccount_SetJailTimeWithJournal(t *testing.T) { + t.Parallel() + + journalizeCalled := 0 + saveAccountCalled := 0 + tracker := &mock.AccountTrackerStub{ + JournalizeCalled: func(entry state.JournalEntry) { + journalizeCalled++ + }, + SaveAccountCalled: func(accountHandler state.AccountHandler) error { + saveAccountCalled++ + return nil + }, + } + + acc, err := state.NewPeerAccount(&mock.AddressMock{}, tracker) + assert.Nil(t, err) + + jailTime := state.TimePeriod{ + StartTime: state.TimeStamp{Epoch: 12, Round: 12}, + EndTime: state.TimeStamp{Epoch: 13, Round: 13}, + } + err = acc.SetJailTimeWithJournal(jailTime) + + assert.NotNil(t, acc) + assert.Nil(t, err) + assert.Equal(t, jailTime, acc.JailTime) + assert.Equal(t, 1, journalizeCalled) + assert.Equal(t, 1, saveAccountCalled) +} + +func TestPeerAccount_IncreaseLeaderSuccessRateWithJournal(t *testing.T) { + t.Parallel() + + journalizeCalled := 0 + saveAccountCalled := 0 + tracker := &mock.AccountTrackerStub{ + JournalizeCalled: func(entry state.JournalEntry) { + journalizeCalled++ + }, + SaveAccountCalled: func(accountHandler state.AccountHandler) error { + saveAccountCalled++ + return nil + }, + } + + acc, err := state.NewPeerAccount(&mock.AddressMock{}, tracker) + assert.Nil(t, err) + + acc.LeaderSuccessRate = state.SignRate{NrSuccess: 10, NrFailure: 10} + err = acc.IncreaseLeaderSuccessRateWithJournal() + + assert.NotNil(t, acc) + assert.Nil(t, err) + assert.Equal(t, uint32(11), acc.LeaderSuccessRate.NrSuccess) + assert.Equal(t, 1, journalizeCalled) + assert.Equal(t, 1, saveAccountCalled) +} + +func TestPeerAccount_IncreaseValidatorSuccessRateWithJournal(t *testing.T) { + t.Parallel() + + journalizeCalled := 0 + 
saveAccountCalled := 0
+	tracker := &mock.AccountTrackerStub{
+		JournalizeCalled: func(entry state.JournalEntry) {
+			journalizeCalled++
+		},
+		SaveAccountCalled: func(accountHandler state.AccountHandler) error {
+			saveAccountCalled++
+			return nil
+		},
+	}
+
+	acc, err := state.NewPeerAccount(&mock.AddressMock{}, tracker)
+	assert.Nil(t, err)
+
+	acc.ValidatorSuccessRate = state.SignRate{NrSuccess: 10, NrFailure: 10}
+	err = acc.IncreaseValidatorSuccessRateWithJournal()
+
+	assert.NotNil(t, acc)
+	assert.Nil(t, err)
+	assert.Equal(t, uint32(11), acc.ValidatorSuccessRate.NrSuccess)
+	assert.Equal(t, 1, journalizeCalled)
+	assert.Equal(t, 1, saveAccountCalled)
+}
+
+func TestPeerAccount_DecreaseLeaderSuccessRateWithJournal(t *testing.T) {
+	t.Parallel()
+
+	journalizeCalled := 0
+	saveAccountCalled := 0
+	tracker := &mock.AccountTrackerStub{
+		JournalizeCalled: func(entry state.JournalEntry) {
+			journalizeCalled++
+		},
+		SaveAccountCalled: func(accountHandler state.AccountHandler) error {
+			saveAccountCalled++
+			return nil
+		},
+	}
+
+	acc, err := state.NewPeerAccount(&mock.AddressMock{}, tracker)
+	assert.Nil(t, err)
+
+	acc.LeaderSuccessRate = state.SignRate{NrSuccess: 10, NrFailure: 10}
+	err = acc.DecreaseLeaderSuccessRateWithJournal()
+
+	assert.NotNil(t, acc)
+	assert.Nil(t, err)
+	assert.Equal(t, uint32(11), acc.LeaderSuccessRate.NrFailure)
+	assert.Equal(t, 1, journalizeCalled)
+	assert.Equal(t, 1, saveAccountCalled)
+}
+
+func TestPeerAccount_DecreaseValidatorSuccessRateWithJournal(t *testing.T) {
+	t.Parallel()
+
+	journalizeCalled := 0
+	saveAccountCalled := 0
+	tracker := &mock.AccountTrackerStub{
+		JournalizeCalled: func(entry state.JournalEntry) {
+			journalizeCalled++
+		},
+		SaveAccountCalled: func(accountHandler state.AccountHandler) error {
+			saveAccountCalled++
+			return nil
+		},
+	}
+
+	acc, err := state.NewPeerAccount(&mock.AddressMock{}, tracker)
+	assert.Nil(t, err)
+
+	acc.ValidatorSuccessRate = state.SignRate{NrSuccess: 10, NrFailure: 10}
+	err = acc.DecreaseValidatorSuccessRateWithJournal()
+
+	assert.NotNil(t, acc)
+	assert.Nil(t, err)
+	assert.Equal(t, uint32(11), acc.ValidatorSuccessRate.NrFailure)
+	assert.Equal(t, 1, journalizeCalled)
+	assert.Equal(t, 1, saveAccountCalled)
+}
diff --git a/data/state/peerAccountsDB.go b/data/state/peerAccountsDB.go
new file mode 100644
index 00000000000..0dd6b35c4b8
--- /dev/null
+++ b/data/state/peerAccountsDB.go
@@ -0,0 +1,6 @@
+package state
+
+// peerAccountsDB will save and synchronize data from the peer processor and will also synchronize with the nodesCoordinator
+type peerAccountsDB struct {
+	*AccountsDB
+}
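The journal entries defined in the next file pair up with the *WithJournal setters on PeerAccount: each setter journalizes an entry holding the old value through the account tracker before mutating the field, and calling Revert on that entry writes the old value back. A minimal sketch of that flow, assuming code running inside package state with a stub AccountTracker; addressContainer and tracker below are placeholders, not values defined by this patch:

	acc, _ := NewPeerAccount(addressContainer, tracker) // placeholder AddressContainer / AccountTracker
	oldStake := acc.Stake                               // nil right after construction
	entry, _ := NewPeerJournalEntryStake(acc, oldStake) // the same entry SetStakeWithJournal journalizes
	_ = acc.SetStakeWithJournal(big.NewInt(100))        // journalizes oldStake, sets Stake, saves the account
	_, _ = entry.Revert()                               // puts oldStake back into acc.Stake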
diff --git a/data/state/peerJournalEntries.go b/data/state/peerJournalEntries.go
new file mode 100644
index 00000000000..84251b32aee
--- /dev/null
+++ b/data/state/peerJournalEntries.go
@@ -0,0 +1,386 @@
+package state
+
+import "math/big"
+
+//------- PeerJournalEntryAddress
+
+// PeerJournalEntryAddress is used to revert an address change
+type PeerJournalEntryAddress struct {
+	account    *PeerAccount
+	oldAddress []byte
+}
+
+// NewPeerJournalEntryAddress outputs a new PeerJournalEntryAddress implementation used to revert an address change
+func NewPeerJournalEntryAddress(account *PeerAccount, oldAddress []byte) (*PeerJournalEntryAddress, error) {
+	if account == nil {
+		return nil, ErrNilAccountHandler
+	}
+
+	return &PeerJournalEntryAddress{
+		account:    account,
+		oldAddress: oldAddress,
+	}, nil
+}
+
+// Revert applies undo operation
+func (pje *PeerJournalEntryAddress) Revert() (AccountHandler, error) {
+	pje.account.Address = pje.oldAddress
+
+	return pje.account, nil
+}
+
+// IsInterfaceNil returns true if there is no value under the interface
+func (pje *PeerJournalEntryAddress) IsInterfaceNil() bool {
+	if pje == nil {
+		return true
+	}
+	return false
+}
+
+//------- PeerJournalEntrySchnorrPublicKey
+
+// PeerJournalEntrySchnorrPublicKey is used to revert a Schnorr public key change
+type PeerJournalEntrySchnorrPublicKey struct {
+	account          *PeerAccount
+	oldSchnorrPubKey []byte
+}
+
+// NewPeerJournalEntrySchnorrPublicKey outputs a new PeerJournalEntrySchnorrPublicKey implementation used to revert a Schnorr public key change
+func NewPeerJournalEntrySchnorrPublicKey(
+	account *PeerAccount,
+	oldSchnorrPubKey []byte,
+) (*PeerJournalEntrySchnorrPublicKey, error) {
+	if account == nil {
+		return nil, ErrNilAccountHandler
+	}
+
+	return &PeerJournalEntrySchnorrPublicKey{
+		account:          account,
+		oldSchnorrPubKey: oldSchnorrPubKey,
+	}, nil
+}
+
+// Revert applies undo operation
+func (jens *PeerJournalEntrySchnorrPublicKey) Revert() (AccountHandler, error) {
+	jens.account.SchnorrPublicKey = jens.oldSchnorrPubKey
+
+	return jens.account, nil
+}
+
+// IsInterfaceNil returns true if there is no value under the interface
+func (jens *PeerJournalEntrySchnorrPublicKey) IsInterfaceNil() bool {
+	if jens == nil {
+		return true
+	}
+	return false
+}
+
+//------- PeerJournalEntryBLSPublicKey
+
+// PeerJournalEntryBLSPublicKey is used to revert a BLS public key change
+type PeerJournalEntryBLSPublicKey struct {
+	account      *PeerAccount
+	oldBLSPubKey []byte
+}
+
+// NewPeerJournalEntryBLSPublicKey outputs a new PeerJournalEntryBLSPublicKey implementation used to revert a BLS public key change
+func NewPeerJournalEntryBLSPublicKey(account *PeerAccount, oldBLSPubKey []byte) (*PeerJournalEntryBLSPublicKey, error) {
+	if account == nil {
+		return nil, ErrNilAccountHandler
+	}
+
+	return &PeerJournalEntryBLSPublicKey{
+		account:      account,
+		oldBLSPubKey: oldBLSPubKey,
+	}, nil
+}
+
+// Revert applies undo operation
+func (pjeb *PeerJournalEntryBLSPublicKey) Revert() (AccountHandler, error) {
+	pjeb.account.BLSPublicKey = pjeb.oldBLSPubKey
+
+	return pjeb.account, nil
+}
+
+// IsInterfaceNil returns true if there is no value under the interface
+func (pjeb *PeerJournalEntryBLSPublicKey) IsInterfaceNil() bool {
+	if pjeb == nil {
+		return true
+	}
+	return false
+}
+
+//------- PeerJournalEntryStake
+
+// PeerJournalEntryStake is used to revert a stake change
+type PeerJournalEntryStake struct {
+	account  *PeerAccount
+	oldStake *big.Int
+}
+
+// NewPeerJournalEntryStake outputs a new PeerJournalEntryStake implementation used to revert a stake change
+func NewPeerJournalEntryStake(account *PeerAccount, oldStake *big.Int) (*PeerJournalEntryStake, error) {
+	if account == nil {
+		return nil, ErrNilAccountHandler
+	}
+
+	return &PeerJournalEntryStake{
+		account:  account,
+		oldStake: oldStake,
+	}, nil
+}
+
+// Revert applies undo operation
+func (pjes *PeerJournalEntryStake) Revert() (AccountHandler, error) {
+	pjes.account.Stake = pjes.oldStake
+
+	return pjes.account, nil
+}
+
+// IsInterfaceNil returns true if there is no value under the interface
+func (pjes *PeerJournalEntryStake) IsInterfaceNil() bool {
+	if pjes == nil {
+		return true
+	}
+	return false
+}
+
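+// The entries below (jail time, current/next shard id, waiting list status,
+// validator/leader sign rates and rating) follow the same pattern as the ones
+// above: the constructor captures the old value together with the account,
+// and Revert writes that old value back and returns the account handler so it
+// can be saved again.
+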
+// PeerJournalEntryJailTime is used to revert a jail time change
+type PeerJournalEntryJailTime struct {
+	account     *PeerAccount
+	oldJailTime TimePeriod
+}
+
+// NewPeerJournalEntryJailTime outputs a new PeerJournalEntryJailTime implementation used to revert a state change
+func NewPeerJournalEntryJailTime(account *PeerAccount, oldJailTime TimePeriod) (*PeerJournalEntryJailTime, error) {
+	if account == nil {
+		return nil, ErrNilAccountHandler
+	}
+
+	return &PeerJournalEntryJailTime{
+		account:     account,
+		oldJailTime: oldJailTime,
+	}, nil
+}
+
+// Revert applies undo operation
+func (pjej *PeerJournalEntryJailTime) Revert() (AccountHandler, error) {
+	pjej.account.JailTime = pjej.oldJailTime
+
+	return pjej.account, nil
+}
+
+// IsInterfaceNil returns true if there is no value under the interface
+func (pjej *PeerJournalEntryJailTime) IsInterfaceNil() bool {
+	if pjej == nil {
+		return true
+	}
+	return false
+}
+
+// PeerJournalEntryCurrentShardId is used to revert a shardId change
+type PeerJournalEntryCurrentShardId struct {
+	account    *PeerAccount
+	oldShardId uint32
+}
+
+// NewPeerJournalEntryCurrentShardId outputs a new PeerJournalEntryCurrentShardId implementation used to revert a state change
+func NewPeerJournalEntryCurrentShardId(account *PeerAccount, oldShardId uint32) (*PeerJournalEntryCurrentShardId, error) {
+	if account == nil {
+		return nil, ErrNilAccountHandler
+	}
+
+	return &PeerJournalEntryCurrentShardId{
+		account:    account,
+		oldShardId: oldShardId,
+	}, nil
+}
+
+// Revert applies undo operation
+func (pjec *PeerJournalEntryCurrentShardId) Revert() (AccountHandler, error) {
+	pjec.account.CurrentShardId = pjec.oldShardId
+
+	return pjec.account, nil
+}
+
+// IsInterfaceNil returns true if there is no value under the interface
+func (pjec *PeerJournalEntryCurrentShardId) IsInterfaceNil() bool {
+	if pjec == nil {
+		return true
+	}
+	return false
+}
+
+// PeerJournalEntryNextShardId is used to revert a shardId change
+type PeerJournalEntryNextShardId struct {
+	account    *PeerAccount
+	oldShardId uint32
+}
+
+// NewPeerJournalEntryNextShardId outputs a new PeerJournalEntryNextShardId implementation used to revert a state change
+func NewPeerJournalEntryNextShardId(account *PeerAccount, oldShardId uint32) (*PeerJournalEntryNextShardId, error) {
+	if account == nil {
+		return nil, ErrNilAccountHandler
+	}
+
+	return &PeerJournalEntryNextShardId{
+		account:    account,
+		oldShardId: oldShardId,
+	}, nil
+}
+
+// Revert applies undo operation
+func (pjen *PeerJournalEntryNextShardId) Revert() (AccountHandler, error) {
+	pjen.account.NextShardId = pjen.oldShardId
+
+	return pjen.account, nil
+}
+
+// IsInterfaceNil returns true if there is no value under the interface
+func (pjen *PeerJournalEntryNextShardId) IsInterfaceNil() bool {
+	if pjen == nil {
+		return true
+	}
+	return false
+}
+
+// PeerJournalEntryInWaitingList is used to revert a waiting list state change
+type PeerJournalEntryInWaitingList struct {
+	account              *PeerAccount
+	oldNodeInWaitingList bool
+}
+
+// NewPeerJournalEntryInWaitingList outputs a new PeerJournalEntryInWaitingList implementation used to revert a state change
+func NewPeerJournalEntryInWaitingList(
+	account *PeerAccount,
+	oldNodeInWaitingList bool,
+) (*PeerJournalEntryInWaitingList, error) {
+	if account == nil {
+		return nil, ErrNilAccountHandler
+	}
+
+	return &PeerJournalEntryInWaitingList{
+		account:              account,
+		oldNodeInWaitingList: oldNodeInWaitingList,
+	}, nil
+}
+
+// Revert applies undo operation
+func (pjew *PeerJournalEntryInWaitingList) Revert() (AccountHandler, error) {
+	pjew.account.NodeInWaitingList = pjew.oldNodeInWaitingList
+
+	return pjew.account, nil
+}
+
+// IsInterfaceNil returns true if there is no value under the interface
+func (pjew *PeerJournalEntryInWaitingList) IsInterfaceNil() bool {
+	if pjew == nil {
+		return true
+	}
+	return false
+} + +// PeerJournalEntryValidatorSuccessRate is used to revert a success rate change +type PeerJournalEntryValidatorSuccessRate struct { + account *PeerAccount + oldValidatorSuccessRate SignRate +} + +// NewPeerJournalEntryValidatorSuccessRate outputs a new PeerJournalEntryValidatorSuccessRate implementation used to revert a state change +func NewPeerJournalEntryValidatorSuccessRate( + account *PeerAccount, + oldValidatorSuccessRate SignRate, +) (*PeerJournalEntryValidatorSuccessRate, error) { + if account == nil { + return nil, ErrNilAccountHandler + } + + return &PeerJournalEntryValidatorSuccessRate{ + account: account, + oldValidatorSuccessRate: oldValidatorSuccessRate, + }, nil +} + +// Revert applies undo operation +func (pjev *PeerJournalEntryValidatorSuccessRate) Revert() (AccountHandler, error) { + pjev.account.ValidatorSuccessRate = pjev.oldValidatorSuccessRate + + return pjev.account, nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (pjev *PeerJournalEntryValidatorSuccessRate) IsInterfaceNil() bool { + if pjev == nil { + return true + } + return false +} + +// PeerJournalEntryLeaderSuccessRate is used to revert a success rate change +type PeerJournalEntryLeaderSuccessRate struct { + account *PeerAccount + oldLeaderSuccessRate SignRate +} + +// NewPeerJournalEntryLeaderSuccessRate outputs a new PeerJournalEntryLeaderSuccessRate implementation used to revert a state change +func NewPeerJournalEntryLeaderSuccessRate( + account *PeerAccount, + oldLeaderSuccessRate SignRate, +) (*PeerJournalEntryLeaderSuccessRate, error) { + if account == nil { + return nil, ErrNilAccountHandler + } + + return &PeerJournalEntryLeaderSuccessRate{ + account: account, + oldLeaderSuccessRate: oldLeaderSuccessRate, + }, nil +} + +// Revert applies undo operation +func (pjel *PeerJournalEntryLeaderSuccessRate) Revert() (AccountHandler, error) { + pjel.account.LeaderSuccessRate = pjel.oldLeaderSuccessRate + + return pjel.account, nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (pjel *PeerJournalEntryLeaderSuccessRate) IsInterfaceNil() bool { + if pjel == nil { + return true + } + return false +} + +// PeerJournalEntryRating is used to revert a rating change +type PeerJournalEntryRating struct { + account *PeerAccount + oldRating uint32 +} + +// NewPeerJournalEntryRating outputs a new PeerJournalEntryRating implementation used to revert a state change +func NewPeerJournalEntryRating(account *PeerAccount, oldRating uint32) (*PeerJournalEntryRating, error) { + if account == nil { + return nil, ErrNilAccountHandler + } + + return &PeerJournalEntryRating{ + account: account, + oldRating: oldRating, + }, nil +} + +// Revert applies undo operation +func (pjer *PeerJournalEntryRating) Revert() (AccountHandler, error) { + pjer.account.Rating = pjer.oldRating + + return pjer.account, nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (pjer *PeerJournalEntryRating) IsInterfaceNil() bool { + if pjer == nil { + return true + } + return false +} diff --git a/data/state/peerJournalEntries_test.go b/data/state/peerJournalEntries_test.go new file mode 100644 index 00000000000..06f5edf097d --- /dev/null +++ b/data/state/peerJournalEntries_test.go @@ -0,0 +1,360 @@ +package state_test + +import ( + "math/big" + "testing" + + "github.com/ElrondNetwork/elrond-go/data/mock" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/stretchr/testify/assert" +) + +func 
TestPeerJournalEntryAddress_NilAccountShouldErr(t *testing.T) { + t.Parallel() + + entry, err := state.NewPeerJournalEntryAddress(nil, nil) + + assert.Nil(t, entry) + assert.Equal(t, state.ErrNilAccountHandler, err) +} + +func TestPeerJournalEntryAddress_ShouldWork(t *testing.T) { + t.Parallel() + accnt, _ := state.NewPeerAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) + entry, err := state.NewPeerJournalEntryAddress(accnt, []byte("address")) + + assert.NotNil(t, entry) + assert.Nil(t, err) +} + +func TestPeerJournalEntryAddress_RevertOkValsShouldWork(t *testing.T) { + t.Parallel() + + shardRootHash := []byte("address") + accnt, _ := state.NewPeerAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) + entry, _ := state.NewPeerJournalEntryAddress(accnt, shardRootHash) + _, err := entry.Revert() + + assert.Nil(t, err) + assert.Equal(t, shardRootHash, accnt.Address) +} + +func TestPeerJournalEntrySchnorrPublicKey_NilAccountShouldErr(t *testing.T) { + t.Parallel() + + entry, err := state.NewPeerJournalEntrySchnorrPublicKey(nil, nil) + + assert.Nil(t, entry) + assert.Equal(t, state.ErrNilAccountHandler, err) +} + +func TestPeerJournalEntrySchnorrPublicKey_ShouldWork(t *testing.T) { + t.Parallel() + accnt, _ := state.NewPeerAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) + entry, err := state.NewPeerJournalEntrySchnorrPublicKey(accnt, []byte("address")) + + assert.NotNil(t, entry) + assert.Nil(t, err) +} + +func TestPeerJournalEntrySchnorrPublicKey_RevertOkValsShouldWork(t *testing.T) { + t.Parallel() + + shardRootHash := []byte("address") + accnt, _ := state.NewPeerAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) + entry, _ := state.NewPeerJournalEntrySchnorrPublicKey(accnt, shardRootHash) + _, err := entry.Revert() + + assert.Nil(t, err) + assert.Equal(t, shardRootHash, accnt.SchnorrPublicKey) +} + +func TestPeerJournalEntryBLSPublicKey_NilAccountShouldErr(t *testing.T) { + t.Parallel() + + entry, err := state.NewPeerJournalEntryBLSPublicKey(nil, nil) + + assert.Nil(t, entry) + assert.Equal(t, state.ErrNilAccountHandler, err) +} + +func TestPeerJournalEntryBLSPublicKey_ShouldWork(t *testing.T) { + t.Parallel() + accnt, _ := state.NewPeerAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) + entry, err := state.NewPeerJournalEntryBLSPublicKey(accnt, []byte("address")) + + assert.NotNil(t, entry) + assert.Nil(t, err) +} + +func TestPeerJournalEntryBLSPublicKey_RevertOkValsShouldWork(t *testing.T) { + t.Parallel() + + shardRootHash := []byte("address") + accnt, _ := state.NewPeerAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) + entry, _ := state.NewPeerJournalEntryBLSPublicKey(accnt, shardRootHash) + _, err := entry.Revert() + + assert.Nil(t, err) + assert.Equal(t, shardRootHash, accnt.BLSPublicKey) +} + +func TestPeerJournalEntryStake_NilAccountShouldErr(t *testing.T) { + t.Parallel() + + entry, err := state.NewPeerJournalEntryStake(nil, nil) + + assert.Nil(t, entry) + assert.Equal(t, state.ErrNilAccountHandler, err) +} + +func TestPeerJournalEntryStake_ShouldWork(t *testing.T) { + t.Parallel() + accnt, _ := state.NewPeerAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) + entry, err := state.NewPeerJournalEntryStake(accnt, big.NewInt(9)) + + assert.NotNil(t, entry) + assert.Nil(t, err) +} + +func TestPeerJournalEntryStake_RevertOkValsShouldWork(t *testing.T) { + t.Parallel() + + stake := big.NewInt(999) + accnt, _ := state.NewPeerAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) + entry, _ := 
state.NewPeerJournalEntryStake(accnt, stake) + _, err := entry.Revert() + + assert.Nil(t, err) + assert.Equal(t, stake.Uint64(), accnt.Stake.Uint64()) +} + +func TestPeerJournalEntryJailTime_NilAccountShouldErr(t *testing.T) { + t.Parallel() + + startTime := state.TimeStamp{Round: 10, Epoch: 10} + endTime := state.TimeStamp{Round: 11, Epoch: 10} + jailTime := state.TimePeriod{StartTime: startTime, EndTime: endTime} + + entry, err := state.NewPeerJournalEntryJailTime(nil, jailTime) + + assert.Nil(t, entry) + assert.Equal(t, state.ErrNilAccountHandler, err) +} + +func TestPeerJournalEntryJailTime_ShouldWork(t *testing.T) { + t.Parallel() + + startTime := state.TimeStamp{Round: 10, Epoch: 10} + endTime := state.TimeStamp{Round: 11, Epoch: 10} + jailTime := state.TimePeriod{StartTime: startTime, EndTime: endTime} + + accnt, _ := state.NewPeerAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) + entry, err := state.NewPeerJournalEntryJailTime(accnt, jailTime) + + assert.NotNil(t, entry) + assert.Nil(t, err) +} + +func TestPeerJournalEntryJailTime_RevertOkValsShouldWork(t *testing.T) { + t.Parallel() + + startTime := state.TimeStamp{Round: 10, Epoch: 10} + endTime := state.TimeStamp{Round: 11, Epoch: 10} + jailTime := state.TimePeriod{StartTime: startTime, EndTime: endTime} + + accnt, _ := state.NewPeerAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) + entry, _ := state.NewPeerJournalEntryJailTime(accnt, jailTime) + _, err := entry.Revert() + + assert.Nil(t, err) + assert.Equal(t, jailTime, accnt.JailTime) +} + +func TestPeerJournalEntryCurrentShardId_NilAccountShouldErr(t *testing.T) { + t.Parallel() + + entry, err := state.NewPeerJournalEntryCurrentShardId(nil, 0) + + assert.Nil(t, entry) + assert.Equal(t, state.ErrNilAccountHandler, err) +} + +func TestPeerJournalEntryCurrentShardId_ShouldWork(t *testing.T) { + t.Parallel() + + accnt, _ := state.NewPeerAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) + entry, err := state.NewPeerJournalEntryCurrentShardId(accnt, 0) + + assert.NotNil(t, entry) + assert.Nil(t, err) +} + +func TestPeerJournalEntryCurrentShardId_RevertOkValsShouldWork(t *testing.T) { + t.Parallel() + + accnt, _ := state.NewPeerAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) + entry, _ := state.NewPeerJournalEntryCurrentShardId(accnt, 10) + _, err := entry.Revert() + + assert.Nil(t, err) + assert.Equal(t, uint32(10), accnt.CurrentShardId) +} + +func TestPeerJournalEntryNextShardId_NilAccountShouldErr(t *testing.T) { + t.Parallel() + + entry, err := state.NewPeerJournalEntryNextShardId(nil, 0) + + assert.Nil(t, entry) + assert.Equal(t, state.ErrNilAccountHandler, err) +} + +func TestPeerJournalEntryNextShardId_ShouldWork(t *testing.T) { + t.Parallel() + + accnt, _ := state.NewPeerAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) + entry, err := state.NewPeerJournalEntryNextShardId(accnt, 0) + + assert.NotNil(t, entry) + assert.Nil(t, err) +} + +func TestPeerJournalEntryNextShardId_RevertOkValsShouldWork(t *testing.T) { + t.Parallel() + + accnt, _ := state.NewPeerAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) + entry, _ := state.NewPeerJournalEntryNextShardId(accnt, 10) + _, err := entry.Revert() + + assert.Nil(t, err) + assert.Equal(t, uint32(10), accnt.NextShardId) +} + +func TestPeerJournalEntryInWaitingList_NilAccountShouldErr(t *testing.T) { + t.Parallel() + + entry, err := state.NewPeerJournalEntryInWaitingList(nil, true) + + assert.Nil(t, entry) + assert.Equal(t, state.ErrNilAccountHandler, err) +} + +func 
TestPeerJournalEntryInWaitingList_ShouldWork(t *testing.T) { + t.Parallel() + + accnt, _ := state.NewPeerAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) + entry, err := state.NewPeerJournalEntryInWaitingList(accnt, true) + + assert.NotNil(t, entry) + assert.Nil(t, err) +} + +func TestPeerJournalEntryInWaitingList_RevertOkValsShouldWork(t *testing.T) { + t.Parallel() + + accnt, _ := state.NewPeerAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) + entry, _ := state.NewPeerJournalEntryInWaitingList(accnt, true) + _, err := entry.Revert() + + assert.Nil(t, err) + assert.True(t, accnt.NodeInWaitingList) +} + +func TestPeerJournalEntryValidatorSuccessRate_NilAccountShouldErr(t *testing.T) { + t.Parallel() + + successRate := state.SignRate{NrFailure: 10, NrSuccess: 10} + + entry, err := state.NewPeerJournalEntryValidatorSuccessRate(nil, successRate) + + assert.Nil(t, entry) + assert.Equal(t, state.ErrNilAccountHandler, err) +} + +func TestPeerJournalEntryValidatorSuccessRate_ShouldWork(t *testing.T) { + t.Parallel() + + successRate := state.SignRate{NrFailure: 10, NrSuccess: 10} + accnt, _ := state.NewPeerAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) + entry, err := state.NewPeerJournalEntryValidatorSuccessRate(accnt, successRate) + + assert.NotNil(t, entry) + assert.Nil(t, err) +} + +func TestPeerJournalEntryValidatorSuccessRate_RevertOkValsShouldWork(t *testing.T) { + t.Parallel() + + successRate := state.SignRate{NrFailure: 10, NrSuccess: 10} + accnt, _ := state.NewPeerAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) + entry, _ := state.NewPeerJournalEntryValidatorSuccessRate(accnt, successRate) + _, err := entry.Revert() + + assert.Nil(t, err) + assert.Equal(t, successRate, accnt.ValidatorSuccessRate) +} + +func TestPeerJournalEntryLeaderSuccessRate_NilAccountShouldErr(t *testing.T) { + t.Parallel() + + successRate := state.SignRate{NrFailure: 10, NrSuccess: 10} + + entry, err := state.NewPeerJournalEntryLeaderSuccessRate(nil, successRate) + + assert.Nil(t, entry) + assert.Equal(t, state.ErrNilAccountHandler, err) +} + +func TestPeerJournalEntryLeaderSuccessRate_ShouldWork(t *testing.T) { + t.Parallel() + + successRate := state.SignRate{NrFailure: 10, NrSuccess: 10} + accnt, _ := state.NewPeerAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) + entry, err := state.NewPeerJournalEntryLeaderSuccessRate(accnt, successRate) + + assert.NotNil(t, entry) + assert.Nil(t, err) +} + +func TestPeerJournalEntryLeaderSuccessRate_RevertOkValsShouldWork(t *testing.T) { + t.Parallel() + + successRate := state.SignRate{NrFailure: 10, NrSuccess: 10} + accnt, _ := state.NewPeerAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) + entry, _ := state.NewPeerJournalEntryLeaderSuccessRate(accnt, successRate) + _, err := entry.Revert() + + assert.Nil(t, err) + assert.Equal(t, successRate, accnt.LeaderSuccessRate) +} + +func TestPeerJournalEntryRating_NilAccountShouldErr(t *testing.T) { + t.Parallel() + + entry, err := state.NewPeerJournalEntryRating(nil, 10) + + assert.Nil(t, entry) + assert.Equal(t, state.ErrNilAccountHandler, err) +} + +func TestPeerJournalEntryRating_ShouldWork(t *testing.T) { + t.Parallel() + + accnt, _ := state.NewPeerAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) + entry, err := state.NewPeerJournalEntryRating(accnt, 10) + + assert.NotNil(t, entry) + assert.Nil(t, err) +} + +func TestPeerJournalEntryRating_RevertOkValsShouldWork(t *testing.T) { + t.Parallel() + + accnt, _ := state.NewPeerAccount(mock.NewAddressMock(), 
&mock.AccountTrackerStub{}) + entry, _ := state.NewPeerJournalEntryRating(accnt, 10) + _, err := entry.Revert() + + assert.Nil(t, err) + assert.Equal(t, uint32(10), accnt.Rating) +} diff --git a/dataRetriever/dataPool/shardDataPool.go b/dataRetriever/dataPool/shardDataPool.go index af17fdcccab..f87f3cfd726 100644 --- a/dataRetriever/dataPool/shardDataPool.go +++ b/dataRetriever/dataPool/shardDataPool.go @@ -8,6 +8,7 @@ import ( type shardedDataPool struct { transactions dataRetriever.ShardedDataCacherNotifier unsignedTransactions dataRetriever.ShardedDataCacherNotifier + rewardTransactions dataRetriever.ShardedDataCacherNotifier headers storage.Cacher metaBlocks storage.Cacher headersNonces dataRetriever.Uint64SyncMapCacher @@ -19,6 +20,7 @@ type shardedDataPool struct { func NewShardedDataPool( transactions dataRetriever.ShardedDataCacherNotifier, unsignedTransactions dataRetriever.ShardedDataCacherNotifier, + rewardTransactions dataRetriever.ShardedDataCacherNotifier, headers storage.Cacher, headersNonces dataRetriever.Uint64SyncMapCacher, miniBlocks storage.Cacher, @@ -32,6 +34,9 @@ func NewShardedDataPool( if unsignedTransactions == nil || unsignedTransactions.IsInterfaceNil() { return nil, dataRetriever.ErrNilUnsignedTransactionPool } + if rewardTransactions == nil || rewardTransactions.IsInterfaceNil() { + return nil, dataRetriever.ErrNilRewardTransactionPool + } if headers == nil || headers.IsInterfaceNil() { return nil, dataRetriever.ErrNilHeadersDataPool } @@ -51,6 +56,7 @@ func NewShardedDataPool( return &shardedDataPool{ transactions: transactions, unsignedTransactions: unsignedTransactions, + rewardTransactions: rewardTransactions, headers: headers, headersNonces: headersNonces, miniBlocks: miniBlocks, @@ -69,6 +75,11 @@ func (tdp *shardedDataPool) UnsignedTransactions() dataRetriever.ShardedDataCach return tdp.unsignedTransactions } +// RewardTransactions returns the holder for reward transactions (cross shard result entities) +func (tdp *shardedDataPool) RewardTransactions() dataRetriever.ShardedDataCacherNotifier { + return tdp.rewardTransactions +} + // Headers returns the holder for headers func (tdp *shardedDataPool) Headers() storage.Cacher { return tdp.headers diff --git a/dataRetriever/dataPool/shardDataPool_test.go b/dataRetriever/dataPool/shardDataPool_test.go index 4b47d68cfa1..14882486d01 100644 --- a/dataRetriever/dataPool/shardDataPool_test.go +++ b/dataRetriever/dataPool/shardDataPool_test.go @@ -15,6 +15,7 @@ func TestNewShardedDataPool_NilTransactionsShouldErr(t *testing.T) { tdp, err := dataPool.NewShardedDataPool( nil, &mock.ShardedDataStub{}, + &mock.ShardedDataStub{}, &mock.CacherStub{}, &mock.Uint64SyncMapCacherStub{}, &mock.CacherStub{}, @@ -30,6 +31,7 @@ func TestNewShardedDataPool_NilUnsignedTransactionsShouldErr(t *testing.T) { tdp, err := dataPool.NewShardedDataPool( &mock.ShardedDataStub{}, nil, + &mock.ShardedDataStub{}, &mock.CacherStub{}, &mock.Uint64SyncMapCacherStub{}, &mock.CacherStub{}, @@ -41,8 +43,25 @@ func TestNewShardedDataPool_NilUnsignedTransactionsShouldErr(t *testing.T) { assert.Nil(t, tdp) } +func TestNewShardedDataPool_NilRewardTransactionsShouldErr(t *testing.T) { + tdp, err := dataPool.NewShardedDataPool( + &mock.ShardedDataStub{}, + &mock.ShardedDataStub{}, + nil, + &mock.CacherStub{}, + &mock.Uint64SyncMapCacherStub{}, + &mock.CacherStub{}, + &mock.CacherStub{}, + &mock.CacherStub{}, + ) + + assert.Equal(t, dataRetriever.ErrNilRewardTransactionPool, err) + assert.Nil(t, tdp) +} + func 
TestNewShardedDataPool_NilHeadersShouldErr(t *testing.T) { tdp, err := dataPool.NewShardedDataPool( + &mock.ShardedDataStub{}, &mock.ShardedDataStub{}, &mock.ShardedDataStub{}, nil, @@ -58,6 +77,7 @@ func TestNewShardedDataPool_NilHeadersShouldErr(t *testing.T) { func TestNewShardedDataPool_NilHeaderNoncesShouldErr(t *testing.T) { tdp, err := dataPool.NewShardedDataPool( + &mock.ShardedDataStub{}, &mock.ShardedDataStub{}, &mock.ShardedDataStub{}, &mock.CacherStub{}, @@ -73,6 +93,7 @@ func TestNewShardedDataPool_NilHeaderNoncesShouldErr(t *testing.T) { func TestNewShardedDataPool_NilTxBlocksShouldErr(t *testing.T) { tdp, err := dataPool.NewShardedDataPool( + &mock.ShardedDataStub{}, &mock.ShardedDataStub{}, &mock.ShardedDataStub{}, &mock.CacherStub{}, @@ -88,6 +109,7 @@ func TestNewShardedDataPool_NilTxBlocksShouldErr(t *testing.T) { func TestNewShardedDataPool_NilPeerBlocksShouldErr(t *testing.T) { tdp, err := dataPool.NewShardedDataPool( + &mock.ShardedDataStub{}, &mock.ShardedDataStub{}, &mock.ShardedDataStub{}, &mock.CacherStub{}, @@ -103,6 +125,7 @@ func TestNewShardedDataPool_NilPeerBlocksShouldErr(t *testing.T) { func TestNewShardedDataPool_NilMetaBlocksShouldErr(t *testing.T) { tdp, err := dataPool.NewShardedDataPool( + &mock.ShardedDataStub{}, &mock.ShardedDataStub{}, &mock.ShardedDataStub{}, &mock.CacherStub{}, @@ -119,6 +142,7 @@ func TestNewShardedDataPool_NilMetaBlocksShouldErr(t *testing.T) { func TestNewShardedDataPool_OkValsShouldWork(t *testing.T) { transactions := &mock.ShardedDataStub{} scResults := &mock.ShardedDataStub{} + rewardTransactions := &mock.ShardedDataStub{} headers := &mock.CacherStub{} headerNonces := &mock.Uint64SyncMapCacherStub{} txBlocks := &mock.CacherStub{} @@ -127,6 +151,7 @@ func TestNewShardedDataPool_OkValsShouldWork(t *testing.T) { tdp, err := dataPool.NewShardedDataPool( transactions, scResults, + rewardTransactions, headers, headerNonces, txBlocks, @@ -137,6 +162,8 @@ func TestNewShardedDataPool_OkValsShouldWork(t *testing.T) { assert.Nil(t, err) //pointer checking assert.True(t, transactions == tdp.Transactions()) + assert.True(t, scResults == tdp.UnsignedTransactions()) + assert.True(t, rewardTransactions == tdp.RewardTransactions()) assert.True(t, headers == tdp.Headers()) assert.True(t, headerNonces == tdp.HeadersNonces()) assert.True(t, txBlocks == tdp.MiniBlocks()) diff --git a/dataRetriever/errors.go b/dataRetriever/errors.go index 627f318ff13..bf32a10e690 100644 --- a/dataRetriever/errors.go +++ b/dataRetriever/errors.go @@ -64,6 +64,9 @@ var ErrNilTxDataPool = errors.New("nil transaction data pool") // ErrNilUnsignedTransactionPool signals that a nil unsigned transactions pool has been provided var ErrNilUnsignedTransactionPool = errors.New("nil unsigned transactions data pool") +// ErrNilRewardTransactionPool signals that a nil reward transactions pool has been provided +var ErrNilRewardTransactionPool = errors.New("nil reward transaction data pool") + // ErrNilHeadersDataPool signals that a nil header pool has been provided var ErrNilHeadersDataPool = errors.New("nil headers data pool") @@ -176,6 +179,9 @@ var ErrEmptyTxRequestTopic = errors.New("empty transaction request topic") // ErrEmptyScrRequestTopic signals that an empty smart contract result topic has been provided var ErrEmptyScrRequestTopic = errors.New("empty smart contract result request topic") +// ErrEmptyRewardTxRequestTopic signals that an empty reward transaction topic has been provided +var ErrEmptyRewardTxRequestTopic = errors.New("empty rewards transactions request 
topic") + // ErrEmptyMiniBlockRequestTopic signals that an empty miniblock topic has been provided var ErrEmptyMiniBlockRequestTopic = errors.New("empty miniblock request topic") diff --git a/dataRetriever/factory/shard/resolversContainerFactory.go b/dataRetriever/factory/shard/resolversContainerFactory.go index 757f9539353..eba1026d37d 100644 --- a/dataRetriever/factory/shard/resolversContainerFactory.go +++ b/dataRetriever/factory/shard/resolversContainerFactory.go @@ -74,7 +74,11 @@ func NewResolversContainerFactory( func (rcf *resolversContainerFactory) Create() (dataRetriever.ResolversContainer, error) { container := containers.NewResolversContainer() - keys, resolverSlice, err := rcf.generateTxResolvers(factory.TransactionTopic, dataRetriever.TransactionUnit, rcf.dataPools.Transactions()) + keys, resolverSlice, err := rcf.generateTxResolvers( + factory.TransactionTopic, + dataRetriever.TransactionUnit, + rcf.dataPools.Transactions(), + ) if err != nil { return nil, err } @@ -96,6 +100,20 @@ func (rcf *resolversContainerFactory) Create() (dataRetriever.ResolversContainer return nil, err } + keys, resolverSlice, err = rcf.generateTxResolvers( + factory.RewardsTransactionTopic, + dataRetriever.RewardTransactionUnit, + rcf.dataPools.RewardTransactions(), + ) + if err != nil { + return nil, err + } + + err = container.AddMultiple(keys, resolverSlice) + if err != nil { + return nil, err + } + keys, resolverSlice, err = rcf.generateHdrResolver() if err != nil { return nil, err diff --git a/dataRetriever/factory/shard/resolversContainerFactory_test.go b/dataRetriever/factory/shard/resolversContainerFactory_test.go index 272c76093fa..aee73e043d8 100644 --- a/dataRetriever/factory/shard/resolversContainerFactory_test.go +++ b/dataRetriever/factory/shard/resolversContainerFactory_test.go @@ -69,6 +69,9 @@ func createDataPools() dataRetriever.PoolsHolder { pools.UnsignedTransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { return &mock.ShardedDataStub{} } + pools.RewardTransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{} + } return pools } @@ -415,13 +418,14 @@ func TestResolversContainerFactory_With4ShardsShouldWork(t *testing.T) { numResolverSCRs := noOfShards numResolverTxs := noOfShards + numResolverRewardTxs := noOfShards numResolverHeaders := 1 numResolverMiniBlocks := noOfShards numResolverPeerChanges := 1 numResolverMetachainShardHeaders := 1 numResolverMetaBlockHeaders := 1 totalResolvers := numResolverTxs + numResolverHeaders + numResolverMiniBlocks + numResolverPeerChanges + - numResolverMetachainShardHeaders + numResolverMetaBlockHeaders + numResolverSCRs + numResolverMetachainShardHeaders + numResolverMetaBlockHeaders + numResolverSCRs + numResolverRewardTxs assert.Equal(t, totalResolvers, container.Len()) } diff --git a/dataRetriever/interface.go b/dataRetriever/interface.go index df741bdc544..202ab369088 100644 --- a/dataRetriever/interface.go +++ b/dataRetriever/interface.go @@ -28,8 +28,10 @@ const ( MetaPeerDataUnit UnitType = 6 // UnsignedTransactionUnit is the unsigned transaction unit identifier UnsignedTransactionUnit UnitType = 7 + // RewardTransactionUnit is the reward transaction unit identifier + RewardTransactionUnit UnitType = 8 // MetaHdrNonceHashDataUnit is the meta header nonce-hash pair data unit identifier - MetaHdrNonceHashDataUnit UnitType = 8 + MetaHdrNonceHashDataUnit UnitType = 9 // ShardHdrNonceHashDataUnit is the header nonce-hash pair data unit identifier //TODO: Add only unit types lower 
than 100 @@ -196,6 +198,7 @@ type Uint64SyncMapCacher interface { type PoolsHolder interface { Transactions() ShardedDataCacherNotifier UnsignedTransactions() ShardedDataCacherNotifier + RewardTransactions() ShardedDataCacherNotifier Headers() storage.Cacher HeadersNonces() Uint64SyncMapCacher MiniBlocks() storage.Cacher diff --git a/dataRetriever/mock/poolsHolderStub.go b/dataRetriever/mock/poolsHolderStub.go index 43599982ea8..d189b57d055 100644 --- a/dataRetriever/mock/poolsHolderStub.go +++ b/dataRetriever/mock/poolsHolderStub.go @@ -11,6 +11,7 @@ type PoolsHolderStub struct { PeerChangesBlocksCalled func() storage.Cacher TransactionsCalled func() dataRetriever.ShardedDataCacherNotifier UnsignedTransactionsCalled func() dataRetriever.ShardedDataCacherNotifier + RewardTransactionsCalled func() dataRetriever.ShardedDataCacherNotifier MiniBlocksCalled func() storage.Cacher MetaBlocksCalled func() storage.Cacher } @@ -43,6 +44,10 @@ func (phs *PoolsHolderStub) UnsignedTransactions() dataRetriever.ShardedDataCach return phs.UnsignedTransactionsCalled() } +func (phs *PoolsHolderStub) RewardTransactions() dataRetriever.ShardedDataCacherNotifier { + return phs.RewardTransactionsCalled() +} + // IsInterfaceNil returns true if there is no value under the interface func (phs *PoolsHolderStub) IsInterfaceNil() bool { if phs == nil { diff --git a/dataRetriever/requestHandlers/requestHandler.go b/dataRetriever/requestHandlers/requestHandler.go index 690d87a8b6c..76ebbcf5764 100644 --- a/dataRetriever/requestHandlers/requestHandler.go +++ b/dataRetriever/requestHandlers/requestHandler.go @@ -10,13 +10,14 @@ import ( "github.com/ElrondNetwork/elrond-go/sharding" ) -type ResolverRequestHandler struct { +type resolverRequestHandler struct { resolversFinder dataRetriever.ResolversFinder txRequestTopic string scrRequestTopic string + rewardTxRequestTopic string mbRequestTopic string shardHdrRequestTopic string - metaHdrRequestTopic string + metaHdrRequestTopic string isMetaChain bool maxTxsToRequest int } @@ -28,11 +29,12 @@ func NewShardResolverRequestHandler( finder dataRetriever.ResolversFinder, txRequestTopic string, scrRequestTopic string, + rewardTxRequestTopic string, mbRequestTopic string, shardHdrRequestTopic string, metaHdrRequestTopic string, maxTxsToRequest int, -) (*ResolverRequestHandler, error) { +) (*resolverRequestHandler, error) { if finder == nil || finder.IsInterfaceNil() { return nil, dataRetriever.ErrNilResolverFinder } @@ -42,6 +44,9 @@ func NewShardResolverRequestHandler( if len(scrRequestTopic) == 0 { return nil, dataRetriever.ErrEmptyScrRequestTopic } + if len(rewardTxRequestTopic) == 0 { + return nil, dataRetriever.ErrEmptyRewardTxRequestTopic + } if len(mbRequestTopic) == 0 { return nil, dataRetriever.ErrEmptyMiniBlockRequestTopic } @@ -55,13 +60,14 @@ func NewShardResolverRequestHandler( return nil, dataRetriever.ErrInvalidMaxTxRequest } - rrh := &ResolverRequestHandler{ + rrh := &resolverRequestHandler{ resolversFinder: finder, txRequestTopic: txRequestTopic, mbRequestTopic: mbRequestTopic, shardHdrRequestTopic: shardHdrRequestTopic, metaHdrRequestTopic: metaHdrRequestTopic, scrRequestTopic: scrRequestTopic, + rewardTxRequestTopic: rewardTxRequestTopic, isMetaChain: false, maxTxsToRequest: maxTxsToRequest, } @@ -74,7 +80,7 @@ func NewMetaResolverRequestHandler( finder dataRetriever.ResolversFinder, shardHdrRequestTopic string, metaHdrRequestTopic string, -) (*ResolverRequestHandler, error) { +) (*resolverRequestHandler, error) { if finder == nil || finder.IsInterfaceNil() { 
return nil, dataRetriever.ErrNilResolverFinder } @@ -85,7 +91,7 @@ func NewMetaResolverRequestHandler( return nil, dataRetriever.ErrEmptyMetaHeaderRequestTopic } - rrh := &ResolverRequestHandler{ + rrh := &resolverRequestHandler{ resolversFinder: finder, shardHdrRequestTopic: shardHdrRequestTopic, metaHdrRequestTopic: metaHdrRequestTopic, @@ -96,11 +102,11 @@ func NewMetaResolverRequestHandler( } // RequestTransaction method asks for transactions from the connected peers -func (rrh *ResolverRequestHandler) RequestTransaction(destShardID uint32, txHashes [][]byte) { +func (rrh *resolverRequestHandler) RequestTransaction(destShardID uint32, txHashes [][]byte) { rrh.requestByHashes(destShardID, txHashes, rrh.txRequestTopic) } -func (rrh *ResolverRequestHandler) requestByHashes(destShardID uint32, hashes [][]byte, topic string) { +func (rrh *resolverRequestHandler) requestByHashes(destShardID uint32, hashes [][]byte, topic string) { log.Debug(fmt.Sprintf("Requesting %d transactions from shard %d from network on topic %s...\n", len(hashes), destShardID, topic)) resolver, err := rrh.resolversFinder.CrossShardResolver(topic, destShardID) if err != nil { @@ -132,17 +138,22 @@ func (rrh *ResolverRequestHandler) requestByHashes(destShardID uint32, hashes [] } // RequestUnsignedTransactions method asks for unsigned transactions from the connected peers -func (rrh *ResolverRequestHandler) RequestUnsignedTransactions(destShardID uint32, scrHashes [][]byte) { +func (rrh *resolverRequestHandler) RequestUnsignedTransactions(destShardID uint32, scrHashes [][]byte) { rrh.requestByHashes(destShardID, scrHashes, rrh.scrRequestTopic) } +// RequestRewardTransactions requests for reward transactions from the connected peers +func (rrh *resolverRequestHandler) RequestRewardTransactions(destShardId uint32, rewardTxHashes [][]byte) { + rrh.requestByHashes(destShardId, rewardTxHashes, rrh.rewardTxRequestTopic) +} + // RequestMiniBlock method asks for miniblocks from the connected peers -func (rrh *ResolverRequestHandler) RequestMiniBlock(shardId uint32, miniblockHash []byte) { +func (rrh *resolverRequestHandler) RequestMiniBlock(shardId uint32, miniblockHash []byte) { rrh.requestByHash(shardId, miniblockHash, rrh.mbRequestTopic) } // RequestHeader method asks for header from the connected peers -func (rrh *ResolverRequestHandler) RequestHeader(shardId uint32, hash []byte) { +func (rrh *resolverRequestHandler) RequestHeader(shardId uint32, hash []byte) { //TODO: Refactor this class and create specific methods for requesting shard or meta data var topic string if shardId == sharding.MetachainShardId { @@ -154,7 +165,7 @@ func (rrh *ResolverRequestHandler) RequestHeader(shardId uint32, hash []byte) { rrh.requestByHash(shardId, hash, topic) } -func (rrh *ResolverRequestHandler) requestByHash(destShardID uint32, hash []byte, baseTopic string) { +func (rrh *resolverRequestHandler) requestByHash(destShardID uint32, hash []byte, baseTopic string) { log.Debug(fmt.Sprintf("Requesting %s from shard %d with hash %s from network\n", baseTopic, destShardID, core.ToB64(hash))) var resolver dataRetriever.Resolver @@ -178,7 +189,7 @@ func (rrh *ResolverRequestHandler) requestByHash(destShardID uint32, hash []byte } // RequestHeaderByNonce method asks for transactions from the connected peers -func (rrh *ResolverRequestHandler) RequestHeaderByNonce(destShardID uint32, nonce uint64) { +func (rrh *resolverRequestHandler) RequestHeaderByNonce(destShardID uint32, nonce uint64) { var err error var resolver dataRetriever.Resolver var topic 
string @@ -208,7 +219,7 @@ func (rrh *ResolverRequestHandler) RequestHeaderByNonce(destShardID uint32, nonc } // IsInterfaceNil returns true if there is no value under the interface -func (rrh *ResolverRequestHandler) IsInterfaceNil() bool { +func (rrh *resolverRequestHandler) IsInterfaceNil() bool { if rrh == nil { return true } diff --git a/dataRetriever/requestHandlers/requestHandler_test.go b/dataRetriever/requestHandlers/requestHandler_test.go index 2e5f4b747de..bb840b0cd9f 100644 --- a/dataRetriever/requestHandlers/requestHandler_test.go +++ b/dataRetriever/requestHandlers/requestHandler_test.go @@ -51,7 +51,16 @@ func TestNewMetaResolverRequestHandler(t *testing.T) { func TestNewShardResolverRequestHandlerNilFinder(t *testing.T) { t.Parallel() - rrh, err := NewShardResolverRequestHandler(nil, "topic", "topic", "topic", "topic", "topic", 1) + rrh, err := NewShardResolverRequestHandler( + nil, + "topic", + "topic", + "topic", + "topic", + "topic", + "topic", + 1, + ) assert.Nil(t, rrh) assert.Equal(t, dataRetriever.ErrNilResolverFinder, err) @@ -60,7 +69,16 @@ func TestNewShardResolverRequestHandlerNilFinder(t *testing.T) { func TestNewShardResolverRequestHandlerTxTopicEmpty(t *testing.T) { t.Parallel() - rrh, err := NewShardResolverRequestHandler(&mock.ResolversFinderStub{}, "", "topic", "topic", "topic", "topic", 1) + rrh, err := NewShardResolverRequestHandler( + &mock.ResolversFinderStub{}, + "", + "topic", + "topic", + "topic", + "topic", + "topic", + 1, + ) assert.Nil(t, rrh) assert.Equal(t, dataRetriever.ErrEmptyTxRequestTopic, err) @@ -69,7 +87,16 @@ func TestNewShardResolverRequestHandlerTxTopicEmpty(t *testing.T) { func TestNewShardResolverRequestHandlerScrTopicEmpty(t *testing.T) { t.Parallel() - rrh, err := NewShardResolverRequestHandler(&mock.ResolversFinderStub{}, "topic", "", "topic", "topic", "topic", 1) + rrh, err := NewShardResolverRequestHandler( + &mock.ResolversFinderStub{}, + "topic", + "", + "topic", + "topic", + "topic", + "topic", + 1, + ) assert.Nil(t, rrh) assert.Equal(t, dataRetriever.ErrEmptyScrRequestTopic, err) @@ -78,7 +105,15 @@ func TestNewShardResolverRequestHandlerScrTopicEmpty(t *testing.T) { func TestNewShardResolverRequestHandlerMBTopicEmpty(t *testing.T) { t.Parallel() - rrh, err := NewShardResolverRequestHandler(&mock.ResolversFinderStub{}, "topic", "topic", "", "topic", "topic", 1) + rrh, err := NewShardResolverRequestHandler( + &mock.ResolversFinderStub{}, + "topic", + "topic", + "topic", + "", + "topic", + "topic", + 1) assert.Nil(t, rrh) assert.Equal(t, dataRetriever.ErrEmptyMiniBlockRequestTopic, err) @@ -87,7 +122,7 @@ func TestNewShardResolverRequestHandlerMBTopicEmpty(t *testing.T) { func TestNewShardResolverRequestHandlerShardHdrTopicEmpty(t *testing.T) { t.Parallel() - rrh, err := NewShardResolverRequestHandler(&mock.ResolversFinderStub{}, "topic", "topic", "topic", "", "topic", 1) + rrh, err := NewShardResolverRequestHandler(&mock.ResolversFinderStub{}, "topic", "topic", "topic", "topic", "", "topic", 1) assert.Nil(t, rrh) assert.Equal(t, dataRetriever.ErrEmptyShardHeaderRequestTopic, err) @@ -96,7 +131,16 @@ func TestNewShardResolverRequestHandlerShardHdrTopicEmpty(t *testing.T) { func TestNewShardResolverRequestHandlerMetaHdrTopicEmpty(t *testing.T) { t.Parallel() - rrh, err := NewShardResolverRequestHandler(&mock.ResolversFinderStub{}, "topic", "topic", "topic", "topic", "", 1) + rrh, err := NewShardResolverRequestHandler( + &mock.ResolversFinderStub{}, + "topic", + "topic", + "topic", + "topic", + "topic", + "", + 1, + ) 
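For reference, a hedged sketch of how the extended constructor is wired outside these tests; the topic strings are placeholders rather than the real factory constants, and finder is any dataRetriever.ResolversFinder implementation:

package example

import (
	"github.com/ElrondNetwork/elrond-go/dataRetriever"
	"github.com/ElrondNetwork/elrond-go/dataRetriever/requestHandlers"
)

func requestRewardTxs(finder dataRetriever.ResolversFinder, destShardId uint32, hashes [][]byte) error {
	// topic strings below are placeholders, not the constants used by the node wiring
	rrh, err := requestHandlers.NewShardResolverRequestHandler(
		finder,
		"transactions",         // txRequestTopic
		"unsignedTransactions", // scrRequestTopic
		"rewardTransactions",   // rewardTxRequestTopic (new in this patch)
		"miniBlocks",           // mbRequestTopic
		"shardBlocks",          // shardHdrRequestTopic
		"metachainBlocks",      // metaHdrRequestTopic
		100,                    // maxTxsToRequest
	)
	if err != nil {
		return err
	}

	// the new method requests reward transactions by hash on the dedicated topic
	rrh.RequestRewardTransactions(destShardId, hashes)
	return nil
}
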
assert.Nil(t, rrh) assert.Equal(t, dataRetriever.ErrEmptyMetaHeaderRequestTopic, err) @@ -105,7 +149,16 @@ func TestNewShardResolverRequestHandlerMetaHdrTopicEmpty(t *testing.T) { func TestNewShardResolverRequestHandlerMaxTxRequestTooSmall(t *testing.T) { t.Parallel() - rrh, err := NewShardResolverRequestHandler(&mock.ResolversFinderStub{}, "topic", "topic", "topic", "topic", "topic", 0) + rrh, err := NewShardResolverRequestHandler( + &mock.ResolversFinderStub{}, + "topic", + "topic", + "topic", + "topic", + "topic", + "topic", + 0, + ) assert.Nil(t, rrh) assert.Equal(t, dataRetriever.ErrInvalidMaxTxRequest, err) @@ -114,7 +167,16 @@ func TestNewShardResolverRequestHandlerMaxTxRequestTooSmall(t *testing.T) { func TestNewShardResolverRequestHandler(t *testing.T) { t.Parallel() - rrh, err := NewShardResolverRequestHandler(&mock.ResolversFinderStub{}, "topic", "topic", "topic", "topic", "topic", 1) + rrh, err := NewShardResolverRequestHandler( + &mock.ResolversFinderStub{}, + "topic", + "topic", + "topic", + "topic", + "topic", + "topic", + 1, + ) assert.Nil(t, err) assert.NotNil(t, rrh) @@ -144,6 +206,7 @@ func TestResolverRequestHandler_RequestTransactionErrorWhenGettingCrossShardReso "topic", "topic", "topic", + "topic", 1, ) @@ -173,6 +236,7 @@ func TestResolverRequestHandler_RequestTransactionWrongResolverShouldNotPanic(t "topic", "topic", "topic", + "topic", 1, ) @@ -201,6 +265,7 @@ func TestResolverRequestHandler_RequestTransactionShouldRequestTransactions(t *t "topic", "topic", "topic", + "topic", 1, ) @@ -245,6 +310,7 @@ func TestResolverRequestHandler_RequestTransactionErrorsOnRequestShouldNotPanic( "topic", "topic", "topic", + "topic", 1, ) @@ -283,6 +349,7 @@ func TestResolverRequestHandler_RequestMiniBlockErrorWhenGettingCrossShardResolv "topic", "topic", "topic", + "topic", 1, ) @@ -317,6 +384,7 @@ func TestResolverRequestHandler_RequestMiniBlockErrorsOnRequestShouldNotPanic(t "topic", "topic", "topic", + "topic", 1, ) @@ -345,6 +413,7 @@ func TestResolverRequestHandler_RequestMiniBlockShouldCallRequestOnResolver(t *t "topic", "topic", "topic", + "topic", 1, ) @@ -377,6 +446,7 @@ func TestResolverRequestHandler_RequestHeaderShouldCallRequestOnResolver(t *test "topic", "topic", "topic", + "topic", 1, ) @@ -410,6 +480,7 @@ func TestResolverRequestHandler_RequestHeaderByNonceShardFinderReturnsErrorShoul "topic", "topic", "topic", + "topic", 1, ) @@ -444,6 +515,7 @@ func TestResolverRequestHandler_RequestHeaderByNonceShardFinderReturnsAWrongReso "topic", "topic", "topic", + "topic", 1, ) @@ -478,6 +550,7 @@ func TestResolverRequestHandler_RequestHeaderByNonceShardResolverFailsShouldNotP "topic", "topic", "topic", + "topic", 1, ) @@ -506,6 +579,7 @@ func TestResolverRequestHandler_RequestHeaderByNonceShardShouldRequest(t *testin "topic", "topic", "topic", + "topic", 1, ) @@ -564,6 +638,7 @@ func TestResolverRequestHandler_RequestScrErrorWhenGettingCrossShardResolverShou "topic", "topic", "topic", + "topic", 1, ) @@ -593,6 +668,7 @@ func TestResolverRequestHandler_RequestScrWrongResolverShouldNotPanic(t *testing "topic", "topic", "topic", + "topic", 1, ) @@ -621,6 +697,7 @@ func TestResolverRequestHandler_RequestScrShouldRequestScr(t *testing.T) { "topic", "topic", "topic", + "topic", 1, ) @@ -665,6 +742,7 @@ func TestResolverRequestHandler_RequestScrErrorsOnRequestShouldNotPanic(t *testi "topic", "topic", "topic", + "topic", 1, ) diff --git a/go.sum b/go.sum index 7feecd1ae9f..c2336c66e76 100644 --- a/go.sum +++ b/go.sum @@ -108,7 +108,6 @@ github.com/golang/groupcache 
v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.0 h1:kbxbvI4Un1LUWKxufD+BiE6AEExYYgkQLQmLFqA1LFk= github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0= github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -176,7 +175,6 @@ github.com/jbenet/goprocess v0.1.3/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZl github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= -github.com/json-iterator/go v1.1.5 h1:gL2yXlmiIo4+t+y32d4WGwOjKGYcGOuyrg46vadswDE= github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= diff --git a/integrationTests/consensus/consensus_test.go b/integrationTests/consensus/consensus_test.go index b0392f57683..e36bb533223 100644 --- a/integrationTests/consensus/consensus_test.go +++ b/integrationTests/consensus/consensus_test.go @@ -28,7 +28,14 @@ func getPkEncoded(pubKey crypto.PublicKey) string { return encodeAddress(pk) } -func initNodesAndTest(numNodes, consensusSize, numInvalid uint32, roundTime uint64, consensusType string) ([]*testNode, p2p.Messenger, *sync.Map) { +func initNodesAndTest( + numNodes, + consensusSize, + numInvalid uint32, + roundTime uint64, + consensusType string, +) ([]*testNode, p2p.Messenger, *sync.Map) { + fmt.Println("Step 1. 
Setup nodes...") advertiser := createMessengerWithKadDht(context.Background(), "") @@ -43,24 +50,45 @@ func initNodesAndTest(numNodes, consensusSize, numInvalid uint32, roundTime uint getConnectableAddress(advertiser), consensusType, ) - displayAndStartNodes(nodes) + + for _, nodesList := range nodes { + displayAndStartNodes(nodesList) + } if numInvalid < numNodes { for i := uint32(0); i < numInvalid; i++ { - nodes[i].blkProcessor.ProcessBlockCalled = func(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler, haveTime func() time.Duration) error { - fmt.Println("process block invalid ", header.GetRound(), header.GetNonce(), getPkEncoded(nodes[i].pk)) + nodes[0][i].blkProcessor.ProcessBlockCalled = func( + blockChain data.ChainHandler, + header data.HeaderHandler, + body data.BodyHandler, + haveTime func() time.Duration, + ) error { + + fmt.Println( + "process block invalid ", + header.GetRound(), + header.GetNonce(), + getPkEncoded(nodes[0][i].pk), + ) return process.ErrBlockHashDoesNotMatch } - nodes[i].blkProcessor.CreateBlockHeaderCalled = func(body data.BodyHandler, round uint64, haveTime func() bool) (handler data.HeaderHandler, e error) { + nodes[0][i].blkProcessor.CreateBlockHeaderCalled = func( + body data.BodyHandler, + round uint64, + haveTime func() bool, + ) (handler data.HeaderHandler, e error) { return nil, process.ErrAccountStateDirty } - nodes[i].blkProcessor.CreateBlockCalled = func(round uint64, haveTime func() bool) (handler data.BodyHandler, e error) { + nodes[0][i].blkProcessor.CreateBlockCalled = func( + round uint64, + haveTime func() bool, + ) (handler data.BodyHandler, e error) { return nil, process.ErrWrongTypeAssertion } } } - return nodes, advertiser, concMap + return nodes[0], advertiser, concMap } func startNodesWithCommitBlock(nodes []*testNode, mutex *sync.Mutex, nonceForRoundMap map[uint64]uint64, totalCalled *int) error { diff --git a/integrationTests/consensus/testInitializer.go b/integrationTests/consensus/testInitializer.go index 83639c2d48b..dc6a9f1e788 100644 --- a/integrationTests/consensus/testInitializer.go +++ b/integrationTests/consensus/testInitializer.go @@ -10,6 +10,8 @@ import ( "sync" "time" + "math/big" + "github.com/ElrondNetwork/elrond-go/consensus/round" "github.com/ElrondNetwork/elrond-go/crypto" "github.com/ElrondNetwork/elrond-go/crypto/signing" @@ -73,6 +75,48 @@ type testNode struct { metachainHdrRecv int32 } +type keyPair struct { + sk crypto.PrivateKey + pk crypto.PublicKey +} + +type cryptoParams struct { + keyGen crypto.KeyGenerator + keys map[uint32][]*keyPair + singleSigner crypto.SingleSigner +} + +func genValidatorsFromPubKeys(pubKeysMap map[uint32][]string) map[uint32][]sharding.Validator { + validatorsMap := make(map[uint32][]sharding.Validator) + + for shardId, shardNodesPks := range pubKeysMap { + shardValidators := make([]sharding.Validator, 0) + for i := 0; i < len(shardNodesPks); i++ { + address := fmt.Sprintf("addr_%d_%d", shardId, i) + v, _ := sharding.NewValidator(big.NewInt(0), 1, []byte(shardNodesPks[i]), []byte(address)) + shardValidators = append(shardValidators, v) + } + validatorsMap[shardId] = shardValidators + } + + return validatorsMap +} + +func pubKeysMapFromKeysMap(keyPairMap map[uint32][]*keyPair) map[uint32][]string { + keysMap := make(map[uint32][]string, 0) + + for shardId, pairList := range keyPairMap { + shardKeys := make([]string, len(pairList)) + for i, pair := range pairList { + b, _ := pair.pk.ToByteArray() + shardKeys[i] = string(b) + } + keysMap[shardId] = shardKeys 
+ } + + return keysMap +} + func createMessengerWithKadDht(ctx context.Context, initialAddr string) p2p.Messenger { prvKey, _ := ecdsa.GenerateKey(btcec.S256(), r) sk := (*libp2pCrypto.Secp256k1PrivateKey)(prvKey) @@ -148,6 +192,7 @@ func createTestStore() dataRetriever.StorageService { func createTestShardDataPool() dataRetriever.PoolsHolder { txPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache}) uTxPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache}) + rewardsTxPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache}) cacherCfg := storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache} hdrPool, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) @@ -167,6 +212,7 @@ func createTestShardDataPool() dataRetriever.PoolsHolder { dPool, _ := dataPool.NewShardedDataPool( txPool, uTxPool, + rewardsTxPool, hdrPool, hdrNonces, txBlockBody, @@ -191,21 +237,37 @@ func createAccountsDB(marshalizer marshal.Marshalizer) state.AccountsAdapter { return adb } -func initialPrivPubKeys(numConsensus int) ([]crypto.PrivateKey, []crypto.PublicKey, crypto.KeyGenerator) { - privKeys := make([]crypto.PrivateKey, 0) - pubKeys := make([]crypto.PublicKey, 0) - - testSuite := kyber.NewSuitePairingBn256() - testKeyGen := signing.NewKeyGenerator(testSuite) +func createCryptoParams(nodesPerShard int, nbMetaNodes int, nbShards int) *cryptoParams { + suite := kyber.NewSuitePairingBn256() + singleSigner := &singlesig.SchnorrSigner{} + keyGen := signing.NewKeyGenerator(suite) + + keysMap := make(map[uint32][]*keyPair) + keyPairs := make([]*keyPair, nodesPerShard) + for shardId := 0; shardId < nbShards; shardId++ { + for n := 0; n < nodesPerShard; n++ { + kp := &keyPair{} + kp.sk, kp.pk = keyGen.GeneratePair() + keyPairs[n] = kp + } + keysMap[uint32(shardId)] = keyPairs + } - for i := 0; i < numConsensus; i++ { - sk, pk := testKeyGen.GeneratePair() + keyPairs = make([]*keyPair, nbMetaNodes) + for n := 0; n < nbMetaNodes; n++ { + kp := &keyPair{} + kp.sk, kp.pk = keyGen.GeneratePair() + keyPairs[n] = kp + } + keysMap[sharding.MetachainShardId] = keyPairs - privKeys = append(privKeys, sk) - pubKeys = append(pubKeys, pk) + params := &cryptoParams{ + keys: keysMap, + keyGen: keyGen, + singleSigner: singleSigner, } - return privKeys, pubKeys, testKeyGen + return params } func createHasher(consensusType string) hashing.Hasher { @@ -217,6 +279,7 @@ func createHasher(consensusType string) hashing.Hasher { func createConsensusOnlyNode( shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, shardId uint32, selfId uint32, initialAddr string, @@ -348,6 +411,7 @@ func createConsensusOnlyNode( node.WithAccountsAdapter(accntAdapter), node.WithKeyGen(testKeyGen), node.WithShardCoordinator(shardCoordinator), + node.WithNodesCoordinator(nodesCoordinator), node.WithBlockChain(blockChain), node.WithMultiSigner(testMultiSig), node.WithTxSingleSigner(singlesigner), @@ -374,41 +438,60 @@ func createNodes( roundTime uint64, serviceID string, consensusType string, -) []*testNode { +) map[uint32][]*testNode { + + nodes := make(map[uint32][]*testNode) + cp := createCryptoParams(nodesPerShard, 1, 1) + keysMap := pubKeysMapFromKeysMap(cp.keys) + validatorsMap := genValidatorsFromPubKeys(keysMap) + nodesList := make([]*testNode, nodesPerShard) - privKeys, pubKeys, testKeyGen := initialPrivPubKeys(nodesPerShard) - //first node generated will have is 
pk belonging to firstSkShardId - nodes := make([]*testNode, nodesPerShard) + pubKeys := make([]crypto.PublicKey, len(cp.keys[0])) + for idx, keyPairShard := range cp.keys[0] { + pubKeys[idx] = keyPairShard.pk + } for i := 0; i < nodesPerShard; i++ { testNode := &testNode{ shardId: uint32(0), } + kp := cp.keys[0][i] shardCoordinator, _ := sharding.NewMultiShardCoordinator(uint32(1), uint32(0)) + nodesCoordinator, _ := sharding.NewIndexHashedNodesCoordinator( + consensusSize, + 1, + createHasher(consensusType), + 0, + 1, + validatorsMap, + ) + n, mes, blkProcessor, blkc := createConsensusOnlyNode( shardCoordinator, + nodesCoordinator, testNode.shardId, uint32(i), serviceID, uint32(consensusSize), roundTime, - privKeys[i], + kp.sk, pubKeys, - testKeyGen, + cp.keyGen, consensusType, ) testNode.node = n testNode.node = n - testNode.sk = privKeys[i] + testNode.sk = kp.sk testNode.mesenger = mes - testNode.pk = pubKeys[i] + testNode.pk = kp.pk testNode.blkProcessor = blkProcessor testNode.blkc = blkc - nodes[i] = testNode + nodesList[i] = testNode } + nodes[0] = nodesList return nodes } diff --git a/integrationTests/frontend/wallet/txInterception_test.go b/integrationTests/frontend/wallet/txInterception_test.go index 30fa273531a..43cb543479e 100644 --- a/integrationTests/frontend/wallet/txInterception_test.go +++ b/integrationTests/frontend/wallet/txInterception_test.go @@ -106,7 +106,12 @@ func testInterceptedTxFromFrontendGeneratedParams( initialNodeAddr := "nodeAddr" valMinting := big.NewInt(20000) - node := integrationTests.NewTestProcessorNode(maxShards, nodeShardId, txSignPrivKeyShardId, initialNodeAddr) + node := integrationTests.NewTestProcessorNode( + maxShards, + nodeShardId, + txSignPrivKeyShardId, + initialNodeAddr, + ) txHexHash := "" diff --git a/integrationTests/mock/blockProcessorMock.go b/integrationTests/mock/blockProcessorMock.go index 6a4ce0ce0b1..65a48103c79 100644 --- a/integrationTests/mock/blockProcessorMock.go +++ b/integrationTests/mock/blockProcessorMock.go @@ -22,6 +22,7 @@ type BlockProcessorMock struct { DecodeBlockBodyCalled func(dta []byte) data.BodyHandler DecodeBlockHeaderCalled func(dta []byte) data.HeaderHandler AddLastNotarizedHdrCalled func(shardId uint32, processedHdr data.HeaderHandler) + SetConsensusDataCalled func(randomness []byte, round uint64, epoch uint32, shardId uint32) } // ProcessBlock mocks pocessing a block @@ -92,6 +93,12 @@ func (blProcMock BlockProcessorMock) AddLastNotarizedHdr(shardId uint32, process blProcMock.AddLastNotarizedHdrCalled(shardId, processedHdr) } +func (blProcMock BlockProcessorMock) SetConsensusData(randomness []byte, round uint64, epoch uint32, shardId uint32) { + if blProcMock.SetConsensusDataCalled != nil { + blProcMock.SetConsensusDataCalled(randomness, round, epoch, shardId) + } +} + // IsInterfaceNil returns true if there is no value under the interface func (blProcMock *BlockProcessorMock) IsInterfaceNil() bool { if blProcMock == nil { diff --git a/integrationTests/mock/chronologyValidatorMock.go b/integrationTests/mock/chronologyValidatorMock.go deleted file mode 100644 index ba66f42421a..00000000000 --- a/integrationTests/mock/chronologyValidatorMock.go +++ /dev/null @@ -1,16 +0,0 @@ -package mock - -type ChronologyValidatorMock struct { -} - -func (cvm *ChronologyValidatorMock) ValidateReceivedBlock(shardID uint32, epoch uint32, nonce uint64, round uint64) error { - return nil -} - -// IsInterfaceNil returns true if there is no value under the interface -func (cvm *ChronologyValidatorMock) IsInterfaceNil() bool { - 
if cvm == nil { - return true - } - return false -} diff --git a/integrationTests/mock/hasherSpongeMock.go b/integrationTests/mock/hasherSpongeMock.go new file mode 100644 index 00000000000..2a1c66b9318 --- /dev/null +++ b/integrationTests/mock/hasherSpongeMock.go @@ -0,0 +1,33 @@ +package mock + +import ( + "golang.org/x/crypto/blake2b" +) + +var hasherSpongeEmptyHash []byte + +const hashSize = 16 + +// HasherSpongeMock that will be used for testing +type HasherSpongeMock struct { +} + +// Compute will output the SHA's equivalent of the input string +func (sha HasherSpongeMock) Compute(s string) []byte { + h, _ := blake2b.New(hashSize, nil) + h.Write([]byte(s)) + return h.Sum(nil) +} + +// EmptyHash will return the equivalent of empty string SHA's +func (sha HasherSpongeMock) EmptyHash() []byte { + if len(hasherSpongeEmptyHash) == 0 { + hasherSpongeEmptyHash = sha.Compute("") + } + return hasherSpongeEmptyHash +} + +// Size returns the required size in bytes +func (HasherSpongeMock) Size() int { + return hashSize +} diff --git a/integrationTests/mock/keyMock.go b/integrationTests/mock/keyMock.go new file mode 100644 index 00000000000..1b94601ef15 --- /dev/null +++ b/integrationTests/mock/keyMock.go @@ -0,0 +1,88 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go/crypto" +) + +type PublicKeyMock struct { +} + +type PrivateKeyMock struct { +} + +type KeyGenMock struct { +} + +//------- PublicKeyMock + +func (sspk *PublicKeyMock) ToByteArray() ([]byte, error) { + return []byte("pubKey"), nil +} + +func (sspk *PublicKeyMock) Suite() crypto.Suite { + return nil +} + +func (sspk *PublicKeyMock) Point() crypto.Point { + return nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (sspk *PublicKeyMock) IsInterfaceNil() bool { + if sspk == nil { + return true + } + return false +} + +//------- PrivateKeyMock + +func (sk *PrivateKeyMock) ToByteArray() ([]byte, error) { + return []byte("privKey"), nil +} + +func (sk *PrivateKeyMock) GeneratePublic() crypto.PublicKey { + return &PublicKeyMock{} +} + +func (sk *PrivateKeyMock) Suite() crypto.Suite { + return nil +} + +func (sk *PrivateKeyMock) Scalar() crypto.Scalar { + return nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (sk *PrivateKeyMock) IsInterfaceNil() bool { + if sk == nil { + return true + } + return false +} + +//------KeyGenMock + +func (keyGen *KeyGenMock) GeneratePair() (crypto.PrivateKey, crypto.PublicKey) { + return &PrivateKeyMock{}, &PublicKeyMock{} +} + +func (keyGen *KeyGenMock) PrivateKeyFromByteArray(b []byte) (crypto.PrivateKey, error) { + return &PrivateKeyMock{}, nil +} + +func (keyGen *KeyGenMock) PublicKeyFromByteArray(b []byte) (crypto.PublicKey, error) { + return &PublicKeyMock{}, nil +} + +func (keyGen *KeyGenMock) Suite() crypto.Suite { + return nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (keyGen *KeyGenMock) IsInterfaceNil() bool { + if keyGen == nil { + return true + } + return false +} diff --git a/integrationTests/mock/multiSigMock.go b/integrationTests/mock/multiSigMock.go index 77b82efc5fe..8d561394dc4 100644 --- a/integrationTests/mock/multiSigMock.go +++ b/integrationTests/mock/multiSigMock.go @@ -40,10 +40,10 @@ func NewMultiSigner(nrConsens uint32) *BelNevMock { multisigner.sigs = make([][]byte, nrConsens) multisigner.pubkeys = make([]string, nrConsens) - multisigner.aggCom = []byte("commitment") - multisigner.commHash = []byte("commitment") - multisigner.commSecret = 
[]byte("commitment") - multisigner.aggSig = []byte("commitment") + multisigner.aggCom = []byte("agg commitment") + multisigner.commHash = []byte("commitment hash") + multisigner.commSecret = []byte("commitment secret") + multisigner.aggSig = []byte("aggregated signature") return multisigner } @@ -92,7 +92,11 @@ func (bnm *BelNevMock) SetAggregatedSig(aggSig []byte) error { // Verify returns nil if the aggregateed signature is verified for the given public keys func (bnm *BelNevMock) Verify(msg []byte, bitmap []byte) error { - return bnm.VerifyMock(msg, bitmap) + if bnm.VerifyMock != nil { + return bnm.VerifyMock(msg, bitmap) + } + + return nil } // CreateCommitment creates a secret commitment and the corresponding public commitment point diff --git a/integrationTests/mock/nodesCoordinatorMock.go b/integrationTests/mock/nodesCoordinatorMock.go new file mode 100644 index 00000000000..560288d4016 --- /dev/null +++ b/integrationTests/mock/nodesCoordinatorMock.go @@ -0,0 +1,91 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go/sharding" +) + +type NodesCoordinatorMock struct { + ComputeValidatorsGroupCalled func(randomness []byte, round uint64, shardId uint32) ([]sharding.Validator, error) + GetValidatorsPublicKeysCalled func(randomness []byte, round uint64, shardId uint32) ([]string, error) + GetValidatorsRewardsAddressesCalled func(randomness []byte, round uint64, shardId uint32) ([]string, error) +} + +func (ncm *NodesCoordinatorMock) ComputeValidatorsGroup( + randomness []byte, + round uint64, + shardId uint32, +) (validatorsGroup []sharding.Validator, err error) { + + if ncm.ComputeValidatorsGroupCalled != nil { + return ncm.ComputeValidatorsGroupCalled(randomness, round, shardId) + } + + list := []sharding.Validator{} + + return list, nil +} + +func (ncm *NodesCoordinatorMock) GetValidatorsPublicKeys( + randomness []byte, + round uint64, + shardId uint32, +) ([]string, error) { + if ncm.GetValidatorsPublicKeysCalled != nil { + return ncm.GetValidatorsPublicKeysCalled(randomness, round, shardId) + } + + validators, err := ncm.ComputeValidatorsGroup(randomness, round, shardId) + if err != nil { + return nil, err + } + + pubKeys := make([]string, 0) + + for _, v := range validators { + pubKeys = append(pubKeys, string(v.PubKey())) + } + + return pubKeys, nil +} + +func (ncm *NodesCoordinatorMock) GetValidatorsRewardsAddresses( + randomness []byte, + round uint64, + shardId uint32, +) ([]string, error) { + if ncm.GetValidatorsPublicKeysCalled != nil { + return ncm.GetValidatorsRewardsAddressesCalled(randomness, round, shardId) + } + + validators, err := ncm.ComputeValidatorsGroup(randomness, round, shardId) + if err != nil { + return nil, err + } + + addresses := make([]string, 0) + for _, v := range validators { + addresses = append(addresses, string(v.Address())) + } + + return addresses, nil +} + +func (ncm *NodesCoordinatorMock) SetNodesPerShards(map[uint32][]sharding.Validator) error { + return nil +} + +func (ncm *NodesCoordinatorMock) GetSelectedPublicKeys(selection []byte, shardId uint32) (publicKeys []string, err error) { + panic("implement me") +} + +func (ncm *NodesCoordinatorMock) GetValidatorWithPublicKey(publicKey []byte) (sharding.Validator, uint32, error) { + panic("implement me") +} + +// IsInterfaceNil returns true if there is no value under the interface +func (ncm *NodesCoordinatorMock) IsInterfaceNil() bool { + if ncm == nil { + return true + } + return false +} diff --git a/integrationTests/mock/specialAddressHandlerMock.go 
b/integrationTests/mock/specialAddressHandlerMock.go new file mode 100644 index 00000000000..26f70f560c8 --- /dev/null +++ b/integrationTests/mock/specialAddressHandlerMock.go @@ -0,0 +1,151 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/sharding" +) + +type SpecialAddressHandlerMock struct { + ElrondCommunityAddressCalled func() []byte + LeaderAddressCalled func() []byte + BurnAddressCalled func() []byte + ShardIdForAddressCalled func([]byte) (uint32, error) + AdrConv state.AddressConverter + ShardCoordinator sharding.Coordinator + NodesCoordinator sharding.NodesCoordinator + + shardConsensusData *data.ConsensusRewardData + metaConsensusData []*data.ConsensusRewardData +} + +func NewSpecialAddressHandlerMock( + addrConv state.AddressConverter, + shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, +) *SpecialAddressHandlerMock { + return &SpecialAddressHandlerMock{ + ElrondCommunityAddressCalled: nil, + LeaderAddressCalled: nil, + BurnAddressCalled: nil, + ShardIdForAddressCalled: nil, + AdrConv: addrConv, + ShardCoordinator: shardCoordinator, + NodesCoordinator: nodesCoordinator, + shardConsensusData: &data.ConsensusRewardData{ + Round: 0, + Epoch: 0, + Addresses: nil, + }, + metaConsensusData: make([]*data.ConsensusRewardData, 0), + } +} + +func (sh *SpecialAddressHandlerMock) SetElrondCommunityAddress(elrond []byte) { +} + +func (sh *SpecialAddressHandlerMock) SetShardConsensusData(randomness []byte, round uint64, epoch uint32, shardId uint32) error { + addresses, err := sh.NodesCoordinator.GetValidatorsRewardsAddresses(randomness, round, shardId) + if err != nil { + return err + } + + sh.shardConsensusData = &data.ConsensusRewardData{ + Round: round, + Epoch: epoch, + Addresses: addresses, + } + + return nil + +} + +func (sh *SpecialAddressHandlerMock) ConsensusShardRewardData() *data.ConsensusRewardData { + return sh.shardConsensusData +} + +func (sh *SpecialAddressHandlerMock) SetMetaConsensusData(randomness []byte, round uint64, epoch uint32) error { + if sh.metaConsensusData == nil { + sh.metaConsensusData = make([]*data.ConsensusRewardData, 0) + } + + addresses, err := sh.NodesCoordinator.GetValidatorsRewardsAddresses(randomness, round, sharding.MetachainShardId) + if err != nil { + return err + } + + sh.metaConsensusData = append(sh.metaConsensusData, &data.ConsensusRewardData{ + Round: round, + Epoch: epoch, + Addresses: addresses, + }) + + return nil + +} + +func (sh *SpecialAddressHandlerMock) ClearMetaConsensusData() { + sh.metaConsensusData = make([]*data.ConsensusRewardData, 0) +} + +func (sh *SpecialAddressHandlerMock) ConsensusMetaRewardData() []*data.ConsensusRewardData { + return sh.metaConsensusData +} + +func (sh *SpecialAddressHandlerMock) BurnAddress() []byte { + if sh.BurnAddressCalled == nil { + return []byte("burn0000000000000000000000000000") + } + + return sh.BurnAddressCalled() +} + +func (sh *SpecialAddressHandlerMock) ElrondCommunityAddress() []byte { + if sh.ElrondCommunityAddressCalled == nil { + return []byte("elrond00000000000000000000000000") + } + + return sh.ElrondCommunityAddressCalled() +} + +func (sh *SpecialAddressHandlerMock) LeaderAddress() []byte { + if sh.LeaderAddressCalled == nil { + if sh.shardConsensusData != nil && len(sh.shardConsensusData.Addresses) > 0 { + return []byte(sh.shardConsensusData.Addresses[0]) + } + return []byte("leader0000000000000000000000000000") + } + + return 
sh.LeaderAddressCalled() +} + +func (sh *SpecialAddressHandlerMock) Round() uint64 { + if sh.shardConsensusData == nil { + return 0 + } + return sh.shardConsensusData.Round +} + +func (sh *SpecialAddressHandlerMock) Epoch() uint32 { + if sh.shardConsensusData == nil { + return 0 + } + return sh.shardConsensusData.Epoch +} + +func (sh *SpecialAddressHandlerMock) ShardIdForAddress(addr []byte) (uint32, error) { + convAdr, err := sh.AdrConv.CreateAddressFromPublicKeyBytes(addr) + if err != nil { + return 0, err + } + + return sh.ShardCoordinator.ComputeId(convAdr), nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (sh *SpecialAddressHandlerMock) IsInterfaceNil() bool { + if sh == nil { + return true + } + return false +} diff --git a/integrationTests/mock/txTypeHandlerMock.go b/integrationTests/mock/txTypeHandlerMock.go new file mode 100644 index 00000000000..42b6460b56f --- /dev/null +++ b/integrationTests/mock/txTypeHandlerMock.go @@ -0,0 +1,25 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/process" +) + +type TxTypeHandlerMock struct { + ComputeTransactionTypeCalled func(tx data.TransactionHandler) (process.TransactionType, error) +} + +func (th *TxTypeHandlerMock) ComputeTransactionType(tx data.TransactionHandler) (process.TransactionType, error) { + if th.ComputeTransactionTypeCalled == nil { + return process.MoveBalance, nil + } + + return th.ComputeTransactionTypeCalled(tx) +} + +func (th *TxTypeHandlerMock) IsInterfaceNil() bool { + if th == nil { + return true + } + return false +} diff --git a/integrationTests/mock/unsignedTxHandlerMock.go b/integrationTests/mock/unsignedTxHandlerMock.go new file mode 100644 index 00000000000..7e7175bdbff --- /dev/null +++ b/integrationTests/mock/unsignedTxHandlerMock.go @@ -0,0 +1,61 @@ +package mock + +import ( + "math/big" + + "github.com/ElrondNetwork/elrond-go/data" +) + +type UnsignedTxHandlerMock struct { + CleanProcessedUtxsCalled func() + ProcessTransactionFeeCalled func(cost *big.Int) + CreateAllUTxsCalled func() []data.TransactionHandler + VerifyCreatedUTxsCalled func() error + AddTxFeeFromBlockCalled func(tx data.TransactionHandler) +} + +func (ut *UnsignedTxHandlerMock) AddRewardTxFromBlock(tx data.TransactionHandler) { + if ut.AddTxFeeFromBlockCalled == nil { + return + } + + ut.AddTxFeeFromBlockCalled(tx) +} + +func (ut *UnsignedTxHandlerMock) CleanProcessedUTxs() { + if ut.CleanProcessedUtxsCalled == nil { + return + } + + ut.CleanProcessedUtxsCalled() +} + +func (ut *UnsignedTxHandlerMock) ProcessTransactionFee(cost *big.Int) { + if ut.ProcessTransactionFeeCalled == nil { + return + } + + ut.ProcessTransactionFeeCalled(cost) +} + +func (ut *UnsignedTxHandlerMock) CreateAllUTxs() []data.TransactionHandler { + if ut.CreateAllUTxsCalled == nil { + return nil + } + return ut.CreateAllUTxsCalled() +} + +func (ut *UnsignedTxHandlerMock) VerifyCreatedUTxs() error { + if ut.VerifyCreatedUTxsCalled == nil { + return nil + } + return ut.VerifyCreatedUTxsCalled() +} + +// IsInterfaceNil returns true if there is no value under the interface +func (ut *UnsignedTxHandlerMock) IsInterfaceNil() bool { + if ut == nil { + return true + } + return false +} diff --git a/integrationTests/multiShard/block/executingMiniblocksSc_test.go b/integrationTests/multiShard/block/executingMiniblocksSc_test.go index 826e409ee66..ec6860ce474 100644 --- a/integrationTests/multiShard/block/executingMiniblocksSc_test.go +++ 
b/integrationTests/multiShard/block/executingMiniblocksSc_test.go @@ -44,6 +44,7 @@ func TestProcessWithScTxsTopUpAndWithdrawOnlyProposers(t *testing.T) { nodeMeta := integrationTests.NewTestProcessorNode(maxShards, sharding.MetachainShardId, 0, advertiserAddr) nodes := []*integrationTests.TestProcessorNode{nodeShard0, nodeShard1, nodeMeta} + idxNodeShard0 := 0 idxNodeShard1 := 1 idxNodeMeta := 2 @@ -127,18 +128,47 @@ func TestProcessWithScTxsJoinAndRewardTwoNodesInShard(t *testing.T) { advertiser := integrationTests.CreateMessengerWithKadDht(context.Background(), "") _ = advertiser.Bootstrap() advertiserAddr := integrationTests.GetConnectableAddress(advertiser) + nodeProposerShard0 := integrationTests.NewTestProcessorNode( + maxShards, + 0, + 0, + advertiserAddr, + ) + nodeValidatorShard0 := integrationTests.NewTestProcessorNode( + maxShards, + 0, + 0, + advertiserAddr, + ) - nodeProposerShard0 := integrationTests.NewTestProcessorNode(maxShards, 0, 0, advertiserAddr) - nodeValidatorShard0 := integrationTests.NewTestProcessorNode(maxShards, 0, 0, advertiserAddr) - - nodeProposerShard1 := integrationTests.NewTestProcessorNode(maxShards, 1, 1, advertiserAddr) + nodeProposerShard1 := integrationTests.NewTestProcessorNode( + maxShards, + 1, + 1, + advertiserAddr, + ) hardCodedSk, _ := hex.DecodeString("5561d28b0d89fa425bbbf9e49a018b5d1e4a462c03d2efce60faf9ddece2af06") hardCodedScResultingAddress, _ := hex.DecodeString("000000000000000001006c560111a94e434413c1cdaafbc3e1348947d1d5b3a1") nodeProposerShard1.LoadTxSignSkBytes(hardCodedSk) - nodeValidatorShard1 := integrationTests.NewTestProcessorNode(maxShards, 1, 1, advertiserAddr) + nodeValidatorShard1 := integrationTests.NewTestProcessorNode( + maxShards, + 1, + 1, + advertiserAddr, + ) - nodeProposerMeta := integrationTests.NewTestProcessorNode(maxShards, sharding.MetachainShardId, 0, advertiserAddr) - nodeValidatorMeta := integrationTests.NewTestProcessorNode(maxShards, sharding.MetachainShardId, 0, advertiserAddr) + nodeProposerMeta := integrationTests.NewTestProcessorNode( + maxShards, + sharding.MetachainShardId, + 0, + advertiserAddr, + ) + nodeValidatorMeta := integrationTests.NewTestProcessorNode( + maxShards, + sharding.MetachainShardId, + 0, + advertiserAddr, + ) nodes := []*integrationTests.TestProcessorNode{ nodeProposerShard0, diff --git a/integrationTests/multiShard/block/executingMiniblocks_test.go b/integrationTests/multiShard/block/executingMiniblocks_test.go index 23f71be4fe4..61d23cef9aa 100644 --- a/integrationTests/multiShard/block/executingMiniblocks_test.go +++ b/integrationTests/multiShard/block/executingMiniblocks_test.go @@ -30,6 +30,8 @@ func TestShouldProcessBlocksInMultiShardArchitecture(t *testing.T) { valMinting := big.NewInt(100) valToTransferPerTx := big.NewInt(2) + gasPricePerTx := uint64(2) + gasLimitPerTx := uint64(2) advertiser := integrationTests.CreateMessengerWithKadDht(context.Background(), "") _ = advertiser.Bootstrap() @@ -57,16 +59,16 @@ func TestShouldProcessBlocksInMultiShardArchitecture(t *testing.T) { //sender shard keys, receivers keys sendersPrivateKeys := make([]crypto.PrivateKey, 3) - receiversPrivateKeys := make(map[uint32][]crypto.PrivateKey) + receiversPublicKeys := make(map[uint32][]crypto.PublicKey) for i := 0; i < txToGenerateInEachMiniBlock; i++ { sendersPrivateKeys[i], _, _ = integrationTests.GenerateSkAndPkInShard(generateCoordinator, senderShard) //receivers in same shard with the sender - sk, _, _ := integrationTests.GenerateSkAndPkInShard(generateCoordinator, senderShard) - 
receiversPrivateKeys[senderShard] = append(receiversPrivateKeys[senderShard], sk) + _, pk, _ := integrationTests.GenerateSkAndPkInShard(generateCoordinator, senderShard) + receiversPublicKeys[senderShard] = append(receiversPublicKeys[senderShard], pk) //receivers in other shards for _, shardId := range recvShards { - sk, _, _ = integrationTests.GenerateSkAndPkInShard(generateCoordinator, shardId) - receiversPrivateKeys[shardId] = append(receiversPrivateKeys[shardId], sk) + _, pk, _ = integrationTests.GenerateSkAndPkInShard(generateCoordinator, shardId) + receiversPublicKeys[shardId] = append(receiversPublicKeys[shardId], pk) } } @@ -74,7 +76,14 @@ func TestShouldProcessBlocksInMultiShardArchitecture(t *testing.T) { integrationTests.CreateMintingForSenders(nodes, senderShard, sendersPrivateKeys, valMinting) fmt.Println("Generating transactions...") - integrationTests.GenerateAndDisseminateTxs(proposerNode, sendersPrivateKeys, receiversPrivateKeys, valToTransferPerTx) + integrationTests.GenerateAndDisseminateTxs( + proposerNode, + sendersPrivateKeys, + receiversPublicKeys, + valToTransferPerTx, + gasPricePerTx, + gasLimitPerTx, + ) fmt.Println("Delaying for disseminating transactions...") time.Sleep(time.Second * 5) @@ -85,6 +94,10 @@ func TestShouldProcessBlocksInMultiShardArchitecture(t *testing.T) { round, nonce = integrationTests.ProposeAndSyncOneBlock(t, nodes, idxProposers, round, nonce) } + gasPricePerTxBigInt := big.NewInt(int64(gasPricePerTx)) + gasLimitPerTxBigInt := big.NewInt(int64(gasLimitPerTx)) + gasValue := big.NewInt(0).Mul(gasPricePerTxBigInt, gasLimitPerTxBigInt) + totalValuePerTx := big.NewInt(0).Add(gasValue, valToTransferPerTx) fmt.Println("Test nodes from proposer shard to have the correct balances...") for _, n := range nodes { isNodeInSenderShard := n.ShardCoordinator.SelfId() == senderShard @@ -94,13 +107,13 @@ func TestShouldProcessBlocksInMultiShardArchitecture(t *testing.T) { //test sender balances for _, sk := range sendersPrivateKeys { - valTransferred := big.NewInt(0).Mul(valToTransferPerTx, big.NewInt(int64(len(receiversPrivateKeys)))) + valTransferred := big.NewInt(0).Mul(totalValuePerTx, big.NewInt(int64(len(receiversPublicKeys)))) valRemaining := big.NewInt(0).Sub(valMinting, valTransferred) integrationTests.TestPrivateKeyHasBalance(t, n, sk, valRemaining) } //test receiver balances from same shard - for _, sk := range receiversPrivateKeys[proposerNode.ShardCoordinator.SelfId()] { - integrationTests.TestPrivateKeyHasBalance(t, n, sk, valToTransferPerTx) + for _, pk := range receiversPublicKeys[proposerNode.ShardCoordinator.SelfId()] { + integrationTests.TestPublicKeyHasBalance(t, n, pk, valToTransferPerTx) } } @@ -118,8 +131,8 @@ func TestShouldProcessBlocksInMultiShardArchitecture(t *testing.T) { } //test receiver balances from same shard - for _, sk := range receiversPrivateKeys[n.ShardCoordinator.SelfId()] { - integrationTests.TestPrivateKeyHasBalance(t, n, sk, valToTransferPerTx) + for _, pk := range receiversPublicKeys[n.ShardCoordinator.SelfId()] { + integrationTests.TestPublicKeyHasBalance(t, n, pk, valToTransferPerTx) } } } diff --git a/integrationTests/multiShard/block/executingRewardMiniblocks_test.go b/integrationTests/multiShard/block/executingRewardMiniblocks_test.go new file mode 100644 index 00000000000..c0ab6298109 --- /dev/null +++ b/integrationTests/multiShard/block/executingRewardMiniblocks_test.go @@ -0,0 +1,365 @@ +package block + +import ( + "context" + "errors" + "fmt" + "math/big" + "testing" + "time" + + 
"github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/crypto" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/integrationTests" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/stretchr/testify/assert" +) + +func getRewardValue() uint32 { + //TODO: this should be read from protocol config + return uint32(1000) +} + +func TestExecuteBlocksWithTransactionsAndCheckRewards(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + nodesPerShard := 4 + nbMetaNodes := 2 + nbShards := 2 + consensusGroupSize := 2 + + advertiser := integrationTests.CreateMessengerWithKadDht(context.Background(), "") + _ = advertiser.Bootstrap() + + seedAddress := integrationTests.GetConnectableAddress(advertiser) + + // create map of shard - testNodeProcessors for metachain and shard chain + nodesMap := integrationTests.CreateNodesWithNodesCoordinator( + nodesPerShard, + nbMetaNodes, + nbShards, + consensusGroupSize, + consensusGroupSize, + seedAddress, + ) + + for _, nodes := range nodesMap { + integrationTests.DisplayAndStartNodes(nodes) + } + + defer func() { + _ = advertiser.Close() + for _, nodes := range nodesMap { + for _, n := range nodes { + _ = n.Node.Stop() + } + } + }() + + gasPrice := uint64(10) + gasLimit := uint64(100) + valToTransfer := big.NewInt(100) + nbTxsPerShard := uint32(100) + mintValue := big.NewInt(1000000) + + generateIntraShardTransactions(nodesMap, nbTxsPerShard, mintValue, valToTransfer, gasPrice, gasLimit) + + round := uint64(1) + nonce := uint64(1) + nbBlocksProduced := 7 + + randomness := generateInitialRandomness(uint32(nbShards)) + var headers map[uint32]data.HeaderHandler + var consensusNodes map[uint32][]*integrationTests.TestProcessorNode + mapRewardsForShardAddresses := make(map[string]uint32) + mapRewardsForMetachainAddresses := make(map[string]uint32) + nbTxsForLeaderAddress := make(map[string]uint32) + + for i := 0; i < nbBlocksProduced; i++ { + _, headers, consensusNodes, randomness = integrationTests.AllShardsProposeBlock(round, nonce, randomness, nodesMap) + + for shardId, consensusGroup := range consensusNodes { + shardRewardData := consensusGroup[0].SpecialAddressHandler.ConsensusShardRewardData() + addrRewards := shardRewardData.Addresses + updateExpectedRewards(mapRewardsForShardAddresses, addrRewards) + nbTxs := getTransactionsFromHeaderInShard(t, headers, shardId) + if len(addrRewards) > 0 { + updateNumberTransactionsProposed(t, nbTxsForLeaderAddress, addrRewards[0], nbTxs) + } + } + + updateRewardsForMetachain(mapRewardsForMetachainAddresses, consensusNodes[0][0]) + + indexesProposers := getBlockProposersIndexes(consensusNodes, nodesMap) + integrationTests.VerifyNodesHaveHeaders(t, headers, nodesMap) + integrationTests.SyncAllShardsWithRoundBlock(t, nodesMap, indexesProposers, round) + round++ + nonce++ + } + + time.Sleep(time.Second) + + verifyRewardsForShards(t, nodesMap, mapRewardsForShardAddresses, nbTxsForLeaderAddress, gasPrice, gasLimit) + verifyRewardsForMetachain(t, mapRewardsForMetachainAddresses, nodesMap) +} + +func TestExecuteBlocksWithoutTransactionsAndCheckRewards(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + nodesPerShard := 4 + nbMetaNodes := 2 + nbShards := 2 + consensusGroupSize := 2 + + advertiser := integrationTests.CreateMessengerWithKadDht(context.Background(), 
"") + _ = advertiser.Bootstrap() + + seedAddress := integrationTests.GetConnectableAddress(advertiser) + + // create map of shard - testNodeProcessors for metachain and shard chain + nodesMap := integrationTests.CreateNodesWithNodesCoordinator( + nodesPerShard, + nbMetaNodes, + nbShards, + consensusGroupSize, + consensusGroupSize, + seedAddress, + ) + + for _, nodes := range nodesMap { + integrationTests.DisplayAndStartNodes(nodes) + } + + defer func() { + _ = advertiser.Close() + for _, nodes := range nodesMap { + for _, n := range nodes { + _ = n.Node.Stop() + } + } + }() + + round := uint64(1) + nonce := uint64(1) + nbBlocksProduced := 7 + + randomness := generateInitialRandomness(uint32(nbShards)) + var headers map[uint32]data.HeaderHandler + var consensusNodes map[uint32][]*integrationTests.TestProcessorNode + mapRewardsForShardAddresses := make(map[string]uint32) + mapRewardsForMetachainAddresses := make(map[string]uint32) + nbTxsForLeaderAddress := make(map[string]uint32) + + for i := 0; i < nbBlocksProduced; i++ { + _, headers, consensusNodes, randomness = integrationTests.AllShardsProposeBlock(round, nonce, randomness, nodesMap) + + for shardId, consensusGroup := range consensusNodes { + if shardId == sharding.MetachainShardId { + continue + } + + shardRewardsData := consensusGroup[0].SpecialAddressHandler.ConsensusShardRewardData() + if shardRewardsData == nil { + shardRewardsData = &data.ConsensusRewardData{} + } + + addrRewards := shardRewardsData.Addresses + updateExpectedRewards(mapRewardsForShardAddresses, addrRewards) + } + + updateRewardsForMetachain(mapRewardsForMetachainAddresses, consensusNodes[0][0]) + + indexesProposers := getBlockProposersIndexes(consensusNodes, nodesMap) + integrationTests.VerifyNodesHaveHeaders(t, headers, nodesMap) + integrationTests.SyncAllShardsWithRoundBlock(t, nodesMap, indexesProposers, round) + round++ + nonce++ + } + + time.Sleep(time.Second) + + verifyRewardsForShards(t, nodesMap, mapRewardsForShardAddresses, nbTxsForLeaderAddress, 0, 0) + verifyRewardsForMetachain(t, mapRewardsForMetachainAddresses, nodesMap) +} + +func generateIntraShardTransactions( + nodesMap map[uint32][]*integrationTests.TestProcessorNode, + nbTxsPerShard uint32, + mintValue *big.Int, + valToTransfer *big.Int, + gasPrice uint64, + gasLimit uint64, +) { + sendersPrivateKeys := make(map[uint32][]crypto.PrivateKey) + receiversPublicKeys := make(map[uint32][]crypto.PublicKey) + + for shardId, nodes := range nodesMap { + if shardId == sharding.MetachainShardId { + continue + } + + sendersPrivateKeys[shardId], receiversPublicKeys[shardId] = integrationTests.CreateSendersAndReceiversInShard( + nodes[0], + nbTxsPerShard, + ) + + fmt.Println("Minting sender addresses...") + integrationTests.CreateMintingForSenders( + nodes, + shardId, + sendersPrivateKeys[shardId], + mintValue, + ) + } + + integrationTests.CreateAndSendTransactions( + nodesMap, + sendersPrivateKeys, + receiversPublicKeys, + gasPrice, + gasLimit, + valToTransfer, + ) +} + +func getBlockProposersIndexes( + consensusMap map[uint32][]*integrationTests.TestProcessorNode, + nodesMap map[uint32][]*integrationTests.TestProcessorNode, +) map[uint32]int { + + indexProposer := make(map[uint32]int) + + for sh, testNodeList := range nodesMap { + for k, testNode := range testNodeList { + if consensusMap[sh][0] == testNode { + indexProposer[sh] = k + } + } + } + + return indexProposer +} + +func generateInitialRandomness(nbShards uint32) map[uint32][]byte { + randomness := make(map[uint32][]byte) + + for i := uint32(0); i < 
nbShards; i++ { + randomness[i] = []byte("root hash") + } + + randomness[sharding.MetachainShardId] = []byte("root hash") + + return randomness +} + +func getTransactionsFromHeaderInShard(t *testing.T, headers map[uint32]data.HeaderHandler, shardId uint32) uint32 { + if shardId == sharding.MetachainShardId { + return 0 + } + + header, ok := headers[shardId] + if !ok { + return 0 + } + + hdr, ok := header.(*block.Header) + if !ok { + assert.Error(t, process.ErrWrongTypeAssertion) + } + + nbTxs := uint32(0) + for _, mb := range hdr.MiniBlockHeaders { + if mb.SenderShardID == shardId && mb.Type == block.TxBlock { + nbTxs += mb.TxCount + } + } + + return nbTxs +} + +func updateExpectedRewards(rewardsForAddress map[string]uint32, addresses []string) { + for i := 0; i < len(addresses); i++ { + if addresses[i] == "" { + continue + } + + rewardsForAddress[addresses[i]]++ + } +} + +func updateNumberTransactionsProposed( + t *testing.T, + transactionsForLeader map[string]uint32, + addressProposer string, + nbTransactions uint32, +) { + if addressProposer == "" { + assert.Error(t, errors.New("invalid address")) + } + + transactionsForLeader[addressProposer] += nbTransactions +} + +func updateRewardsForMetachain(rewardsMap map[string]uint32, consensusNode *integrationTests.TestProcessorNode) { + metaRewardDataSlice := consensusNode.SpecialAddressHandler.ConsensusMetaRewardData() + if len(metaRewardDataSlice) > 0 { + for _, metaRewardData := range metaRewardDataSlice { + for _, addr := range metaRewardData.Addresses { + rewardsMap[addr]++ + } + } + } +} + +func verifyRewardsForMetachain( + t *testing.T, + mapRewardsForMeta map[string]uint32, + nodes map[uint32][]*integrationTests.TestProcessorNode, +) { + rewardValue := getRewardValue() + + for metaAddr, numOfTimesRewarded := range mapRewardsForMeta { + addrContainer, _ := integrationTests.TestAddressConverter.CreateAddressFromPublicKeyBytes([]byte(metaAddr)) + acc, err := nodes[0][0].AccntState.GetExistingAccount(addrContainer) + assert.Nil(t, err) + + expectedBalance := big.NewInt(int64(numOfTimesRewarded * rewardValue)) + assert.Equal(t, expectedBalance, acc.(*state.Account).Balance) + } +} + +func verifyRewardsForShards( + t *testing.T, + nodesMap map[uint32][]*integrationTests.TestProcessorNode, + mapRewardsForAddress map[string]uint32, + nbTxsForLeaderAddress map[string]uint32, + gasPrice uint64, + gasLimit uint64, +) { + rewardValue := getRewardValue() + // TODO: fee percentage should be read from protocol config + feePerTxForLeader := gasPrice * gasLimit / 2 + + for address, nbRewards := range mapRewardsForAddress { + addrContainer, _ := integrationTests.TestAddressConverter.CreateAddressFromPublicKeyBytes([]byte(address)) + shard := nodesMap[0][0].ShardCoordinator.ComputeId(addrContainer) + + for _, shardNode := range nodesMap[shard] { + acc, err := shardNode.AccntState.GetExistingAccount(addrContainer) + assert.Nil(t, err) + + nbProposedTxs := nbTxsForLeaderAddress[address] + expectedBalance := int64(nbRewards)*int64(rewardValue) + int64(nbProposedTxs)*int64(feePerTxForLeader) + fmt.Println(fmt.Sprintf("checking account %s has balance %d", core.ToB64(acc.AddressContainer().Bytes()), expectedBalance)) + assert.Equal(t, big.NewInt(expectedBalance), acc.(*state.Account).Balance) + } + } +} diff --git a/integrationTests/multiShard/block/interceptedHeadersSigVerification_test.go b/integrationTests/multiShard/block/interceptedHeadersSigVerification_test.go new file mode 100644 index 00000000000..651638218fe --- /dev/null +++ 
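For illustration, with the values used in this test (reward value 1000, gasPrice 10, gasLimit 100), feePerTxForLeader works out to 10*100/2 = 500, so an address that was shard leader 3 times and proposed 20 transactions in those blocks is expected to hold 3*1000 + 20*500 = 13000 at verification time, assuming no other transfers touch that account.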
b/integrationTests/multiShard/block/interceptedHeadersSigVerification_test.go @@ -0,0 +1,158 @@ +package block + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go/integrationTests" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/stretchr/testify/assert" +) + +const broadcastDelay = 2 * time.Second + +func TestInterceptedShardBlockHeaderVerifiedWithCorrectConsensusGroup(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + nodesPerShard := 4 + nbMetaNodes := 4 + nbShards := 1 + consensusGroupSize := 3 + + advertiser := integrationTests.CreateMessengerWithKadDht(context.Background(), "") + _ = advertiser.Bootstrap() + + seedAddress := integrationTests.GetConnectableAddress(advertiser) + + // create map of shard - testNodeProcessors for metachain and shard chain + nodesMap := integrationTests.CreateNodesWithNodesCoordinator( + nodesPerShard, + nbMetaNodes, + nbShards, + consensusGroupSize, + consensusGroupSize, + seedAddress, + ) + + for _, nodes := range nodesMap { + integrationTests.DisplayAndStartNodes(nodes) + } + + defer func() { + _ = advertiser.Close() + for _, nodes := range nodesMap { + for _, n := range nodes { + _ = n.Node.Stop() + } + } + }() + + fmt.Println("Shard node generating header and block body...") + + // one testNodeProcessor from shard proposes block signed by all other nodes in shard consensus + randomness := []byte("random seed") + round := uint64(1) + nonce := uint64(1) + + body, header, _, _ := integrationTests.ProposeBlockWithConsensusSignature(0, nodesMap, round, nonce, randomness) + + nodesMap[0][0].BroadcastBlock(body, header) + + time.Sleep(broadcastDelay) + + headerBytes, _ := integrationTests.TestMarshalizer.Marshal(header) + headerHash := integrationTests.TestHasher.Compute(string(headerBytes)) + + // all nodes in metachain have the block header in pool as interceptor validates it + for _, metaNode := range nodesMap[sharding.MetachainShardId] { + v, ok := metaNode.MetaDataPool.ShardHeaders().Get(headerHash) + assert.True(t, ok) + assert.Equal(t, header, v) + } + + // all nodes in shard have the block in pool as interceptor validates it + for _, shardNode := range nodesMap[0] { + v, ok := shardNode.ShardDataPool.Headers().Get(headerHash) + assert.True(t, ok) + assert.Equal(t, header, v) + } +} + +func TestInterceptedMetaBlockVerifiedWithCorrectConsensusGroup(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + nodesPerShard := 4 + nbMetaNodes := 4 + nbShards := 1 + consensusGroupSize := 3 + + advertiser := integrationTests.CreateMessengerWithKadDht(context.Background(), "") + _ = advertiser.Bootstrap() + + seedAddress := integrationTests.GetConnectableAddress(advertiser) + + // create map of shard - testNodeProcessors for metachain and shard chain + nodesMap := integrationTests.CreateNodesWithNodesCoordinator( + nodesPerShard, + nbMetaNodes, + nbShards, + consensusGroupSize, + consensusGroupSize, + seedAddress, + ) + + for _, nodes := range nodesMap { + integrationTests.DisplayAndStartNodes(nodes) + } + + defer func() { + _ = advertiser.Close() + for _, nodes := range nodesMap { + for _, n := range nodes { + _ = n.Node.Stop() + } + } + }() + + fmt.Println("Metachain node Generating header and block body...") + + // one testNodeProcessor from shard proposes block signed by all other nodes in shard consensus + randomness := []byte("random seed") + round := uint64(1) + nonce := uint64(1) + + body, header, _, _ := 
integrationTests.ProposeBlockWithConsensusSignature(
+		sharding.MetachainShardId,
+		nodesMap,
+		round,
+		nonce,
+		randomness,
+	)
+
+	nodesMap[sharding.MetachainShardId][0].BroadcastBlock(body, header)
+
+	time.Sleep(broadcastDelay)
+
+	headerBytes, _ := integrationTests.TestMarshalizer.Marshal(header)
+	headerHash := integrationTests.TestHasher.Compute(string(headerBytes))
+
+	// all nodes in metachain have the metablock in pool as the interceptor validates it with the correct consensus group
+	for _, metaNode := range nodesMap[sharding.MetachainShardId] {
+		v, ok := metaNode.MetaDataPool.MetaChainBlocks().Get(headerHash)
+		assert.True(t, ok)
+		assert.Equal(t, header, v)
+	}
+
+	// all nodes in shard have the metablock in pool as the interceptor validates it with the correct consensus group
+	for _, shardNode := range nodesMap[0] {
+		v, ok := shardNode.ShardDataPool.MetaBlocks().Get(headerHash)
+		assert.True(t, ok)
+		assert.Equal(t, header, v)
+	}
+}
diff --git a/integrationTests/multiShard/smartContract/executingSCCalls_test.go b/integrationTests/multiShard/smartContract/executingSCCalls_test.go
index 46d4a8f92d9..48ae037e36c 100644
--- a/integrationTests/multiShard/smartContract/executingSCCalls_test.go
+++ b/integrationTests/multiShard/smartContract/executingSCCalls_test.go
@@ -22,7 +22,7 @@ var gasPrice = 1
 var gasLimit = 1000
 var initialValueForInternalVariable = uint64(45)
 
-func createScCallsNodes() (p2p.Messenger, []*testNode) {
+func createScCallsNodes() (p2p.Messenger, map[uint32][]*testNode) {
 	advertiser := createMessengerWithKadDht(context.Background(), "")
 	_ = advertiser.Bootstrap()
 
@@ -86,9 +86,10 @@ func haveTime() time.Duration {
 }
 
 // Test within a network of two shards the following situation
-// 1. Node in first shard deploys a smart contract -> we also make sure that the resulting smart contract address falls within the same shard
-// 2. The same account within the first shard calls the smart contract, we make sure the smart contract is updated and the gas
-// is subtracted from the caller's balance
+// 1. Node in first shard deploys a smart contract -> we also make sure that the resulting smart contract address falls
+// within the same shard
+// 2. 
The same account within the first shard calls the smart contract, we make sure the smart contract is updated and +// the gas is subtracted from the caller's balance func TestProcessSCCallsInMultiShardArchitecture_FirstShard(t *testing.T) { if testing.Short() { t.Skip("this is not a short test") @@ -102,12 +103,14 @@ func TestProcessSCCallsInMultiShardArchitecture_FirstShard(t *testing.T) { advertiser, nodes := createScCallsNodes() defer func() { _ = advertiser.Close() - for _, n := range nodes { - _ = n.node.Stop() + for _, nodeList := range nodes { + for _, n := range nodeList { + _ = n.node.Stop() + } } }() - proposerNodeShard1 := nodes[0] + proposerNodeShard1 := nodes[0][0] // delay for bootstrapping and topic announcement fmt.Println("Delaying for node bootstrap and topic announcement...") @@ -116,7 +119,7 @@ func TestProcessSCCallsInMultiShardArchitecture_FirstShard(t *testing.T) { senderAddressBytes := []byte("12345678901234567890123456789012") // Minting sender account - createMintingForSenders(nodes, senderShard, [][]byte{senderAddressBytes}, senderMintingValue) + createMintingForSenders(nodes[0], senderShard, [][]byte{senderAddressBytes}, senderMintingValue) // should deploy smart contract -> we process a block containing only the sc deployment tx deploySmartContract(t, proposerNodeShard1, generalRoundNumber, senderAddressBytes, senderNonce) @@ -181,13 +184,15 @@ func TestProcessSCCallsInMultiShardArchitecture_FirstShardReceivesCallFromSecond advertiser, nodes := createScCallsNodes() defer func() { _ = advertiser.Close() - for _, n := range nodes { - _ = n.node.Stop() + for _, nodeList := range nodes { + for _, n := range nodeList { + _ = n.node.Stop() + } } }() - proposerNodeShard1 := nodes[0] - proposerNodeShard2 := nodes[1] + proposerNodeShard1 := nodes[0][0] + proposerNodeShard2 := nodes[1][0] // delay for bootstrapping and topic announcement fmt.Println("Delaying for node bootstrap and topic announcement...") @@ -197,8 +202,8 @@ func TestProcessSCCallsInMultiShardArchitecture_FirstShardReceivesCallFromSecond secondShardAddressBytes := []byte("12345678901234567890123456789011") // Minting sender account - createMintingForSenders(nodes, senderShard, [][]byte{senderAddressBytes}, mintingValue) - createMintingForSenders(nodes, receiverShard, [][]byte{secondShardAddressBytes}, mintingValue) + createMintingForSenders(nodes[0], senderShard, [][]byte{senderAddressBytes}, mintingValue) + createMintingForSenders(nodes[1], receiverShard, [][]byte{secondShardAddressBytes}, mintingValue) // should deploy smart contract -> we process a block containing only the sc deployment tx deploySmartContract(t, proposerNodeShard1, generalRoundNumber, senderAddressBytes, senderNonce) @@ -237,10 +242,8 @@ func TestProcessSCCallsInMultiShardArchitecture_FirstShardReceivesCallFromSecond // Test again that the gas for calling the smart contract was subtracted from the sender's account acc, _ = proposerNodeShard2.node.GetAccount(hex.EncodeToString(secondShardAddressBytes)) - // TODO: Afrer fees are implemented, from mintingValue we should subtract gasLimit + fees until the other shard executes - // the smart contract and a refund can be made with the remaining value the following rounds - assert.Equal(t, mintingValue, acc.Balance) - + afterFee := big.NewInt(0).Sub(mintingValue, big.NewInt(0).SetUint64(contractCallTx.GasLimit*contractCallTx.GasPrice)) + assert.Equal(t, afterFee, acc.Balance) receiverNonce++ assert.Equal(t, receiverNonce, acc.Nonce) @@ -278,13 +281,15 @@ func 
TestProcessSCCallsInMultiShardArchitecture_FirstShardReceivesCallFromSecond advertiser, nodes := createScCallsNodes() defer func() { _ = advertiser.Close() - for _, n := range nodes { - _ = n.node.Stop() + for _, nodeList := range nodes { + for _, n := range nodeList { + _ = n.node.Stop() + } } }() - proposerNodeShardSC := nodes[0] - proposerNodeShardAccount := nodes[1] + proposerNodeShardSC := nodes[0][0] + proposerNodeShardAccount := nodes[1][0] // delay for bootstrapping and topic announcement fmt.Println("Delaying for node bootstrap and topic announcement...") @@ -294,8 +299,8 @@ func TestProcessSCCallsInMultiShardArchitecture_FirstShardReceivesCallFromSecond accountShardAddressBytes := []byte("12345678901234567890123456789011") // Minting sender account - createMintingForSenders(nodes, scShard, [][]byte{scAccountAddressBytes}, mintingValue) - createMintingForSenders(nodes, accShard, [][]byte{accountShardAddressBytes}, mintingValue) + createMintingForSenders(nodes[0], scShard, [][]byte{scAccountAddressBytes}, mintingValue) + createMintingForSenders(nodes[1], accShard, [][]byte{accountShardAddressBytes}, mintingValue) // should deploy smart contract -> we process a block containing only the sc deployment tx deploySmartContract(t, proposerNodeShardSC, generalRoundNumber, scAccountAddressBytes, accNonce) @@ -314,7 +319,7 @@ func TestProcessSCCallsInMultiShardArchitecture_FirstShardReceivesCallFromSecond scDeploymentAdddress, _ := hex.DecodeString("00000000000000000000cca1490e8cd87c767da41cdab632a7a206c5703c3132") // Update the SC account balance so we can call withdraw function - createMintingForSenders(nodes, scShard, [][]byte{scDeploymentAdddress}, mintingValue) + createMintingForSenders(nodes[0], scShard, [][]byte{scDeploymentAdddress}, mintingValue) // Now that the SC is deployed, we test a call from an account located in the second shard withdrawValue := uint64(100) @@ -328,8 +333,8 @@ func TestProcessSCCallsInMultiShardArchitecture_FirstShardReceivesCallFromSecond withdrawValue, ) - // The account shard should process this tx as MoveBalance scNonce++ + // The account shard should process this tx as MoveBalance processAndTestSmartContractCallInSender( t, contractCallTx, @@ -339,6 +344,7 @@ func TestProcessSCCallsInMultiShardArchitecture_FirstShardReceivesCallFromSecond mintingValue, scNonce, ) + generalRoundNumber++ // After second shard processed the transaction, tx should get into the first shard where the SC resides @@ -382,9 +388,8 @@ func processAndTestSmartContractCallInSender( // Test again that the gas for calling the smart contract was subtracted from the sender's account acc, _ := proposerNodeShardAccount.node.GetAccount(hex.EncodeToString(accountShardAddressBytes)) - // TODO: Afrer fees are implemented, from mintingValue we should subtract gasLimit + fees until the other shard executes - // the smart contract and a refund can be made with the remaining value the following rounds - assert.Equal(t, mintingValue, acc.Balance) + afterFee := big.NewInt(0).Sub(mintingValue, big.NewInt(0).SetUint64(contractCallTx.GasLimit*contractCallTx.GasPrice)) + assert.Equal(t, afterFee, acc.Balance) assert.Equal(t, scNonce, acc.Nonce) } @@ -443,7 +448,7 @@ func processAndTestIntermediateResults(t *testing.T, proposerNodeShardSC *testNo // - Initial balance + withdraw value - fees // TODO: Fees and gas should be taken into consideration when the fees are implemented - now we have extra money // from the gas returned since the gas was not subtracted in the first place - finalValue := 
big.NewInt(0).Add(mintingValue, big.NewInt(int64(withdrawValue+uint64(gasLimit-1*gasPrice))))
+	finalValue := big.NewInt(0).Add(mintingValue, big.NewInt(int64(withdrawValue-1)))
 	acc, _ := proposerNodeShardAccount.node.GetAccount(hex.EncodeToString(accountShardAddressBytes))
 	assert.Equal(t, finalValue, acc.Balance)
 }
diff --git a/integrationTests/multiShard/smartContract/testInitilalizer.go b/integrationTests/multiShard/smartContract/testInitilalizer.go
index d8feb868fab..90024f575a8 100644
--- a/integrationTests/multiShard/smartContract/testInitilalizer.go
+++ b/integrationTests/multiShard/smartContract/testInitilalizer.go
@@ -48,6 +48,7 @@ import (
 	"github.com/ElrondNetwork/elrond-go/process/factory"
 	metaProcess "github.com/ElrondNetwork/elrond-go/process/factory/metachain"
 	"github.com/ElrondNetwork/elrond-go/process/factory/shard"
+	"github.com/ElrondNetwork/elrond-go/process/rewardTransaction"
 	"github.com/ElrondNetwork/elrond-go/process/smartContract"
 	"github.com/ElrondNetwork/elrond-go/process/smartContract/hooks"
 	"github.com/ElrondNetwork/elrond-go/process/transaction"
@@ -55,7 +56,7 @@ import (
 	"github.com/ElrondNetwork/elrond-go/storage"
 	"github.com/ElrondNetwork/elrond-go/storage/memorydb"
 	"github.com/ElrondNetwork/elrond-go/storage/storageUnit"
-	"github.com/ElrondNetwork/elrond-vm-common"
+	vmcommon "github.com/ElrondNetwork/elrond-vm-common"
 	"github.com/btcsuite/btcd/btcec"
 	libp2pCrypto "github.com/libp2p/go-libp2p-core/crypto"
 )
@@ -102,6 +103,80 @@ type testNode struct {
 	txsRecv          int32
 }
 
+type keyPair struct {
+	sk crypto.PrivateKey
+	pk crypto.PublicKey
+}
+
+type cryptoParams struct {
+	keyGen       crypto.KeyGenerator
+	keys         map[uint32][]*keyPair
+	singleSigner crypto.SingleSigner
+}
+
+func genValidatorsFromPubKeys(pubKeysMap map[uint32][]string) map[uint32][]sharding.Validator {
+	validatorsMap := make(map[uint32][]sharding.Validator)
+
+	for shardId, shardNodesPks := range pubKeysMap {
+		shardValidators := make([]sharding.Validator, 0)
+		for i := 0; i < len(shardNodesPks); i++ {
+			v, _ := sharding.NewValidator(big.NewInt(0), 1, []byte(shardNodesPks[i]), []byte(shardNodesPks[i]))
+			shardValidators = append(shardValidators, v)
+		}
+		validatorsMap[shardId] = shardValidators
+	}
+
+	return validatorsMap
+}
+
+func createCryptoParams(nodesPerShard int, nbMetaNodes int, nbShards int) *cryptoParams {
+	suite := kyber.NewBlakeSHA256Ed25519()
+	singleSigner := &singlesig.SchnorrSigner{}
+	keyGen := signing.NewKeyGenerator(suite)
+
+	keysMap := make(map[uint32][]*keyPair)
+	for shardId := 0; shardId < nbShards; shardId++ {
+		keyPairs := make([]*keyPair, nodesPerShard)
+		for n := 0; n < nodesPerShard; n++ {
+			kp := &keyPair{}
+			kp.sk, kp.pk = keyGen.GeneratePair()
+			keyPairs[n] = kp
+		}
+		keysMap[uint32(shardId)] = keyPairs
+	}
+
+	keyPairs := make([]*keyPair, nbMetaNodes)
+	for n := 0; n < nbMetaNodes; n++ {
+		kp := &keyPair{}
+		kp.sk, kp.pk = keyGen.GeneratePair()
+		keyPairs[n] = kp
+	}
+	keysMap[sharding.MetachainShardId] = keyPairs
+
+	params := &cryptoParams{
+		keys:         keysMap,
+		keyGen:       keyGen,
+		singleSigner: singleSigner,
+	}
+
+	return params
+}
+
+func pubKeysMapFromKeysMap(keyPairMap map[uint32][]*keyPair) map[uint32][]string {
+	keysMap := make(map[uint32][]string, 0)
+
+	for shardId, pairList := range keyPairMap {
+		shardKeys := make([]string, len(pairList))
+		for i, pair := range pairList {
+			bytes, _ := pair.pk.ToByteArray()
+			shardKeys[i] = string(bytes)
+		}
+		keysMap[shardId] = shardKeys
+	}
+
+	return keysMap
+}
+
 func createTestShardChain() *blockchain.BlockChain { 
cfgCache := storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache} badBlockCache, _ := storageUnit.NewCache(cfgCache.Type, cfgCache.Size, cfgCache.Shards) @@ -132,6 +207,7 @@ func createTestShardStore(numOfShards uint32) dataRetriever.StorageService { store.AddStorer(dataRetriever.PeerChangesUnit, createMemUnit()) store.AddStorer(dataRetriever.BlockHeaderUnit, createMemUnit()) store.AddStorer(dataRetriever.UnsignedTransactionUnit, createMemUnit()) + store.AddStorer(dataRetriever.RewardTransactionUnit, createMemUnit()) store.AddStorer(dataRetriever.MetaHdrNonceHashDataUnit, createMemUnit()) for i := uint32(0); i < numOfShards; i++ { @@ -145,6 +221,7 @@ func createTestShardStore(numOfShards uint32) dataRetriever.StorageService { func createTestShardDataPool() dataRetriever.PoolsHolder { txPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache}) uTxPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache}) + rewardsTxPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache}) cacherCfg := storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache} hdrPool, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) @@ -164,6 +241,7 @@ func createTestShardDataPool() dataRetriever.PoolsHolder { dPool, _ := dataPool.NewShardedDataPool( txPool, uTxPool, + rewardsTxPool, hdrPool, hdrNonces, txBlockBody, @@ -191,12 +269,14 @@ func createNetNode( dPool dataRetriever.PoolsHolder, accntAdapter state.AccountsAdapter, shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, targetShardId uint32, initialAddr string, + params *cryptoParams, + keysIndex int, ) ( *node.Node, p2p.Messenger, - crypto.PrivateKey, dataRetriever.ResolversFinder, process.BlockProcessor, process.TransactionProcessor, @@ -206,22 +286,9 @@ func createNetNode( dataRetriever.StorageService) { messenger := createMessengerWithKadDht(context.Background(), initialAddr) - suite := kyber.NewBlakeSHA256Ed25519() - singleSigner := &singlesig.SchnorrSigner{} - keyGen := signing.NewKeyGenerator(suite) - sk, pk := keyGen.GeneratePair() - - for { - pkBytes, _ := pk.ToByteArray() - addr, _ := testAddressConverter.CreateAddressFromPublicKeyBytes(pkBytes) - if shardCoordinator.ComputeId(addr) == targetShardId { - break - } - sk, pk = keyGen.GeneratePair() - } - - pkBuff, _ := pk.ToByteArray() - fmt.Printf("Found pk: %s\n", hex.EncodeToString(pkBuff)) + keyPair := params.keys[targetShardId][keysIndex] + pkBuff, _ := keyPair.pk.ToByteArray() + fmt.Printf("pk: %s\n", hex.EncodeToString(pkBuff)) blkc := createTestShardChain() store := createTestShardStore(shardCoordinator.NumberOfShards()) @@ -231,16 +298,16 @@ func createNetNode( interceptorContainerFactory, _ := shard.NewInterceptorsContainerFactory( accntAdapter, shardCoordinator, + nodesCoordinator, messenger, store, testMarshalizer, testHasher, - keyGen, - singleSigner, + params.keyGen, + params.singleSigner, testMultiSig, dPool, testAddressConverter, - &mock.ChronologyValidatorMock{}, ) interceptorsContainer, err := interceptorContainerFactory.Create() if err != nil { @@ -258,18 +325,41 @@ func createNetNode( ) resolversContainer, _ := resolversContainerFactory.Create() resolversFinder, _ := containers.NewResolversFinder(resolversContainer, shardCoordinator) - requestHandler, _ := requestHandlers.NewShardResolverRequestHandler(resolversFinder, factory.TransactionTopic, factory.UnsignedTransactionTopic, 
factory.MiniBlocksTopic, factory.HeadersTopic, factory.MetachainBlocksTopic, 100) + requestHandler, _ := requestHandlers.NewShardResolverRequestHandler( + resolversFinder, + factory.TransactionTopic, + factory.UnsignedTransactionTopic, + factory.RewardsTransactionTopic, + factory.MiniBlocksTopic, + factory.HeadersTopic, + factory.MetachainBlocksTopic, + 100, + ) interimProcFactory, _ := shard.NewIntermediateProcessorsContainerFactory( shardCoordinator, testMarshalizer, testHasher, testAddressConverter, + mock.NewSpecialAddressHandlerMock( + testAddressConverter, + shardCoordinator, + nodesCoordinator, + ), store, + dPool, ) interimProcContainer, _ := interimProcFactory.Create() scForwarder, _ := interimProcContainer.Get(dataBlock.SmartContractResultBlock) - + rewardsInter, _ := interimProcContainer.Get(dataBlock.RewardsBlock) + rewardsHandler, _ := rewardsInter.(process.TransactionFeeHandler) + internalTxProducer, _ := rewardsInter.(process.InternalTransactionProducer) + rewardProcessor, _ := rewardTransaction.NewRewardTxProcessor( + accntAdapter, + addrConv, + shardCoordinator, + rewardsInter, + ) vm, blockChainHook := createVMAndBlockchainHook(accntAdapter) vmContainer := &mock.VMContainerMock{ GetCalled: func(key []byte) (handler vmcommon.VMExecutionHandler, e error) { @@ -286,8 +376,11 @@ func createNetNode( addrConv, shardCoordinator, scForwarder, + rewardsHandler, ) + txTypeHandler, _ := coordinator.NewTxTypeHandler(addrConv, shardCoordinator, accntAdapter) + txProcessor, _ := transaction.NewTxProcessor( accntAdapter, testHasher, @@ -295,6 +388,8 @@ func createNetNode( testMarshalizer, shardCoordinator, scProcessor, + rewardsHandler, + txTypeHandler, ) fact, _ := shard.NewPreProcessorsContainerFactory( @@ -309,6 +404,8 @@ func createNetNode( txProcessor, scProcessor, scProcessor, + rewardProcessor, + internalTxProducer, ) container, _ := fact.Create() @@ -341,10 +438,16 @@ func createNetNode( Marshalizer: testMarshalizer, Store: store, ShardCoordinator: shardCoordinator, - Uint64Converter: uint64Converter, - StartHeaders: genesisBlocks, - RequestHandler: requestHandler, - Core: &mock.ServiceContainerMock{}, + NodesCoordinator: nodesCoordinator, + SpecialAddressHandler: mock.NewSpecialAddressHandlerMock( + testAddressConverter, + shardCoordinator, + nodesCoordinator, + ), + Uint64Converter: uint64Converter, + StartHeaders: genesisBlocks, + RequestHandler: requestHandler, + Core: &mock.ServiceContainerMock{}, }, DataPool: dPool, BlocksTracker: &mock.BlocksTrackerMock{ @@ -372,14 +475,14 @@ func createNetNode( node.WithDataPool(dPool), node.WithAddressConverter(testAddressConverter), node.WithAccountsAdapter(accntAdapter), - node.WithKeyGen(keyGen), + node.WithKeyGen(params.keyGen), node.WithShardCoordinator(shardCoordinator), node.WithBlockChain(blkc), node.WithUint64ByteSliceConverter(uint64Converter), node.WithMultiSigner(testMultiSig), - node.WithSingleSigner(singleSigner), - node.WithTxSignPrivKey(sk), - node.WithTxSignPubKey(pk), + node.WithSingleSigner(params.singleSigner), + node.WithTxSignPrivKey(keyPair.sk), + node.WithTxSignPubKey(keyPair.pk), node.WithInterceptorsContainer(interceptorsContainer), node.WithResolversFinder(resolversFinder), node.WithBlockProcessor(blockProcessor), @@ -391,7 +494,7 @@ func createNetNode( fmt.Println(err.Error()) } - return n, messenger, sk, resolversFinder, blockProcessor, txProcessor, tc, scForwarder, blkc, store + return n, messenger, resolversFinder, blockProcessor, txProcessor, tc, scForwarder, blkc, store } func 
createMessengerWithKadDht(ctx context.Context, initialAddr string) p2p.Messenger { @@ -422,18 +525,20 @@ func getConnectableAddress(mes p2p.Messenger) string { return "" } -func displayAndStartNodes(nodes []*testNode) { - for _, n := range nodes { - skBuff, _ := n.sk.ToByteArray() - pkBuff, _ := n.pk.ToByteArray() +func displayAndStartNodes(nodes map[uint32][]*testNode) { + for _, nodeList := range nodes { + for _, n := range nodeList { + skBuff, _ := n.sk.ToByteArray() + pkBuff, _ := n.pk.ToByteArray() - fmt.Printf("Shard ID: %v, sk: %s, pk: %s\n", - n.shardId, - hex.EncodeToString(skBuff), - hex.EncodeToString(pkBuff), - ) - _ = n.node.Start() - _ = n.node.P2PBootstrap() + fmt.Printf("Shard ID: %v, sk: %s, pk: %s\n", + n.shardId, + hex.EncodeToString(skBuff), + hex.EncodeToString(pkBuff), + ) + _ = n.node.Start() + _ = n.node.P2PBootstrap() + } } } @@ -441,14 +546,17 @@ func createNodes( numOfShards int, nodesPerShard int, serviceID string, -) []*testNode { - +) map[uint32][]*testNode { //first node generated will have is pk belonging to firstSkShardId numMetaChainNodes := 1 - nodes := make([]*testNode, int(numOfShards)*nodesPerShard+numMetaChainNodes) + nodes := make(map[uint32][]*testNode) + cp := createCryptoParams(nodesPerShard, numMetaChainNodes, numOfShards) + keysMap := pubKeysMapFromKeysMap(cp.keys) + validatorsMap := genValidatorsFromPubKeys(keysMap) - idx := 0 for shardId := 0; shardId < numOfShards; shardId++ { + shardNodes := make([]*testNode, nodesPerShard) + for j := 0; j < nodesPerShard; j++ { testNode := &testNode{ dPool: createTestShardDataPool(), @@ -456,20 +564,33 @@ func createNodes( } shardCoordinator, _ := sharding.NewMultiShardCoordinator(uint32(numOfShards), uint32(shardId)) + nodesCoordinator, _ := sharding.NewIndexHashedNodesCoordinator( + 1, + 1, + testHasher, + uint32(shardId), + uint32(numOfShards), + validatorsMap, + ) + accntAdapter := createAccountsDB() - n, mes, sk, resFinder, blkProcessor, txProcessor, transactionCoordinator, scrForwarder, blkc, store := createNetNode( + n, mes, resFinder, blkProcessor, txProcessor, transactionCoordinator, scrForwarder, blkc, store := createNetNode( testNode.dPool, accntAdapter, shardCoordinator, + nodesCoordinator, testNode.shardId, serviceID, + cp, + j, ) _ = n.CreateShardedStores() + KeyPair := cp.keys[uint32(shardId)][j] testNode.node = n - testNode.sk = sk + testNode.sk = KeyPair.sk testNode.messenger = mes - testNode.pk = sk.GeneratePublic() + testNode.pk = KeyPair.pk testNode.resFinder = resFinder testNode.accntState = accntAdapter testNode.blkProcessor = blkProcessor @@ -505,27 +626,41 @@ func createNodes( testMarshalizer, mes, shardCoordinator, - sk, + KeyPair.sk, &singlesig.SchnorrSigner{}, ) - nodes[idx] = testNode - idx++ + shardNodes[j] = testNode } + + nodes[uint32(shardId)] = shardNodes } - shardCoordinatorMeta, _ := sharding.NewMultiShardCoordinator(uint32(numOfShards), sharding.MetachainShardId) - tn := createMetaNetNode( - createTestMetaDataPool(), - createAccountsDB(), - shardCoordinatorMeta, - serviceID, - ) + metaNodes := make([]*testNode, numMetaChainNodes) for i := 0; i < numMetaChainNodes; i++ { - idx := i + int(numOfShards)*nodesPerShard - nodes[idx] = tn + shardCoordinatorMeta, _ := sharding.NewMultiShardCoordinator(uint32(numOfShards), sharding.MetachainShardId) + nodesCoordinator, _ := sharding.NewIndexHashedNodesCoordinator( + 1, + 1, + testHasher, + sharding.MetachainShardId, + uint32(numOfShards), + validatorsMap, + ) + + metaNodes[i] = createMetaNetNode( + createTestMetaDataPool(), + 
createAccountsDB(), + shardCoordinatorMeta, + nodesCoordinator, + serviceID, + cp, + i, + ) } + nodes[sharding.MetachainShardId] = metaNodes + return nodes } @@ -579,18 +714,17 @@ func createMetaNetNode( dPool dataRetriever.MetaPoolsHolder, accntAdapter state.AccountsAdapter, shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, initialAddr string, + params *cryptoParams, + keysIndex int, ) *testNode { tn := testNode{} tn.messenger = createMessengerWithKadDht(context.Background(), initialAddr) - suite := kyber.NewBlakeSHA256Ed25519() - singleSigner := &singlesig.SchnorrSigner{} - keyGen := signing.NewKeyGenerator(suite) - sk, pk := keyGen.GeneratePair() - - pkBuff, _ := pk.ToByteArray() + keyPair := params.keys[sharding.MetachainShardId][keysIndex] + pkBuff, _ := keyPair.pk.ToByteArray() fmt.Printf("Found pk: %s\n", hex.EncodeToString(pkBuff)) tn.blkc = createTestMetaChain() @@ -599,13 +733,13 @@ func createMetaNetNode( interceptorContainerFactory, _ := metaProcess.NewInterceptorsContainerFactory( shardCoordinator, + nodesCoordinator, tn.messenger, store, testMarshalizer, testHasher, testMultiSig, dPool, - &mock.ChronologyValidatorMock{}, ) interceptorsContainer, err := interceptorContainerFactory.Create() if err != nil { @@ -642,6 +776,12 @@ func createMetaNetNode( }, }, shardCoordinator, + nodesCoordinator, + mock.NewSpecialAddressHandlerMock( + testAddressConverter, + shardCoordinator, + nodesCoordinator, + ), testHasher, testMarshalizer, store, @@ -658,8 +798,8 @@ func createMetaNetNode( testMarshalizer, tn.messenger, shardCoordinator, - sk, - singleSigner, + keyPair.sk, + params.singleSigner, ) n, err := node.NewNode( @@ -669,14 +809,14 @@ func createMetaNetNode( node.WithMetaDataPool(dPool), node.WithAddressConverter(testAddressConverter), node.WithAccountsAdapter(accntAdapter), - node.WithKeyGen(keyGen), + node.WithKeyGen(params.keyGen), node.WithShardCoordinator(shardCoordinator), node.WithBlockChain(tn.blkc), node.WithUint64ByteSliceConverter(uint64Converter), node.WithMultiSigner(testMultiSig), - node.WithSingleSigner(singleSigner), - node.WithPrivKey(sk), - node.WithPubKey(pk), + node.WithSingleSigner(params.singleSigner), + node.WithPrivKey(keyPair.sk), + node.WithPubKey(keyPair.pk), node.WithInterceptorsContainer(interceptorsContainer), node.WithResolversFinder(resolvers), node.WithBlockProcessor(tn.blkProcessor), @@ -689,8 +829,8 @@ func createMetaNetNode( } tn.node = n - tn.sk = sk - tn.pk = pk + tn.sk = keyPair.sk + tn.pk = keyPair.pk tn.accntState = accntAdapter tn.shardId = sharding.MetachainShardId diff --git a/integrationTests/node/getAccount_test.go b/integrationTests/node/getAccount_test.go index f190a56760f..4cb51e19e6f 100644 --- a/integrationTests/node/getAccount_test.go +++ b/integrationTests/node/getAccount_test.go @@ -13,7 +13,7 @@ import ( func TestNode_GetAccountAccountDoesNotExistsShouldRetEmpty(t *testing.T) { t.Parallel() - accDB, _, _ := integrationTests.CreateAccountsDB(nil) + accDB, _, _ := integrationTests.CreateAccountsDB(0) n, _ := node.NewNode( node.WithAccountsAdapter(accDB), @@ -32,7 +32,7 @@ func TestNode_GetAccountAccountDoesNotExistsShouldRetEmpty(t *testing.T) { func TestNode_GetAccountAccountExistsShouldReturn(t *testing.T) { t.Parallel() - accDB, _, _ := integrationTests.CreateAccountsDB(nil) + accDB, _, _ := integrationTests.CreateAccountsDB(0) addressHex := integrationTests.CreateRandomHexString(64) addressBytes, _ := hex.DecodeString(addressHex) diff --git 
a/integrationTests/singleShard/block/executingMiniblocksSc_test.go b/integrationTests/singleShard/block/executingMiniblocksSc_test.go index dc06459908c..b8a425f5f6a 100644 --- a/integrationTests/singleShard/block/executingMiniblocksSc_test.go +++ b/integrationTests/singleShard/block/executingMiniblocksSc_test.go @@ -38,7 +38,12 @@ func TestShouldProcessWithScTxsJoinAndRewardOneRound(t *testing.T) { nodes := make([]*integrationTests.TestProcessorNode, numOfNodes) for i := 0; i < numOfNodes; i++ { - nodes[i] = integrationTests.NewTestProcessorNode(maxShards, 0, 0, advertiserAddr) + nodes[i] = integrationTests.NewTestProcessorNode( + maxShards, + 0, + 0, + advertiserAddr, + ) } idxProposer := 0 diff --git a/integrationTests/singleShard/block/interceptedRequestHdr_test.go b/integrationTests/singleShard/block/interceptedRequestHdr_test.go index e71a1cf9d7c..735edd85e83 100644 --- a/integrationTests/singleShard/block/interceptedRequestHdr_test.go +++ b/integrationTests/singleShard/block/interceptedRequestHdr_test.go @@ -69,7 +69,7 @@ func TestNode_GenerateSendInterceptHeaderByNonceWithNetMessenger(t *testing.T) { } hdr2 := block.Header{ - Nonce: 1, + Nonce: 0, PubKeysBitmap: []byte{255, 0}, Signature: []byte("signature"), PrevHash: []byte("prev hash"), diff --git a/integrationTests/singleShard/transaction/interceptedResolvedTx_test.go b/integrationTests/singleShard/transaction/interceptedResolvedTx_test.go index ae197e8e0ff..e4cdcd8d476 100644 --- a/integrationTests/singleShard/transaction/interceptedResolvedTx_test.go +++ b/integrationTests/singleShard/transaction/interceptedResolvedTx_test.go @@ -9,6 +9,7 @@ import ( "github.com/ElrondNetwork/elrond-go/crypto" "github.com/ElrondNetwork/elrond-go/crypto/signing/kyber/singlesig" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" "github.com/ElrondNetwork/elrond-go/data/transaction" "github.com/ElrondNetwork/elrond-go/integrationTests" "github.com/ElrondNetwork/elrond-go/process" @@ -102,3 +103,83 @@ func TestNode_RequestInterceptTransactionWithMessenger(t *testing.T) { assert.Fail(t, "timeout") } } + +func TestNode_RequestInterceptRewardTransactionWithMessenger(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + var nrOfShards uint32 = 1 + var shardID uint32 = 0 + var txSignPrivKeyShardId uint32 = 0 + requesterNodeAddr := "0" + resolverNodeAddr := "1" + + fmt.Println("Requester: ") + nRequester := integrationTests.NewTestProcessorNode(nrOfShards, shardID, txSignPrivKeyShardId, requesterNodeAddr) + + fmt.Println("Resolver:") + nResolver := integrationTests.NewTestProcessorNode(nrOfShards, shardID, txSignPrivKeyShardId, resolverNodeAddr) + _ = nRequester.Node.Start() + _ = nResolver.Node.Start() + defer func() { + _ = nRequester.Node.Stop() + _ = nResolver.Node.Stop() + }() + + //connect messengers together + time.Sleep(time.Second) + err := nRequester.Messenger.ConnectToPeer(integrationTests.GetConnectableAddress(nResolver.Messenger)) + assert.Nil(t, err) + + time.Sleep(time.Second) + + //Step 1. Generate a reward Transaction + tx := rewardTx.RewardTx{ + Value: big.NewInt(0), + RcvAddr: integrationTests.TestHasher.Compute("receiver"), + Round: 0, + Epoch: 0, + ShardId: 0, + } + + marshaledTxBuff, _ := integrationTests.TestMarshalizer.Marshal(&tx) + + fmt.Printf("Transaction: %v\n%v\n", tx, string(marshaledTxBuff)) + + chanDone := make(chan bool) + + txHash := integrationTests.TestHasher.Compute(string(marshaledTxBuff)) + + //step 2. 
wire up a received handler for requester + nRequester.ShardDataPool.RewardTransactions().RegisterHandler(func(key []byte) { + rewardTxStored, _ := nRequester.ShardDataPool.RewardTransactions().ShardDataStore( + process.ShardCacherIdentifier(nRequester.ShardCoordinator.SelfId(), nRequester.ShardCoordinator.SelfId()), + ).Get(key) + + if reflect.DeepEqual(rewardTxStored, &tx) { + chanDone <- true + } + + assert.Equal(t, rewardTxStored, &tx) + assert.Equal(t, txHash, key) + }) + + //Step 3. add the transaction in resolver pool + nResolver.ShardDataPool.RewardTransactions().AddData( + txHash, + &tx, + process.ShardCacherIdentifier(nRequester.ShardCoordinator.SelfId(), nRequester.ShardCoordinator.SelfId()), + ) + + //Step 4. request tx + rewardTxResolver, _ := nRequester.ResolverFinder.IntraShardResolver(factory.RewardsTransactionTopic) + err = rewardTxResolver.RequestDataFromHash(txHash) + assert.Nil(t, err) + + select { + case <-chanDone: + case <-time.After(time.Second * 3): + assert.Fail(t, "timeout") + } +} diff --git a/integrationTests/state/genesisState_test.go b/integrationTests/state/genesisState_test.go index 34c8ba78aba..55760a98e8a 100644 --- a/integrationTests/state/genesisState_test.go +++ b/integrationTests/state/genesisState_test.go @@ -299,7 +299,7 @@ func printTestDebugLines( } func getRootHashByRunningInitialBalances(initialBalances []*sharding.InitialBalance) ([]byte, state.AccountsAdapter) { - adb, _, _ := integrationTests.CreateAccountsDB(nil) + adb, _, _ := integrationTests.CreateAccountsDB(0) uniformIndexes := make([]int, len(initialBalances)) for i := 0; i < len(initialBalances); i++ { diff --git a/integrationTests/state/stateExecTransaction_test.go b/integrationTests/state/stateExecTransaction_test.go index 3f2df6ab972..b903b2f5553 100644 --- a/integrationTests/state/stateExecTransaction_test.go +++ b/integrationTests/state/stateExecTransaction_test.go @@ -19,7 +19,7 @@ func TestExecTransaction_SelfTransactionShouldWork(t *testing.T) { t.Skip("this is not a short test") } - accnts, _, _ := integrationTests.CreateAccountsDB(nil) + accnts, _, _ := integrationTests.CreateAccountsDB(0) txProcessor := integrationTests.CreateSimpleTxProcessor(accnts) nonce := uint64(6) balance := big.NewInt(10000) @@ -28,12 +28,14 @@ func TestExecTransaction_SelfTransactionShouldWork(t *testing.T) { address := integrationTests.CreateAccount(accnts, nonce, balance) hashCreated, _ := accnts.Commit() - //Step 2. create a tx moving 1 from address to address + //Step 2. 
create a tx moving 1 from pubKeyBuff to pubKeyBuff tx := &transaction.Transaction{ - Nonce: nonce, - Value: big.NewInt(1), - SndAddr: address.Bytes(), - RcvAddr: address.Bytes(), + Nonce: nonce, + Value: big.NewInt(1), + GasLimit: 2, + GasPrice: 1, + SndAddr: address.Bytes(), + RcvAddr: address.Bytes(), } err := txProcessor.ProcessTransaction(tx, 0) @@ -42,6 +44,8 @@ func TestExecTransaction_SelfTransactionShouldWork(t *testing.T) { hashAfterExec, _ := accnts.Commit() assert.NotEqual(t, hashCreated, hashAfterExec) + balance = balance.Sub(balance, big.NewInt(0).SetUint64(tx.GasPrice*tx.GasLimit)) + accountAfterExec, _ := accnts.GetAccountWithJournal(address) assert.Equal(t, nonce+1, accountAfterExec.(*state.Account).Nonce) assert.Equal(t, balance, accountAfterExec.(*state.Account).Balance) @@ -50,8 +54,9 @@ func TestExecTransaction_SelfTransactionShouldWork(t *testing.T) { func TestExecTransaction_SelfTransactionWithRevertShouldWork(t *testing.T) { t.Parallel() - accnts, _, _ := integrationTests.CreateAccountsDB(nil) + accnts, _, _ := integrationTests.CreateAccountsDB(0) txProcessor := integrationTests.CreateSimpleTxProcessor(accnts) + nonce := uint64(6) balance := big.NewInt(10000) @@ -61,10 +66,12 @@ func TestExecTransaction_SelfTransactionWithRevertShouldWork(t *testing.T) { //Step 2. create a tx moving 1 from pubKeyBuff to pubKeyBuff tx := &transaction.Transaction{ - Nonce: nonce, - Value: big.NewInt(1), - SndAddr: address.Bytes(), - RcvAddr: address.Bytes(), + Nonce: nonce, + Value: big.NewInt(1), + SndAddr: address.Bytes(), + RcvAddr: address.Bytes(), + GasLimit: 2, + GasPrice: 2, } err := txProcessor.ProcessTransaction(tx, 0) @@ -80,7 +87,7 @@ func TestExecTransaction_SelfTransactionWithRevertShouldWork(t *testing.T) { func TestExecTransaction_MoreTransactionsWithRevertShouldWork(t *testing.T) { t.Parallel() - accnts, _, _ := integrationTests.CreateAccountsDB(nil) + accnts, _, _ := integrationTests.CreateAccountsDB(0) nonce := uint64(6) initialBalance := int64(100000) @@ -106,15 +113,20 @@ func testExecTransactionsMoreTxWithRevert( ) { txProcessor := integrationTests.CreateSimpleTxProcessor(accnts) - txToGenerate := 15000 + txToGenerate := 15000 + gasPrice := uint64(2) + gasLimit := uint64(2) + value := uint64(1) //Step 1. 
execute a lot moving transactions from pubKeyBuff to another pubKeyBuff for i := 0; i < txToGenerate; i++ { tx := &transaction.Transaction{ - Nonce: initialNonce + uint64(i), - Value: big.NewInt(1), - SndAddr: sender.Bytes(), - RcvAddr: receiver.Bytes(), + Nonce: initialNonce + uint64(i), + Value: big.NewInt(int64(value)), + GasPrice: gasPrice, + GasLimit: gasLimit, + SndAddr: sender.Bytes(), + RcvAddr: receiver.Bytes(), } err := txProcessor.ProcessTransaction(tx, 0) @@ -129,7 +141,7 @@ func testExecTransactionsMoreTxWithRevert( newAccount, _ := accnts.GetAccountWithJournal(receiver) account, _ := accnts.GetAccountWithJournal(sender) - assert.Equal(t, account.(*state.Account).Balance, big.NewInt(initialBalance-int64(txToGenerate))) + assert.Equal(t, account.(*state.Account).Balance, big.NewInt(initialBalance-int64(uint64(txToGenerate)*(gasPrice*gasLimit+value)))) assert.Equal(t, account.(*state.Account).Nonce, uint64(txToGenerate)+initialNonce) assert.Equal(t, newAccount.(*state.Account).Balance, big.NewInt(int64(txToGenerate))) @@ -161,7 +173,7 @@ func testExecTransactionsMoreTxWithRevert( func TestExecTransaction_MoreTransactionsMoreIterationsWithRevertShouldWork(t *testing.T) { t.Parallel() - accnts, _, _ := integrationTests.CreateAccountsDB(nil) + accnts, _, _ := integrationTests.CreateAccountsDB(0) nonce := uint64(6) initialBalance := int64(100000) diff --git a/integrationTests/state/stateTrie_test.go b/integrationTests/state/stateTrie_test.go index 325799355f5..33cdef8868b 100644 --- a/integrationTests/state/stateTrie_test.go +++ b/integrationTests/state/stateTrie_test.go @@ -152,7 +152,7 @@ func TestAccountsDB_GetJournalizedAccountReturnNotFoundAccntShouldWork(t *testin func TestAccountsDB_GetExistingAccountConcurrentlyShouldWork(t *testing.T) { t.Parallel() - adb, _, _ := integrationTests.CreateAccountsDB(nil) + adb, _, _ := integrationTests.CreateAccountsDB(0) wg := sync.WaitGroup{} wg.Add(2000) @@ -283,7 +283,7 @@ func TestAccountsDB_CommitTwoOkAccountsWithRecreationFromStorageShouldWork(t *te //verifies that commit saves the new tries and that can be loaded back t.Parallel() - adb, _, mu := integrationTests.CreateAccountsDB(nil) + adb, _, mu := integrationTests.CreateAccountsDB(0) adr1 := integrationTests.CreateRandomAddress() adr2 := integrationTests.CreateRandomAddress() @@ -348,7 +348,7 @@ func TestAccountsDB_CommitAnEmptyStateShouldWork(t *testing.T) { } }() - adb, _, _ := integrationTests.CreateAccountsDB(nil) + adb, _, _ := integrationTests.CreateAccountsDB(0) hash, err := adb.Commit() @@ -418,7 +418,7 @@ func TestAccountsDB_RevertNonceStepByStepAccountDataShouldWork(t *testing.T) { adr2 := integrationTests.CreateRandomAddress() //Step 1. create accounts objects - adb, _, _ := integrationTests.CreateAccountsDB(nil) + adb, _, _ := integrationTests.CreateAccountsDB(0) rootHash, err := adb.RootHash() assert.Nil(t, err) hrEmpty := base64.StdEncoding.EncodeToString(rootHash) @@ -489,7 +489,7 @@ func TestAccountsDB_RevertBalanceStepByStepAccountDataShouldWork(t *testing.T) { adr2 := integrationTests.CreateRandomAddress() //Step 1. create accounts objects - adb, _, _ := integrationTests.CreateAccountsDB(nil) + adb, _, _ := integrationTests.CreateAccountsDB(0) rootHash, err := adb.RootHash() assert.Nil(t, err) hrEmpty := base64.StdEncoding.EncodeToString(rootHash) @@ -561,7 +561,7 @@ func TestAccountsDB_RevertCodeStepByStepAccountDataShouldWork(t *testing.T) { adr2 := integrationTests.CreateRandomAddress() //Step 1. 
create accounts objects - adb, _, _ := integrationTests.CreateAccountsDB(nil) + adb, _, _ := integrationTests.CreateAccountsDB(0) rootHash, err := adb.RootHash() assert.Nil(t, err) hrEmpty := base64.StdEncoding.EncodeToString(rootHash) @@ -627,7 +627,7 @@ func TestAccountsDB_RevertDataStepByStepAccountDataShouldWork(t *testing.T) { adr2 := integrationTests.CreateRandomAddress() //Step 1. create accounts objects - adb, _, _ := integrationTests.CreateAccountsDB(nil) + adb, _, _ := integrationTests.CreateAccountsDB(0) rootHash, err := adb.RootHash() assert.Nil(t, err) hrEmpty := base64.StdEncoding.EncodeToString(rootHash) @@ -702,7 +702,7 @@ func TestAccountsDB_RevertDataStepByStepWithCommitsAccountDataShouldWork(t *test adr2 := integrationTests.CreateRandomAddress() //Step 1. create accounts objects - adb, _, _ := integrationTests.CreateAccountsDB(nil) + adb, _, _ := integrationTests.CreateAccountsDB(0) rootHash, err := adb.RootHash() assert.Nil(t, err) hrEmpty := base64.StdEncoding.EncodeToString(rootHash) @@ -797,7 +797,7 @@ func TestAccountsDB_ExecBalanceTxExecution(t *testing.T) { adrDest := integrationTests.CreateRandomAddress() //Step 1. create accounts objects - adb, _, _ := integrationTests.CreateAccountsDB(nil) + adb, _, _ := integrationTests.CreateAccountsDB(0) acntSrc, err := adb.GetAccountWithJournal(adrSrc) assert.Nil(t, err) @@ -850,7 +850,7 @@ func TestAccountsDB_ExecALotOfBalanceTxOK(t *testing.T) { adrDest := integrationTests.CreateRandomAddress() //Step 1. create accounts objects - adb, _, _ := integrationTests.CreateAccountsDB(nil) + adb, _, _ := integrationTests.CreateAccountsDB(0) acntSrc, err := adb.GetAccountWithJournal(adrSrc) assert.Nil(t, err) @@ -883,7 +883,7 @@ func TestAccountsDB_ExecALotOfBalanceTxOKorNOK(t *testing.T) { adrDest := integrationTests.CreateRandomAddress() //Step 1. create accounts objects - adb, _, _ := integrationTests.CreateAccountsDB(nil) + adb, _, _ := integrationTests.CreateAccountsDB(0) acntSrc, err := adb.GetAccountWithJournal(adrSrc) assert.Nil(t, err) @@ -1048,7 +1048,7 @@ func BenchmarkTxExecution(b *testing.B) { adrDest := integrationTests.CreateRandomAddress() //Step 1. 
create accounts objects - adb, _, _ := integrationTests.CreateAccountsDB(nil) + adb, _, _ := integrationTests.CreateAccountsDB(0) acntSrc, err := adb.GetAccountWithJournal(adrSrc) assert.Nil(b, err) diff --git a/integrationTests/sync/basicSync_test.go b/integrationTests/sync/basicSync_test.go index 8ed852a0439..18705efa9d8 100644 --- a/integrationTests/sync/basicSync_test.go +++ b/integrationTests/sync/basicSync_test.go @@ -149,8 +149,8 @@ func TestSyncWorksInShard_EmptyBlocksDoubleSign(t *testing.T) { time.Sleep(stepSync) - pubKeysVariant1 := []byte("1") - pubKeysVariant2 := []byte("2") + pubKeysVariant1 := []byte{3} + pubKeysVariant2 := []byte{1} proposeBlockWithPubKeyBitmap(nodes[idxProposerShard0], round, nonce, pubKeysVariant1) proposeBlockWithPubKeyBitmap(nodes[1], round, nonce, pubKeysVariant2) diff --git a/integrationTests/testInitializer.go b/integrationTests/testInitializer.go index f2cc989a9ce..185f77a2af2 100644 --- a/integrationTests/testInitializer.go +++ b/integrationTests/testInitializer.go @@ -31,6 +31,7 @@ import ( "github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool" "github.com/ElrondNetwork/elrond-go/dataRetriever/shardedData" "github.com/ElrondNetwork/elrond-go/display" + "github.com/ElrondNetwork/elrond-go/hashing/sha256" "github.com/ElrondNetwork/elrond-go/integrationTests/mock" "github.com/ElrondNetwork/elrond-go/node" "github.com/ElrondNetwork/elrond-go/p2p" @@ -89,29 +90,31 @@ func CreateMessengerWithKadDht(ctx context.Context, initialAddr string) p2p.Mess // CreateTestShardDataPool creates a test data pool for shard nodes func CreateTestShardDataPool(txPool dataRetriever.ShardedDataCacherNotifier) dataRetriever.PoolsHolder { if txPool == nil { - txPool, _ = shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache}) + txPool, _ = shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: 1}) } - uTxPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache}) - cacherCfg := storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache} + uTxPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: 1}) + rewardsTxPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 300, Type: storageUnit.LRUCache, Shards: 1}) + cacherCfg := storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache, Shards: 1} hdrPool, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache} + cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: 1} hdrNoncesCacher, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) hdrNonces, _ := dataPool.NewNonceSyncMapCacher(hdrNoncesCacher, uint64ByteSlice.NewBigEndianConverter()) - cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache} + cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: 1} txBlockBody, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache} + cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: 1} peerChangeBlockBody, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache} + cacherCfg = 
storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: 1} metaBlocks, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) dPool, _ := dataPool.NewShardedDataPool( txPool, uTxPool, + rewardsTxPool, hdrPool, hdrNonces, txBlockBody, @@ -165,6 +168,7 @@ func CreateShardStore(numOfShards uint32) dataRetriever.StorageService { store.AddStorer(dataRetriever.PeerChangesUnit, CreateMemUnit()) store.AddStorer(dataRetriever.BlockHeaderUnit, CreateMemUnit()) store.AddStorer(dataRetriever.UnsignedTransactionUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.RewardTransactionUnit, CreateMemUnit()) store.AddStorer(dataRetriever.MetaHdrNonceHashDataUnit, CreateMemUnit()) for i := uint32(0); i < numOfShards; i++ { @@ -189,18 +193,13 @@ func CreateMetaStore(coordinator sharding.Coordinator) dataRetriever.StorageServ } // CreateAccountsDB creates an account state with a valid trie implementation but with a memory storage -func CreateAccountsDB(shardCoordinator sharding.Coordinator) (*state.AccountsDB, data.Trie, storage.Storer) { - - var accountFactory state.AccountFactory - if shardCoordinator == nil { - accountFactory = factory.NewAccountCreator() - } else { - accountFactory, _ = factory.NewAccountFactoryCreator(shardCoordinator) - } - +func CreateAccountsDB(accountType factory.Type) (*state.AccountsDB, data.Trie, storage.Storer) { + hasher := sha256.Sha256{} store := CreateMemUnit() - tr, _ := trie.NewTrie(store, TestMarshalizer, TestHasher) - adb, _ := state.NewAccountsDB(tr, TestHasher, TestMarshalizer, accountFactory) + + tr, _ := trie.NewTrie(store, TestMarshalizer, hasher) + accountFactory, _ := factory.NewAccountFactoryCreator(accountType) + adb, _ := state.NewAccountsDB(tr, sha256.Sha256{}, TestMarshalizer, accountFactory) return adb, tr, store } @@ -368,7 +367,7 @@ func CreateRandomHexString(chars int) string { // GenerateAddressJournalAccountAccountsDB returns an account, the accounts address, and the accounts database func GenerateAddressJournalAccountAccountsDB() (state.AddressContainer, state.AccountHandler, *state.AccountsDB) { adr := CreateRandomAddress() - adb, _, _ := CreateAccountsDB(nil) + adb, _, _ := CreateAccountsDB(factory.UserAccount) account, _ := state.NewAccount(adr, adb) return adr, account, adb @@ -423,7 +422,16 @@ func AdbEmulateBalanceTxExecution(acntSrc, acntDest *state.Account, value *big.I // CreateSimpleTxProcessor returns a transaction processor func CreateSimpleTxProcessor(accnts state.AccountsAdapter) process.TransactionProcessor { shardCoordinator := mock.NewMultiShardsCoordinatorMock(1) - txProcessor, _ := txProc.NewTxProcessor(accnts, TestHasher, TestAddressConverter, TestMarshalizer, shardCoordinator, &mock.SCProcessorMock{}) + txProcessor, _ := txProc.NewTxProcessor( + accnts, + TestHasher, + TestAddressConverter, + TestMarshalizer, + shardCoordinator, + &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, + ) return txProcessor } @@ -496,7 +504,12 @@ func IncrementAndPrintRound(round uint64) uint64 { // ProposeBlock proposes a block for every shard func ProposeBlock(nodes []*TestProcessorNode, idxProposers []int, round uint64, nonce uint64) { fmt.Println("All shards propose blocks...") + for idx, n := range nodes { + // set the consensus reward addresses as rewards processor expects at least valid round + // otherwise the produced rewards will not be valid on verification + n.BlockProcessor.SetConsensusData([]byte("randomness"), round, 0, n.ShardCoordinator.SelfId()) + if 
!IsIntInSlice(idx, idxProposers) { continue } @@ -660,9 +673,9 @@ func CreateNodes( nodes := make([]*TestProcessorNode, numOfShards*nodesPerShard+numMetaChainNodes) idx := 0 - for shardId := 0; shardId < numOfShards; shardId++ { + for shardId := uint32(0); shardId < uint32(numOfShards); shardId++ { for j := 0; j < nodesPerShard; j++ { - n := NewTestProcessorNode(uint32(numOfShards), uint32(shardId), uint32(shardId), serviceID) + n := NewTestProcessorNode(uint32(numOfShards), shardId, shardId, serviceID) nodes[idx] = n idx++ @@ -671,7 +684,7 @@ func CreateNodes( for i := 0; i < numMetaChainNodes; i++ { metaNode := NewTestProcessorNode(uint32(numOfShards), sharding.MetachainShardId, 0, serviceID) - idx := i + numOfShards*nodesPerShard + idx = i + numOfShards*nodesPerShard nodes[idx] = metaNode } @@ -701,27 +714,20 @@ func DisplayAndStartNodes(nodes []*TestProcessorNode) { func GenerateAndDisseminateTxs( n *TestProcessorNode, senders []crypto.PrivateKey, - receiversPrivateKeys map[uint32][]crypto.PrivateKey, + receiversPublicKeysMap map[uint32][]crypto.PublicKey, valToTransfer *big.Int, + gasPrice uint64, + gasLimit uint64, ) { for i := 0; i < len(senders); i++ { senderKey := senders[i] - incrementalNonce := uint64(0) - for _, recvPrivateKeys := range receiversPrivateKeys { - receiverKey := recvPrivateKeys[i] - tx := generateTx( - senderKey, - n.OwnAccount.SingleSigner, - &txArgs{ - nonce: incrementalNonce, - value: valToTransfer, - rcvAddr: skToPk(receiverKey), - sndAddr: skToPk(senderKey), - }, - ) + incrementalNonce := make([]uint64, len(senders)) + for _, shardReceiversPublicKeys := range receiversPublicKeysMap { + receiverPubKey := shardReceiversPublicKeys[i] + tx := generateTransferTx(incrementalNonce[i], senderKey, receiverPubKey, valToTransfer, gasPrice, gasLimit) _, _ = n.SendTransaction(tx) - incrementalNonce++ + incrementalNonce[i]++ } } } @@ -736,6 +742,32 @@ type txArgs struct { gasLimit int } +func generateTransferTx( + nonce uint64, + senderPrivateKey crypto.PrivateKey, + receiverPublicKey crypto.PublicKey, + valToTransfer *big.Int, + gasPrice uint64, + gasLimit uint64, +) *transaction.Transaction { + + receiverPubKeyBytes, _ := receiverPublicKey.ToByteArray() + tx := transaction.Transaction{ + Nonce: nonce, + Value: valToTransfer, + RcvAddr: receiverPubKeyBytes, + SndAddr: skToPk(senderPrivateKey), + Data: "", + GasLimit: gasLimit, + GasPrice: gasPrice, + } + txBuff, _ := TestMarshalizer.Marshal(&tx) + signer := &singlesig.SchnorrSigner{} + tx.Signature, _ = signer.Sign(senderPrivateKey, txBuff) + + return &tx +} + func generateTx( skSign crypto.PrivateKey, signer crypto.SingleSigner, @@ -761,6 +793,14 @@ func skToPk(sk crypto.PrivateKey) []byte { return pkBuff } +// TestPublicKeyHasBalance checks if the account corresponding to the given public key has the expected balance +func TestPublicKeyHasBalance(t *testing.T, n *TestProcessorNode, pk crypto.PublicKey, expectedBalance *big.Int) { + pkBuff, _ := pk.ToByteArray() + addr, _ := TestAddressConverter.CreateAddressFromPublicKeyBytes(pkBuff) + account, _ := n.AccntState.GetExistingAccount(addr) + assert.Equal(t, expectedBalance, account.(*state.Account).Balance) +} + // TestPrivateKeyHasBalance checks if the private key has the expected balance func TestPrivateKeyHasBalance(t *testing.T, n *TestProcessorNode, sk crypto.PrivateKey, expectedBalance *big.Int) { pkBuff, _ := sk.GeneratePublic().ToByteArray() @@ -795,6 +835,11 @@ func GenerateSkAndPkInShard( keyGen := signing.NewKeyGenerator(suite) sk, pk := keyGen.GeneratePair() + 
if shardId == sharding.MetachainShardId { + // for metachain generate in shard 0 + shardId = 0 + } + for { pkBytes, _ := pk.ToByteArray() addr, _ := TestAddressConverter.CreateAddressFromPublicKeyBytes(pkBytes) @@ -807,6 +852,55 @@ func GenerateSkAndPkInShard( return sk, pk, keyGen } +// CreateSendersAndReceiversInShard creates given number of sender private key and receiver public key pairs, +// with account in same shard as given node +func CreateSendersAndReceiversInShard( + nodeInShard *TestProcessorNode, + nbSenderReceiverPairs uint32, +) ([]crypto.PrivateKey, []crypto.PublicKey) { + shardId := nodeInShard.ShardCoordinator.SelfId() + receiversPublicKeys := make([]crypto.PublicKey, nbSenderReceiverPairs) + sendersPrivateKeys := make([]crypto.PrivateKey, nbSenderReceiverPairs) + + for i := uint32(0); i < nbSenderReceiverPairs; i++ { + sendersPrivateKeys[i], _, _ = GenerateSkAndPkInShard(nodeInShard.ShardCoordinator, shardId) + _, receiversPublicKeys[i], _ = GenerateSkAndPkInShard(nodeInShard.ShardCoordinator, shardId) + } + + return sendersPrivateKeys, receiversPublicKeys +} + +// CreateAndSendTransactions creates and sends transactions between given senders and receivers. +func CreateAndSendTransactions( + nodes map[uint32][]*TestProcessorNode, + sendersPrivKeysMap map[uint32][]crypto.PrivateKey, + receiversPubKeysMap map[uint32][]crypto.PublicKey, + gasPricePerTx uint64, + gasLimitPerTx uint64, + valueToTransfer *big.Int, +) { + for shardId := range nodes { + if shardId == sharding.MetachainShardId { + continue + } + + nodeInShard := nodes[shardId][0] + + fmt.Println("Generating transactions...") + GenerateAndDisseminateTxs( + nodeInShard, + sendersPrivKeysMap[shardId], + receiversPubKeysMap, + valueToTransfer, + gasPricePerTx, + gasLimitPerTx, + ) + } + + fmt.Println("Delaying for disseminating transactions...") + time.Sleep(time.Second * 5) +} + // CreateMintingForSenders creates account with balances for every node in a given shard func CreateMintingForSenders( nodes []*TestProcessorNode, @@ -895,13 +989,42 @@ func ComputeAndRequestMissingTransactions( } } +// ComputeAndRequestMissingRewardTxs computes the missing reward transactions for each node and requests them +func ComputeAndRequestMissingRewardTxs( + nodes []*TestProcessorNode, + generatedDataHashes [][]byte, + shardResolver uint32, + shardRequesters ...uint32, +) { + for _, n := range nodes { + if !Uint32InSlice(n.ShardCoordinator.SelfId(), shardRequesters) { + continue + } + + neededData := getMissingRewardTxsForNode(n, generatedDataHashes) + requestMissingRewardTxs(n, shardResolver, neededData) + } +} + func getMissingTxsForNode(n *TestProcessorNode, generatedTxHashes [][]byte) [][]byte { neededTxs := make([][]byte, 0) for i := 0; i < len(generatedTxHashes); i++ { _, ok := n.ShardDataPool.Transactions().SearchFirstData(generatedTxHashes[i]) if !ok { - //tx is still missing + neededTxs = append(neededTxs, generatedTxHashes[i]) + } + } + + return neededTxs +} + +func getMissingRewardTxsForNode(n *TestProcessorNode, generatedTxHashes [][]byte) [][]byte { + neededTxs := make([][]byte, 0) + + for i := 0; i < len(generatedTxHashes); i++ { + _, ok := n.ShardDataPool.RewardTransactions().SearchFirstData(generatedTxHashes[i]) + if !ok { neededTxs = append(neededTxs, generatedTxHashes[i]) } } @@ -917,41 +1040,42 @@ func requestMissingTransactions(n *TestProcessorNode, shardResolver uint32, need } } +func requestMissingRewardTxs(n *TestProcessorNode, shardResolver uint32, neededData [][]byte) { + dataResolver, _ := 
n.ResolverFinder.CrossShardResolver(procFactory.RewardsTransactionTopic, shardResolver) + + for i := 0; i < len(neededData); i++ { + _ = dataResolver.RequestDataFromHash(neededData[i]) + } +} + // CreateRequesterDataPool creates a datapool with a mock txPool -func CreateRequesterDataPool( - t *testing.T, - recvTxs map[int]map[string]struct{}, - mutRecvTxs *sync.Mutex, - nodeIndex int, -) dataRetriever.PoolsHolder { +func CreateRequesterDataPool(t *testing.T, recvTxs map[int]map[string]struct{}, mutRecvTxs *sync.Mutex, nodeIndex int) dataRetriever.PoolsHolder { //not allowed to request data from the same shard - return CreateTestShardDataPool( - &mock.ShardedDataStub{ - SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { - assert.Fail(t, "same-shard requesters should not be queried") - return nil, false - }, - ShardDataStoreCalled: func(cacheId string) (c storage.Cacher) { - assert.Fail(t, "same-shard requesters should not be queried") - return nil - }, - AddDataCalled: func(key []byte, data interface{}, cacheId string) { - mutRecvTxs.Lock() - defer mutRecvTxs.Unlock() - - txMap := recvTxs[nodeIndex] - if txMap == nil { - txMap = make(map[string]struct{}) - recvTxs[nodeIndex] = txMap - } + return CreateTestShardDataPool(&mock.ShardedDataStub{ + SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { + assert.Fail(t, "same-shard requesters should not be queried") + return nil, false + }, + ShardDataStoreCalled: func(cacheId string) (c storage.Cacher) { + assert.Fail(t, "same-shard requesters should not be queried") + return nil + }, + AddDataCalled: func(key []byte, data interface{}, cacheId string) { + mutRecvTxs.Lock() + defer mutRecvTxs.Unlock() + + txMap := recvTxs[nodeIndex] + if txMap == nil { + txMap = make(map[string]struct{}) + recvTxs[nodeIndex] = txMap + } - txMap[string(key)] = struct{}{} - }, - RegisterHandlerCalled: func(i func(key []byte)) { - }, + txMap[string(key)] = struct{}{} }, - ) + RegisterHandlerCalled: func(i func(key []byte)) { + }, + }) } // CreateResolversDataPool creates a datapool containing a given number of transactions @@ -991,7 +1115,7 @@ func generateValidTx( _, pkRecv, _ := GenerateSkAndPkInShard(shardCoordinator, receiverShardId) pkRecvBuff, _ := pkRecv.ToByteArray() - accnts, _, _ := CreateAccountsDB(shardCoordinator) + accnts, _, _ := CreateAccountsDB(factory.UserAccount) addrSender, _ := TestAddressConverter.CreateAddressFromPublicKeyBytes(pkSenderBuff) _, _ = accnts.GetAccountWithJournal(addrSender) _, _ = accnts.Commit() @@ -1116,3 +1240,75 @@ func WaitForBootstrapAndShowConnected(peers []p2p.Messenger, durationBootstrapin fmt.Printf("Peer %s is connected to %d peers\n", peer.ID().Pretty(), len(peer.ConnectedPeers())) } } + +// PubKeysMapFromKeysMap returns a map of public keys per shard from the key pairs per shard map. 
+func PubKeysMapFromKeysMap(keyPairMap map[uint32][]*TestKeyPair) map[uint32][]string { + keysMap := make(map[uint32][]string, 0) + + for shardId, pairList := range keyPairMap { + shardKeys := make([]string, len(pairList)) + for i, pair := range pairList { + b, _ := pair.Pk.ToByteArray() + shardKeys[i] = string(b) + } + keysMap[shardId] = shardKeys + } + + return keysMap +} + +// GenValidatorsFromPubKeys generates a map of validators per shard out of public keys map +func GenValidatorsFromPubKeys(pubKeysMap map[uint32][]string, nbShards uint32) map[uint32][]sharding.Validator { + validatorsMap := make(map[uint32][]sharding.Validator) + + for shardId, shardNodesPks := range pubKeysMap { + shardValidators := make([]sharding.Validator, 0) + shardCoordinator, _ := sharding.NewMultiShardCoordinator(nbShards, shardId) + for i := 0; i < len(shardNodesPks); i++ { + _, pk, _ := GenerateSkAndPkInShard(shardCoordinator, shardId) + address, err := pk.ToByteArray() + if err != nil { + return nil + } + v, _ := sharding.NewValidator(big.NewInt(0), 1, []byte(shardNodesPks[i]), address) + shardValidators = append(shardValidators, v) + } + validatorsMap[shardId] = shardValidators + } + + return validatorsMap +} + +// CreateCryptoParams generates the crypto parameters (key pairs, key generator and suite) for multiple nodes +func CreateCryptoParams(nodesPerShard int, nbMetaNodes int, nbShards uint32) *CryptoParams { + suite := kyber.NewSuitePairingBn256() + singleSigner := &singlesig.SchnorrSigner{} + keyGen := signing.NewKeyGenerator(suite) + + keysMap := make(map[uint32][]*TestKeyPair) + keyPairs := make([]*TestKeyPair, nodesPerShard) + for shardId := uint32(0); shardId < nbShards; shardId++ { + for n := 0; n < nodesPerShard; n++ { + kp := &TestKeyPair{} + kp.Sk, kp.Pk = keyGen.GeneratePair() + keyPairs[n] = kp + } + keysMap[shardId] = keyPairs + } + + keyPairs = make([]*TestKeyPair, nbMetaNodes) + for n := 0; n < nbMetaNodes; n++ { + kp := &TestKeyPair{} + kp.Sk, kp.Pk = keyGen.GeneratePair() + keyPairs[n] = kp + } + keysMap[sharding.MetachainShardId] = keyPairs + + params := &CryptoParams{ + Keys: keysMap, + KeyGen: keyGen, + SingleSigner: singleSigner, + } + + return params +} diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index 9b8f138e970..860d15a6e0e 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -11,6 +11,7 @@ import ( "github.com/ElrondNetwork/elrond-go/consensus/spos/sposFactory" "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/core/partitioning" + "github.com/ElrondNetwork/elrond-go/crypto" "github.com/ElrondNetwork/elrond-go/data" dataBlock "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/data/state" @@ -34,6 +35,7 @@ import ( "github.com/ElrondNetwork/elrond-go/process/factory" metaProcess "github.com/ElrondNetwork/elrond-go/process/factory/metachain" "github.com/ElrondNetwork/elrond-go/process/factory/shard" + "github.com/ElrondNetwork/elrond-go/process/rewardTransaction" "github.com/ElrondNetwork/elrond-go/process/smartContract" "github.com/ElrondNetwork/elrond-go/process/smartContract/hooks" "github.com/ElrondNetwork/elrond-go/process/transaction" @@ -57,13 +59,29 @@ var TestMultiSig = mock.NewMultiSigner(1) // TestUint64Converter represents an uint64 to byte slice converter var TestUint64Converter = uint64ByteSlice.NewBigEndianConverter() +// TestKeyPair holds a pair of private/public Keys +type TestKeyPair struct { + Sk 
crypto.PrivateKey + Pk crypto.PublicKey +} + +//CryptoParams holds crypto parametres +type CryptoParams struct { + KeyGen crypto.KeyGenerator + Keys map[uint32][]*TestKeyPair + SingleSigner crypto.SingleSigner +} + // TestProcessorNode represents a container type of class used in integration tests // with all its fields exported type TestProcessorNode struct { - ShardCoordinator sharding.Coordinator - Messenger p2p.Messenger + ShardCoordinator sharding.Coordinator + NodesCoordinator sharding.NodesCoordinator + SpecialAddressHandler process.SpecialAddressHandler + Messenger p2p.Messenger OwnAccount *TestWalletAccount + NodeKeys *TestKeyPair ShardDataPool dataRetriever.PoolsHolder MetaDataPool dataRetriever.MetaPoolsHolder @@ -86,6 +104,7 @@ type TestProcessorNode struct { BlockchainHook vmcommon.BlockchainHook ArgsParser process.ArgumentsParser ScProcessor process.SmartContractProcessor + RewardsProcessor process.RewardTransactionProcessor PreProcessorsContainer process.PreProcessorsContainer ForkDetector process.ForkDetector @@ -95,6 +114,8 @@ type TestProcessorNode struct { Bootstrapper process.Bootstrapper Rounder *mock.RounderMock + MultiSigner crypto.MultiSigner + //Node is used to call the functionality already implemented in it Node *node.Node ScDataGetter external.ScDataGetter @@ -105,16 +126,31 @@ type TestProcessorNode struct { CounterMetaRcv int32 } -// NewTestProcessorNode returns a new TestProcessorNode instance without sync capabilities -func NewTestProcessorNode(maxShards uint32, nodeShardId uint32, txSignPrivKeyShardId uint32, initialNodeAddr string) *TestProcessorNode { +// NewTestProcessorNode returns a new TestProcessorNode instance +func NewTestProcessorNode( + maxShards uint32, + nodeShardId uint32, + txSignPrivKeyShardId uint32, + initialNodeAddr string, +) *TestProcessorNode { + shardCoordinator, _ := sharding.NewMultiShardCoordinator(maxShards, nodeShardId) + nodesCoordinator := &mock.NodesCoordinatorMock{} + kg := &mock.KeyGenMock{} + sk, pk := kg.GeneratePair() messenger := CreateMessengerWithKadDht(context.Background(), initialNodeAddr) tpn := &TestProcessorNode{ ShardCoordinator: shardCoordinator, Messenger: messenger, + NodesCoordinator: nodesCoordinator, } + tpn.NodeKeys = &TestKeyPair{ + Sk: sk, + Pk: pk, + } + tpn.MultiSigner = TestMultiSig tpn.OwnAccount = CreateTestWalletAccount(shardCoordinator, txSignPrivKeyShardId) tpn.initDataPools() tpn.initTestNode() @@ -127,11 +163,21 @@ func NewTestProcessorNodeWithCustomDataPool(maxShards uint32, nodeShardId uint32 shardCoordinator, _ := sharding.NewMultiShardCoordinator(maxShards, nodeShardId) messenger := CreateMessengerWithKadDht(context.Background(), initialNodeAddr) + nodesCoordinator := &mock.NodesCoordinatorMock{} + kg := &mock.KeyGenMock{} + sk, pk := kg.GeneratePair() + tpn := &TestProcessorNode{ ShardCoordinator: shardCoordinator, Messenger: messenger, + NodesCoordinator: nodesCoordinator, } + tpn.NodeKeys = &TestKeyPair{ + Sk: sk, + Pk: pk, + } + tpn.MultiSigner = TestMultiSig tpn.OwnAccount = CreateTestWalletAccount(shardCoordinator, txSignPrivKeyShardId) if tpn.ShardCoordinator.SelfId() != sharding.MetachainShardId { tpn.ShardDataPool = dPool @@ -144,9 +190,13 @@ func NewTestProcessorNodeWithCustomDataPool(maxShards uint32, nodeShardId uint32 } func (tpn *TestProcessorNode) initTestNode() { - tpn.initRounder() + tpn.SpecialAddressHandler = mock.NewSpecialAddressHandlerMock( + TestAddressConverter, + tpn.ShardCoordinator, + tpn.NodesCoordinator, + ) tpn.initStorage() - tpn.AccntState, _, _ = 
CreateAccountsDB(tpn.ShardCoordinator) + tpn.AccntState, _, _ = CreateAccountsDB(0) tpn.initChainHandler() tpn.GenesisBlocks = CreateGenesisBlocks(tpn.ShardCoordinator) tpn.initInterceptors() @@ -174,10 +224,6 @@ func (tpn *TestProcessorNode) initDataPools() { } } -func (tpn *TestProcessorNode) initRounder() { - tpn.Rounder = &mock.RounderMock{} -} - func (tpn *TestProcessorNode) initStorage() { if tpn.ShardCoordinator.SelfId() == sharding.MetachainShardId { tpn.Storage = CreateMetaStore(tpn.ShardCoordinator) @@ -199,13 +245,13 @@ func (tpn *TestProcessorNode) initInterceptors() { if tpn.ShardCoordinator.SelfId() == sharding.MetachainShardId { interceptorContainerFactory, _ := metaProcess.NewInterceptorsContainerFactory( tpn.ShardCoordinator, + tpn.NodesCoordinator, tpn.Messenger, tpn.Storage, TestMarshalizer, TestHasher, TestMultiSig, tpn.MetaDataPool, - &mock.ChronologyValidatorMock{}, ) tpn.InterceptorsContainer, err = interceptorContainerFactory.Create() @@ -216,6 +262,7 @@ func (tpn *TestProcessorNode) initInterceptors() { interceptorContainerFactory, _ := shard.NewInterceptorsContainerFactory( tpn.AccntState, tpn.ShardCoordinator, + tpn.NodesCoordinator, tpn.Messenger, tpn.Storage, TestMarshalizer, @@ -225,7 +272,6 @@ func (tpn *TestProcessorNode) initInterceptors() { TestMultiSig, tpn.ShardDataPool, TestAddressConverter, - &mock.ChronologyValidatorMock{}, ) tpn.InterceptorsContainer, err = interceptorContainerFactory.Create() @@ -272,6 +318,7 @@ func (tpn *TestProcessorNode) initResolvers() { tpn.ResolverFinder, factory.TransactionTopic, factory.UnsignedTransactionTopic, + factory.RewardsTransactionTopic, factory.MiniBlocksTopic, factory.HeadersTopic, factory.MetachainBlocksTopic, @@ -290,10 +337,23 @@ func (tpn *TestProcessorNode) initInnerProcessors() { TestMarshalizer, TestHasher, TestAddressConverter, + tpn.SpecialAddressHandler, tpn.Storage, + tpn.ShardDataPool, ) + tpn.InterimProcContainer, _ = interimProcFactory.Create() tpn.ScrForwarder, _ = tpn.InterimProcContainer.Get(dataBlock.SmartContractResultBlock) + rewardsInter, _ := tpn.InterimProcContainer.Get(dataBlock.RewardsBlock) + rewardsHandler, _ := rewardsInter.(process.TransactionFeeHandler) + internalTxProducer, _ := rewardsInter.(process.InternalTransactionProducer) + + tpn.RewardsProcessor, _ = rewardTransaction.NewRewardTxProcessor( + tpn.AccntState, + TestAddressConverter, + tpn.ShardCoordinator, + rewardsInter, + ) tpn.VmProcessor, tpn.BlockchainHook = CreateIeleVMAndBlockchainHook(tpn.AccntState) tpn.VmDataGetter, _ = CreateIeleVMAndBlockchainHook(tpn.AccntState) @@ -314,8 +374,11 @@ func (tpn *TestProcessorNode) initInnerProcessors() { TestAddressConverter, tpn.ShardCoordinator, tpn.ScrForwarder, + rewardsHandler, ) + txTypeHandler, _ := coordinator.NewTxTypeHandler(TestAddressConverter, tpn.ShardCoordinator, tpn.AccntState) + tpn.TxProcessor, _ = transaction.NewTxProcessor( tpn.AccntState, TestHasher, @@ -323,6 +386,8 @@ func (tpn *TestProcessorNode) initInnerProcessors() { TestMarshalizer, tpn.ShardCoordinator, tpn.ScProcessor, + rewardsHandler, + txTypeHandler, ) fact, _ := shard.NewPreProcessorsContainerFactory( @@ -337,6 +402,8 @@ func (tpn *TestProcessorNode) initInnerProcessors() { tpn.TxProcessor, tpn.ScProcessor, tpn.ScProcessor.(process.SmartContractResultProcessor), + tpn.RewardsProcessor, + internalTxProducer, ) tpn.PreProcessorsContainer, _ = fact.Create() @@ -383,6 +450,8 @@ func (tpn *TestProcessorNode) initBlockProcessor() { tpn.MetaDataPool, tpn.ForkDetector, tpn.ShardCoordinator, + 
tpn.NodesCoordinator, + tpn.SpecialAddressHandler, TestHasher, TestMarshalizer, tpn.Storage, @@ -393,16 +462,18 @@ func (tpn *TestProcessorNode) initBlockProcessor() { } else { arguments := block.ArgShardProcessor{ ArgBaseProcessor: &block.ArgBaseProcessor{ - Accounts: tpn.AccntState, - ForkDetector: tpn.ForkDetector, - Hasher: TestHasher, - Marshalizer: TestMarshalizer, - Store: tpn.Storage, - ShardCoordinator: tpn.ShardCoordinator, - Uint64Converter: TestUint64Converter, - StartHeaders: tpn.GenesisBlocks, - RequestHandler: tpn.RequestHandler, - Core: nil, + Accounts: tpn.AccntState, + ForkDetector: tpn.ForkDetector, + Hasher: TestHasher, + Marshalizer: TestMarshalizer, + Store: tpn.Storage, + ShardCoordinator: tpn.ShardCoordinator, + NodesCoordinator: tpn.NodesCoordinator, + SpecialAddressHandler: tpn.SpecialAddressHandler, + Uint64Converter: TestUint64Converter, + StartHeaders: tpn.GenesisBlocks, + RequestHandler: tpn.RequestHandler, + Core: nil, }, DataPool: tpn.ShardDataPool, BlocksTracker: tpn.BlockTracker, @@ -437,12 +508,15 @@ func (tpn *TestProcessorNode) initNode() { node.WithAccountsAdapter(tpn.AccntState), node.WithKeyGen(tpn.OwnAccount.KeygenTxSign), node.WithShardCoordinator(tpn.ShardCoordinator), + node.WithNodesCoordinator(tpn.NodesCoordinator), node.WithBlockChain(tpn.BlockChain), node.WithUint64ByteSliceConverter(TestUint64Converter), - node.WithMultiSigner(TestMultiSig), + node.WithMultiSigner(tpn.MultiSigner), node.WithSingleSigner(tpn.OwnAccount.SingleSigner), node.WithTxSignPrivKey(tpn.OwnAccount.SkTxSign), node.WithTxSignPubKey(tpn.OwnAccount.PkTxSign), + node.WithPrivKey(tpn.NodeKeys.Sk), + node.WithPubKey(tpn.NodeKeys.Pk), node.WithInterceptorsContainer(tpn.InterceptorsContainer), node.WithResolversFinder(tpn.ResolverFinder), node.WithBlockProcessor(tpn.BlockProcessor), @@ -505,11 +579,11 @@ func (tpn *TestProcessorNode) addHandlersForCounters() { tpn.ShardDataPool.UnsignedTransactions().RegisterHandler(txHandler) tpn.ShardDataPool.Transactions().RegisterHandler(txHandler) + tpn.ShardDataPool.RewardTransactions().RegisterHandler(txHandler) tpn.ShardDataPool.Headers().RegisterHandler(hdrHandlers) tpn.ShardDataPool.MetaBlocks().RegisterHandler(metaHandlers) tpn.ShardDataPool.MiniBlocks().RegisterHandler(mbHandlers) } - } // StartSync calls Bootstrapper.StartSync. 
Errors if bootstrapper is not set @@ -777,3 +851,7 @@ func (tpn *TestProcessorNode) MiniBlocksPresent(hashes [][]byte) bool { return true } + +func (tpn *TestProcessorNode) initRounder() { + tpn.Rounder = &mock.RounderMock{} +} diff --git a/integrationTests/testProcessorNodeWithMultisigner.go b/integrationTests/testProcessorNodeWithMultisigner.go new file mode 100644 index 00000000000..c25c6c1864f --- /dev/null +++ b/integrationTests/testProcessorNodeWithMultisigner.go @@ -0,0 +1,292 @@ +package integrationTests + +import ( + "bytes" + "context" + "fmt" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go/cmd/node/factory" + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/crypto" + kmultisig "github.com/ElrondNetwork/elrond-go/crypto/signing/kyber/multisig" + "github.com/ElrondNetwork/elrond-go/crypto/signing/multisig" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/hashing/blake2b" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/stretchr/testify/assert" +) + +// NewTestProcessorNodeWithCustomNodesCoordinator returns a new TestProcessorNode instance with custom NodesCoordinator +func NewTestProcessorNodeWithCustomNodesCoordinator( + maxShards uint32, + nodeShardId uint32, + initialNodeAddr string, + nodesCoordinator sharding.NodesCoordinator, + cp *CryptoParams, + keyIndex int, +) *TestProcessorNode { + + shardCoordinator, _ := sharding.NewMultiShardCoordinator(maxShards, nodeShardId) + + messenger := CreateMessengerWithKadDht(context.Background(), initialNodeAddr) + tpn := &TestProcessorNode{ + ShardCoordinator: shardCoordinator, + Messenger: messenger, + NodesCoordinator: nodesCoordinator, + } + tpn.NodeKeys = cp.Keys[nodeShardId][keyIndex] + + llsig := &kmultisig.KyberMultiSignerBLS{} + blsHasher := blake2b.Blake2b{HashSize: factory.BlsHashSize} + + pubKeysMap := PubKeysMapFromKeysMap(cp.Keys) + + tpn.MultiSigner, _ = multisig.NewBLSMultisig( + llsig, + blsHasher, + pubKeysMap[nodeShardId], + tpn.NodeKeys.Sk, + cp.KeyGen, + 0, + ) + if tpn.MultiSigner == nil { + fmt.Println("Error generating multisigner") + } + accountShardId := nodeShardId + if nodeShardId == sharding.MetachainShardId { + accountShardId = 0 + } + + tpn.OwnAccount = CreateTestWalletAccount(shardCoordinator, accountShardId) + tpn.initDataPools() + tpn.initTestNode() + + return tpn +} + +// CreateNodesWithNodesCoordinator returns a map with nodes per shard each using a real nodes coordinator +func CreateNodesWithNodesCoordinator( + nodesPerShard int, + nbMetaNodes int, + nbShards int, + shardConsensusGroupSize int, + metaConsensusGroupSize int, + seedAddress string, +) map[uint32][]*TestProcessorNode { + cp := CreateCryptoParams(nodesPerShard, nbMetaNodes, uint32(nbShards)) + pubKeys := PubKeysMapFromKeysMap(cp.Keys) + validatorsMap := GenValidatorsFromPubKeys(pubKeys, uint32(nbShards)) + nodesMap := make(map[uint32][]*TestProcessorNode) + for shardId, validatorList := range validatorsMap { + nodesCoordinator, err := sharding.NewIndexHashedNodesCoordinator( + shardConsensusGroupSize, + metaConsensusGroupSize, + TestHasher, + shardId, + uint32(nbShards), + validatorsMap, + ) + + if err != nil { + fmt.Println("Error creating node coordinator") + } + + nodesList := make([]*TestProcessorNode, len(validatorList)) + for i := range validatorList { + nodesList[i] = NewTestProcessorNodeWithCustomNodesCoordinator( + uint32(nbShards), + shardId, + seedAddress, + nodesCoordinator, + cp, + i, + ) + } + nodesMap[shardId] = nodesList + } + + return 
nodesMap +} + +// ProposeBlockWithConsensusSignature proposes +func ProposeBlockWithConsensusSignature( + shardId uint32, + nodesMap map[uint32][]*TestProcessorNode, + round uint64, + nonce uint64, + randomness []byte, +) (data.BodyHandler, data.HeaderHandler, [][]byte, []*TestProcessorNode) { + + nodesCoordinator := nodesMap[shardId][0].NodesCoordinator + pubKeys, err := nodesCoordinator.GetValidatorsPublicKeys(randomness, round, shardId) + if err != nil { + fmt.Println("Error getting the validators public keys: ", err) + } + + // set the consensus reward addresses + for _, node := range nodesMap[shardId] { + node.BlockProcessor.SetConsensusData(randomness, round, 0, shardId) + } + + consensusNodes := selectTestNodesForPubKeys(nodesMap[shardId], pubKeys) + // first node is block proposer + body, header, txHashes := consensusNodes[0].ProposeBlock(round, nonce) + header.SetPrevRandSeed(randomness) + header = DoConsensusSigningOnBlock(header, consensusNodes, pubKeys) + + return body, header, txHashes, consensusNodes +} + +func selectTestNodesForPubKeys(nodes []*TestProcessorNode, pubKeys []string) []*TestProcessorNode { + selectedNodes := make([]*TestProcessorNode, len(pubKeys)) + cntNodes := 0 + + for i, pk := range pubKeys { + for _, node := range nodes { + pubKeyBytes, _ := node.NodeKeys.Pk.ToByteArray() + if bytes.Equal(pubKeyBytes, []byte(pk)) { + selectedNodes[i] = node + cntNodes++ + } + } + } + + if cntNodes != len(pubKeys) { + fmt.Println("Error selecting nodes from public keys") + } + + return selectedNodes +} + +// DoConsensusSigningOnBlock simulates a consensus aggregated signature on the provided block +func DoConsensusSigningOnBlock( + blockHeader data.HeaderHandler, + consensusNodes []*TestProcessorNode, + pubKeys []string, +) data.HeaderHandler { + // set bitmap for all consensus nodes signing + bitmap := make([]byte, len(consensusNodes)/8+1) + for i := range bitmap { + bitmap[i] = 0xFF + } + + bitmap[len(consensusNodes)/8] >>= uint8(8 - (len(consensusNodes) % 8)) + blockHeader.SetPubKeysBitmap(bitmap) + // clear signature, as we need to compute it below + blockHeader.SetSignature(nil) + blockHeader.SetPubKeysBitmap(nil) + blockHeaderHash, _ := core.CalculateHash(TestMarshalizer, TestHasher, blockHeader) + + var msig crypto.MultiSigner + msigProposer, _ := consensusNodes[0].MultiSigner.Create(pubKeys, 0) + _, _ = msigProposer.CreateSignatureShare(blockHeaderHash, bitmap) + + for i := 1; i < len(consensusNodes); i++ { + msig, _ = consensusNodes[i].MultiSigner.Create(pubKeys, uint16(i)) + sigShare, _ := msig.CreateSignatureShare(blockHeaderHash, bitmap) + _ = msigProposer.StoreSignatureShare(uint16(i), sigShare) + } + + sig, _ := msigProposer.AggregateSigs(bitmap) + blockHeader.SetSignature(sig) + blockHeader.SetPubKeysBitmap(bitmap) + + return blockHeader +} + +// AllShardsProposeBlock simulates each shard selecting a consensus group and proposing/broadcasting/committing a block +func AllShardsProposeBlock( + round uint64, + nonce uint64, + prevRandomness map[uint32][]byte, + nodesMap map[uint32][]*TestProcessorNode, +) ( + map[uint32]data.BodyHandler, + map[uint32]data.HeaderHandler, + map[uint32][]*TestProcessorNode, + map[uint32][]byte, +) { + + body := make(map[uint32]data.BodyHandler) + header := make(map[uint32]data.HeaderHandler) + consensusNodes := make(map[uint32][]*TestProcessorNode) + newRandomness := make(map[uint32][]byte) + + // propose blocks + for i := range nodesMap { + body[i], header[i], _, consensusNodes[i] = ProposeBlockWithConsensusSignature(i, nodesMap, 
round, nonce, prevRandomness[i]) + newRandomness[i] = header[i].GetRandSeed() + } + + // propagate blocks + for i := range nodesMap { + consensusNodes[i][0].BroadcastBlock(body[i], header[i]) + consensusNodes[i][0].CommitBlock(body[i], header[i]) + } + + time.Sleep(2 * time.Second) + + return body, header, consensusNodes, newRandomness +} + +// SyncAllShardsWithRoundBlock enforces all nodes in each shard synchronizing the block for the given round +func SyncAllShardsWithRoundBlock( + t *testing.T, + nodesMap map[uint32][]*TestProcessorNode, + indexProposers map[uint32]int, + round uint64, +) { + for shard, nodeList := range nodesMap { + SyncBlock(t, nodeList, []int{indexProposers[shard]}, round) + } + time.Sleep(2 * time.Second) +} + +// VerifyNodesHaveHeaders verifies that each node has the corresponding header +func VerifyNodesHaveHeaders( + t *testing.T, + headers map[uint32]data.HeaderHandler, + nodesMap map[uint32][]*TestProcessorNode, +) { + var v interface{} + var ok bool + + // all nodes in metachain have the block headers in pool as interceptor validates them + for shHeader, header := range headers { + headerHash, _ := core.CalculateHash(TestMarshalizer, TestHasher, header) + + for _, metaNode := range nodesMap[sharding.MetachainShardId] { + if shHeader == sharding.MetachainShardId { + v, ok = metaNode.MetaDataPool.MetaChainBlocks().Get(headerHash) + } else { + v, ok = metaNode.MetaDataPool.ShardHeaders().Get(headerHash) + } + + assert.True(t, ok) + assert.Equal(t, header, v) + } + + // all nodes in shards need to have their own shard headers and metachain headers + for sh, nodesList := range nodesMap { + if sh == sharding.MetachainShardId { + continue + } + + if sh != shHeader && shHeader != sharding.MetachainShardId { + continue + } + + for _, node := range nodesList { + if shHeader == sharding.MetachainShardId { + v, ok = node.ShardDataPool.MetaBlocks().Get(headerHash) + } else { + v, ok = node.ShardDataPool.Headers().Get(headerHash) + } + assert.True(t, ok) + assert.Equal(t, header, v) + } + } + } +} diff --git a/integrationTests/testSyncNode.go b/integrationTests/testSyncNode.go index 81f4e20ba52..3b7cd83e355 100644 --- a/integrationTests/testSyncNode.go +++ b/integrationTests/testSyncNode.go @@ -23,13 +23,24 @@ func NewTestSyncNode( ) *TestProcessorNode { shardCoordinator, _ := sharding.NewMultiShardCoordinator(maxShards, nodeShardId) + nodesCoordinator := &mock.NodesCoordinatorMock{} messenger := CreateMessengerWithKadDht(context.Background(), initialNodeAddr) + tpn := &TestProcessorNode{ ShardCoordinator: shardCoordinator, Messenger: messenger, + NodesCoordinator: nodesCoordinator, + } + + kg := &mock.KeyGenMock{} + sk, pk := kg.GeneratePair() + tpn.NodeKeys = &TestKeyPair{ + Sk: sk, + Pk: pk, } + tpn.MultiSigner = TestMultiSig tpn.OwnAccount = CreateTestWalletAccount(shardCoordinator, txSignPrivKeyShardId) tpn.initDataPools() tpn.initTestNodeWithSync() @@ -40,9 +51,14 @@ func NewTestSyncNode( func (tpn *TestProcessorNode) initTestNodeWithSync() { tpn.initRounder() tpn.initStorage() - tpn.AccntState, _, _ = CreateAccountsDB(tpn.ShardCoordinator) + tpn.AccntState, _, _ = CreateAccountsDB(0) tpn.initChainHandler() tpn.GenesisBlocks = CreateGenesisBlocks(tpn.ShardCoordinator) + tpn.SpecialAddressHandler = mock.NewSpecialAddressHandlerMock( + TestAddressConverter, + tpn.ShardCoordinator, + tpn.NodesCoordinator, + ) tpn.initInterceptors() tpn.initResolvers() tpn.initInnerProcessors() @@ -83,6 +99,8 @@ func (tpn *TestProcessorNode) initBlockProcessorWithSync() { 
tpn.MetaDataPool, tpn.ForkDetector, tpn.ShardCoordinator, + tpn.NodesCoordinator, + tpn.SpecialAddressHandler, TestHasher, TestMarshalizer, tpn.Storage, @@ -90,20 +108,23 @@ func (tpn *TestProcessorNode) initBlockProcessorWithSync() { tpn.RequestHandler, TestUint64Converter, ) + } else { tpn.ForkDetector, _ = sync.NewShardForkDetector(tpn.Rounder) arguments := block.ArgShardProcessor{ ArgBaseProcessor: &block.ArgBaseProcessor{ - Accounts: tpn.AccntState, - ForkDetector: tpn.ForkDetector, - Hasher: TestHasher, - Marshalizer: TestMarshalizer, - Store: tpn.Storage, - ShardCoordinator: tpn.ShardCoordinator, - Uint64Converter: TestUint64Converter, - StartHeaders: tpn.GenesisBlocks, - RequestHandler: tpn.RequestHandler, - Core: nil, + Accounts: tpn.AccntState, + ForkDetector: tpn.ForkDetector, + Hasher: TestHasher, + Marshalizer: TestMarshalizer, + Store: tpn.Storage, + ShardCoordinator: tpn.ShardCoordinator, + NodesCoordinator: tpn.NodesCoordinator, + SpecialAddressHandler: tpn.SpecialAddressHandler, + Uint64Converter: TestUint64Converter, + StartHeaders: tpn.GenesisBlocks, + RequestHandler: tpn.RequestHandler, + Core: nil, }, DataPool: tpn.ShardDataPool, BlocksTracker: tpn.BlockTracker, diff --git a/integrationTests/vm/testInitializer.go b/integrationTests/vm/testInitializer.go index a6f2118c95b..c940c7db504 100644 --- a/integrationTests/vm/testInitializer.go +++ b/integrationTests/vm/testInitializer.go @@ -13,6 +13,7 @@ import ( "github.com/ElrondNetwork/elrond-go/integrationTests/mock" "github.com/ElrondNetwork/elrond-go/marshal" "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/coordinator" "github.com/ElrondNetwork/elrond-go/process/factory" "github.com/ElrondNetwork/elrond-go/process/smartContract" "github.com/ElrondNetwork/elrond-go/process/smartContract/hooks" @@ -100,8 +101,24 @@ func CreateTxProcessorWithOneSCExecutorMockVM(accnts state.AccountsAdapter, opGa addrConv, oneShardCoordinator, &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) + + txTypeHandler, _ := coordinator.NewTxTypeHandler( + addrConv, + oneShardCoordinator, + accnts) + + txProcessor, _ := transaction.NewTxProcessor( + accnts, + testHasher, + addrConv, + testMarshalizer, + oneShardCoordinator, + scProcessor, + &mock.UnsignedTxHandlerMock{}, + txTypeHandler, ) - txProcessor, _ := transaction.NewTxProcessor(accnts, testHasher, addrConv, testMarshalizer, oneShardCoordinator, scProcessor) return txProcessor } @@ -144,8 +161,24 @@ func CreateTxProcessorWithOneSCExecutorIeleVM( addrConv, oneShardCoordinator, &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) + + txTypeHandler, _ := coordinator.NewTxTypeHandler( + addrConv, + oneShardCoordinator, + accnts) + + txProcessor, _ := transaction.NewTxProcessor( + accnts, + testHasher, + addrConv, + testMarshalizer, + oneShardCoordinator, + scProcessor, + &mock.UnsignedTxHandlerMock{}, + txTypeHandler, ) - txProcessor, _ := transaction.NewTxProcessor(accnts, testHasher, addrConv, testMarshalizer, oneShardCoordinator, scProcessor) return txProcessor, blockChainHook } diff --git a/node/defineOptions.go b/node/defineOptions.go index e812025e30b..ed1a5b41eb3 100644 --- a/node/defineOptions.go +++ b/node/defineOptions.go @@ -266,6 +266,17 @@ func WithShardCoordinator(shardCoordinator sharding.Coordinator) Option { } } +// WithNodesCoordinator sets up the nodes coordinator +func WithNodesCoordinator(nodesCoordinator sharding.NodesCoordinator) Option { + return func(n *Node) error { + if 
nodesCoordinator == nil { + return ErrNilNodesCoordinator + } + n.nodesCoordinator = nodesCoordinator + return nil + } +} + // WithUint64ByteSliceConverter sets up the uint64 <-> []byte converter func WithUint64ByteSliceConverter(converter typeConverters.Uint64ByteSliceConverter) Option { return func(n *Node) error { diff --git a/node/defineOptions_test.go b/node/defineOptions_test.go index 50f248f61dd..4be5f82c24e 100644 --- a/node/defineOptions_test.go +++ b/node/defineOptions_test.go @@ -544,6 +544,32 @@ func TestWithShardCoordinator_ShouldWork(t *testing.T) { assert.Nil(t, err) } +func TestWithNodesCoordinator_NilNodesCoordinatorShouldErr(t *testing.T) { + t.Parallel() + + node, _ := NewNode() + + opt := WithNodesCoordinator(nil) + err := opt(node) + + assert.Nil(t, node.nodesCoordinator) + assert.Equal(t, ErrNilNodesCoordinator, err) +} + +func TestWithNodesCoordinator_ShouldWork(t *testing.T) { + t.Parallel() + + node, _ := NewNode() + + nodesCoordinator := &mock.NodesCoordinatorMock{} + + opt := WithNodesCoordinator(nodesCoordinator) + err := opt(node) + + assert.True(t, node.nodesCoordinator == nodesCoordinator) + assert.Nil(t, err) +} + func TestWithUint64ByteSliceConverter_NilConverterShouldErr(t *testing.T) { t.Parallel() diff --git a/node/errors.go b/node/errors.go index 788d56d2c8c..99f0548f19e 100644 --- a/node/errors.go +++ b/node/errors.go @@ -58,6 +58,9 @@ var ErrNilDataPool = errors.New("trying to set nil data pool") // ErrNilShardCoordinator signals that a nil shard coordinator has been provided var ErrNilShardCoordinator = errors.New("trying to set nil shard coordinator") +// ErrNilNodesCoordinator signals that a nil nodes coordinator has been provided +var ErrNilNodesCoordinator = errors.New("trying to set nil nodes coordinator") + // ErrNilUint64ByteSliceConverter signals that a nil uint64 <-> byte slice converter has been provided var ErrNilUint64ByteSliceConverter = errors.New("trying to set nil uint64 - byte slice converter") diff --git a/node/heartbeat/hearbeatMessageInfo_test.go b/node/heartbeat/hearbeatMessageInfo_test.go index d10d8715b1b..7e8929df8cb 100644 --- a/node/heartbeat/hearbeatMessageInfo_test.go +++ b/node/heartbeat/hearbeatMessageInfo_test.go @@ -83,6 +83,7 @@ func TestHeartbeatMessageInfo_HeartbeatShouldUpdateUpTime(t *testing.T) { // send heartbeat twice in order to calculate the duration between thm hbmi.HeartbeatReceived(uint32(0), uint32(1), "v0.1", "undefined") + time.Sleep(10 * time.Millisecond) hbmi.HeartbeatReceived(uint32(0), uint32(2), "v0.1", "undefined") assert.True(t, hbmi.totalUpTime.Duration > time.Duration(0)) diff --git a/node/mock/blockProcessorStub.go b/node/mock/blockProcessorStub.go index 928115b5241..f8033ae7f27 100644 --- a/node/mock/blockProcessorStub.go +++ b/node/mock/blockProcessorStub.go @@ -72,6 +72,10 @@ func (blProcMock BlockProcessorStub) AddLastNotarizedHdr(shardId uint32, process blProcMock.AddLastNotarizedHdrCalled(shardId, processedHdr) } +func (blProcMock BlockProcessorStub) SetConsensusData(randomness []byte, round uint64, epoch uint32, shardId uint32) { + panic("implement me") +} + // IsInterfaceNil returns true if there is no value under the interface func (blProcMock *BlockProcessorStub) IsInterfaceNil() bool { if blProcMock == nil { diff --git a/node/mock/nodesCoordinatorMock.go b/node/mock/nodesCoordinatorMock.go new file mode 100644 index 00000000000..2acd856559a --- /dev/null +++ b/node/mock/nodesCoordinatorMock.go @@ -0,0 +1,111 @@ +package mock + +import ( + "math/big" + + 
"github.com/ElrondNetwork/elrond-go/sharding" +) + +type NodesCoordinatorMock struct { + ComputeValidatorsGroupCalled func(randomness []byte, round uint64, shardId uint32) ([]sharding.Validator, error) + GetValidatorsPublicKeysCalled func(randomness []byte, round uint64, shardId uint32) ([]string, error) + GetValidatorsRewardsAddressesCalled func(randomness []byte, round uint64, shardId uint32) ([]string, error) +} + +func (ncm *NodesCoordinatorMock) ComputeValidatorsGroup( + randomness []byte, + round uint64, + shardId uint32, +) (validatorsGroup []sharding.Validator, err error) { + + if ncm.ComputeValidatorsGroupCalled != nil { + return ncm.ComputeValidatorsGroupCalled(randomness, round, shardId) + } + + list := []sharding.Validator{ + NewValidatorMock(big.NewInt(0), 0, []byte("A"), []byte("AA")), + NewValidatorMock(big.NewInt(0), 0, []byte("B"), []byte("BB")), + NewValidatorMock(big.NewInt(0), 0, []byte("C"), []byte("CC")), + NewValidatorMock(big.NewInt(0), 0, []byte("D"), []byte("DD")), + NewValidatorMock(big.NewInt(0), 0, []byte("E"), []byte("EE")), + NewValidatorMock(big.NewInt(0), 0, []byte("F"), []byte("FF")), + NewValidatorMock(big.NewInt(0), 0, []byte("G"), []byte("GG")), + NewValidatorMock(big.NewInt(0), 0, []byte("H"), []byte("HH")), + NewValidatorMock(big.NewInt(0), 0, []byte("I"), []byte("II")), + } + + return list, nil +} + +func (ncm *NodesCoordinatorMock) GetValidatorsPublicKeys( + randomness []byte, + round uint64, + shardId uint32, +) ([]string, error) { + if ncm.GetValidatorsPublicKeysCalled != nil { + return ncm.GetValidatorsPublicKeysCalled(randomness, round, shardId) + } + + validators, err := ncm.ComputeValidatorsGroup(randomness, round, shardId) + if err != nil { + return nil, err + } + + pubKeys := make([]string, 0) + + for _, v := range validators { + pubKeys = append(pubKeys, string(v.PubKey())) + } + + return pubKeys, nil +} + +func (ncm *NodesCoordinatorMock) GetValidatorsRewardsAddresses( + randomness []byte, + round uint64, + shardId uint32, +) ([]string, error) { + if ncm.GetValidatorsPublicKeysCalled != nil { + return ncm.GetValidatorsRewardsAddressesCalled(randomness, round, shardId) + } + + validators, err := ncm.ComputeValidatorsGroup(randomness, round, shardId) + if err != nil { + return nil, err + } + + addresses := make([]string, 0) + for _, v := range validators { + addresses = append(addresses, string(v.Address())) + } + + return addresses, nil +} + +func (ncm *NodesCoordinatorMock) ConsensusGroupSize(shardId uint32) int { + panic("implement me") +} + +func (ncm *NodesCoordinatorMock) SetNodesPerShards(map[uint32][]sharding.Validator) error { + return nil +} + +func (ncm *NodesCoordinatorMock) SetConsensusGroupSize(int) error { + panic("implement me") +} + +func (ncm *NodesCoordinatorMock) GetSelectedPublicKeys(selection []byte, shardId uint32) (publicKeys []string, err error) { + panic("implement me") +} + +func (ncm *NodesCoordinatorMock) GetValidatorWithPublicKey(publicKey []byte) (sharding.Validator, uint32, error) { + panic("implement me") +} + +// IsInterfaceNil returns true if there is no value under the interface +func (ncm *NodesCoordinatorMock) IsInterfaceNil() bool { + if ncm == nil { + return true + } + return false +} diff --git a/node/mock/poolsHolderStub.go b/node/mock/poolsHolderStub.go index 632f7e75ab5..1ea9c0a934e 100644 --- a/node/mock/poolsHolderStub.go +++ b/node/mock/poolsHolderStub.go @@ -11,6 +11,7 @@ type PoolsHolderStub struct { PeerChangesBlocksCalled func() storage.Cacher TransactionsCalled func() 
dataRetriever.ShardedDataCacherNotifier UnsignedTransactionsCalled func() dataRetriever.ShardedDataCacherNotifier + RewardTransactionsCalled func() dataRetriever.ShardedDataCacherNotifier MiniBlocksCalled func() storage.Cacher MetaBlocksCalled func() storage.Cacher MetaHeadersNoncesCalled func() dataRetriever.Uint64SyncMapCacher @@ -48,6 +49,10 @@ func (phs *PoolsHolderStub) UnsignedTransactions() dataRetriever.ShardedDataCach return phs.UnsignedTransactionsCalled() } +func (phs *PoolsHolderStub) RewardTransactions() dataRetriever.ShardedDataCacherNotifier { + return phs.RewardTransactionsCalled() +} + // IsInterfaceNil returns true if there is no value under the interface func (phs *PoolsHolderStub) IsInterfaceNil() bool { if phs == nil { diff --git a/node/mock/validatorMock.go b/node/mock/validatorMock.go new file mode 100644 index 00000000000..e4f9bf01af8 --- /dev/null +++ b/node/mock/validatorMock.go @@ -0,0 +1,32 @@ +package mock + +import ( + "math/big" +) + +type ValidatorMock struct { + stake *big.Int + rating int32 + pubKey []byte + address []byte +} + +func NewValidatorMock(stake *big.Int, rating int32, pubKey []byte, address []byte) *ValidatorMock { + return &ValidatorMock{stake: stake, rating: rating, pubKey: pubKey, address: address} +} + +func (vm *ValidatorMock) Stake() *big.Int { + return vm.stake +} + +func (vm *ValidatorMock) Rating() int32 { + return vm.rating +} + +func (vm *ValidatorMock) PubKey() []byte { + return vm.pubKey +} + +func (vm *ValidatorMock) Address() []byte { + return vm.address +} diff --git a/node/node.go b/node/node.go index 5b55d2821a0..2b704e22056 100644 --- a/node/node.go +++ b/node/node.go @@ -15,8 +15,6 @@ import ( "github.com/ElrondNetwork/elrond-go/consensus/chronology" "github.com/ElrondNetwork/elrond-go/consensus/spos" "github.com/ElrondNetwork/elrond-go/consensus/spos/sposFactory" - "github.com/ElrondNetwork/elrond-go/consensus/validators" - "github.com/ElrondNetwork/elrond-go/consensus/validators/groupSelectors" "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/core/logger" "github.com/ElrondNetwork/elrond-go/core/partitioning" @@ -90,6 +88,7 @@ type Node struct { metaDataPool dataRetriever.MetaPoolsHolder store dataRetriever.StorageService shardCoordinator sharding.Coordinator + nodesCoordinator sharding.NodesCoordinator consensusTopic string consensusType string @@ -278,11 +277,6 @@ func (n *Node) StartConsensus() error { return err } - validatorGroupSelector, err := n.createValidatorGroupSelector() - if err != nil { - return err - } - consensusDataContainer, err := spos.NewConsensusCore( n.blkc, n.blockProcessor, @@ -297,8 +291,9 @@ func (n *Node) StartConsensus() error { n.multiSigner, n.rounder, n.shardCoordinator, + n.nodesCoordinator, n.syncTimer, - validatorGroupSelector) + ) if err != nil { return err } @@ -452,37 +447,6 @@ func (n *Node) createConsensusState() (*spos.ConsensusState, error) { return consensusState, nil } -// createValidatorGroupSelector creates a index hashed group selector object -func (n *Node) createValidatorGroupSelector() (consensus.ValidatorGroupSelector, error) { - validatorGroupSelector, err := groupSelectors.NewIndexHashedGroupSelector(n.consensusGroupSize, n.hasher) - if err != nil { - return nil, err - } - - validatorsList := make([]consensus.Validator, 0) - shID := n.shardCoordinator.SelfId() - - if len(n.initialNodesPubkeys[shID]) == 0 { - return nil, errors.New("could not create validator group as shardID is out of range") - } - - for i := 0; i < 
len(n.initialNodesPubkeys[shID]); i++ { - validator, err := validators.NewValidator(big.NewInt(0), 0, []byte(n.initialNodesPubkeys[shID][i])) - if err != nil { - return nil, err - } - - validatorsList = append(validatorsList, validator) - } - - err = validatorGroupSelector.LoadEligibleList(validatorsList) - if err != nil { - return nil, err - } - - return validatorGroupSelector, nil -} - // createConsensusTopic creates a consensus topic for node func (n *Node) createConsensusTopic(messageProcessor p2p.MessageProcessor, shardCoordinator sharding.Coordinator) error { if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { diff --git a/node/nodeTesting.go b/node/nodeTesting.go index 13528c1804f..1ea56ef41bd 100644 --- a/node/nodeTesting.go +++ b/node/nodeTesting.go @@ -228,11 +228,13 @@ func (n *Node) generateAndSignSingleTx( } tx := transaction.Transaction{ - Nonce: nonce, - Value: value, - RcvAddr: rcvAddrBytes, - SndAddr: sndAddrBytes, - Data: data, + Nonce: nonce, + Value: value, + GasLimit: 100, + GasPrice: 10, + RcvAddr: rcvAddrBytes, + SndAddr: sndAddrBytes, + Data: data, } marshalizedTx, err := n.marshalizer.Marshal(&tx) diff --git a/ntp/syncTime.go b/ntp/syncTime.go index 4241a78ec31..98aa38a767d 100644 --- a/ntp/syncTime.go +++ b/ntp/syncTime.go @@ -150,6 +150,8 @@ func (s *syncTime) formatTime(time time.Time) string { // CurrentTime method gets the current time on which is added the current offset func (s *syncTime) CurrentTime() time.Time { + s.mut.RLock() + defer s.mut.RUnlock() return time.Now().Add(s.clockOffset) } diff --git a/process/block/argProcessor.go b/process/block/argProcessor.go index 8318711d641..604adc57c8d 100644 --- a/process/block/argProcessor.go +++ b/process/block/argProcessor.go @@ -15,16 +15,18 @@ import ( // ArgBaseProcessor holds all dependencies required by the process data factory in order to create // new instances type ArgBaseProcessor struct { - Accounts state.AccountsAdapter - ForkDetector process.ForkDetector - Hasher hashing.Hasher - Marshalizer marshal.Marshalizer - Store dataRetriever.StorageService - ShardCoordinator sharding.Coordinator - Uint64Converter typeConverters.Uint64ByteSliceConverter - StartHeaders map[uint32]data.HeaderHandler - RequestHandler process.RequestHandler - Core serviceContainer.Core + Accounts state.AccountsAdapter + ForkDetector process.ForkDetector + Hasher hashing.Hasher + Marshalizer marshal.Marshalizer + Store dataRetriever.StorageService + ShardCoordinator sharding.Coordinator + NodesCoordinator sharding.NodesCoordinator + SpecialAddressHandler process.SpecialAddressHandler + Uint64Converter typeConverters.Uint64ByteSliceConverter + StartHeaders map[uint32]data.HeaderHandler + RequestHandler process.RequestHandler + Core serviceContainer.Core } // ArgShardProcessor holds all dependencies required by the process data factory in order to create diff --git a/process/block/baseProcess.go b/process/block/baseProcess.go index 9b7b5c424df..8413b239092 100644 --- a/process/block/baseProcess.go +++ b/process/block/baseProcess.go @@ -30,14 +30,16 @@ type hashAndHdr struct { type mapShardHeaders map[uint32][]data.HeaderHandler type baseProcessor struct { - shardCoordinator sharding.Coordinator - accounts state.AccountsAdapter - forkDetector process.ForkDetector - hasher hashing.Hasher - marshalizer marshal.Marshalizer - store dataRetriever.StorageService - uint64Converter typeConverters.Uint64ByteSliceConverter - blockSizeThrottler process.BlockSizeThrottler + shardCoordinator sharding.Coordinator + nodesCoordinator 
sharding.NodesCoordinator + specialAddressHandler process.SpecialAddressHandler + accounts state.AccountsAdapter + forkDetector process.ForkDetector + hasher hashing.Hasher + marshalizer marshal.Marshalizer + store dataRetriever.StorageService + uint64Converter typeConverters.Uint64ByteSliceConverter + blockSizeThrottler process.BlockSizeThrottler mutNotarizedHdrs sync.RWMutex notarizedHdrs mapShardHeaders @@ -497,6 +499,8 @@ func checkProcessorNilParameters( marshalizer marshal.Marshalizer, store dataRetriever.StorageService, shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, + specialAddressHandler process.SpecialAddressHandler, uint64Converter typeConverters.Uint64ByteSliceConverter, ) error { @@ -518,6 +522,12 @@ func checkProcessorNilParameters( if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { return process.ErrNilShardCoordinator } + if nodesCoordinator == nil || nodesCoordinator.IsInterfaceNil() { + return process.ErrNilNodesCoordinator + } + if specialAddressHandler == nil || specialAddressHandler.IsInterfaceNil() { + return process.ErrNilSpecialAddressHandler + } if uint64Converter == nil || uint64Converter.IsInterfaceNil() { return process.ErrNilUint64Converter } diff --git a/process/block/baseProcess_test.go b/process/block/baseProcess_test.go index 3ddb5d4a6c4..39b272400e6 100644 --- a/process/block/baseProcess_test.go +++ b/process/block/baseProcess_test.go @@ -3,12 +3,14 @@ package block_test import ( "bytes" "errors" + "math/big" "reflect" "testing" "time" "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" "github.com/ElrondNetwork/elrond-go/data/transaction" "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/hashing" @@ -47,68 +49,58 @@ func generateTestUnit() storage.Storer { return storer } +func createShardedDataChacherNotifier( + handler data.TransactionHandler, + testHash []byte, +) func() dataRetriever.ShardedDataCacherNotifier { + return func() dataRetriever.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{ + RegisterHandlerCalled: func(i func(key []byte)) {}, + ShardDataStoreCalled: func(id string) (c storage.Cacher) { + return &mock.CacherStub{ + PeekCalled: func(key []byte) (value interface{}, ok bool) { + if reflect.DeepEqual(key, testHash) { + return handler, true + } + return nil, false + }, + KeysCalled: func() [][]byte { + return [][]byte{[]byte("key1"), []byte("key2")} + }, + LenCalled: func() int { + return 0 + }, + } + }, + RemoveSetOfDataFromPoolCalled: func(keys [][]byte, id string) {}, + SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { + if reflect.DeepEqual(key, []byte("tx1_hash")) { + return handler, true + } + return nil, false + }, + AddDataCalled: func(key []byte, data interface{}, cacheId string) { + }, + } + } +} + func initDataPool(testHash []byte) *mock.PoolsHolderStub { + rewardTx := &rewardTx.RewardTx{ + Round: 1, + Epoch: 0, + Value: big.NewInt(10), + RcvAddr: []byte("receiver"), + ShardId: 0, + } + txCalled := createShardedDataChacherNotifier(&transaction.Transaction{Nonce: 10}, testHash) + unsignedTxCalled := createShardedDataChacherNotifier(&transaction.Transaction{Nonce: 10}, testHash) + rewardTransactionsCalled := createShardedDataChacherNotifier(rewardTx, testHash) + sdp := &mock.PoolsHolderStub{ - TransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{ - RegisterHandlerCalled: func(i 
func(key []byte)) {}, - ShardDataStoreCalled: func(id string) (c storage.Cacher) { - return &mock.CacherStub{ - PeekCalled: func(key []byte) (value interface{}, ok bool) { - if reflect.DeepEqual(key, testHash) { - return &transaction.Transaction{Nonce: 10}, true - } - return nil, false - }, - KeysCalled: func() [][]byte { - return [][]byte{[]byte("key1"), []byte("key2")} - }, - LenCalled: func() int { - return 0 - }, - } - }, - RemoveSetOfDataFromPoolCalled: func(keys [][]byte, id string) {}, - SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { - if reflect.DeepEqual(key, []byte("tx1_hash")) { - return &transaction.Transaction{Nonce: 10}, true - } - return nil, false - }, - AddDataCalled: func(key []byte, data interface{}, cacheId string) { - }, - } - }, - UnsignedTransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{ - RegisterHandlerCalled: func(i func(key []byte)) {}, - ShardDataStoreCalled: func(id string) (c storage.Cacher) { - return &mock.CacherStub{ - PeekCalled: func(key []byte) (value interface{}, ok bool) { - if reflect.DeepEqual(key, testHash) { - return &transaction.Transaction{Nonce: 10}, true - } - return nil, false - }, - KeysCalled: func() [][]byte { - return [][]byte{[]byte("key1"), []byte("key2")} - }, - LenCalled: func() int { - return 0 - }, - } - }, - RemoveSetOfDataFromPoolCalled: func(keys [][]byte, id string) {}, - SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { - if reflect.DeepEqual(key, []byte("tx1_hash")) { - return &transaction.Transaction{Nonce: 10}, true - } - return nil, false - }, - AddDataCalled: func(key []byte, data interface{}, cacheId string) { - }, - } - }, + TransactionsCalled: txCalled, + UnsignedTransactionsCalled: unsignedTxCalled, + RewardTransactionsCalled: rewardTransactionsCalled, HeadersNoncesCalled: func() dataRetriever.Uint64SyncMapCacher { return &mock.Uint64SyncMapCacherStub{ MergeCalled: func(u uint64, syncMap dataRetriever.ShardIdHashMap) {}, @@ -316,18 +308,27 @@ func (wr *wrongBody) IsInterfaceNil() bool { } func CreateMockArguments() blproc.ArgShardProcessor { + nodesCoordinator := mock.NewNodesCoordinatorMock() + shardCoordinator := mock.NewOneShardCoordinatorMock() + specialAddressHandler := mock.NewSpecialAddressHandlerMock( + &mock.AddressConverterMock{}, + shardCoordinator, + nodesCoordinator, + ) arguments := blproc.ArgShardProcessor{ ArgBaseProcessor: &blproc.ArgBaseProcessor{ - Accounts: &mock.AccountsStub{}, - ForkDetector: &mock.ForkDetectorMock{}, - Hasher: &mock.HasherStub{}, - Marshalizer: &mock.MarshalizerMock{}, - Store: initStore(), - ShardCoordinator: mock.NewOneShardCoordinatorMock(), - Uint64Converter: &mock.Uint64ByteSliceConverterMock{}, - StartHeaders: createGenesisBlocks(mock.NewOneShardCoordinatorMock()), - RequestHandler: &mock.RequestHandlerMock{}, - Core: &mock.ServiceContainerMock{}, + Accounts: &mock.AccountsStub{}, + ForkDetector: &mock.ForkDetectorMock{}, + Hasher: &mock.HasherStub{}, + Marshalizer: &mock.MarshalizerMock{}, + Store: initStore(), + ShardCoordinator: shardCoordinator, + NodesCoordinator: nodesCoordinator, + SpecialAddressHandler: specialAddressHandler, + Uint64Converter: &mock.Uint64ByteSliceConverterMock{}, + StartHeaders: createGenesisBlocks(mock.NewOneShardCoordinatorMock()), + RequestHandler: &mock.RequestHandlerMock{}, + Core: &mock.ServiceContainerMock{}, }, DataPool: initDataPool([]byte("")), BlocksTracker: &mock.BlocksTrackerMock{}, diff --git a/process/block/export_test.go 
b/process/block/export_test.go index d95ad40d310..196de84e15b 100644 --- a/process/block/export_test.go +++ b/process/block/export_test.go @@ -53,19 +53,27 @@ func (sp *shardProcessor) RemoveProcessedMetaBlocksFromPool(processedMetaHdrs [] } func NewShardProcessorEmptyWith3shards(tdp dataRetriever.PoolsHolder, genesisBlocks map[uint32]data.HeaderHandler) (*shardProcessor, error) { - + shardCoordinator := mock.NewMultiShardsCoordinatorMock(3) + nodesCoordinator := mock.NewNodesCoordinatorMock() + specialAddressHandler := mock.NewSpecialAddressHandlerMock( + &mock.AddressConverterMock{}, + shardCoordinator, + nodesCoordinator, + ) arguments := ArgShardProcessor{ ArgBaseProcessor: &ArgBaseProcessor{ - Accounts: &mock.AccountsStub{}, - ForkDetector: &mock.ForkDetectorMock{}, - Hasher: &mock.HasherMock{}, - Marshalizer: &mock.MarshalizerMock{}, - Store: &mock.ChainStorerMock{}, - ShardCoordinator: mock.NewMultiShardsCoordinatorMock(3), - Uint64Converter: &mock.Uint64ByteSliceConverterMock{}, - StartHeaders: genesisBlocks, - RequestHandler: &mock.RequestHandlerMock{}, - Core: &mock.ServiceContainerMock{}, + Accounts: &mock.AccountsStub{}, + ForkDetector: &mock.ForkDetectorMock{}, + Hasher: &mock.HasherMock{}, + Marshalizer: &mock.MarshalizerMock{}, + Store: &mock.ChainStorerMock{}, + ShardCoordinator: shardCoordinator, + NodesCoordinator: nodesCoordinator, + SpecialAddressHandler: specialAddressHandler, + Uint64Converter: &mock.Uint64ByteSliceConverterMock{}, + StartHeaders: genesisBlocks, + RequestHandler: &mock.RequestHandlerMock{}, + Core: &mock.ServiceContainerMock{}, }, DataPool: tdp, BlocksTracker: &mock.BlocksTrackerMock{}, @@ -83,6 +91,8 @@ func NewMetaProcessorBasicSingleShard(mdp dataRetriever.MetaPoolsHolder, genesis mdp, &mock.ForkDetectorMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.HasherStub{}, &mock.MarshalizerMock{}, &mock.ChainStorerMock{}, diff --git a/process/block/interceptedBlockHeader.go b/process/block/interceptedBlockHeader.go index 5af3b9aea39..b1493158f5b 100644 --- a/process/block/interceptedBlockHeader.go +++ b/process/block/interceptedBlockHeader.go @@ -1,8 +1,11 @@ package block import ( + "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/crypto" "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/sharding" ) @@ -11,21 +14,27 @@ import ( // It implements Newer and Hashed interfaces type InterceptedHeader struct { *block.Header - multiSigVerifier crypto.MultiSigVerifier - chronologyValidator process.ChronologyValidator - hash []byte + multiSigVerifier crypto.MultiSigVerifier + hash []byte + nodesCoordinator sharding.NodesCoordinator + marshalizer marshal.Marshalizer + hasher hashing.Hasher } // NewInterceptedHeader creates a new instance of InterceptedHeader struct func NewInterceptedHeader( multiSigVerifier crypto.MultiSigVerifier, - chronologyValidator process.ChronologyValidator, + nodesCoordinator sharding.NodesCoordinator, + marshalizer marshal.Marshalizer, + hasher hashing.Hasher, ) *InterceptedHeader { return &InterceptedHeader{ - Header: &block.Header{}, - multiSigVerifier: multiSigVerifier, - chronologyValidator: chronologyValidator, + Header: &block.Header{}, + multiSigVerifier: multiSigVerifier, + nodesCoordinator: nodesCoordinator, + marshalizer: marshalizer, + hasher: hasher, } } @@ 
-61,7 +70,7 @@ func (inHdr *InterceptedHeader) IntegrityAndValidity(coordinator sharding.Coordi return err } - return inHdr.validityCheck() + return nil } // Integrity checks the integrity of the state block wrapper @@ -106,25 +115,49 @@ func (inHdr *InterceptedHeader) Integrity(coordinator sharding.Coordinator) erro } } -func (inHdr *InterceptedHeader) validityCheck() error { - if inHdr.chronologyValidator == nil { - return process.ErrNilChronologyValidator +// VerifySig verifies the intercepted Header block signature +func (inHdr *InterceptedHeader) VerifySig() error { + randSeed := inHdr.GetPrevRandSeed() + bitmap := inHdr.GetPubKeysBitmap() + + if len(bitmap) == 0 { + return process.ErrNilPubKeysBitmap } - return inHdr.chronologyValidator.ValidateReceivedBlock( - inHdr.ShardId, - inHdr.Epoch, - inHdr.Nonce, - inHdr.Round, - ) -} + if bitmap[0]&1 == 0 { + return process.ErrBlockProposerSignatureMissing -// VerifySig verifies a signature -func (inHdr *InterceptedHeader) VerifySig() error { - // TODO: Check block signature after multisig will be implemented - // TODO: the interceptors do not have access yet to consensus group selection to validate multisigs - // TODO: verify that the block proposer is among the signers and in the bitmap - return nil + } + + consensusPubKeys, err := inHdr.nodesCoordinator.GetValidatorsPublicKeys(randSeed, inHdr.Round, inHdr.ShardId) + if err != nil { + return err + } + + verifier, err := inHdr.multiSigVerifier.Create(consensusPubKeys, 0) + if err != nil { + return err + } + + err = verifier.SetAggregatedSig(inHdr.Signature) + if err != nil { + return err + } + + // get marshalled block header without signature and bitmap + // as this is the message that was signed + headerCopy := *inHdr.Header + headerCopy.Signature = nil + headerCopy.PubKeysBitmap = nil + + hash, err := core.CalculateHash(inHdr.marshalizer, inHdr.hasher, headerCopy) + if err != nil { + return err + } + + err = verifier.Verify(hash, bitmap) + + return err } func (inHdr *InterceptedHeader) validatePeerBlock() error { diff --git a/process/block/interceptedBlockHeader_test.go b/process/block/interceptedBlockHeader_test.go index 92b867e4ccc..dc99d3b8ea5 100644 --- a/process/block/interceptedBlockHeader_test.go +++ b/process/block/interceptedBlockHeader_test.go @@ -11,13 +11,12 @@ import ( ) func createTestInterceptedHeader() *block.InterceptedHeader { + return block.NewInterceptedHeader( mock.NewMultiSigner(), - &mock.ChronologyValidatorStub{ - ValidateReceivedBlockCalled: func(shardID uint32, epoch uint32, nonce uint64, round uint64) error { - return nil - }, - }, + &mock.NodesCoordinatorMock{}, + &mock.MarshalizerMock{Fail: false}, + mock.HasherMock{}, ) } @@ -245,24 +244,6 @@ func TestInterceptedHeader_IntegrityAndValidityIntegrityDoesNotPassShouldErr(t * assert.Equal(t, process.ErrNilPubKeysBitmap, hdr.IntegrityAndValidity(mock.NewOneShardCoordinatorMock())) } -func TestInterceptedHeader_IntegrityAndValidityNilChronologyValidatorShouldErr(t *testing.T) { - t.Parallel() - - hdr := block.NewInterceptedHeader( - mock.NewMultiSigner(), - nil, - ) - hdr.PrevHash = make([]byte, 0) - hdr.PubKeysBitmap = make([]byte, 0) - hdr.BlockBodyType = block2.PeerBlock - hdr.Signature = make([]byte, 0) - hdr.RootHash = make([]byte, 0) - hdr.PrevRandSeed = make([]byte, 0) - hdr.RandSeed = make([]byte, 0) - - assert.Equal(t, process.ErrNilChronologyValidator, hdr.IntegrityAndValidity(mock.NewOneShardCoordinatorMock())) -} - func TestInterceptedHeader_IntegrityAndValidityOkValsShouldWork(t *testing.T) { 
t.Parallel() @@ -283,7 +264,7 @@ func TestInterceptedHeader_VerifySigOkValsShouldWork(t *testing.T) { hdr := createTestInterceptedHeader() hdr.PrevHash = make([]byte, 0) - hdr.PubKeysBitmap = make([]byte, 0) + hdr.PubKeysBitmap = []byte{1} hdr.BlockBodyType = block2.PeerBlock hdr.Signature = make([]byte, 0) hdr.RootHash = make([]byte, 0) diff --git a/process/block/interceptedMetaBlockHeader.go b/process/block/interceptedMetaBlockHeader.go index a73503238ee..b0fbcdf0227 100644 --- a/process/block/interceptedMetaBlockHeader.go +++ b/process/block/interceptedMetaBlockHeader.go @@ -1,8 +1,11 @@ package block import ( + "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/crypto" "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/sharding" ) @@ -11,21 +14,27 @@ import ( // It implements Newer and Hashed interfaces type InterceptedMetaHeader struct { *block.MetaBlock - multiSigVerifier crypto.MultiSigVerifier - chronologyValidator process.ChronologyValidator - hash []byte + multiSigVerifier crypto.MultiSigVerifier + hash []byte + nodesCoordinator sharding.NodesCoordinator + marshalizer marshal.Marshalizer + hasher hashing.Hasher } // NewInterceptedHeader creates a new instance of InterceptedHeader struct func NewInterceptedMetaHeader( multiSigVerifier crypto.MultiSigVerifier, - chronologyValidator process.ChronologyValidator, + nodesCoordinator sharding.NodesCoordinator, + marshalizer marshal.Marshalizer, + hasher hashing.Hasher, ) *InterceptedMetaHeader { return &InterceptedMetaHeader{ - MetaBlock: &block.MetaBlock{}, - multiSigVerifier: multiSigVerifier, - chronologyValidator: chronologyValidator, + MetaBlock: &block.MetaBlock{}, + multiSigVerifier: multiSigVerifier, + nodesCoordinator: nodesCoordinator, + marshalizer: marshalizer, + hasher: hasher, } } @@ -51,7 +60,7 @@ func (imh *InterceptedMetaHeader) IntegrityAndValidity(coordinator sharding.Coor return err } - return imh.validityCheck() + return nil } // Integrity checks the integrity of the state block wrapper @@ -98,30 +107,54 @@ func (imh *InterceptedMetaHeader) Integrity(coordinator sharding.Coordinator) er return nil } -func (imh *InterceptedMetaHeader) validityCheck() error { - if imh.chronologyValidator == nil || imh.chronologyValidator.IsInterfaceNil() { - return process.ErrNilChronologyValidator +// VerifySig verifies a signature +func (imh *InterceptedMetaHeader) VerifySig() error { + randSeed := imh.GetPrevRandSeed() + bitmap := imh.GetPubKeysBitmap() + + if len(bitmap) == 0 { + return process.ErrNilPubKeysBitmap } - return imh.chronologyValidator.ValidateReceivedBlock( - sharding.MetachainShardId, - imh.Epoch, - imh.Nonce, - imh.Round, - ) -} + if bitmap[0]&1 == 0 { + return process.ErrBlockProposerSignatureMissing -// VerifySig verifies a signature -func (imh *InterceptedMetaHeader) VerifySig() error { - // TODO: Check block signature after multisig will be implemented - // TODO: the interceptors do not have access yet to consensus group selection to validate multisigs - // TODO: verify that the block proposer is among the signers - return nil + } + + consensusPubKeys, err := imh.nodesCoordinator.GetValidatorsPublicKeys(randSeed, imh.Round, imh.GetShardID()) + if err != nil { + return err + } + + verifier, err := imh.multiSigVerifier.Create(consensusPubKeys, 0) + if err != nil { + return err + } + + err = 
verifier.SetAggregatedSig(imh.Signature) + if err != nil { + return err + } + + // get marshalled block header without signature and bitmap + // as this is the message that was signed + headerCopy := *imh.MetaBlock + headerCopy.Signature = nil + headerCopy.PubKeysBitmap = nil + + hash, err := core.CalculateHash(imh.marshalizer, imh.hasher, headerCopy) + if err != nil { + return err + } + + err = verifier.Verify(hash, bitmap) + + return err } // IsInterfaceNil returns true if there is no value under the interface -func (imh *InterceptedMetaHeader) IsInterfaceNil() bool { - if imh == nil { +func (mb *InterceptedMetaHeader) IsInterfaceNil() bool { + if mb == nil { return true } return false diff --git a/process/block/interceptedMetaBlockHeader_test.go b/process/block/interceptedMetaBlockHeader_test.go index fdb2bc0af63..43dd2810320 100644 --- a/process/block/interceptedMetaBlockHeader_test.go +++ b/process/block/interceptedMetaBlockHeader_test.go @@ -13,11 +13,9 @@ import ( func createTestInterceptedMetaHeader() *block.InterceptedMetaHeader { return block.NewInterceptedMetaHeader( mock.NewMultiSigner(), - &mock.ChronologyValidatorStub{ - ValidateReceivedBlockCalled: func(shardID uint32, epoch uint32, nonce uint64, round uint64) error { - return nil - }, - }, + &mock.NodesCoordinatorMock{}, + &mock.MarshalizerMock{Fail: false}, + mock.HasherMock{}, ) } @@ -283,23 +281,6 @@ func TestInterceptedMetaHeader_IntegrityAndValidityIntegrityDoesNotPassShouldErr assert.Equal(t, process.ErrNilPubKeysBitmap, hdr.IntegrityAndValidity(mock.NewOneShardCoordinatorMock())) } -func TestInterceptedMetaHeader_IntegrityAndValidityNilChronologyValidatorShouldErr(t *testing.T) { - t.Parallel() - - hdr := block.NewInterceptedMetaHeader( - mock.NewMultiSigner(), - nil, - ) - hdr.PrevHash = make([]byte, 0) - hdr.PubKeysBitmap = make([]byte, 0) - hdr.Signature = make([]byte, 0) - hdr.RootHash = make([]byte, 0) - hdr.PrevRandSeed = make([]byte, 0) - hdr.RandSeed = make([]byte, 0) - - assert.Equal(t, process.ErrNilChronologyValidator, hdr.IntegrityAndValidity(mock.NewOneShardCoordinatorMock())) -} - func TestInterceptedMetaHeader_IntegrityAndValidityOkValsShouldWork(t *testing.T) { t.Parallel() @@ -319,7 +300,7 @@ func TestInterceptedMetaHeader_VerifySigOkValsShouldWork(t *testing.T) { hdr := createTestInterceptedMetaHeader() hdr.PrevHash = make([]byte, 0) - hdr.PubKeysBitmap = make([]byte, 0) + hdr.PubKeysBitmap = []byte{1, 0, 0} hdr.Signature = make([]byte, 0) hdr.RootHash = make([]byte, 0) hdr.PrevRandSeed = make([]byte, 0) diff --git a/process/block/interceptors/headerInterceptor.go b/process/block/interceptors/headerInterceptor.go index 6f41648e07f..10274db0c17 100644 --- a/process/block/interceptors/headerInterceptor.go +++ b/process/block/interceptors/headerInterceptor.go @@ -15,15 +15,15 @@ import ( // HeaderInterceptor represents an interceptor used for block headers type HeaderInterceptor struct { - marshalizer marshal.Marshalizer - storer storage.Storer - multiSigVerifier crypto.MultiSigVerifier - hasher hashing.Hasher - chronologyValidator process.ChronologyValidator - headers storage.Cacher - headersNonces dataRetriever.Uint64SyncMapCacher - headerValidator process.HeaderValidator - shardCoordinator sharding.Coordinator + marshalizer marshal.Marshalizer + storer storage.Storer + multiSigVerifier crypto.MultiSigVerifier + hasher hashing.Hasher + headers storage.Cacher + headersNonces dataRetriever.Uint64SyncMapCacher + headerValidator process.HeaderValidator + shardCoordinator sharding.Coordinator + 
nodesCoordinator sharding.NodesCoordinator } // NewHeaderInterceptor hooks a new interceptor for block headers @@ -36,7 +36,7 @@ func NewHeaderInterceptor( multiSigVerifier crypto.MultiSigVerifier, hasher hashing.Hasher, shardCoordinator sharding.Coordinator, - chronologyValidator process.ChronologyValidator, + nodesCoordinator sharding.NodesCoordinator, ) (*HeaderInterceptor, error) { if marshalizer == nil || marshalizer.IsInterfaceNil() { @@ -60,19 +60,19 @@ func NewHeaderInterceptor( if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { return nil, process.ErrNilShardCoordinator } - if chronologyValidator == nil || chronologyValidator.IsInterfaceNil() { - return nil, process.ErrNilChronologyValidator + if nodesCoordinator == nil || nodesCoordinator.IsInterfaceNil() { + return nil, process.ErrNilNodesCoordinator } hdrInterceptor := &HeaderInterceptor{ - marshalizer: marshalizer, - multiSigVerifier: multiSigVerifier, - hasher: hasher, - shardCoordinator: shardCoordinator, - chronologyValidator: chronologyValidator, - headers: headers, - headersNonces: headersNonces, - headerValidator: headerValidator, + marshalizer: marshalizer, + multiSigVerifier: multiSigVerifier, + hasher: hasher, + shardCoordinator: shardCoordinator, + headers: headers, + headersNonces: headersNonces, + headerValidator: headerValidator, + nodesCoordinator: nodesCoordinator, } return hdrInterceptor, nil @@ -88,7 +88,7 @@ func (hi *HeaderInterceptor) ParseReceivedMessage(message p2p.MessageP2P) (*bloc return nil, process.ErrNilDataToProcess } - hdrIntercepted := block.NewInterceptedHeader(hi.multiSigVerifier, hi.chronologyValidator) + hdrIntercepted := block.NewInterceptedHeader(hi.multiSigVerifier, hi.nodesCoordinator, hi.marshalizer, hi.hasher) err := hi.marshalizer.Unmarshal(hdrIntercepted, message.Data()) if err != nil { return nil, err diff --git a/process/block/interceptors/headerInterceptor_test.go b/process/block/interceptors/headerInterceptor_test.go index 7e5d84b9e9f..1740a69def5 100644 --- a/process/block/interceptors/headerInterceptor_test.go +++ b/process/block/interceptors/headerInterceptor_test.go @@ -3,6 +3,8 @@ package interceptors_test import ( "bytes" "errors" + "fmt" + "math/big" "sync" "testing" "time" @@ -14,11 +16,38 @@ import ( "github.com/ElrondNetwork/elrond-go/process/block" "github.com/ElrondNetwork/elrond-go/process/block/interceptors" "github.com/ElrondNetwork/elrond-go/process/mock" + "github.com/ElrondNetwork/elrond-go/sharding" "github.com/stretchr/testify/assert" ) var durTimeout = time.Second +func generateValidatorsMap(shardSize, metachainSize, nbShards uint32) map[uint32][]sharding.Validator { + nodes := make(map[uint32][]sharding.Validator) + + for shard := uint32(0); shard < nbShards; shard++ { + shardNodes := make([]sharding.Validator, 0) + for valIdx := uint32(0); valIdx < shardSize; valIdx++ { + pk := fmt.Sprintf("pubKey_sh%d_node%d", shard, valIdx) + addr := fmt.Sprintf("address_sh%d_node%d", shard, valIdx) + v, _ := sharding.NewValidator(big.NewInt(0), 1, []byte(pk), []byte(addr)) + shardNodes = append(shardNodes, v) + } + nodes[shard] = shardNodes + } + + metaNodes := make([]sharding.Validator, 0) + for mValIdx := uint32(0); mValIdx < metachainSize; mValIdx++ { + pk := fmt.Sprintf("pubKey_meta_node%d", mValIdx) + addr := fmt.Sprintf("address_meta_node%d", mValIdx) + v, _ := sharding.NewValidator(big.NewInt(0), 1, []byte(pk), []byte(addr)) + metaNodes = append(metaNodes, v) + } + nodes[sharding.MetachainShardId] = metaNodes + + return nodes +} + //------- 
NewHeaderInterceptor func TestNewHeaderInterceptor_NilMarshalizerShouldErr(t *testing.T) { @@ -36,7 +65,7 @@ func TestNewHeaderInterceptor_NilMarshalizerShouldErr(t *testing.T) { mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), - &mock.ChronologyValidatorStub{}, + mock.NewNodesCoordinatorMock(), ) assert.Equal(t, process.ErrNilMarshalizer, err) @@ -57,7 +86,7 @@ func TestNewHeaderInterceptor_NilHeadersShouldErr(t *testing.T) { mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), - &mock.ChronologyValidatorStub{}, + mock.NewNodesCoordinatorMock(), ) assert.Equal(t, process.ErrNilHeadersDataPool, err) @@ -78,7 +107,7 @@ func TestNewHeaderInterceptor_NilHeadersNoncesShouldErr(t *testing.T) { mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), - &mock.ChronologyValidatorStub{}, + mock.NewNodesCoordinatorMock(), ) assert.Equal(t, process.ErrNilHeadersNoncesDataPool, err) @@ -99,7 +128,7 @@ func TestNewHeaderInterceptor_NilHeaderHandlerValidatorShouldErr(t *testing.T) { mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), - &mock.ChronologyValidatorStub{}, + &mock.NodesCoordinatorMock{}, ) assert.Equal(t, process.ErrNilHeaderHandlerValidator, err) @@ -121,7 +150,7 @@ func TestNewHeaderInterceptor_NilMultiSignerShouldErr(t *testing.T) { nil, mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), - &mock.ChronologyValidatorStub{}, + &mock.NodesCoordinatorMock{}, ) assert.Equal(t, process.ErrNilMultiSigVerifier, err) @@ -143,7 +172,7 @@ func TestNewHeaderInterceptor_NilHasherShouldErr(t *testing.T) { mock.NewMultiSigner(), nil, mock.NewOneShardCoordinatorMock(), - &mock.ChronologyValidatorStub{}, + &mock.NodesCoordinatorMock{}, ) assert.Equal(t, process.ErrNilHasher, err) @@ -165,7 +194,7 @@ func TestNewHeaderInterceptor_NilShardCoordinatorShouldErr(t *testing.T) { mock.NewMultiSigner(), mock.HasherMock{}, nil, - &mock.ChronologyValidatorStub{}, + &mock.NodesCoordinatorMock{}, ) assert.Equal(t, process.ErrNilShardCoordinator, err) @@ -190,7 +219,7 @@ func TestNewHeaderInterceptor_NilChronologyValidatorShouldErr(t *testing.T) { nil, ) - assert.Equal(t, process.ErrNilChronologyValidator, err) + assert.Equal(t, process.ErrNilNodesCoordinator, err) assert.Nil(t, hi) } @@ -209,7 +238,7 @@ func TestNewHeaderInterceptor_OkValsShouldWork(t *testing.T) { mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), - &mock.ChronologyValidatorStub{}, + mock.NewNodesCoordinatorMock(), ) assert.Nil(t, err) @@ -234,7 +263,7 @@ func TestHeaderInterceptor_ParseReceivedMessageNilMessageShouldErr(t *testing.T) mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), - &mock.ChronologyValidatorStub{}, + &mock.NodesCoordinatorMock{}, ) hdr, err := hi.ParseReceivedMessage(nil) @@ -258,7 +287,7 @@ func TestHeaderInterceptor_ParseReceivedMessageNilDataToProcessShouldErr(t *test mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), - &mock.ChronologyValidatorStub{}, + &mock.NodesCoordinatorMock{}, ) msg := &mock.P2PMessageMock{} @@ -289,7 +318,7 @@ func TestHeaderInterceptor_ParseReceivedMessageMarshalizerErrorsAtUnmarshalingSh mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), - &mock.ChronologyValidatorStub{}, + &mock.NodesCoordinatorMock{}, ) msg := &mock.P2PMessageMock{ @@ -309,11 +338,9 @@ func TestHeaderInterceptor_ParseReceivedMessageSanityCheckFailedShouldErr(t *tes multisigner := mock.NewMultiSigner() headers := &mock.CacherStub{} 
headersNonces := &mock.Uint64SyncMapCacherStub{} - chronologyValidator := &mock.ChronologyValidatorStub{ - ValidateReceivedBlockCalled: func(shardID uint32, epoch uint32, nonce uint64, round uint64) error { - return nil - }, - } + + nodesCoordinator := &mock.NodesCoordinatorMock{} + hasher := mock.HasherMock{} hi, _ := interceptors.NewHeaderInterceptor( marshalizer, @@ -321,12 +348,12 @@ func TestHeaderInterceptor_ParseReceivedMessageSanityCheckFailedShouldErr(t *tes headersNonces, headerValidator, multisigner, - mock.HasherMock{}, + hasher, mock.NewOneShardCoordinatorMock(), - chronologyValidator, + nodesCoordinator, ) - hdr := block.NewInterceptedHeader(multisigner, chronologyValidator) + hdr := block.NewInterceptedHeader(multisigner, nodesCoordinator, marshalizer, hasher) buff, _ := marshalizer.Marshal(hdr) msg := &mock.P2PMessageMock{ DataField: buff, @@ -352,11 +379,9 @@ func TestHeaderInterceptor_ParseReceivedMessageValsOkShouldWork(t *testing.T) { multisigner := mock.NewMultiSigner() headers := &mock.CacherStub{} headersNonces := &mock.Uint64SyncMapCacherStub{} - chronologyValidator := &mock.ChronologyValidatorStub{ - ValidateReceivedBlockCalled: func(shardID uint32, epoch uint32, nonce uint64, round uint64) error { - return nil - }, - } + + nodesCoordinator := &mock.NodesCoordinatorMock{} + hasher := mock.HasherMock{} hi, _ := interceptors.NewHeaderInterceptor( marshalizer, @@ -364,16 +389,16 @@ func TestHeaderInterceptor_ParseReceivedMessageValsOkShouldWork(t *testing.T) { headersNonces, headerValidator, multisigner, - mock.HasherMock{}, + hasher, mock.NewOneShardCoordinatorMock(), - chronologyValidator, + nodesCoordinator, ) - hdr := block.NewInterceptedHeader(multisigner, chronologyValidator) + hdr := block.NewInterceptedHeader(multisigner, nodesCoordinator, marshalizer, hasher) hdr.Nonce = testedNonce hdr.ShardId = 0 hdr.PrevHash = make([]byte, 0) - hdr.PubKeysBitmap = make([]byte, 0) + hdr.PubKeysBitmap = []byte{1} hdr.BlockBodyType = dataBlock.TxBlock hdr.Signature = make([]byte, 0) hdr.SetHash([]byte("aaa")) @@ -414,7 +439,7 @@ func TestHeaderInterceptor_ProcessReceivedMessageNilMessageShouldErr(t *testing. 
mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), - &mock.ChronologyValidatorStub{}, + mock.NewNodesCoordinatorMock(), ) assert.Equal(t, process.ErrNilMessage, hi.ProcessReceivedMessage(nil)) @@ -430,12 +455,8 @@ func TestHeaderInterceptor_ProcessReceivedMessageValsOkShouldWork(t *testing.T) marshalizer := &mock.MarshalizerMock{} headers := &mock.CacherStub{} + hasher := mock.HasherMock{} multisigner := mock.NewMultiSigner() - chronologyValidator := &mock.ChronologyValidatorStub{ - ValidateReceivedBlockCalled: func(shardID uint32, epoch uint32, nonce uint64, round uint64) error { - return nil - }, - } headersNonces := &mock.Uint64SyncMapCacherStub{} headersNonces.MergeCalled = func(nonce uint64, src dataRetriever.ShardIdHashMap) { if nonce == testedNonce { @@ -449,6 +470,10 @@ func TestHeaderInterceptor_ProcessReceivedMessageValsOkShouldWork(t *testing.T) }, } + nodesCoordinator := mock.NewNodesCoordinatorMock() + nodes := generateValidatorsMap(3, 3, 1) + nodesCoordinator.SetNodesPerShards(nodes) + hi, _ := interceptors.NewHeaderInterceptor( marshalizer, headers, @@ -457,14 +482,14 @@ func TestHeaderInterceptor_ProcessReceivedMessageValsOkShouldWork(t *testing.T) multisigner, mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), - chronologyValidator, + nodesCoordinator, ) - hdr := block.NewInterceptedHeader(multisigner, chronologyValidator) + hdr := block.NewInterceptedHeader(multisigner, nodesCoordinator, marshalizer, hasher) hdr.Nonce = testedNonce hdr.ShardId = 0 hdr.PrevHash = make([]byte, 0) - hdr.PubKeysBitmap = make([]byte, 0) + hdr.PubKeysBitmap = []byte{1} hdr.BlockBodyType = dataBlock.TxBlock hdr.Signature = make([]byte, 0) hdr.SetHash([]byte("aaa")) @@ -507,11 +532,6 @@ func TestHeaderInterceptor_ProcessReceivedMessageTestHdrNonces(t *testing.T) { testedNonce := uint64(67) headers := &mock.CacherStub{} multisigner := mock.NewMultiSigner() - chronologyValidator := &mock.ChronologyValidatorStub{ - ValidateReceivedBlockCalled: func(shardID uint32, epoch uint32, nonce uint64, round uint64) error { - return nil - }, - } headerValidator := &mock.HeaderValidatorStub{ IsHeaderValidForProcessingCalled: func(headerHandler data.HeaderHandler) bool { @@ -520,6 +540,8 @@ func TestHeaderInterceptor_ProcessReceivedMessageTestHdrNonces(t *testing.T) { } hdrsNonces := &mock.Uint64SyncMapCacherStub{} + nodesCoordinator := &mock.NodesCoordinatorMock{} + hasher := mock.HasherMock{} hi, _ := interceptors.NewHeaderInterceptor( marshalizer, @@ -527,16 +549,16 @@ func TestHeaderInterceptor_ProcessReceivedMessageTestHdrNonces(t *testing.T) { hdrsNonces, headerValidator, multisigner, - mock.HasherMock{}, - mock.NewMultiShardsCoordinatorMock(2), - chronologyValidator, + hasher, + mock.NewOneShardCoordinatorMock(), + nodesCoordinator, ) - hdr := block.NewInterceptedHeader(multisigner, chronologyValidator) + hdr := block.NewInterceptedHeader(multisigner, nodesCoordinator, marshalizer, hasher) hdr.Nonce = testedNonce hdr.ShardId = 0 hdr.PrevHash = make([]byte, 0) - hdr.PubKeysBitmap = make([]byte, 0) + hdr.PubKeysBitmap = []byte{1} hdr.BlockBodyType = dataBlock.TxBlock hdr.Signature = make([]byte, 0) hdr.SetHash([]byte("aaa")) @@ -576,12 +598,8 @@ func TestHeaderInterceptor_ProcessReceivedMessageIsNotValidShouldNotAdd(t *testi marshalizer := &mock.MarshalizerMock{} headers := &mock.CacherStub{} + hasher := mock.HasherMock{} multisigner := mock.NewMultiSigner() - chronologyValidator := &mock.ChronologyValidatorStub{ - ValidateReceivedBlockCalled: func(shardID uint32, epoch uint32, 
nonce uint64, round uint64) error { - return nil - }, - } headersNonces := &mock.Uint64SyncMapCacherStub{} headersNonces.MergeCalled = func(nonce uint64, src dataRetriever.ShardIdHashMap) { if nonce == testedNonce { @@ -595,6 +613,10 @@ func TestHeaderInterceptor_ProcessReceivedMessageIsNotValidShouldNotAdd(t *testi }, } + nodesCoordinator := mock.NewNodesCoordinatorMock() + nodes := generateValidatorsMap(3, 3, 1) + nodesCoordinator.SetNodesPerShards(nodes) + hi, _ := interceptors.NewHeaderInterceptor( marshalizer, headers, @@ -603,14 +625,14 @@ func TestHeaderInterceptor_ProcessReceivedMessageIsNotValidShouldNotAdd(t *testi multisigner, mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), - chronologyValidator, + nodesCoordinator, ) - hdr := block.NewInterceptedHeader(multisigner, chronologyValidator) + hdr := block.NewInterceptedHeader(multisigner, nodesCoordinator, marshalizer, hasher) hdr.Nonce = testedNonce hdr.ShardId = 0 hdr.PrevHash = make([]byte, 0) - hdr.PubKeysBitmap = make([]byte, 0) + hdr.PubKeysBitmap = []byte{1} hdr.BlockBodyType = dataBlock.TxBlock hdr.Signature = make([]byte, 0) hdr.RootHash = make([]byte, 0) @@ -648,12 +670,8 @@ func TestHeaderInterceptor_ProcessReceivedMessageNotForCurrentShardShouldNotAdd( marshalizer := &mock.MarshalizerMock{} headers := &mock.CacherStub{} + hasher := mock.HasherMock{} multisigner := mock.NewMultiSigner() - chronologyValidator := &mock.ChronologyValidatorStub{ - ValidateReceivedBlockCalled: func(shardID uint32, epoch uint32, nonce uint64, round uint64) error { - return nil - }, - } headersNonces := &mock.Uint64SyncMapCacherStub{} headersNonces.MergeCalled = func(nonce uint64, src dataRetriever.ShardIdHashMap) { if nonce == testedNonce { @@ -670,6 +688,16 @@ func TestHeaderInterceptor_ProcessReceivedMessageNotForCurrentShardShouldNotAdd( shardCoordinator.CurrentShard = 2 shardCoordinator.SetNoShards(5) + nodesCoordinator := &mock.NodesCoordinatorMock{ + NbShards: 5, + ShardConsensusSize: 1, + MetaConsensusSize: 1, + ShardId: 2, + } + + nodes := generateValidatorsMap(3, 3, 5) + nodesCoordinator.SetNodesPerShards(nodes) + hi, _ := interceptors.NewHeaderInterceptor( marshalizer, headers, @@ -678,14 +706,14 @@ func TestHeaderInterceptor_ProcessReceivedMessageNotForCurrentShardShouldNotAdd( multisigner, mock.HasherMock{}, shardCoordinator, - chronologyValidator, + nodesCoordinator, ) - hdr := block.NewInterceptedHeader(multisigner, chronologyValidator) + hdr := block.NewInterceptedHeader(multisigner, nodesCoordinator, marshalizer, hasher) hdr.Nonce = testedNonce hdr.ShardId = 0 hdr.PrevHash = make([]byte, 0) - hdr.PubKeysBitmap = make([]byte, 0) + hdr.PubKeysBitmap = []byte{1} hdr.BlockBodyType = dataBlock.TxBlock hdr.Signature = make([]byte, 0) hdr.RootHash = make([]byte, 0) @@ -708,5 +736,4 @@ func TestHeaderInterceptor_ProcessReceivedMessageNotForCurrentShardShouldNotAdd( } assert.Nil(t, hi.ProcessReceivedMessage(msg)) - } diff --git a/process/block/interceptors/metachainHeaderInterceptor.go b/process/block/interceptors/metachainHeaderInterceptor.go index 69979922869..af4f382b350 100644 --- a/process/block/interceptors/metachainHeaderInterceptor.go +++ b/process/block/interceptors/metachainHeaderInterceptor.go @@ -23,7 +23,7 @@ type MetachainHeaderInterceptor struct { multiSigVerifier crypto.MultiSigVerifier hasher hashing.Hasher shardCoordinator sharding.Coordinator - chronologyValidator process.ChronologyValidator + nodesCoordinator sharding.NodesCoordinator } // NewMetachainHeaderInterceptor hooks a new interceptor for metachain block 
headers @@ -36,7 +36,7 @@ func NewMetachainHeaderInterceptor( multiSigVerifier crypto.MultiSigVerifier, hasher hashing.Hasher, shardCoordinator sharding.Coordinator, - chronologyValidator process.ChronologyValidator, + nodesCoordinator sharding.NodesCoordinator, ) (*MetachainHeaderInterceptor, error) { if marshalizer == nil || marshalizer.IsInterfaceNil() { @@ -60,8 +60,8 @@ func NewMetachainHeaderInterceptor( if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { return nil, process.ErrNilShardCoordinator } - if chronologyValidator == nil || chronologyValidator.IsInterfaceNil() { - return nil, process.ErrNilChronologyValidator + if nodesCoordinator == nil || nodesCoordinator.IsInterfaceNil() { + return nil, process.ErrNilNodesCoordinator } return &MetachainHeaderInterceptor{ @@ -72,7 +72,7 @@ func NewMetachainHeaderInterceptor( multiSigVerifier: multiSigVerifier, hasher: hasher, shardCoordinator: shardCoordinator, - chronologyValidator: chronologyValidator, + nodesCoordinator: nodesCoordinator, metachainHeadersNonces: metachainHeadersNonces, }, nil } @@ -85,7 +85,12 @@ func (mhi *MetachainHeaderInterceptor) ProcessReceivedMessage(message p2p.Messag return err } - metaHdrIntercepted := block.NewInterceptedMetaHeader(mhi.multiSigVerifier, mhi.chronologyValidator) + metaHdrIntercepted := block.NewInterceptedMetaHeader( + mhi.multiSigVerifier, + mhi.nodesCoordinator, + mhi.marshalizer, + mhi.hasher, + ) err = mhi.marshalizer.Unmarshal(metaHdrIntercepted, message.Data()) if err != nil { return err diff --git a/process/block/interceptors/metachainHeaderInterceptor_test.go b/process/block/interceptors/metachainHeaderInterceptor_test.go index ea36ba5be05..7803d21b516 100644 --- a/process/block/interceptors/metachainHeaderInterceptor_test.go +++ b/process/block/interceptors/metachainHeaderInterceptor_test.go @@ -32,7 +32,7 @@ func TestNewMetachainHeaderInterceptor_NilMarshalizerShouldErr(t *testing.T) { mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), - &mock.ChronologyValidatorStub{}, + mock.NewNodesCoordinatorMock(), ) assert.Equal(t, process.ErrNilMarshalizer, err) @@ -52,7 +52,7 @@ func TestNewMetachainHeaderInterceptor_NilMetachainHeadersShouldErr(t *testing.T mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), - &mock.ChronologyValidatorStub{}, + mock.NewNodesCoordinatorMock(), ) assert.Equal(t, process.ErrNilMetaHeadersDataPool, err) @@ -72,7 +72,7 @@ func TestNewMetachainHeaderInterceptor_NilMetachainHeadersNoncesShouldErr(t *tes mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), - &mock.ChronologyValidatorStub{}, + mock.NewNodesCoordinatorMock(), ) assert.Equal(t, process.ErrNilMetaHeadersNoncesDataPool, err) @@ -92,7 +92,7 @@ func TestNewMetachainHeaderInterceptor_NilMetaHeaderValidatorShouldErr(t *testin mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), - &mock.ChronologyValidatorStub{}, + mock.NewNodesCoordinatorMock(), ) assert.Equal(t, process.ErrNilHeaderHandlerValidator, err) @@ -113,7 +113,7 @@ func TestNewMetachainHeaderInterceptor_NilMultiSignerShouldErr(t *testing.T) { nil, mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), - &mock.ChronologyValidatorStub{}, + mock.NewNodesCoordinatorMock(), ) assert.Nil(t, mhi) @@ -134,7 +134,7 @@ func TestNewMetachainHeaderInterceptor_NilHasherShouldErr(t *testing.T) { mock.NewMultiSigner(), nil, mock.NewOneShardCoordinatorMock(), - &mock.ChronologyValidatorStub{}, + mock.NewNodesCoordinatorMock(), ) assert.Equal(t, 
process.ErrNilHasher, err) @@ -155,13 +155,34 @@ func TestNewMetachainHeaderInterceptor_NilShardCoordinatorShouldErr(t *testing.T mock.NewMultiSigner(), mock.HasherMock{}, nil, - &mock.ChronologyValidatorStub{}, + mock.NewNodesCoordinatorMock(), ) assert.Equal(t, process.ErrNilShardCoordinator, err) assert.Nil(t, mhi) } +func TestNewMetachainHeaderInterceptor_NilNodesCoordinatorShouldErr(t *testing.T) { + t.Parallel() + + metachainHeaders := &mock.CacherStub{} + headerValidator := &mock.HeaderValidatorStub{} + + mhi, err := interceptors.NewMetachainHeaderInterceptor( + &mock.MarshalizerMock{}, + metachainHeaders, + &mock.Uint64SyncMapCacherStub{}, + headerValidator, + mock.NewMultiSigner(), + mock.HasherMock{}, + mock.NewOneShardCoordinatorMock(), + nil, + ) + + assert.Equal(t, process.ErrNilNodesCoordinator, err) + assert.Nil(t, mhi) +} + func TestNewMetachainHeaderInterceptor_OkValsShouldWork(t *testing.T) { t.Parallel() @@ -176,7 +197,7 @@ func TestNewMetachainHeaderInterceptor_OkValsShouldWork(t *testing.T) { mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), - &mock.ChronologyValidatorStub{}, + mock.NewNodesCoordinatorMock(), ) assert.Nil(t, err) @@ -199,7 +220,7 @@ func TestMetachainHeaderInterceptor_ProcessReceivedMessageNilMessageShouldErr(t mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), - &mock.ChronologyValidatorStub{}, + mock.NewNodesCoordinatorMock(), ) assert.Equal(t, process.ErrNilMessage, mhi.ProcessReceivedMessage(nil)) @@ -219,7 +240,7 @@ func TestMetachainHeaderInterceptor_ProcessReceivedMessageNilDataToProcessShould mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), - &mock.ChronologyValidatorStub{}, + mock.NewNodesCoordinatorMock(), ) msg := &mock.P2PMessageMock{} @@ -246,7 +267,7 @@ func TestMetachainHeaderInterceptor_ProcessReceivedMessageMarshalizerErrorsAtUnm mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), - &mock.ChronologyValidatorStub{}, + mock.NewNodesCoordinatorMock(), ) msg := &mock.P2PMessageMock{ @@ -262,24 +283,22 @@ func TestMetachainHeaderInterceptor_ProcessReceivedMessageSanityCheckFailedShoul metachainHeaders := &mock.CacherStub{} headerValidator := &mock.HeaderValidatorStub{} marshalizer := &mock.MarshalizerMock{} + hasher := mock.HasherMock{} multisigner := mock.NewMultiSigner() - chronologyValidator := &mock.ChronologyValidatorStub{ - ValidateReceivedBlockCalled: func(shardID uint32, epoch uint32, nonce uint64, round uint64) error { - return nil - }, - } + nodesCoordinator := mock.NewNodesCoordinatorMock() + mhi, _ := interceptors.NewMetachainHeaderInterceptor( marshalizer, metachainHeaders, &mock.Uint64SyncMapCacherStub{}, headerValidator, multisigner, - mock.HasherMock{}, + hasher, mock.NewOneShardCoordinatorMock(), - chronologyValidator, + nodesCoordinator, ) - hdr := block.NewInterceptedMetaHeader(multisigner, chronologyValidator) + hdr := block.NewInterceptedMetaHeader(multisigner, nodesCoordinator, marshalizer, hasher) buff, _ := marshalizer.Marshal(hdr) msg := &mock.P2PMessageMock{ DataField: buff, @@ -292,6 +311,7 @@ func TestMetachainHeaderInterceptor_ProcessReceivedMessageValsOkShouldWork(t *te t.Parallel() marshalizer := &mock.MarshalizerMock{} + hasher := mock.HasherMock{} chanDone := make(chan struct{}, 1) testedNonce := uint64(67) metachainHeaders := &mock.CacherStub{} @@ -302,26 +322,23 @@ func TestMetachainHeaderInterceptor_ProcessReceivedMessageValsOkShouldWork(t *te }, } multisigner := mock.NewMultiSigner() - 
chronologyValidator := &mock.ChronologyValidatorStub{ - ValidateReceivedBlockCalled: func(shardID uint32, epoch uint32, nonce uint64, round uint64) error { - return nil - }, - } + nodesCoordinator := &mock.NodesCoordinatorMock{} + mhi, _ := interceptors.NewMetachainHeaderInterceptor( marshalizer, metachainHeaders, metachainHeadersNonces, headerValidator, multisigner, - mock.HasherMock{}, + hasher, mock.NewOneShardCoordinatorMock(), - chronologyValidator, + nodesCoordinator, ) - hdr := block.NewInterceptedMetaHeader(multisigner, chronologyValidator) + hdr := block.NewInterceptedMetaHeader(multisigner, nodesCoordinator, marshalizer, hasher) hdr.Nonce = testedNonce hdr.PrevHash = make([]byte, 0) - hdr.PubKeysBitmap = make([]byte, 0) + hdr.PubKeysBitmap = []byte{1, 0, 0} hdr.Signature = make([]byte, 0) hdr.SetHash([]byte("aaa")) hdr.RootHash = make([]byte, 0) @@ -377,14 +394,10 @@ func TestMetachainHeaderInterceptor_ProcessReceivedMessageIsNotValidShouldNotAdd t.Parallel() marshalizer := &mock.MarshalizerMock{} + hasher := mock.HasherMock{} chanDone := make(chan struct{}, 1) testedNonce := uint64(67) multisigner := mock.NewMultiSigner() - chronologyValidator := &mock.ChronologyValidatorStub{ - ValidateReceivedBlockCalled: func(shardID uint32, epoch uint32, nonce uint64, round uint64) error { - return nil - }, - } metachainHeaders := &mock.CacherStub{} metachainHeadersNonces := &mock.Uint64SyncMapCacherStub{} headerValidator := &mock.HeaderValidatorStub{ @@ -392,21 +405,24 @@ func TestMetachainHeaderInterceptor_ProcessReceivedMessageIsNotValidShouldNotAdd return false }, } + + nodesCoordinator := &mock.NodesCoordinatorMock{} + mhi, _ := interceptors.NewMetachainHeaderInterceptor( marshalizer, metachainHeaders, metachainHeadersNonces, headerValidator, multisigner, - mock.HasherMock{}, + hasher, mock.NewOneShardCoordinatorMock(), - chronologyValidator, + nodesCoordinator, ) - hdr := block.NewInterceptedMetaHeader(multisigner, chronologyValidator) + hdr := block.NewInterceptedMetaHeader(multisigner, nodesCoordinator, marshalizer, hasher) hdr.Nonce = testedNonce hdr.PrevHash = make([]byte, 0) - hdr.PubKeysBitmap = make([]byte, 0) + hdr.PubKeysBitmap = []byte{1, 0, 0} hdr.Signature = make([]byte, 0) hdr.RootHash = make([]byte, 0) hdr.SetHash([]byte("aaa")) diff --git a/process/block/metablock.go b/process/block/metablock.go index f3bcd8f448a..c5c49e6ce24 100644 --- a/process/block/metablock.go +++ b/process/block/metablock.go @@ -50,6 +50,8 @@ func NewMetaProcessor( dataPool dataRetriever.MetaPoolsHolder, forkDetector process.ForkDetector, shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, + specialAddressHandler process.SpecialAddressHandler, hasher hashing.Hasher, marshalizer marshal.Marshalizer, store dataRetriever.StorageService, @@ -65,6 +67,8 @@ func NewMetaProcessor( marshalizer, store, shardCoordinator, + nodesCoordinator, + specialAddressHandler, uint64Converter) if err != nil { return nil, err @@ -93,6 +97,8 @@ func NewMetaProcessor( marshalizer: marshalizer, store: store, shardCoordinator: shardCoordinator, + nodesCoordinator: nodesCoordinator, + specialAddressHandler: specialAddressHandler, uint64Converter: uint64Converter, onRequestHeaderHandler: requestHandler.RequestHeader, onRequestHeaderHandlerByNonce: requestHandler.RequestHeaderByNonce, @@ -226,6 +232,11 @@ func (mp *metaProcessor) ProcessBlock( return nil } +// SetConsensusData - sets the reward addresses for the current consensus group +func (mp *metaProcessor) SetConsensusData(randomness 
[]byte, round uint64, epoch uint32, shardId uint32) { + // nothing to do +} + func (mp *metaProcessor) checkAndRequestIfShardHeadersMissing(round uint64) { _, _, sortedHdrPerShard, err := mp.getOrderedHdrs(round) if err != nil { diff --git a/process/block/metablock_test.go b/process/block/metablock_test.go index dbf2b4bd0c6..271413ff620 100644 --- a/process/block/metablock_test.go +++ b/process/block/metablock_test.go @@ -131,6 +131,8 @@ func TestNewMetaProcessor_NilAccountsAdapterShouldErr(t *testing.T) { mdp, &mock.ForkDetectorMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.HasherStub{}, &mock.MarshalizerMock{}, &mock.ChainStorerMock{}, @@ -151,6 +153,8 @@ func TestNewMetaProcessor_NilDataPoolShouldErr(t *testing.T) { nil, &mock.ForkDetectorMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.HasherStub{}, &mock.MarshalizerMock{}, &mock.ChainStorerMock{}, @@ -172,6 +176,8 @@ func TestNewMetaProcessor_NilForkDetectorShouldErr(t *testing.T) { mdp, nil, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.HasherStub{}, &mock.MarshalizerMock{}, &mock.ChainStorerMock{}, @@ -193,6 +199,8 @@ func TestNewMetaProcessor_NilShardCoordinatorShouldErr(t *testing.T) { mdp, &mock.ForkDetectorMock{}, nil, + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.HasherStub{}, &mock.MarshalizerMock{}, &mock.ChainStorerMock{}, @@ -214,6 +222,8 @@ func TestNewMetaProcessor_NilHasherShouldErr(t *testing.T) { mdp, &mock.ForkDetectorMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, nil, &mock.MarshalizerMock{}, &mock.ChainStorerMock{}, @@ -235,6 +245,8 @@ func TestNewMetaProcessor_NilMarshalizerShouldErr(t *testing.T) { mdp, &mock.ForkDetectorMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.HasherStub{}, nil, &mock.ChainStorerMock{}, @@ -256,6 +268,8 @@ func TestNewMetaProcessor_NilChainStorerShouldErr(t *testing.T) { mdp, &mock.ForkDetectorMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.HasherStub{}, &mock.MarshalizerMock{}, nil, @@ -277,6 +291,8 @@ func TestNewMetaProcessor_NilRequestHeaderHandlerShouldErr(t *testing.T) { mdp, &mock.ForkDetectorMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.HasherStub{}, &mock.MarshalizerMock{}, &mock.ChainStorerMock{}, @@ -372,6 +388,8 @@ func TestMetaProcessor_ProcessWithDirtyAccountShouldErr(t *testing.T) { mdp, &mock.ForkDetectorMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.HasherStub{}, &mock.MarshalizerMock{}, &mock.ChainStorerMock{}, @@ -476,6 +494,8 @@ func TestMetaProcessor_ProcessBlockWithErrOnVerifyStateRootCallShouldRevertState mdp, &mock.ForkDetectorMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.HasherStub{}, &mock.MarshalizerMock{}, &mock.ChainStorerMock{}, @@ -513,6 +533,8 @@ func TestMetaProcessor_ProcessBlockHeaderShouldPass(t *testing.T) { mdp, &mock.ForkDetectorMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.HasherStub{}, &mock.MarshalizerMock{}, 
&mock.ChainStorerMock{}, @@ -567,6 +589,8 @@ func TestMetaProcessor_RequestFinalMissingHeaderShouldPass(t *testing.T) { mdp, &mock.ForkDetectorMock{}, mock.NewMultiShardsCoordinatorMock(3), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.HasherStub{}, &mock.MarshalizerMock{}, &mock.ChainStorerMock{}, @@ -608,6 +632,8 @@ func TestMetaProcessor_CommitBlockNilBlockchainShouldErr(t *testing.T) { mdp, &mock.ForkDetectorMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.HasherStub{}, &mock.MarshalizerMock{}, &mock.ChainStorerMock{}, @@ -647,6 +673,8 @@ func TestMetaProcessor_CommitBlockMarshalizerFailForHeaderShouldErr(t *testing.T mdp, &mock.ForkDetectorMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.HasherStub{}, marshalizer, &mock.ChainStorerMock{}, @@ -694,6 +722,8 @@ func TestMetaProcessor_CommitBlockStorageFailsForHeaderShouldErr(t *testing.T) { }, }, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.HasherStub{}, &mock.MarshalizerMock{}, store, @@ -729,6 +759,8 @@ func TestMetaProcessor_CommitBlockNilNoncesDataPoolShouldErr(t *testing.T) { mdp, &mock.ForkDetectorMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.HasherStub{}, &mock.MarshalizerMock{}, store, @@ -767,6 +799,8 @@ func TestMetaProcessor_CommitBlockNoTxInPoolShouldErr(t *testing.T) { mdp, fd, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, hasher, &mock.MarshalizerMock{}, store, @@ -832,6 +866,8 @@ func TestMetaProcessor_CommitBlockOkValsShouldWork(t *testing.T) { mdp, fd, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, hasher, &mock.MarshalizerMock{}, store, @@ -879,6 +915,8 @@ func TestBlockProc_RequestTransactionFromNetwork(t *testing.T) { mdp, &mock.ForkDetectorMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.HasherStub{}, &mock.MarshalizerMock{}, &mock.ChainStorerMock{}, @@ -916,6 +954,8 @@ func TestMetaProcessor_RemoveBlockInfoFromPoolShouldErrNilMetaBlockHeader(t *tes mdp, &mock.ForkDetectorMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.HasherStub{}, &mock.MarshalizerMock{}, initStore(), @@ -938,6 +978,8 @@ func TestMetaProcessor_RemoveBlockInfoFromPoolShouldWork(t *testing.T) { mdp, &mock.ForkDetectorMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.HasherStub{}, &mock.MarshalizerMock{}, initStore(), @@ -963,6 +1005,8 @@ func TestMetaProcessor_CreateBlockHeaderShouldNotReturnNilWhenCreateShardInfoFai initMetaDataPool(), &mock.ForkDetectorMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.HasherStub{}, &mock.MarshalizerMock{}, initStore(), @@ -992,6 +1036,8 @@ func TestMetaProcessor_CreateBlockHeaderShouldWork(t *testing.T) { initMetaDataPool(), &mock.ForkDetectorMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.HasherStub{}, &mock.MarshalizerMock{}, initStore(), @@ -1023,6 +1069,8 @@ func TestMetaProcessor_CommitBlockShouldRevertAccountStateWhenErr(t *testing.T) 
initMetaDataPool(), &mock.ForkDetectorMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.HasherStub{}, &mock.MarshalizerMock{}, initStore(), @@ -1045,6 +1093,8 @@ func TestMetaProcessor_MarshalizedDataToBroadcastShouldWork(t *testing.T) { mdp, &mock.ForkDetectorMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.HasherStub{}, &mock.MarshalizerMock{}, initStore(), @@ -1074,6 +1124,8 @@ func TestMetaProcessor_ReceivedHeaderShouldEraseRequested(t *testing.T) { pool, &mock.ForkDetectorMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, hasher, marshalizer, initStore(), @@ -1167,6 +1219,8 @@ func TestMetaProcessor_CreateShardInfoShouldWorkNoHdrAddedNotValid(t *testing.T) pool, &mock.ForkDetectorMock{}, mock.NewMultiShardsCoordinatorMock(5), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, hasher, marshalizer, initStore(), @@ -1246,6 +1300,8 @@ func TestMetaProcessor_CreateShardInfoShouldWorkNoHdrAddedNotFinal(t *testing.T) pool, &mock.ForkDetectorMock{}, mock.NewMultiShardsCoordinatorMock(noOfShards), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, hasher, marshalizer, initStore(), @@ -1363,6 +1419,8 @@ func TestMetaProcessor_CreateShardInfoShouldWorkHdrsAdded(t *testing.T) { pool, &mock.ForkDetectorMock{}, mock.NewMultiShardsCoordinatorMock(noOfShards), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, hasher, marshalizer, initStore(), @@ -1529,6 +1587,8 @@ func TestMetaProcessor_CreateShardInfoEmptyBlockHDRRoundTooHigh(t *testing.T) { pool, &mock.ForkDetectorMock{}, mock.NewMultiShardsCoordinatorMock(noOfShards), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, hasher, marshalizer, initStore(), @@ -1653,6 +1713,8 @@ func TestMetaProcessor_RestoreBlockIntoPoolsShouldErrNilMetaBlockHeader(t *testi mdp, &mock.ForkDetectorMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.HasherStub{}, &mock.MarshalizerMock{}, initStore(), @@ -1696,6 +1758,8 @@ func TestMetaProcessor_RestoreBlockIntoPoolsShouldWork(t *testing.T) { pool, &mock.ForkDetectorMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, hasherMock, marshalizerMock, store, @@ -1735,6 +1799,8 @@ func TestMetaProcessor_CreateLastNotarizedHdrs(t *testing.T) { pool, &mock.ForkDetectorMock{}, mock.NewMultiShardsCoordinatorMock(noOfShards), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, hasher, marshalizer, initStore(), @@ -1828,6 +1894,8 @@ func TestMetaProcessor_CheckShardHeadersValidity(t *testing.T) { pool, &mock.ForkDetectorMock{}, mock.NewMultiShardsCoordinatorMock(noOfShards), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, hasher, marshalizer, initStore(), @@ -1922,6 +1990,8 @@ func TestMetaProcessor_CheckShardHeadersValidityWrongNonceFromLastNoted(t *testi pool, &mock.ForkDetectorMock{}, mock.NewMultiShardsCoordinatorMock(noOfShards), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, hasher, marshalizer, initStore(), @@ -1979,6 +2049,8 @@ func TestMetaProcessor_CheckShardHeadersValidityRoundZeroLastNoted(t *testing.T) pool, &mock.ForkDetectorMock{}, mock.NewMultiShardsCoordinatorMock(noOfShards), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, 
hasher, marshalizer, initStore(), @@ -2042,6 +2114,8 @@ func TestMetaProcessor_CheckShardHeadersFinality(t *testing.T) { pool, &mock.ForkDetectorMock{}, mock.NewMultiShardsCoordinatorMock(noOfShards), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, hasher, marshalizer, initStore(), @@ -2154,6 +2228,8 @@ func TestMetaProcessor_IsHdrConstructionValid(t *testing.T) { pool, &mock.ForkDetectorMock{}, mock.NewMultiShardsCoordinatorMock(noOfShards), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, hasher, marshalizer, initStore(), @@ -2261,6 +2337,8 @@ func TestMetaProcessor_IsShardHeaderValidFinal(t *testing.T) { pool, &mock.ForkDetectorMock{}, mock.NewMultiShardsCoordinatorMock(noOfShards), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, hasher, marshalizer, initStore(), @@ -2370,6 +2448,8 @@ func TestMetaProcessor_DecodeBlockBody(t *testing.T) { mdp, &mock.ForkDetectorMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.HasherStub{}, marshalizerMock, &mock.ChainStorerMock{}, @@ -2398,6 +2478,8 @@ func TestMetaProcessor_DecodeBlockHeader(t *testing.T) { mdp, &mock.ForkDetectorMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.HasherStub{}, marshalizerMock, &mock.ChainStorerMock{}, @@ -2434,6 +2516,8 @@ func TestMetaProcessor_UpdateShardsHeadersNonce_ShouldWork(t *testing.T) { mdp, &mock.ForkDetectorMock{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, &mock.HasherStub{}, marshalizerMock, &mock.ChainStorerMock{}, diff --git a/process/block/preprocess/rewardTxPreProcessor.go b/process/block/preprocess/rewardTxPreProcessor.go new file mode 100644 index 00000000000..a291f256b2d --- /dev/null +++ b/process/block/preprocess/rewardTxPreProcessor.go @@ -0,0 +1,545 @@ +package preprocess + +import ( + "fmt" + "time" + + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/storage" +) + +type rewardTxPreprocessor struct { + *basePreProcess + chReceivedAllRewardTxs chan bool + onRequestRewardTx func(shardID uint32, txHashes [][]byte) + rewardTxsForBlock txsForBlock + rewardTxPool dataRetriever.ShardedDataCacherNotifier + storage dataRetriever.StorageService + rewardsProcessor process.RewardTransactionProcessor + rewardsProducer process.InternalTransactionProducer + accounts state.AccountsAdapter +} + +// NewRewardTxPreprocessor creates a new reward transaction preprocessor object +func NewRewardTxPreprocessor( + rewardTxDataPool dataRetriever.ShardedDataCacherNotifier, + store dataRetriever.StorageService, + hasher hashing.Hasher, + marshalizer marshal.Marshalizer, + rewardProcessor process.RewardTransactionProcessor, + rewardProducer process.InternalTransactionProducer, + shardCoordinator sharding.Coordinator, + accounts state.AccountsAdapter, + onRequestRewardTransaction func(shardID uint32, txHashes [][]byte), +) (*rewardTxPreprocessor, error) { + + if hasher == nil || 
hasher.IsInterfaceNil() {
+		return nil, process.ErrNilHasher
+	}
+	if marshalizer == nil || marshalizer.IsInterfaceNil() {
+		return nil, process.ErrNilMarshalizer
+	}
+	if rewardTxDataPool == nil || rewardTxDataPool.IsInterfaceNil() {
+		return nil, process.ErrNilRewardTxDataPool
+	}
+	if store == nil || store.IsInterfaceNil() {
+		return nil, process.ErrNilStorage
+	}
+	if rewardProcessor == nil || rewardProcessor.IsInterfaceNil() {
+		return nil, process.ErrNilRewardsTxProcessor
+	}
+	if rewardProducer == nil || rewardProducer.IsInterfaceNil() {
+		return nil, process.ErrNilInternalTransactionProducer
+	}
+	if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() {
+		return nil, process.ErrNilShardCoordinator
+	}
+	if accounts == nil || accounts.IsInterfaceNil() {
+		return nil, process.ErrNilAccountsAdapter
+	}
+	if onRequestRewardTransaction == nil {
+		return nil, process.ErrNilRequestHandler
+	}
+
+	bpp := &basePreProcess{
+		hasher:           hasher,
+		marshalizer:      marshalizer,
+		shardCoordinator: shardCoordinator,
+	}
+
+	rtp := &rewardTxPreprocessor{
+		basePreProcess:    bpp,
+		storage:           store,
+		rewardTxPool:      rewardTxDataPool,
+		onRequestRewardTx: onRequestRewardTransaction,
+		rewardsProcessor:  rewardProcessor,
+		rewardsProducer:   rewardProducer,
+		accounts:          accounts,
+	}
+
+	rtp.chReceivedAllRewardTxs = make(chan bool)
+	rtp.rewardTxPool.RegisterHandler(rtp.receivedRewardTransaction)
+	rtp.rewardTxsForBlock.txHashAndInfo = make(map[string]*txInfo)
+
+	return rtp, nil
+}
+
+// waitForRewardTxHashes waits until all the requested reward transactions are received or the wait time expires
+func (rtp *rewardTxPreprocessor) waitForRewardTxHashes(waitTime time.Duration) error {
+	select {
+	case <-rtp.chReceivedAllRewardTxs:
+		return nil
+	case <-time.After(waitTime):
+		return process.ErrTimeIsOut
+	}
+}
+
+// IsDataPrepared returns nil if all the requested reward transactions arrived and were saved into the pool
+func (rtp *rewardTxPreprocessor) IsDataPrepared(requestedRewardTxs int, haveTime func() time.Duration) error {
+	if requestedRewardTxs > 0 {
+		log.Info(fmt.Sprintf("requested %d missing reward Txs\n", requestedRewardTxs))
+		err := rtp.waitForRewardTxHashes(haveTime())
+		rtp.rewardTxsForBlock.mutTxsForBlock.RLock()
+		missingRewardTxs := rtp.rewardTxsForBlock.missingTxs
+		rtp.rewardTxsForBlock.mutTxsForBlock.RUnlock()
+		log.Info(fmt.Sprintf("received %d missing reward Txs\n", requestedRewardTxs-missingRewardTxs))
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// RemoveTxBlockFromPools removes reward transactions and miniblocks from associated pools
+func (rtp *rewardTxPreprocessor) RemoveTxBlockFromPools(body block.Body, miniBlockPool storage.Cacher) error {
+	if body == nil {
+		return process.ErrNilTxBlockBody
+	}
+
+	return rtp.removeDataFromPools(body, miniBlockPool, rtp.rewardTxPool, block.RewardsBlock)
+}
+
+// RestoreTxBlockIntoPools restores the reward transactions and miniblocks to associated pools
+func (rtp *rewardTxPreprocessor) RestoreTxBlockIntoPools(
+	body block.Body,
+	miniBlockPool storage.Cacher,
+) (int, error) {
+	if miniBlockPool == nil {
+		return 0, process.ErrNilMiniBlockPool
+	}
+
+	rewardTxsRestored := 0
+	for i := 0; i < len(body); i++ {
+		miniBlock := body[i]
+		if miniBlock.Type != block.RewardsBlock {
+			continue
+		}
+
+		strCache := process.ShardCacherIdentifier(miniBlock.SenderShardID, miniBlock.ReceiverShardID)
+		rewardTxBuff, err := rtp.storage.GetAll(dataRetriever.RewardTransactionUnit, miniBlock.TxHashes)
+		if err != nil {
+			return rewardTxsRestored, err
+		}
+
+		for txHash, txBuff := range rewardTxBuff {
+			tx := rewardTx.RewardTx{}
+			err = rtp.marshalizer.Unmarshal(&tx, txBuff)
+			if err != nil {
+				return rewardTxsRestored, err
+			}
+
+			rtp.rewardTxPool.AddData([]byte(txHash), &tx, strCache)
+		}
+
+		miniBlockHash, err := core.CalculateHash(rtp.marshalizer, rtp.hasher, miniBlock)
+		if err != nil {
+			return rewardTxsRestored, err
+		}
+
+		miniBlockPool.Put(miniBlockHash, miniBlock)
+
+		err = rtp.storage.GetStorer(dataRetriever.MiniBlockUnit).Remove(miniBlockHash)
+		if err != nil {
+			return rewardTxsRestored, err
+		}
+		rewardTxsRestored += len(miniBlock.TxHashes)
+	}
+
+	return rewardTxsRestored, nil
+}
+
+// ProcessBlockTransactions processes all the reward transactions from the block.Body, updates the state
+func (rtp *rewardTxPreprocessor) ProcessBlockTransactions(body block.Body, round uint64, haveTime func() bool) error {
+	rewardMiniBlocksSlice := make(block.MiniBlockSlice, 0)
+	computedRewardsMbsMap := rtp.rewardsProducer.CreateAllInterMiniBlocks()
+	for _, mb := range computedRewardsMbsMap {
+		rewardMiniBlocksSlice = append(rewardMiniBlocksSlice, mb)
+	}
+	rtp.AddComputedRewardMiniBlocks(rewardMiniBlocksSlice)
+
+	// basic validation already done in interceptors
+	for i := 0; i < len(body); i++ {
+		miniBlock := body[i]
+		if miniBlock.Type != block.RewardsBlock {
+			continue
+		}
+
+		for j := 0; j < len(miniBlock.TxHashes); j++ {
+			if !haveTime() {
+				return process.ErrTimeIsOut
+			}
+
+			txHash := miniBlock.TxHashes[j]
+			rtp.rewardTxsForBlock.mutTxsForBlock.RLock()
+			txData := rtp.rewardTxsForBlock.txHashAndInfo[string(txHash)]
+			rtp.rewardTxsForBlock.mutTxsForBlock.RUnlock()
+			if txData == nil || txData.tx == nil {
+				return process.ErrMissingTransaction
+			}
+
+			rTx, ok := txData.tx.(*rewardTx.RewardTx)
+			if !ok {
+				return process.ErrWrongTypeAssertion
+			}
+
+			err := rtp.processRewardTransaction(
+				txHash,
+				rTx,
+				round,
+				miniBlock.SenderShardID,
+				miniBlock.ReceiverShardID,
+			)
+			if err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// AddComputedRewardMiniBlocks adds to the local cache the reward transactions from the given miniblocks
+func (rtp *rewardTxPreprocessor) AddComputedRewardMiniBlocks(computedRewardMiniblocks block.MiniBlockSlice) {
+	for _, rewardMb := range computedRewardMiniblocks {
+		txShardData := &txShardInfo{senderShardID: rewardMb.SenderShardID, receiverShardID: rewardMb.ReceiverShardID}
+		for _, txHash := range rewardMb.TxHashes {
+			tx, ok := rtp.rewardTxPool.SearchFirstData(txHash)
+			if !ok {
+				log.Error(process.ErrRewardTransactionNotFound.Error())
+				continue
+			}
+
+			rTx, ok := tx.(*rewardTx.RewardTx)
+			if !ok {
+				log.Error(process.ErrWrongTypeAssertion.Error())
+				continue
+			}
+			rtp.rewardTxsForBlock.txHashAndInfo[string(txHash)] = &txInfo{
+				tx:          rTx,
+				txShardInfo: txShardData,
+			}
+		}
+	}
+}
+
+// SaveTxBlockToStorage saves the reward transactions from body into storage
+func (rtp *rewardTxPreprocessor) SaveTxBlockToStorage(body block.Body) error {
+	for i := 0; i < len(body); i++ {
+		miniBlock := body[i]
+		if miniBlock.Type != block.RewardsBlock {
+			continue
+		}
+
+		err := rtp.saveTxsToStorage(
+			miniBlock.TxHashes,
+			&rtp.rewardTxsForBlock,
+			rtp.storage,
+			dataRetriever.RewardTransactionUnit,
+		)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// receivedRewardTransaction is a callback function called when a new reward transaction
+// is added to the reward transactions pool
+func (rtp *rewardTxPreprocessor) receivedRewardTransaction(txHash []byte) {
+	receivedAllMissing := 
rtp.baseReceivedTransaction(txHash, &rtp.rewardTxsForBlock, rtp.rewardTxPool) + + if receivedAllMissing { + rtp.chReceivedAllRewardTxs <- true + } +} + +// CreateBlockStarted cleans the local cache map for processed/created reward transactions at this round +func (rtp *rewardTxPreprocessor) CreateBlockStarted() { + rtp.rewardTxsForBlock.mutTxsForBlock.Lock() + rtp.rewardTxsForBlock.txHashAndInfo = make(map[string]*txInfo) + rtp.rewardTxsForBlock.mutTxsForBlock.Unlock() +} + +// RequestBlockTransactions request for reward transactions if missing from a block.Body +func (rtp *rewardTxPreprocessor) RequestBlockTransactions(body block.Body) int { + requestedRewardTxs := 0 + missingRewardTxsForShards := rtp.computeMissingAndExistingRewardTxsForShards(body) + + rtp.rewardTxsForBlock.mutTxsForBlock.Lock() + for senderShardID, rewardTxHashes := range missingRewardTxsForShards { + for _, txHash := range rewardTxHashes { + rtp.setMissingTxsForShard(senderShardID, txHash) + } + } + rtp.rewardTxsForBlock.mutTxsForBlock.Unlock() + + for senderShardID, mbsRewardTxHashes := range missingRewardTxsForShards { + for _, mbRewardTxHashes := range mbsRewardTxHashes { + requestedRewardTxs += len(mbRewardTxHashes.txHashes) + rtp.onRequestRewardTx(senderShardID, mbRewardTxHashes.txHashes) + } + } + + return requestedRewardTxs +} + +func (rtp *rewardTxPreprocessor) setMissingTxsForShard(senderShardID uint32, mbTxHashes *txsHashesInfo) { + txShardData := &txShardInfo{senderShardID: senderShardID, receiverShardID: mbTxHashes.receiverShardID} + for _, txHash := range mbTxHashes.txHashes { + rtp.rewardTxsForBlock.txHashAndInfo[string(txHash)] = &txInfo{tx: nil, txShardInfo: txShardData} + } +} + +// computeMissingAndExistingRewardTxsForShards calculates what reward transactions are available and what are missing +// from block.Body +func (rtp *rewardTxPreprocessor) computeMissingAndExistingRewardTxsForShards(body block.Body) map[uint32][]*txsHashesInfo { + rewardTxs := block.Body{} + for _, mb := range body { + if mb.Type != block.RewardsBlock { + continue + } + if mb.SenderShardID == rtp.shardCoordinator.SelfId() { + continue + } + + rewardTxs = append(rewardTxs, mb) + } + + missingTxsForShards := rtp.computeExistingAndMissing( + rewardTxs, + &rtp.rewardTxsForBlock, + rtp.chReceivedAllRewardTxs, + block.RewardsBlock, + rtp.rewardTxPool, + ) + + return missingTxsForShards +} + +// processRewardTransaction processes a reward transaction, if the transactions has an error it removes it from pool +func (rtp *rewardTxPreprocessor) processRewardTransaction( + rewardTxHash []byte, + rewardTx *rewardTx.RewardTx, + round uint64, + sndShardId uint32, + dstShardId uint32, +) error { + + err := rtp.rewardsProcessor.ProcessRewardTransaction(rewardTx) + if err != nil { + return err + } + + txShardData := &txShardInfo{senderShardID: sndShardId, receiverShardID: dstShardId} + rtp.rewardTxsForBlock.mutTxsForBlock.Lock() + rtp.rewardTxsForBlock.txHashAndInfo[string(rewardTxHash)] = &txInfo{tx: rewardTx, txShardInfo: txShardData} + rtp.rewardTxsForBlock.mutTxsForBlock.Unlock() + + return nil +} + +// RequestTransactionsForMiniBlock requests missing reward transactions for a certain miniblock +func (rtp *rewardTxPreprocessor) RequestTransactionsForMiniBlock(mb block.MiniBlock) int { + missingRewardTxsForMiniBlock := rtp.computeMissingRewardTxsForMiniBlock(mb) + rtp.onRequestRewardTx(mb.SenderShardID, missingRewardTxsForMiniBlock) + + return len(missingRewardTxsForMiniBlock) +} + +// computeMissingRewardTxsForMiniBlock computes missing 
reward transactions for a certain miniblock +func (rtp *rewardTxPreprocessor) computeMissingRewardTxsForMiniBlock(mb block.MiniBlock) [][]byte { + missingRewardTxs := make([][]byte, 0) + if mb.Type != block.RewardsBlock { + return missingRewardTxs + } + + for _, txHash := range mb.TxHashes { + tx, _ := process.GetTransactionHandlerFromPool( + mb.SenderShardID, + mb.ReceiverShardID, + txHash, + rtp.rewardTxPool, + ) + + if tx == nil { + missingRewardTxs = append(missingRewardTxs, txHash) + } + } + + return missingRewardTxs +} + +// getAllRewardTxsFromMiniBlock gets all the reward transactions from a miniblock into a new structure +func (rtp *rewardTxPreprocessor) getAllRewardTxsFromMiniBlock( + mb *block.MiniBlock, + haveTime func() bool, +) ([]*rewardTx.RewardTx, [][]byte, error) { + + strCache := process.ShardCacherIdentifier(mb.SenderShardID, mb.ReceiverShardID) + txCache := rtp.rewardTxPool.ShardDataStore(strCache) + if txCache == nil { + return nil, nil, process.ErrNilRewardTxDataPool + } + + // verify if all reward transactions exists + rewardTxs := make([]*rewardTx.RewardTx, 0) + txHashes := make([][]byte, 0) + for _, txHash := range mb.TxHashes { + if !haveTime() { + return nil, nil, process.ErrTimeIsOut + } + + tmp, ok := txCache.Peek(txHash) + if !ok { + return nil, nil, process.ErrNilRewardTransaction + } + + tx, ok := tmp.(*rewardTx.RewardTx) + if !ok { + return nil, nil, process.ErrWrongTypeAssertion + } + + txHashes = append(txHashes, txHash) + rewardTxs = append(rewardTxs, tx) + } + + return rewardTxs, txHashes, nil +} + +// CreateAndProcessMiniBlock creates the miniblock from storage and processes the reward transactions added into the miniblock +func (rtp *rewardTxPreprocessor) CreateAndProcessMiniBlock(sndShardId, dstShardId uint32, spaceRemained int, haveTime func() bool, round uint64) (*block.MiniBlock, error) { + return nil, nil +} + +// CreateAndProcessMiniBlocks creates miniblocks from storage and processes the reward transactions added into the miniblocks +// as long as it has time +func (rtp *rewardTxPreprocessor) CreateAndProcessMiniBlocks( + maxTxSpaceRemained uint32, + maxMbSpaceRemained uint32, + round uint64, + _ func() bool, +) (block.MiniBlockSlice, error) { + + // always have time for rewards + haveTime := func() bool { + return true + } + + rewardMiniBlocksSlice := make(block.MiniBlockSlice, 0) + computedRewardsMbsMap := rtp.rewardsProducer.CreateAllInterMiniBlocks() + for _, mb := range computedRewardsMbsMap { + rewardMiniBlocksSlice = append(rewardMiniBlocksSlice, mb) + } + + snapshot := rtp.accounts.JournalLen() + + for _, mb := range rewardMiniBlocksSlice { + err := rtp.ProcessMiniBlock(mb, haveTime, round) + + if err != nil { + log.Error(err.Error()) + errAccountState := rtp.accounts.RevertToSnapshot(snapshot) + if errAccountState != nil { + // TODO: evaluate if reloading the trie from disk will might solve the problem + log.Error(errAccountState.Error()) + } + return nil, err + } + } + + return rewardMiniBlocksSlice, nil +} + +// ProcessMiniBlock processes all the reward transactions from a miniblock and saves the processed reward transactions +// in local cache +func (rtp *rewardTxPreprocessor) ProcessMiniBlock(miniBlock *block.MiniBlock, haveTime func() bool, round uint64) error { + if miniBlock.Type != block.RewardsBlock { + return process.ErrWrongTypeInMiniBlock + } + + miniBlockRewardTxs, miniBlockTxHashes, err := rtp.getAllRewardTxsFromMiniBlock(miniBlock, haveTime) + if err != nil { + return err + } + + for index := range miniBlockRewardTxs { + 
if !haveTime() { + return process.ErrTimeIsOut + } + + err = rtp.rewardsProcessor.ProcessRewardTransaction(miniBlockRewardTxs[index]) + if err != nil { + return err + } + } + + txShardData := &txShardInfo{senderShardID: miniBlock.SenderShardID, receiverShardID: miniBlock.ReceiverShardID} + + rtp.rewardTxsForBlock.mutTxsForBlock.Lock() + for index, txHash := range miniBlockTxHashes { + rtp.rewardTxsForBlock.txHashAndInfo[string(txHash)] = &txInfo{tx: miniBlockRewardTxs[index], txShardInfo: txShardData} + } + rtp.rewardTxsForBlock.mutTxsForBlock.Unlock() + + return nil +} + +// CreateMarshalizedData marshalizes reward transaction hashes and and saves them into a new structure +func (rtp *rewardTxPreprocessor) CreateMarshalizedData(txHashes [][]byte) ([][]byte, error) { + marshaledRewardTxs, err := rtp.createMarshalizedData(txHashes, &rtp.rewardTxsForBlock) + if err != nil { + return nil, err + } + + return marshaledRewardTxs, nil +} + +// GetAllCurrentUsedTxs returns all the reward transactions used at current creation / processing +func (rtp *rewardTxPreprocessor) GetAllCurrentUsedTxs() map[string]data.TransactionHandler { + rewardTxPool := make(map[string]data.TransactionHandler) + + rtp.rewardTxsForBlock.mutTxsForBlock.RLock() + for txHash, txData := range rtp.rewardTxsForBlock.txHashAndInfo { + rewardTxPool[txHash] = txData.tx + } + rtp.rewardTxsForBlock.mutTxsForBlock.RUnlock() + + return rewardTxPool +} + +// IsInterfaceNil returns true if there is no value under the interface +func (rtp *rewardTxPreprocessor) IsInterfaceNil() bool { + if rtp == nil { + return true + } + return false +} diff --git a/process/block/preprocess/rewardTxPreProcessor_test.go b/process/block/preprocess/rewardTxPreProcessor_test.go new file mode 100644 index 00000000000..226b432cc05 --- /dev/null +++ b/process/block/preprocess/rewardTxPreProcessor_test.go @@ -0,0 +1,695 @@ +package preprocess + +import ( + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/mock" + "github.com/ElrondNetwork/elrond-go/storage" + "github.com/stretchr/testify/assert" +) + +func TestNewRewardTxPreprocessor_NilRewardTxDataPoolShouldErr(t *testing.T) { + t.Parallel() + + rtp, err := NewRewardTxPreprocessor( + nil, + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + func(shardID uint32, txHashes [][]byte) {}, + ) + + assert.Nil(t, rtp) + assert.Equal(t, process.ErrNilRewardTxDataPool, err) +} + +func TestNewRewardTxPreprocessor_NilStoreShouldErr(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + rtp, err := NewRewardTxPreprocessor( + tdp.Transactions(), + nil, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + func(shardID uint32, txHashes [][]byte) {}, + ) + + assert.Nil(t, rtp) + assert.Equal(t, process.ErrNilStorage, err) +} + +func TestNewRewardTxPreprocessor_NilHasherShouldErr(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + rtp, err := NewRewardTxPreprocessor( + tdp.Transactions(), + &mock.ChainStorerMock{}, + nil, + &mock.MarshalizerMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + 
mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + func(shardID uint32, txHashes [][]byte) {}, + ) + + assert.Nil(t, rtp) + assert.Equal(t, process.ErrNilHasher, err) +} + +func TestNewRewardTxPreprocessor_NilMarshalizerShouldErr(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + rtp, err := NewRewardTxPreprocessor( + tdp.RewardTransactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + nil, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + func(shardID uint32, txHashes [][]byte) {}, + ) + + assert.Nil(t, rtp) + assert.Equal(t, process.ErrNilMarshalizer, err) +} + +func TestNewRewardTxPreprocessor_NilRewardTxProcessorShouldErr(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + rtp, err := NewRewardTxPreprocessor( + tdp.RewardTransactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + nil, + &mock.IntermediateTransactionHandlerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + func(shardID uint32, txHashes [][]byte) {}, + ) + + assert.Nil(t, rtp) + assert.Equal(t, process.ErrNilRewardsTxProcessor, err) +} + +func TestNewRewardTxPreprocessor_NilRewardProducerShouldErr(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + rtp, err := NewRewardTxPreprocessor( + tdp.RewardTransactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.RewardTxProcessorMock{}, + nil, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + func(shardID uint32, txHashes [][]byte) {}, + ) + + assert.Nil(t, rtp) + assert.Equal(t, process.ErrNilInternalTransactionProducer, err) +} + +func TestNewRewardTxPreprocessor_NilShardCoordinatorShouldErr(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + rtp, err := NewRewardTxPreprocessor( + tdp.RewardTransactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + nil, + &mock.AccountsStub{}, + func(shardID uint32, txHashes [][]byte) {}, + ) + + assert.Nil(t, rtp) + assert.Equal(t, process.ErrNilShardCoordinator, err) +} + +func TestNewRewardTxPreprocessor_NilAccountsAdapterShouldErr(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + rtp, err := NewRewardTxPreprocessor( + tdp.RewardTransactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + nil, + func(shardID uint32, txHashes [][]byte) {}, + ) + + assert.Nil(t, rtp) + assert.Equal(t, process.ErrNilAccountsAdapter, err) +} + +func TestNewRewardTxPreprocessor_NilRequestHandlerShouldErr(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + rtp, err := NewRewardTxPreprocessor( + tdp.RewardTransactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + nil, + ) + + assert.Nil(t, rtp) + assert.Equal(t, process.ErrNilRequestHandler, err) +} + +func TestNewRewardTxPreprocessor_OkValsShouldWork(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + rtp, err := NewRewardTxPreprocessor( + tdp.RewardTransactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.RewardTxProcessorMock{}, + 
&mock.IntermediateTransactionHandlerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + func(shardID uint32, txHashes [][]byte) {}, + ) + assert.Nil(t, err) + assert.NotNil(t, rtp) +} + +func TestRewardTxPreprocessor_AddComputedRewardMiniBlocksShouldAddMiniBlock(t *testing.T) { + t.Parallel() + + txHash := "tx1_hash" + + tdp := initDataPool() + + rtp, _ := NewRewardTxPreprocessor( + tdp.RewardTransactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + func(shardID uint32, txHashes [][]byte) {}, + ) + + assert.NotNil(t, rtp) + + txHashes := [][]byte{[]byte(txHash)} + + var rewardMiniBlocks block.MiniBlockSlice + mb1 := block.MiniBlock{ + TxHashes: txHashes, + ReceiverShardID: 1, + SenderShardID: 0, + Type: 0, + } + rewardMiniBlocks = append(rewardMiniBlocks, &mb1) + + rtp.AddComputedRewardMiniBlocks(rewardMiniBlocks) + + res := rtp.GetAllCurrentUsedTxs() + + if _, ok := res[txHash]; !ok { + assert.Fail(t, "miniblock was not added") + } +} + +func TestRewardTxPreprocessor_CreateMarshalizedDataShouldWork(t *testing.T) { + t.Parallel() + + txHash := "tx1_hash" + tdp := initDataPool() + rtp, _ := NewRewardTxPreprocessor( + tdp.RewardTransactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + func(shardID uint32, txHashes [][]byte) {}, + ) + + txHashes := [][]byte{[]byte(txHash)} + var rewardMiniBlocks block.MiniBlockSlice + mb1 := block.MiniBlock{ + TxHashes: txHashes, + ReceiverShardID: 1, + SenderShardID: 0, + Type: block.RewardsBlock, + } + + rewardMiniBlocks = append(rewardMiniBlocks, &mb1) + rtp.AddComputedRewardMiniBlocks(rewardMiniBlocks) + + res, err := rtp.CreateMarshalizedData(txHashes) + assert.Nil(t, err) + assert.Equal(t, 1, len(res)) +} + +func TestRewardTxPreprocessor_ProcessMiniBlockInvalidMiniBlockTypeShouldErr(t *testing.T) { + t.Parallel() + + txHash := "tx1_hash" + tdp := initDataPool() + rtp, _ := NewRewardTxPreprocessor( + tdp.RewardTransactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + func(shardID uint32, txHashes [][]byte) {}, + ) + + txHashes := [][]byte{[]byte(txHash)} + mb1 := block.MiniBlock{ + TxHashes: txHashes, + ReceiverShardID: 1, + SenderShardID: 0, + Type: 0, + } + + err := rtp.ProcessMiniBlock(&mb1, haveTimeTrue, 0) + assert.Equal(t, process.ErrWrongTypeInMiniBlock, err) +} + +func TestRewardTxPreprocessor_ProcessMiniBlockShouldWork(t *testing.T) { + t.Parallel() + + txHash := "tx1_hash" + tdp := initDataPool() + rtp, _ := NewRewardTxPreprocessor( + tdp.RewardTransactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + func(shardID uint32, txHashes [][]byte) {}, + ) + + txHashes := [][]byte{[]byte(txHash)} + mb1 := block.MiniBlock{ + TxHashes: txHashes, + ReceiverShardID: 1, + SenderShardID: 0, + Type: block.RewardsBlock, + } + + err := rtp.ProcessMiniBlock(&mb1, haveTimeTrue, 0) + assert.Nil(t, err) + + txsMap := 
rtp.GetAllCurrentUsedTxs() + if _, ok := txsMap[txHash]; !ok { + assert.Fail(t, "miniblock was not added") + } +} + +func TestRewardTxPreprocessor_SaveTxBlockToStorageShouldWork(t *testing.T) { + t.Parallel() + + txHash := "tx1_hash" + tdp := initDataPool() + rtp, _ := NewRewardTxPreprocessor( + tdp.RewardTransactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + func(shardID uint32, txHashes [][]byte) {}, + ) + + txHashes := [][]byte{[]byte(txHash)} + mb1 := block.MiniBlock{ + TxHashes: txHashes, + ReceiverShardID: 1, + SenderShardID: 0, + Type: block.RewardsBlock, + } + mb2 := block.MiniBlock{ + TxHashes: txHashes, + ReceiverShardID: 0, + SenderShardID: 1, + Type: block.RewardsBlock, + } + + var rewardMiniBlocks block.MiniBlockSlice + rewardMiniBlocks = append(rewardMiniBlocks, &mb1) + + rtp.AddComputedRewardMiniBlocks(rewardMiniBlocks) + + var blockBody block.Body + blockBody = append(blockBody, &mb1, &mb2) + err := rtp.SaveTxBlockToStorage(blockBody) + + assert.Nil(t, err) +} + +func TestRewardTxPreprocessor_RequestBlockTransactionsNoMissingTxsShouldWork(t *testing.T) { + t.Parallel() + + txHash := "tx1_hash" + tdp := initDataPool() + rtp, _ := NewRewardTxPreprocessor( + tdp.RewardTransactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + func(shardID uint32, txHashes [][]byte) {}, + ) + + txHashes := [][]byte{[]byte(txHash)} + mb1 := block.MiniBlock{ + TxHashes: txHashes, + ReceiverShardID: 1, + SenderShardID: 0, + Type: block.RewardsBlock, + } + mb2 := block.MiniBlock{ + TxHashes: txHashes, + ReceiverShardID: 0, + SenderShardID: 1, + Type: block.RewardsBlock, + } + + var rewardMiniBlocks block.MiniBlockSlice + rewardMiniBlocks = append(rewardMiniBlocks, &mb1) + + rtp.AddComputedRewardMiniBlocks(rewardMiniBlocks) + + var blockBody block.Body + blockBody = append(blockBody, &mb1, &mb2) + + _ = rtp.SaveTxBlockToStorage(blockBody) + + res := rtp.RequestBlockTransactions(blockBody) + assert.Equal(t, 0, res) +} + +func TestRewardTxPreprocessor_RequestTransactionsForMiniBlockShouldWork(t *testing.T) { + t.Parallel() + + txHash := "tx1_hash" + tdp := initDataPool() + rtp, _ := NewRewardTxPreprocessor( + tdp.RewardTransactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + func(shardID uint32, txHashes [][]byte) {}, + ) + + txHashes := [][]byte{[]byte(txHash)} + mb1 := block.MiniBlock{ + TxHashes: txHashes, + ReceiverShardID: 1, + SenderShardID: 0, + Type: block.RewardsBlock, + } + + res := rtp.RequestTransactionsForMiniBlock(mb1) + assert.Equal(t, 0, res) +} + +func TestRewardTxPreprocessor_ProcessBlockTransactions(t *testing.T) { + t.Parallel() + + txHash := "tx1_hash" + tdp := initDataPool() + rtp, _ := NewRewardTxPreprocessor( + tdp.RewardTransactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + func(shardID uint32, txHashes [][]byte) {}, + ) + + txHashes := [][]byte{[]byte(txHash)} + 
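// Two rewards miniblocks are built below, one outgoing (0 -> 1) and one incoming (1 -> 0);
+ // only the outgoing one is registered as computed, yet processing the whole block body is
+ // still expected to succeed.
+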
mb1 := block.MiniBlock{ + TxHashes: txHashes, + ReceiverShardID: 1, + SenderShardID: 0, + Type: block.RewardsBlock, + } + mb2 := block.MiniBlock{ + TxHashes: txHashes, + ReceiverShardID: 0, + SenderShardID: 1, + Type: block.RewardsBlock, + } + + var rewardMiniBlocks block.MiniBlockSlice + rewardMiniBlocks = append(rewardMiniBlocks, &mb1) + + rtp.AddComputedRewardMiniBlocks(rewardMiniBlocks) + + var blockBody block.Body + blockBody = append(blockBody, &mb1, &mb2) + + err := rtp.ProcessBlockTransactions(blockBody, 0, haveTimeTrue) + assert.Nil(t, err) +} + +func TestRewardTxPreprocessor_IsDataPreparedShouldErr(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + rtp, _ := NewRewardTxPreprocessor( + tdp.RewardTransactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + func(shardID uint32, txHashes [][]byte) {}, + ) + + err := rtp.IsDataPrepared(1, haveTime) + + assert.Equal(t, process.ErrTimeIsOut, err) +} + +func TestRewardTxPreprocessor_IsDataPrepared(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + rtp, _ := NewRewardTxPreprocessor( + tdp.RewardTransactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + func(shardID uint32, txHashes [][]byte) {}, + ) + + go func() { + time.Sleep(50 * time.Millisecond) + rtp.chReceivedAllRewardTxs <- true + }() + + err := rtp.IsDataPrepared(1, haveTime) + + assert.Nil(t, err) +} + +func TestRewardTxPreprocessor_RestoreTxBlockIntoPools(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + storer := mock.ChainStorerMock{ + GetAllCalled: func(unitType dataRetriever.UnitType, keys [][]byte) (map[string][]byte, error) { + retMap := map[string][]byte{ + "tx_hash1": []byte(`{"Round": 0}`), + } + + return retMap, nil + }, + GetStorerCalled: func(unitType dataRetriever.UnitType) storage.Storer { + return &mock.StorerStub{ + RemoveCalled: func(key []byte) error { + return nil + }, + } + }, + } + rtp, _ := NewRewardTxPreprocessor( + tdp.RewardTransactions(), + &storer, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + func(shardID uint32, txHashes [][]byte) {}, + ) + + txHashes := [][]byte{[]byte("tx_hash1")} + mb1 := block.MiniBlock{ + TxHashes: txHashes, + ReceiverShardID: 1, + SenderShardID: 0, + Type: block.RewardsBlock, + } + + var blockBody block.Body + blockBody = append(blockBody, &mb1) + miniBlockPool := mock.NewCacherMock() + + numRestoredTxs, err := rtp.RestoreTxBlockIntoPools(blockBody, miniBlockPool) + assert.Equal(t, 1, numRestoredTxs) + assert.Nil(t, err) +} + +func TestRewardTxPreprocessor_CreateAndProcessMiniBlocksTxForMiniBlockNotFoundShouldErr(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + rtp, _ := NewRewardTxPreprocessor( + tdp.RewardTransactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{ + CreateAllInterMiniBlocksCalled: func() map[uint32]*block.MiniBlock { + txHashes := [][]byte{[]byte("hash_unavailable")} + mb1 := block.MiniBlock{ + TxHashes: txHashes, + ReceiverShardID: 1, + SenderShardID: 0, + Type: 
block.RewardsBlock, + } + + return map[uint32]*block.MiniBlock{ + 0: &mb1, + } + }, + }, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + func(shardID uint32, txHashes [][]byte) {}, + ) + + mBlocksSlice, err := rtp.CreateAndProcessMiniBlocks(1, 1, 0, haveTimeTrue) + assert.Nil(t, mBlocksSlice) + assert.Equal(t, process.ErrNilRewardTransaction, err) +} + +func TestRewardTxPreprocessor_CreateAndProcessMiniBlocksShouldWork(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + rtp, _ := NewRewardTxPreprocessor( + tdp.RewardTransactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{ + CreateAllInterMiniBlocksCalled: func() map[uint32]*block.MiniBlock { + txHashes := [][]byte{[]byte("tx1_hash")} + mb1 := block.MiniBlock{ + TxHashes: txHashes, + ReceiverShardID: 1, + SenderShardID: 0, + Type: block.RewardsBlock, + } + + return map[uint32]*block.MiniBlock{ + 0: &mb1, + } + }, + }, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + func(shardID uint32, txHashes [][]byte) {}, + ) + + mBlocksSlice, err := rtp.CreateAndProcessMiniBlocks(1, 1, 0, haveTimeTrue) + assert.NotNil(t, mBlocksSlice) + assert.Nil(t, err) +} + +func TestRewardTxPreprocessor_CreateBlockStartedShouldCleanMap(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + rtp, _ := NewRewardTxPreprocessor( + tdp.RewardTransactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + func(shardID uint32, txHashes [][]byte) {}, + ) + + rtp.CreateBlockStarted() + assert.Equal(t, 0, len(rtp.rewardTxsForBlock.txHashAndInfo)) +} diff --git a/process/block/preprocess/rewardsHandler.go b/process/block/preprocess/rewardsHandler.go new file mode 100644 index 00000000000..099a8008e46 --- /dev/null +++ b/process/block/preprocess/rewardsHandler.go @@ -0,0 +1,469 @@ +package preprocess + +import ( + "math/big" + "sync" + + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/sharding" +) + +const communityPercentage = 0.1 // 1 = 100%, 0 = 0% +const leaderPercentage = 0.5 // 1 = 100%, 0 = 0% +const burnPercentage = 0.4 // 1 = 100%, 0 = 0% + +// TODO: Replace with valid reward value +var rewardValue = big.NewInt(1000) + +type rewardsHandler struct { + address process.SpecialAddressHandler + hasher hashing.Hasher + marshalizer marshal.Marshalizer + shardCoordinator sharding.Coordinator + adrConv state.AddressConverter + store dataRetriever.StorageService + rewardTxPool dataRetriever.ShardedDataCacherNotifier + + mutGenRewardTxs sync.RWMutex + protocolRewards []data.TransactionHandler + protocolRewardsMeta []data.TransactionHandler + feeRewards []data.TransactionHandler + + mut sync.Mutex + accumulatedFees *big.Int + rewardTxsForBlock map[string]*rewardTx.RewardTx +} + +// NewRewardTxHandler constructor for the reward transaction handler +func NewRewardTxHandler( + address process.SpecialAddressHandler, + hasher hashing.Hasher, + 
marshalizer marshal.Marshalizer, + shardCoordinator sharding.Coordinator, + adrConv state.AddressConverter, + store dataRetriever.StorageService, + rewardTxPool dataRetriever.ShardedDataCacherNotifier, +) (*rewardsHandler, error) { + if address == nil || address.IsInterfaceNil() { + return nil, process.ErrNilSpecialAddressHandler + } + if hasher == nil || hasher.IsInterfaceNil() { + return nil, process.ErrNilHasher + } + if marshalizer == nil || marshalizer.IsInterfaceNil() { + return nil, process.ErrNilMarshalizer + } + if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { + return nil, process.ErrNilShardCoordinator + } + if adrConv == nil || adrConv.IsInterfaceNil() { + return nil, process.ErrNilAddressConverter + } + if store == nil || store.IsInterfaceNil() { + return nil, process.ErrNilStorage + } + if rewardTxPool == nil || rewardTxPool.IsInterfaceNil() { + return nil, process.ErrNilRewardTxDataPool + } + + rtxh := &rewardsHandler{ + address: address, + shardCoordinator: shardCoordinator, + adrConv: adrConv, + hasher: hasher, + marshalizer: marshalizer, + store: store, + rewardTxPool: rewardTxPool, + } + + rtxh.accumulatedFees = big.NewInt(0) + rtxh.rewardTxsForBlock = make(map[string]*rewardTx.RewardTx) + + return rtxh, nil +} + +// SaveCurrentIntermediateTxToStorage saves current cached data into storage - already saved for txs +func (rtxh *rewardsHandler) SaveCurrentIntermediateTxToStorage() error { + rtxh.mut.Lock() + defer rtxh.mut.Unlock() + + for _, rTx := range rtxh.rewardTxsForBlock { + buff, err := rtxh.marshalizer.Marshal(rTx) + if err != nil { + return err + } + + errNotCritical := rtxh.store.Put(dataRetriever.RewardTransactionUnit, rtxh.hasher.Compute(string(buff)), buff) + if errNotCritical != nil { + log.Error(errNotCritical.Error()) + } + } + + return nil +} + +// AddIntermediateTransactions adds intermediate transactions to local cache +func (rtxh *rewardsHandler) AddIntermediateTransactions(txs []data.TransactionHandler) error { + rtxh.mut.Lock() + defer rtxh.mut.Unlock() + + for i := 0; i < len(txs); i++ { + addedRewardTx, ok := txs[i].(*rewardTx.RewardTx) + if !ok { + return process.ErrWrongTypeAssertion + } + + if addedRewardTx.ShardId != rtxh.shardCoordinator.SelfId() { + continue + } + + rewardTxHash, err := core.CalculateHash(rtxh.marshalizer, rtxh.hasher, txs[i]) + if err != nil { + return err + } + + rtxh.rewardTxsForBlock[string(rewardTxHash)] = addedRewardTx + } + + return nil +} + +// CreateAllInterMiniBlocks creates miniblocks from process transactions +func (rtxh *rewardsHandler) CreateAllInterMiniBlocks() map[uint32]*block.MiniBlock { + rtxh.mutGenRewardTxs.Lock() + + rtxh.feeRewards = rtxh.createRewardFromFees() + rtxh.addTransactionsToPool(rtxh.feeRewards) + + rtxh.protocolRewards = rtxh.createProtocolRewards() + rtxh.addTransactionsToPool(rtxh.protocolRewards) + + rtxh.protocolRewardsMeta = rtxh.createProtocolRewardsForMeta() + rtxh.addTransactionsToPool(rtxh.protocolRewardsMeta) + + calculatedRewardTxs := make([]data.TransactionHandler, 0) + calculatedRewardTxs = append(calculatedRewardTxs, rtxh.protocolRewards...) + calculatedRewardTxs = append(calculatedRewardTxs, rtxh.protocolRewardsMeta...) + calculatedRewardTxs = append(calculatedRewardTxs, rtxh.feeRewards...) 
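+ // calculatedRewardTxs now holds, in order, the protocol rewards for this shard's consensus
+ // group, the protocol rewards owed to metachain consensus members with addresses in this
+ // shard, and the fee-based rewards (leader, community, burn); they are grouped below into
+ // one rewards miniblock per destination shard.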
+ + rtxh.mutGenRewardTxs.Unlock() + + miniBlocks := rtxh.miniblocksFromRewardTxs(calculatedRewardTxs) + + return miniBlocks +} + +func (rtxh *rewardsHandler) addTransactionsToPool(rewardTxs []data.TransactionHandler) { + for _, rTx := range rewardTxs { + dstShId, err := rtxh.address.ShardIdForAddress(rTx.GetRecvAddress()) + if err != nil { + log.Debug(err.Error()) + } + + txHash, err := core.CalculateHash(rtxh.marshalizer, rtxh.hasher, rTx) + if err != nil { + log.Debug(err.Error()) + } + + // add the reward transaction to the the pool so that the processor can find it + cacheId := process.ShardCacherIdentifier(rtxh.shardCoordinator.SelfId(), dstShId) + rtxh.rewardTxPool.AddData(txHash, rTx, cacheId) + } +} + +func (rtxh *rewardsHandler) miniblocksFromRewardTxs( + rewardTxs []data.TransactionHandler, +) map[uint32]*block.MiniBlock { + miniBlocks := make(map[uint32]*block.MiniBlock, 0) + + for _, rTx := range rewardTxs { + dstShId, err := rtxh.address.ShardIdForAddress(rTx.GetRecvAddress()) + if err != nil { + log.Debug(err.Error()) + continue + } + + txHash, err := core.CalculateHash(rtxh.marshalizer, rtxh.hasher, rTx) + if err != nil { + log.Debug(err.Error()) + continue + } + + var ok bool + var mb *block.MiniBlock + if mb, ok = miniBlocks[dstShId]; !ok { + mb = &block.MiniBlock{ + ReceiverShardID: dstShId, + SenderShardID: rtxh.shardCoordinator.SelfId(), + Type: block.RewardsBlock, + } + } + + mb.TxHashes = append(mb.TxHashes, txHash) + miniBlocks[dstShId] = mb + } + + return miniBlocks +} + +// VerifyInterMiniBlocks verifies if transaction fees were correctly handled for the block +func (rtxh *rewardsHandler) VerifyInterMiniBlocks(body block.Body) error { + err := rtxh.verifyCreatedRewardsTxs() + return err +} + +// CreateBlockStarted does the cleanup before creating a new block +func (rtxh *rewardsHandler) CreateBlockStarted() { + rtxh.cleanCachedData() +} + +// CreateMarshalizedData creates the marshalized data for broadcasting purposes +func (rtxh *rewardsHandler) CreateMarshalizedData(txHashes [][]byte) ([][]byte, error) { + rtxh.mut.Lock() + defer rtxh.mut.Unlock() + + marshaledTxs := make([][]byte, 0) + for _, txHash := range txHashes { + rTx, ok := rtxh.rewardTxsForBlock[string(txHash)] + if !ok { + return nil, process.ErrRewardTxNotFound + } + + marshaledTx, err := rtxh.marshalizer.Marshal(rTx) + if err != nil { + return nil, process.ErrMarshalWithoutSuccess + } + marshaledTxs = append(marshaledTxs, marshaledTx) + } + + return marshaledTxs, nil +} + +// ProcessTransactionFee adds the tx cost to the accumulated amount +func (rtxh *rewardsHandler) ProcessTransactionFee(cost *big.Int) { + if cost == nil { + log.Debug(process.ErrNilValue.Error()) + return + } + + rtxh.mut.Lock() + rtxh.accumulatedFees = rtxh.accumulatedFees.Add(rtxh.accumulatedFees, cost) + rtxh.mut.Unlock() +} + +// cleanCachedData deletes the cached data +func (rtxh *rewardsHandler) cleanCachedData() { + rtxh.mut.Lock() + rtxh.accumulatedFees = big.NewInt(0) + rtxh.rewardTxsForBlock = make(map[string]*rewardTx.RewardTx) + rtxh.mut.Unlock() + + rtxh.mutGenRewardTxs.Lock() + rtxh.feeRewards = make([]data.TransactionHandler, 0) + rtxh.protocolRewards = make([]data.TransactionHandler, 0) + rtxh.protocolRewardsMeta = make([]data.TransactionHandler, 0) + rtxh.mutGenRewardTxs.Unlock() +} + +func getPercentageOfValue(value *big.Int, percentage float64) *big.Int { + x := new(big.Float).SetInt(value) + y := big.NewFloat(percentage) + + z := new(big.Float).Mul(x, y) + + op := big.NewInt(0) + result, _ := z.Int(op) + + 
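// big.Float.Int truncates towards zero, so for an accumulated fee of 50 the split used by
+ // createRewardFromFees is exact (leader 25, community 5, burn 20), while a value that does
+ // not divide evenly can lose a remainder (illustrative figures: 33 -> 16 + 3 + 13 = 32).
+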
return result +} + +func (rtxh *rewardsHandler) createLeaderTx() *rewardTx.RewardTx { + currTx := &rewardTx.RewardTx{} + + currTx.Value = getPercentageOfValue(rtxh.accumulatedFees, leaderPercentage) + currTx.RcvAddr = rtxh.address.LeaderAddress() + currTx.ShardId = rtxh.shardCoordinator.SelfId() + currTx.Epoch = rtxh.address.Epoch() + currTx.Round = rtxh.address.Round() + + return currTx +} + +func (rtxh *rewardsHandler) createBurnTx() *rewardTx.RewardTx { + currTx := &rewardTx.RewardTx{} + + currTx.Value = getPercentageOfValue(rtxh.accumulatedFees, burnPercentage) + currTx.RcvAddr = rtxh.address.BurnAddress() + currTx.ShardId = rtxh.shardCoordinator.SelfId() + currTx.Epoch = rtxh.address.Epoch() + currTx.Round = rtxh.address.Round() + + return currTx +} + +func (rtxh *rewardsHandler) createCommunityTx() *rewardTx.RewardTx { + currTx := &rewardTx.RewardTx{} + + currTx.Value = getPercentageOfValue(rtxh.accumulatedFees, communityPercentage) + currTx.RcvAddr = rtxh.address.ElrondCommunityAddress() + currTx.ShardId = rtxh.shardCoordinator.SelfId() + currTx.Epoch = rtxh.address.Epoch() + currTx.Round = rtxh.address.Round() + + return currTx +} + +// createRewardFromFees creates the reward transactions from accumulated fees +// According to economic paper, out of the block fees 40% are burned, 50% go to the +// leader and 10% go to Elrond community fund. +func (rtxh *rewardsHandler) createRewardFromFees() []data.TransactionHandler { + rtxh.mut.Lock() + defer rtxh.mut.Unlock() + + if rtxh.accumulatedFees.Cmp(big.NewInt(1)) < 0 { + rtxh.accumulatedFees = big.NewInt(0) + return nil + } + + leaderTx := rtxh.createLeaderTx() + communityTx := rtxh.createCommunityTx() + burnTx := rtxh.createBurnTx() + + currFeeTxs := make([]data.TransactionHandler, 0) + currFeeTxs = append(currFeeTxs, leaderTx, communityTx, burnTx) + + return currFeeTxs +} + +// createProtocolRewards creates the protocol reward transactions +func (rtxh *rewardsHandler) createProtocolRewards() []data.TransactionHandler { + consensusRewardData := rtxh.address.ConsensusShardRewardData() + + consensusRewardTxs := make([]data.TransactionHandler, 0) + for _, address := range consensusRewardData.Addresses { + rTx := &rewardTx.RewardTx{} + rTx.Value = rewardValue + rTx.RcvAddr = []byte(address) + rTx.ShardId = rtxh.shardCoordinator.SelfId() + rTx.Epoch = consensusRewardData.Epoch + rTx.Round = consensusRewardData.Round + + consensusRewardTxs = append(consensusRewardTxs, rTx) + } + + return consensusRewardTxs +} + +// createProtocolRewardsForMeta creates the protocol reward transactions +func (rtxh *rewardsHandler) createProtocolRewardsForMeta() []data.TransactionHandler { + metaRewardsData := rtxh.address.ConsensusMetaRewardData() + consensusRewardTxs := make([]data.TransactionHandler, 0) + + for _, metaConsensusSet := range metaRewardsData { + for _, address := range metaConsensusSet.Addresses { + shardId, err := rtxh.address.ShardIdForAddress([]byte(address)) + if err != nil { + log.Error(err.Error()) + continue + } + + if shardId != rtxh.shardCoordinator.SelfId() { + continue + } + + rTx := &rewardTx.RewardTx{} + rTx.Value = rewardValue + rTx.RcvAddr = []byte(address) + rTx.ShardId = rtxh.shardCoordinator.SelfId() + rTx.Epoch = metaConsensusSet.Epoch + rTx.Round = metaConsensusSet.Round + + consensusRewardTxs = append(consensusRewardTxs, rTx) + } + } + + return consensusRewardTxs +} + +// verifyCreatedRewardsTxs verifies if the calculated rewards transactions and the block reward transactions are the same +func (rtxh *rewardsHandler) 
verifyCreatedRewardsTxs() error { + calculatedRewardTxs := make([]data.TransactionHandler, 0) + rtxh.mutGenRewardTxs.RLock() + calculatedRewardTxs = append(calculatedRewardTxs, rtxh.protocolRewards...) + calculatedRewardTxs = append(calculatedRewardTxs, rtxh.protocolRewardsMeta...) + calculatedRewardTxs = append(calculatedRewardTxs, rtxh.feeRewards...) + rtxh.mutGenRewardTxs.RUnlock() + + rtxh.mut.Lock() + defer rtxh.mut.Unlock() + + totalFeesFromBlock := big.NewInt(0) + for _, rTx := range rtxh.rewardTxsForBlock { + totalFeesFromBlock = totalFeesFromBlock.Add(totalFeesFromBlock, rTx.GetValue()) + } + + if len(calculatedRewardTxs) != len(rtxh.rewardTxsForBlock) { + return process.ErrRewardTxsMismatchCreatedReceived + } + + totalCalculatedFees := big.NewInt(0) + for _, value := range calculatedRewardTxs { + totalCalculatedFees = totalCalculatedFees.Add(totalCalculatedFees, value.GetValue()) + + rewardTxHash, err := core.CalculateHash(rtxh.marshalizer, rtxh.hasher, value) + if err != nil { + return err + } + + txFromBlock, ok := rtxh.rewardTxsForBlock[string(rewardTxHash)] + if !ok { + return process.ErrRewardTxNotFound + } + if txFromBlock.GetValue().Cmp(value.GetValue()) != 0 { + return process.ErrRewardTxsDoNotMatch + } + } + + return nil +} + +// GetAllCurrentFinishedTxs returns the cached finalized transactions for current round +func (rtxh *rewardsHandler) GetAllCurrentFinishedTxs() map[string]data.TransactionHandler { + rtxh.mut.Lock() + + rewardTxPool := make(map[string]data.TransactionHandler) + for txHash, info := range rtxh.rewardTxsForBlock { + + senderShard := info.ShardId + receiverShard, err := rtxh.address.ShardIdForAddress(info.RcvAddr) + if err != nil { + continue + } + if receiverShard != rtxh.shardCoordinator.SelfId() { + continue + } + if senderShard != rtxh.shardCoordinator.SelfId() { + continue + } + rewardTxPool[txHash] = info + } + rtxh.mut.Unlock() + + return rewardTxPool +} + +// IsInterfaceNil returns true if there is no value under the interface +func (rtxh *rewardsHandler) IsInterfaceNil() bool { + if rtxh == nil { + return true + } + return false +} diff --git a/process/block/preprocess/rewardsHandler_test.go b/process/block/preprocess/rewardsHandler_test.go new file mode 100644 index 00000000000..9523a05abee --- /dev/null +++ b/process/block/preprocess/rewardsHandler_test.go @@ -0,0 +1,576 @@ +package preprocess + +import ( + "bytes" + "math/big" + "reflect" + "testing" + + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/mock" + "github.com/stretchr/testify/assert" +) + +func TestNewRewardTxHandler_NilSpecialAddressShouldErr(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + th, err := NewRewardTxHandler( + nil, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AddressConverterMock{}, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), + ) + + assert.Nil(t, th) + assert.Equal(t, process.ErrNilSpecialAddressHandler, err) +} + +func TestNewRewardTxHandler_NilHasher(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + th, err := NewRewardTxHandler( + &mock.SpecialAddressHandlerMock{}, + nil, + &mock.MarshalizerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AddressConverterMock{}, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), + ) + + assert.Nil(t, 
th) + assert.Equal(t, process.ErrNilHasher, err) +} + +func TestNewRewardTxHandler_NilMarshalizer(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + th, err := NewRewardTxHandler( + &mock.SpecialAddressHandlerMock{}, + &mock.HasherMock{}, + nil, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AddressConverterMock{}, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), + ) + + assert.Nil(t, th) + assert.Equal(t, process.ErrNilMarshalizer, err) +} + +func TestNewRewardTxHandler_NilShardCoordinator(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + th, err := NewRewardTxHandler( + &mock.SpecialAddressHandlerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + nil, + &mock.AddressConverterMock{}, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), + ) + + assert.Nil(t, th) + assert.Equal(t, process.ErrNilShardCoordinator, err) +} + +func TestNewRewardTxHandler_NilAddressConverter(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + th, err := NewRewardTxHandler( + &mock.SpecialAddressHandlerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + nil, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), + ) + + assert.Nil(t, th) + assert.Equal(t, process.ErrNilAddressConverter, err) +} + +func TestNewRewardTxHandler_NilChainStorer(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + th, err := NewRewardTxHandler( + &mock.SpecialAddressHandlerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AddressConverterMock{}, + nil, + tdp.RewardTransactions(), + ) + + assert.Nil(t, th) + assert.Equal(t, process.ErrNilStorage, err) +} + +func TestNewRewardTxHandler_NilRewardsPool(t *testing.T) { + t.Parallel() + + th, err := NewRewardTxHandler( + &mock.SpecialAddressHandlerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AddressConverterMock{}, + &mock.ChainStorerMock{}, + nil, + ) + + assert.Nil(t, th) + assert.NotNil(t, process.ErrNilRewardTxDataPool, err) +} + +func TestNewRewardTxHandler_ValsOk(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + th, err := NewRewardTxHandler( + &mock.SpecialAddressHandlerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AddressConverterMock{}, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), + ) + + assert.Nil(t, err) + assert.NotNil(t, th) +} + +func TestRewardsHandler_AddIntermediateTransactions(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + th, err := NewRewardTxHandler( + &mock.SpecialAddressHandlerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AddressConverterMock{}, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), + ) + + assert.Nil(t, err) + assert.NotNil(t, th) + + err = th.AddIntermediateTransactions(nil) + assert.Nil(t, err) +} + +func TestRewardsHandler_ProcessTransactionFee(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + th, err := NewRewardTxHandler( + &mock.SpecialAddressHandlerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AddressConverterMock{}, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), + ) + + assert.Nil(t, err) + assert.NotNil(t, th) + + th.ProcessTransactionFee(nil) + assert.Equal(t, big.NewInt(0), th.accumulatedFees) + + th.ProcessTransactionFee(big.NewInt(10)) + assert.Equal(t, big.NewInt(10), th.accumulatedFees) + + 
th.ProcessTransactionFee(big.NewInt(100)) + assert.Equal(t, big.NewInt(110), th.accumulatedFees) +} + +func TestRewardsHandler_cleanCachedData(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + th, err := NewRewardTxHandler( + &mock.SpecialAddressHandlerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AddressConverterMock{}, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), + ) + + assert.Nil(t, err) + assert.NotNil(t, th) + + th.ProcessTransactionFee(big.NewInt(10)) + _ = th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{}}) + assert.Equal(t, big.NewInt(10), th.accumulatedFees) + assert.Equal(t, 1, len(th.rewardTxsForBlock)) + + th.cleanCachedData() + assert.Equal(t, big.NewInt(0), th.accumulatedFees) + assert.Equal(t, 0, len(th.rewardTxsForBlock)) +} + +func TestRewardsHandler_CreateRewardsFromFees(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + th, err := NewRewardTxHandler( + &mock.SpecialAddressHandlerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AddressConverterMock{}, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), + ) + + assert.Nil(t, err) + assert.NotNil(t, th) + + txs := th.createRewardFromFees() + assert.Equal(t, 0, len(txs)) + + currTxFee := big.NewInt(50) + th.ProcessTransactionFee(currTxFee) + + txs = th.createRewardFromFees() + assert.Equal(t, 3, len(txs)) + + totalSum := txs[0].GetValue().Uint64() + totalSum += txs[1].GetValue().Uint64() + totalSum += txs[2].GetValue().Uint64() + + assert.Equal(t, currTxFee.Uint64(), totalSum) +} + +func TestRewardsHandler_VerifyCreatedRewardsTxsRewardTxNotFound(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + adrConv := &mock.AddressConverterMock{} + shardCoordinator := mock.NewMultiShardsCoordinatorMock(3) + nodesCoordinator := mock.NewNodesCoordinatorMock() + addr := mock.NewSpecialAddressHandlerMock(adrConv, shardCoordinator, nodesCoordinator) + th, err := NewRewardTxHandler( + addr, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + shardCoordinator, + adrConv, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), + ) + + assert.Nil(t, err) + assert.NotNil(t, th) + + err = th.verifyCreatedRewardsTxs() + assert.Nil(t, err) + + currTxFee := big.NewInt(50) + th.ProcessTransactionFee(currTxFee) + _ = th.CreateAllInterMiniBlocks() + _ = th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{Value: big.NewInt(5), RcvAddr: addr.LeaderAddress()}}) + _ = th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{Value: big.NewInt(5), RcvAddr: addr.ElrondCommunityAddress()}}) + _ = th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{Value: big.NewInt(5), RcvAddr: addr.BurnAddress()}}) + err = th.verifyCreatedRewardsTxs() + assert.Equal(t, process.ErrRewardTxNotFound, err) +} + +func TestRewardsHandler_VerifyCreatedRewardsTxsTotalTxsFeesDoNotMatch(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + adrConv := &mock.AddressConverterMock{} + shardCoordinator := mock.NewMultiShardsCoordinatorMock(3) + nodesCoordinator := mock.NewNodesCoordinatorMock() + addr := mock.NewSpecialAddressHandlerMock(adrConv, shardCoordinator, nodesCoordinator) + th, err := NewRewardTxHandler( + addr, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + shardCoordinator, + adrConv, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), + ) + + assert.Nil(t, err) + assert.NotNil(t, th) + + err = th.verifyCreatedRewardsTxs() 
+ assert.Nil(t, err) + + currTxFee := big.NewInt(50) + th.ProcessTransactionFee(currTxFee) + extraVal := big.NewInt(100) + _ = th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{Value: big.NewInt(5), RcvAddr: addr.ElrondCommunityAddress()}}) + _ = th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{Value: big.NewInt(25), RcvAddr: addr.LeaderAddress()}}) + _ = th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{Value: big.NewInt(20), RcvAddr: addr.BurnAddress()}}) + _ = th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{Value: extraVal, RcvAddr: addr.BurnAddress()}}) + _ = th.CreateAllInterMiniBlocks() + err = th.verifyCreatedRewardsTxs() + assert.Equal(t, process.ErrRewardTxsMismatchCreatedReceived, err) +} + +func TestRewardsHandler_VerifyCreatedRewardsTxsOK(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + adrConv := &mock.AddressConverterMock{} + shardCoordinator := mock.NewMultiShardsCoordinatorMock(3) + nodesCoordinator := mock.NewNodesCoordinatorMock() + addr := mock.NewSpecialAddressHandlerMock(adrConv, shardCoordinator, nodesCoordinator) + th, err := NewRewardTxHandler( + addr, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + shardCoordinator, + adrConv, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), + ) + + assert.Nil(t, err) + assert.NotNil(t, th) + + currTxFee := big.NewInt(50) + th.ProcessTransactionFee(currTxFee) + _ = th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{Value: big.NewInt(5), RcvAddr: addr.ElrondCommunityAddress()}}) + _ = th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{Value: big.NewInt(25), RcvAddr: addr.LeaderAddress()}}) + _ = th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{Value: big.NewInt(20), RcvAddr: addr.BurnAddress()}}) + _ = th.CreateAllInterMiniBlocks() + err = th.verifyCreatedRewardsTxs() + assert.Nil(t, err) +} + +func TestRewardsHandler_CreateAllInterMiniBlocksOK(t *testing.T) { + t.Parallel() + + shardCoordinator := mock.NewMultiShardsCoordinatorMock(1) + nodesCoordinator := mock.NewNodesCoordinatorMock() + tdp := initDataPool() + th, err := NewRewardTxHandler( + mock.NewSpecialAddressHandlerMock( + &mock.AddressConverterMock{}, + shardCoordinator, + nodesCoordinator, + ), + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + shardCoordinator, + &mock.AddressConverterMock{}, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), + ) + + assert.Nil(t, err) + assert.NotNil(t, th) + + mbs := th.CreateAllInterMiniBlocks() + assert.Equal(t, 0, len(mbs)) + + currTxFee := big.NewInt(50) + th.ProcessTransactionFee(currTxFee) + + mbs = th.CreateAllInterMiniBlocks() + assert.Equal(t, 1, len(mbs)) +} + +func TestRewardsHandler_GetAllCurrentFinishedTxs(t *testing.T) { + t.Parallel() + + nodesCoordinator := mock.NewNodesCoordinatorMock() + shardCoordinator := mock.NewMultiShardsCoordinatorMock(1) + tdp := initDataPool() + specialAddress := &mock.SpecialAddressHandlerMock{ + AdrConv: &mock.AddressConverterMock{}, + ShardCoordinator: shardCoordinator, + NodesCoordinator: nodesCoordinator, + } + + _ = specialAddress.SetShardConsensusData([]byte("random"), 0, 0, shardCoordinator.SelfId()) + rewardData := specialAddress.ConsensusShardRewardData() + + th, err := NewRewardTxHandler( + specialAddress, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + shardCoordinator, + &mock.AddressConverterMock{}, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), + ) + + 
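// After the construction checks, one reward tx is added for every consensus address in
+ // rewardData, and GetAllCurrentFinishedTxs is expected to return exactly that set for
+ // the self shard.
+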
assert.Nil(t, err) + assert.NotNil(t, th) + + txs := make([]data.TransactionHandler, len(rewardData.Addresses)) + for i := 0; i < len(rewardData.Addresses); i++ { + txs[i] = &rewardTx.RewardTx{ + Round: 0, + Epoch: 0, + Value: big.NewInt(1), + RcvAddr: []byte(rewardData.Addresses[i]), + ShardId: 0, + } + + } + + err = th.AddIntermediateTransactions(txs) + assert.Nil(t, err) + + finishedTxs := th.GetAllCurrentFinishedTxs() + assert.Equal(t, len(txs), len(finishedTxs)) + + for _, ftx := range finishedTxs { + found := false + for _, tx := range txs { + if reflect.DeepEqual(tx, ftx) { + found = true + break + } + } + + assert.True(t, found) + } +} + +func TestRewardsHandler_CreateMarshalizedDataShouldWork(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + th, _ := NewRewardTxHandler( + &mock.SpecialAddressHandlerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AddressConverterMock{}, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), + ) + + txs := []data.TransactionHandler{ + &rewardTx.RewardTx{ + Round: 0, + Epoch: 0, + Value: big.NewInt(1), + RcvAddr: []byte("rcvr1"), + ShardId: 0, + }, + &rewardTx.RewardTx{ + Round: 0, + Epoch: 0, + Value: big.NewInt(1), + RcvAddr: []byte("rcvr2"), + ShardId: 0, + }, + } + + err := th.AddIntermediateTransactions(txs) + assert.Nil(t, err) + + var expectedMarshalizedTxs [][]byte + marshTx1, _ := th.marshalizer.Marshal(txs[0]) + marshTx2, _ := th.marshalizer.Marshal(txs[1]) + expectedMarshalizedTxs = append(expectedMarshalizedTxs, marshTx1, marshTx2) + + var txsHashes [][]byte + tx1Hash, _ := core.CalculateHash(th.marshalizer, th.hasher, txs[0]) + tx2Hash, _ := core.CalculateHash(th.marshalizer, th.hasher, txs[1]) + txsHashes = append(txsHashes, tx1Hash, tx2Hash) + + res, err := th.CreateMarshalizedData(txsHashes) + assert.Nil(t, err) + assert.Equal(t, len(txs), len(res)) + assert.True(t, bytes.Equal(expectedMarshalizedTxs[0], res[0])) + assert.True(t, bytes.Equal(expectedMarshalizedTxs[1], res[1])) +} + +func TestRewardsHandler_CreateBlockStartedShouldCreateProtocolReward(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + th, _ := NewRewardTxHandler( + &mock.SpecialAddressHandlerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AddressConverterMock{}, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), + ) + + assert.Nil(t, th.protocolRewards) + + th.CreateBlockStarted() + assert.NotNil(t, th.protocolRewards) +} + +func TestRewardsHandler_SaveCurrentIntermediateTxToStorageShouldWork(t *testing.T) { + t.Parallel() + + putWasCalled := false + tdp := initDataPool() + th, _ := NewRewardTxHandler( + &mock.SpecialAddressHandlerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AddressConverterMock{}, + &mock.ChainStorerMock{ + PutCalled: func(unitType dataRetriever.UnitType, key []byte, value []byte) error { + putWasCalled = true + return nil + }, + }, + tdp.RewardTransactions(), + ) + + txs := []data.TransactionHandler{ + &rewardTx.RewardTx{ + Round: 0, + Epoch: 0, + Value: big.NewInt(1), + RcvAddr: []byte("rcvr1"), + ShardId: 0, + }, + &rewardTx.RewardTx{ + Round: 0, + Epoch: 0, + Value: big.NewInt(1), + RcvAddr: []byte("rcvr2"), + ShardId: 0, + }, + } + + err := th.AddIntermediateTransactions(txs) + assert.Nil(t, err) + + err = th.SaveCurrentIntermediateTxToStorage() + assert.Nil(t, err) + assert.True(t, putWasCalled) +} diff --git 
a/process/block/preprocess/smartContractResults.go b/process/block/preprocess/smartContractResults.go index 8f50af74e88..469f04fe575 100644 --- a/process/block/preprocess/smartContractResults.go +++ b/process/block/preprocess/smartContractResults.go @@ -182,7 +182,7 @@ func (scr *smartContractResults) RestoreTxBlockIntoPools( } // ProcessBlockTransactions processes all the smartContractResult from the block.Body, updates the state -func (scr *smartContractResults) ProcessBlockTransactions(body block.Body, round uint64, haveTime func() time.Duration) error { +func (scr *smartContractResults) ProcessBlockTransactions(body block.Body, round uint64, haveTime func() bool) error { // basic validation already done in interceptors for i := 0; i < len(body); i++ { miniBlock := body[i] @@ -198,7 +198,7 @@ func (scr *smartContractResults) ProcessBlockTransactions(body block.Body, round } for j := 0; j < len(miniBlock.TxHashes); j++ { - if haveTime() < 0 { + if !haveTime() { return process.ErrTimeIsOut } @@ -416,6 +416,17 @@ func (scr *smartContractResults) CreateAndProcessMiniBlock(sndShardId, dstShardI return nil, nil } +// CreateAndProcessMiniBlocks creates miniblocks from storage and processes the reward transactions added into the miniblocks +// as long as it has time +func (scr *smartContractResults) CreateAndProcessMiniBlocks( + maxTxSpaceRemained uint32, + maxMbSpaceRemained uint32, + round uint64, + _ func() bool, +) (block.MiniBlockSlice, error) { + return nil, nil +} + // ProcessMiniBlock processes all the smartContractResults from a and saves the processed smartContractResults in local cache complete miniblock func (scr *smartContractResults) ProcessMiniBlock(miniBlock *block.MiniBlock, haveTime func() bool, round uint64) error { if miniBlock.Type != block.SmartContractResultBlock { diff --git a/process/block/preprocess/smartContractResults_test.go b/process/block/preprocess/smartContractResults_test.go index 2452bc28878..22851d4d989 100644 --- a/process/block/preprocess/smartContractResults_test.go +++ b/process/block/preprocess/smartContractResults_test.go @@ -300,7 +300,7 @@ func TestScrsPreprocessor_RequestBlockTransactionFromMiniBlockFromNetwork(t *tes func TestScrsPreprocessor_ReceivedTransactionShouldEraseRequested(t *testing.T) { t.Parallel() - dataPool := mock.NewPoolsHolderFake() + dataPool := mock.NewPoolsHolderMock() shardedDataStub := &mock.ShardedDataStub{ ShardDataStoreCalled: func(cacheId string) (c storage.Cacher) { @@ -352,7 +352,7 @@ func TestScrsPreprocessor_GetAllTxsFromMiniBlockShouldWork(t *testing.T) { hasher := mock.HasherMock{} marshalizer := &mock.MarshalizerMock{} - dataPool := mock.NewPoolsHolderFake() + dataPool := mock.NewPoolsHolderMock() senderShardId := uint32(0) destinationShardId := uint32(1) @@ -619,7 +619,7 @@ func TestScrsPreprocessor_ProcessBlockTransactions(t *testing.T) { miniblock := block.MiniBlock{ ReceiverShardID: 0, - SenderShardID: 0, + SenderShardID: 1, TxHashes: txHashes, Type: block.SmartContractResultBlock, } @@ -635,7 +635,7 @@ func TestScrsPreprocessor_ProcessBlockTransactions(t *testing.T) { scr.scrForBlock.txHashAndInfo["txHash"] = &txInfo{&smartcr, &txshardInfo} - err := scr.ProcessBlockTransactions(body, 1, haveTime) + err := scr.ProcessBlockTransactions(body, 1, haveTimeTrue) assert.Nil(t, err) } @@ -756,7 +756,7 @@ func TestScrsPreprocessor_RestoreTxBlockIntoPools(t *testing.T) { } } - dataPool := mock.NewPoolsHolderFake() + dataPool := mock.NewPoolsHolderMock() shardedDataStub := &mock.ShardedDataStub{ AddDataCalled: func(key 
[]byte, data interface{}, cacheId string) { diff --git a/process/block/preprocess/transactions.go b/process/block/preprocess/transactions.go index 02850fcb6ac..141da4e5e42 100644 --- a/process/block/preprocess/transactions.go +++ b/process/block/preprocess/transactions.go @@ -193,7 +193,7 @@ func (txs *transactions) RestoreTxBlockIntoPools( } // ProcessBlockTransactions processes all the transaction from the block.Body, updates the state -func (txs *transactions) ProcessBlockTransactions(body block.Body, round uint64, haveTime func() time.Duration) error { +func (txs *transactions) ProcessBlockTransactions(body block.Body, round uint64, haveTime func() bool) error { // basic validation already done in interceptors for i := 0; i < len(body); i++ { miniBlock := body[i] @@ -202,7 +202,7 @@ func (txs *transactions) ProcessBlockTransactions(body block.Body, round uint64, } for j := 0; j < len(miniBlock.TxHashes); j++ { - if haveTime() < 0 { + if !haveTime() { return process.ErrTimeIsOut } @@ -428,6 +428,52 @@ func isSmartContractAddress(rcvAddress []byte) bool { return false } +// CreateAndProcessMiniBlocks creates miniblocks from storage and processes the transactions added into the miniblocks +// as long as it has time +func (txs *transactions) CreateAndProcessMiniBlocks( + maxTxSpaceRemained uint32, + maxMbSpaceRemained uint32, + round uint64, + haveTime func() bool, +) (block.MiniBlockSlice, error) { + + miniBlocks := make(block.MiniBlockSlice, 0) + newMBAdded := true + txSpaceRemained := int(maxTxSpaceRemained) + + for newMBAdded { + newMBAdded = false + for shardId := uint32(0); shardId < txs.shardCoordinator.NumberOfShards(); shardId++ { + if maxTxSpaceRemained <= 0 { + break + } + + mbSpaceRemained := int(maxMbSpaceRemained) - len(miniBlocks) + if mbSpaceRemained <= 0 { + break + } + + miniBlock, err := txs.CreateAndProcessMiniBlock( + txs.shardCoordinator.SelfId(), + shardId, + txSpaceRemained, + haveTime, + round) + if err != nil { + continue + } + + if len(miniBlock.TxHashes) > 0 { + txSpaceRemained -= len(miniBlock.TxHashes) + miniBlocks = append(miniBlocks, miniBlock) + newMBAdded = true + } + } + } + + return miniBlocks, nil +} + // CreateAndProcessMiniBlock creates the miniblock from storage and processes the transactions added into the miniblock func (txs *transactions) CreateAndProcessMiniBlock( sndShardId uint32, diff --git a/process/block/preprocess/transactions_test.go b/process/block/preprocess/transactions_test.go index 8c00e50f546..0ea75f30432 100644 --- a/process/block/preprocess/transactions_test.go +++ b/process/block/preprocess/transactions_test.go @@ -4,6 +4,7 @@ import ( "bytes" "encoding/hex" "fmt" + "math/big" "math/rand" "reflect" "sync" @@ -12,6 +13,7 @@ import ( "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" "github.com/ElrondNetwork/elrond-go/data/smartContractResult" "github.com/ElrondNetwork/elrond-go/data/transaction" "github.com/ElrondNetwork/elrond-go/dataRetriever" @@ -46,6 +48,7 @@ func initDataPool() *mock.PoolsHolderStub { }, } }, + AddDataCalled: func(key []byte, data interface{}, cacheId string) {}, RemoveSetOfDataFromPoolCalled: func(keys [][]byte, id string) {}, SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { if reflect.DeepEqual(key, []byte("tx1_hash")) { @@ -74,6 +77,7 @@ func initDataPool() *mock.PoolsHolderStub { }, } }, + AddDataCalled: func(key []byte, data interface{}, cacheId string) {}, RemoveSetOfDataFromPoolCalled: 
func(keys [][]byte, id string) {}, SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { if reflect.DeepEqual(key, []byte("tx1_hash")) { @@ -83,6 +87,35 @@ func initDataPool() *mock.PoolsHolderStub { }, } }, + RewardTransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{ + RegisterHandlerCalled: func(i func(key []byte)) {}, + ShardDataStoreCalled: func(id string) (c storage.Cacher) { + return &mock.CacherStub{ + PeekCalled: func(key []byte) (value interface{}, ok bool) { + if reflect.DeepEqual(key, []byte("tx1_hash")) { + return &rewardTx.RewardTx{Value: big.NewInt(100)}, true + } + return nil, false + }, + KeysCalled: func() [][]byte { + return [][]byte{[]byte("key1"), []byte("key2")} + }, + LenCalled: func() int { + return 0 + }, + } + }, + AddDataCalled: func(key []byte, data interface{}, cacheId string) {}, + RemoveSetOfDataFromPoolCalled: func(keys [][]byte, id string) {}, + SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { + if reflect.DeepEqual(key, []byte("tx1_hash")) { + return &rewardTx.RewardTx{Value: big.NewInt(100)}, true + } + return nil, false + }, + } + }, HeadersNoncesCalled: func() dataRetriever.Uint64SyncMapCacher { return &mock.Uint64SyncMapCacherStub{} }, @@ -376,7 +409,7 @@ func TestTransactionPreprocessor_RequestBlockTransactionFromMiniBlockFromNetwork func TestTransactionPreprocessor_ReceivedTransactionShouldEraseRequested(t *testing.T) { t.Parallel() - dataPool := mock.NewPoolsHolderFake() + dataPool := mock.NewPoolsHolderMock() shardedDataStub := &mock.ShardedDataStub{ ShardDataStoreCalled: func(cacheId string) (c storage.Cacher) { @@ -435,7 +468,7 @@ func TestTransactionPreprocessor_GetAllTxsFromMiniBlockShouldWork(t *testing.T) hasher := mock.HasherMock{} marshalizer := &mock.MarshalizerMock{} - dataPool := mock.NewPoolsHolderFake() + dataPool := mock.NewPoolsHolderMock() senderShardId := uint32(0) destinationShardId := uint32(1) diff --git a/process/block/shardblock.go b/process/block/shardblock.go index 8c4b0d66ae7..6362e474934 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -56,6 +56,8 @@ func NewShardProcessor(arguments ArgShardProcessor) (*shardProcessor, error) { arguments.Marshalizer, arguments.Store, arguments.ShardCoordinator, + arguments.NodesCoordinator, + arguments.SpecialAddressHandler, arguments.Uint64Converter) if err != nil { return nil, err @@ -87,6 +89,8 @@ func NewShardProcessor(arguments ArgShardProcessor) (*shardProcessor, error) { marshalizer: arguments.Marshalizer, store: arguments.Store, shardCoordinator: arguments.ShardCoordinator, + nodesCoordinator: arguments.NodesCoordinator, + specialAddressHandler: arguments.SpecialAddressHandler, uint64Converter: arguments.Uint64Converter, onRequestHeaderHandlerByNonce: arguments.RequestHandler.RequestHeaderByNonce, appStatusHandler: statusHandler.NewNilStatusHandler(), @@ -185,6 +189,16 @@ func (sp *shardProcessor) ProcessBlock( log.Info(fmt.Sprintf("Total txs in pool: %d\n", numTxWithDst)) + err = sp.specialAddressHandler.SetShardConsensusData( + headerHandler.GetPrevRandSeed(), + headerHandler.GetRound(), + headerHandler.GetEpoch(), + headerHandler.GetShardID(), + ) + if err != nil { + return err + } + sp.txCoordinator.CreateBlockStarted() sp.txCoordinator.RequestBlockTransactions(body) requestedMetaHdrs, requestedFinalMetaHdrs := sp.requestMetaHeaders(header) @@ -237,6 +251,16 @@ func (sp *shardProcessor) ProcessBlock( } }() + processedMetaHdrs, err := 
sp.getProcessedMetaBlocksFromMiniBlocks(body, header.MetaBlockHashes) + if err != nil { + return err + } + + err = sp.setMetaConsensusData(processedMetaHdrs) + if err != nil { + return err + } + err = sp.txCoordinator.ProcessBlockTransaction(body, header.Round, haveTime) if err != nil { return err @@ -255,6 +279,30 @@ func (sp *shardProcessor) ProcessBlock( return nil } +func (sp *shardProcessor) setMetaConsensusData(finalizedMetaBlocks []data.HeaderHandler) error { + sp.specialAddressHandler.ClearMetaConsensusData() + + // for every finalized metablock header, reward the metachain consensus group members with accounts in shard + for _, metaBlock := range finalizedMetaBlocks { + round := metaBlock.GetRound() + epoch := metaBlock.GetEpoch() + err := sp.specialAddressHandler.SetMetaConsensusData(metaBlock.GetPrevRandSeed(), round, epoch) + if err != nil { + return err + } + } + + return nil +} + +// SetConsensusData - sets the reward data for the current consensus group +func (sp *shardProcessor) SetConsensusData(randomness []byte, round uint64, epoch uint32, shardId uint32) { + err := sp.specialAddressHandler.SetShardConsensusData(randomness, round, epoch, shardId) + if err != nil { + log.Error(err.Error()) + } +} + // checkMetaHeadersValidity - checks if listed metaheaders are valid as construction func (sp *shardProcessor) checkMetaHeadersValidityAndFinality(header *block.Header) error { metablockCache := sp.dataPool.MetaBlocks() @@ -291,7 +339,7 @@ func (sp *shardProcessor) checkMetaHeadersValidityAndFinality(header *block.Head }) for _, metaHdr := range currAddedMetaHdrs { - err := sp.isHdrConstructionValid(metaHdr, tmpNotedHdr) + err = sp.isHdrConstructionValid(metaHdr, tmpNotedHdr) if err != nil { return err } @@ -327,7 +375,7 @@ func (sp *shardProcessor) checkMetaHdrFinality(header data.HeaderHandler) error // found a header with the next nonce if tmpHdr.hdr.GetNonce() == lastVerifiedHdr.GetNonce()+1 { - err := sp.isHdrConstructionValid(tmpHdr.hdr, lastVerifiedHdr) + err = sp.isHdrConstructionValid(tmpHdr.hdr, lastVerifiedHdr) if err != nil { log.Debug(err.Error()) continue @@ -467,10 +515,14 @@ func (sp *shardProcessor) indexBlockIfNeeded( txPool := sp.txCoordinator.GetAllCurrentUsedTxs(block.TxBlock) scPool := sp.txCoordinator.GetAllCurrentUsedTxs(block.SmartContractResultBlock) + rewardPool := sp.txCoordinator.GetAllCurrentUsedTxs(block.RewardsBlock) for hash, tx := range scPool { txPool[hash] = tx } + for hash, tx := range rewardPool { + txPool[hash] = tx + } go sp.core.Indexer().SaveBlock(body, header, txPool) } @@ -576,7 +628,7 @@ func (sp *shardProcessor) restoreMetaBlockIntoPool(miniBlockHashes map[string]ui crossMiniBlockHashes := hdr.GetMiniBlockHeadersWithDst(sp.shardCoordinator.SelfId()) for key := range miniBlockHashes { - _, ok := crossMiniBlockHashes[key] + _, ok = crossMiniBlockHashes[key] if !ok { continue } @@ -865,10 +917,14 @@ func (sp *shardProcessor) getProcessedMetaBlocksFromHeader(header *block.Header) log.Debug(fmt.Sprintf("cross mini blocks in body: %d\n", len(miniBlockHashes))) - processedMetaHdrs := make([]data.HeaderHandler, 0) + processedMetaHeaders, usedMbs, err := sp.getProcessedMetaBlocksFromMiniBlockHashes(miniBlockHashes, header.MetaBlockHashes) + if err != nil { + return nil, err + } + for _, metaBlockKey := range header.MetaBlockHashes { - obj, _ := sp.dataPool.MetaBlocks().Peek(metaBlockKey) - if obj == nil { + obj, ok := sp.dataPool.MetaBlocks().Peek(metaBlockKey) + if !ok { return nil, process.ErrNilMetaBlockHeader } @@ -877,16 +933,82 @@ func 
(sp *shardProcessor) getProcessedMetaBlocksFromHeader(header *block.Header) return nil, process.ErrWrongTypeAssertion } + crossMiniBlockHashes := metaBlock.GetMiniBlockHeadersWithDst(sp.shardCoordinator.SelfId()) + for key := range crossMiniBlockHashes { + if usedMbs[key] { + sp.addProcessedMiniBlock(metaBlockKey, []byte(key)) + } + } + } + + return processedMetaHeaders, nil +} + +// getProcessedMetaBlocks returns all the meta blocks fully processed +func (sp *shardProcessor) getProcessedMetaBlocksFromMiniBlocks( + usedMiniBlocks []*block.MiniBlock, + usedMetaBlockHashes [][]byte, +) ([]data.HeaderHandler, error) { + if usedMiniBlocks == nil || usedMetaBlockHashes == nil { + // not an error, it can happen that no metablock header or no miniblock is used. + return make([]data.HeaderHandler, 0), nil + } + + miniBlockHashes := make(map[int][]byte, 0) + for i := 0; i < len(usedMiniBlocks); i++ { + miniBlock := usedMiniBlocks[i] + if miniBlock.SenderShardID == sp.shardCoordinator.SelfId() { + continue + } + + mbHash, err := core.CalculateHash(sp.marshalizer, sp.hasher, miniBlock) + if err != nil { + log.Debug(err.Error()) + continue + } + miniBlockHashes[i] = mbHash + } + + log.Debug(fmt.Sprintf("cross mini blocks in body: %d\n", len(miniBlockHashes))) + processedMetaBlocks, _, err := sp.getProcessedMetaBlocksFromMiniBlockHashes(miniBlockHashes, usedMetaBlockHashes) + + return processedMetaBlocks, err +} + +func (sp *shardProcessor) getProcessedMetaBlocksFromMiniBlockHashes( + miniBlockHashes map[int][]byte, + usedMetaBlockHashes [][]byte, +) ([]data.HeaderHandler, map[string]bool, error) { + + processedMetaHdrs := make([]data.HeaderHandler, 0) + processedMBs := make(map[string]bool) + + for _, metaBlockKey := range usedMetaBlockHashes { + obj, _ := sp.dataPool.MetaBlocks().Peek(metaBlockKey) + if obj == nil { + return nil, nil, process.ErrNilMetaBlockHeader + } + + metaBlock, ok := obj.(*block.MetaBlock) + if !ok { + return nil, nil, process.ErrWrongTypeAssertion + } + log.Debug(fmt.Sprintf("meta header nonce: %d\n", metaBlock.Nonce)) crossMiniBlockHashes := metaBlock.GetMiniBlockHeadersWithDst(sp.shardCoordinator.SelfId()) + for hash := range crossMiniBlockHashes { + processedMBs[hash] = sp.isMiniBlockProcessed(metaBlockKey, []byte(hash)) + } + for key := range miniBlockHashes { - _, ok := crossMiniBlockHashes[string(miniBlockHashes[key])] + _, ok = crossMiniBlockHashes[string(miniBlockHashes[key])] if !ok { continue } - sp.addProcessedMiniBlock(metaBlockKey, miniBlockHashes[key]) + processedMBs[string(miniBlockHashes[key])] = true + delete(miniBlockHashes, key) } @@ -894,7 +1016,7 @@ func (sp *shardProcessor) getProcessedMetaBlocksFromHeader(header *block.Header) processedAll := true for key := range crossMiniBlockHashes { - if !sp.isMiniBlockProcessed(metaBlockKey, []byte(key)) { + if !processedMBs[key] { processedAll = false break } @@ -905,7 +1027,7 @@ func (sp *shardProcessor) getProcessedMetaBlocksFromHeader(header *block.Header) } } - return processedMetaHdrs, nil + return processedMetaHdrs, processedMBs, nil } func (sp *shardProcessor) removeProcessedMetaBlocksFromPool(processedMetaHdrs []data.HeaderHandler) error { @@ -1333,7 +1455,7 @@ func (sp *shardProcessor) createAndProcessCrossMiniBlocksDstMe( continue } - err := sp.isHdrConstructionValid(hdr, lastMetaHdr) + err = sp.isHdrConstructionValid(hdr, lastMetaHdr) if err != nil { continue } @@ -1418,17 +1540,22 @@ func (sp *shardProcessor) createMiniBlocks( log.Info(err.Error()) } + processedMetaHdrs, errNotCritical := 
sp.getProcessedMetaBlocksFromMiniBlocks(destMeMiniBlocks, usedMetaHdrsHashes) + if errNotCritical != nil { + log.Debug(errNotCritical.Error()) + } + + err = sp.setMetaConsensusData(processedMetaHdrs) + if err != nil { + return nil, err + } + log.Debug(fmt.Sprintf("processed %d miniblocks and %d txs with destination in self shard\n", len(destMeMiniBlocks), txs)) if len(destMeMiniBlocks) > 0 { miniBlocks = append(miniBlocks, destMeMiniBlocks...) } - if !haveTime() { - log.Info(fmt.Sprintf("time is up added %d transactions\n", txs)) - return miniBlocks, nil - } - maxTxSpaceRemained := int32(maxItemsInBlock) - int32(txs) maxMbSpaceRemained := int32(maxItemsInBlock) - int32(len(destMeMiniBlocks)) - int32(len(usedMetaHdrsHashes)) diff --git a/process/block/shardblock_test.go b/process/block/shardblock_test.go index e565c25d3f6..3cda2c559f5 100644 --- a/process/block/shardblock_test.go +++ b/process/block/shardblock_test.go @@ -42,14 +42,16 @@ func initAccountsMock() *mock.AccountsStub { } } -func initBasicTestData() (*mock.PoolsHolderFake, *blockchain.BlockChain, []byte, block.Body, [][]byte, *mock.HasherMock, *mock.MarshalizerMock, error, []byte) { - tdp := mock.NewPoolsHolderFake() +func initBasicTestData() (*mock.PoolsHolderMock, *blockchain.BlockChain, []byte, block.Body, [][]byte, *mock.HasherMock, *mock.MarshalizerMock, error, []byte) { + tdp := mock.NewPoolsHolderMock() txHash := []byte("tx_hash1") + randSeed := []byte("rand seed") tdp.Transactions().AddData(txHash, &transaction.Transaction{}, process.ShardCacherIdentifier(1, 0)) blkc := &blockchain.BlockChain{ CurrentBlockHeader: &block.Header{ - Round: 1, - Nonce: 1, + Round: 1, + Nonce: 1, + RandSeed: randSeed, }, } rootHash := []byte("rootHash") @@ -69,11 +71,12 @@ func initBasicTestData() (*mock.PoolsHolderFake, *blockchain.BlockChain, []byte, return tdp, blkc, rootHash, body, txHashes, hasher, marshalizer, nil, mbHash } -func initBlockHeader(prevHash []byte, rootHash []byte, mbHdrs []block.MiniBlockHeader) block.Header { +func initBlockHeader(prevHash []byte, prevRandSeed []byte, rootHash []byte, mbHdrs []block.MiniBlockHeader) block.Header { hdr := block.Header{ Nonce: 2, Round: 2, PrevHash: prevHash, + PrevRandSeed: prevRandSeed, Signature: []byte("signature"), PubKeysBitmap: []byte("00110"), ShardId: 0, @@ -296,6 +299,7 @@ func TestShardProcessor_ProcessWithDirtyAccountShouldErr(t *testing.T) { Nonce: 1, PubKeysBitmap: []byte("0100101"), PrevHash: []byte(""), + PrevRandSeed: []byte("rand seed"), Signature: []byte("signature"), RootHash: []byte("roothash"), } @@ -310,7 +314,7 @@ func TestShardProcessor_ProcessWithDirtyAccountShouldErr(t *testing.T) { err := sp.ProcessBlock(blkc, &hdr, body, haveTime) assert.NotNil(t, err) - assert.Equal(t, err, process.ErrAccountStateDirty) + assert.Equal(t, process.ErrAccountStateDirty, err) } func TestShardProcessor_ProcessBlockHeaderBodyMismatchShouldErr(t *testing.T) { @@ -321,6 +325,7 @@ func TestShardProcessor_ProcessBlockHeaderBodyMismatchShouldErr(t *testing.T) { hdr := block.Header{ Nonce: 1, PrevHash: []byte(""), + PrevRandSeed: []byte("rand seed"), Signature: []byte("signature"), PubKeysBitmap: []byte("00110"), ShardId: 0, @@ -395,6 +400,7 @@ func TestShardProcessor_ProcessBlockWithInvalidTransactionShouldErr(t *testing.T hdr := block.Header{ Nonce: 1, PrevHash: []byte(""), + PrevRandSeed: []byte("rand seed"), Signature: []byte("signature"), PubKeysBitmap: []byte("00110"), ShardId: 0, @@ -429,6 +435,8 @@ func TestShardProcessor_ProcessBlockWithInvalidTransactionShouldErr(t *testing.T }, 
&mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) container, _ := factory.Create() @@ -467,6 +475,7 @@ func TestShardProcessor_ProcessWithHeaderNotFirstShouldErr(t *testing.T) { Round: 1, PubKeysBitmap: []byte("0100101"), PrevHash: []byte(""), + PrevRandSeed: []byte("rand seed"), Signature: []byte("signature"), RootHash: []byte("root hash"), } @@ -486,6 +495,7 @@ func TestShardProcessor_ProcessWithHeaderNotCorrectNonceShouldErr(t *testing.T) Round: 1, PubKeysBitmap: []byte("0100101"), PrevHash: []byte(""), + PrevRandSeed: []byte("rand seed"), Signature: []byte("signature"), RootHash: []byte("root hash"), } @@ -508,19 +518,23 @@ func TestShardProcessor_ProcessWithHeaderNotCorrectPrevHashShouldErr(t *testing. return 0 }, } + + randSeed := []byte("rand seed") sp, _ := blproc.NewShardProcessor(arguments) hdr := &block.Header{ Nonce: 1, Round: 1, PubKeysBitmap: []byte("0100101"), PrevHash: []byte("zzz"), + PrevRandSeed: randSeed, Signature: []byte("signature"), RootHash: []byte("root hash"), } body := make(block.Body, 0) blkc := &blockchain.BlockChain{ CurrentBlockHeader: &block.Header{ - Nonce: 0, + Nonce: 0, + RandSeed: randSeed, }, } err := sp.ProcessBlock(blkc, hdr, body, haveTime) @@ -531,9 +545,11 @@ func TestShardProcessor_ProcessBlockWithErrOnProcessBlockTransactionsCallShouldR t.Parallel() tdp := initDataPool([]byte("tx_hash1")) txHash := []byte("tx_hash1") + randSeed := []byte("rand seed") blkc := &blockchain.BlockChain{ CurrentBlockHeader: &block.Header{ - Nonce: 0, + Nonce: 0, + RandSeed: randSeed, }, } body := make(block.Body, 0) @@ -563,6 +579,7 @@ func TestShardProcessor_ProcessBlockWithErrOnProcessBlockTransactionsCallShouldR Round: 1, Nonce: 1, PrevHash: []byte(""), + PrevRandSeed: randSeed, Signature: []byte("signature"), PubKeysBitmap: []byte("00110"), ShardId: 0, @@ -606,6 +623,8 @@ func TestShardProcessor_ProcessBlockWithErrOnProcessBlockTransactionsCallShouldR tpm, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) container, _ := factory.Create() @@ -643,11 +662,14 @@ func TestShardProcessor_ProcessBlockWithErrOnProcessBlockTransactionsCallShouldR func TestShardProcessor_ProcessBlockWithErrOnVerifyStateRootCallShouldRevertState(t *testing.T) { t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + randSeed := []byte("rand seed") txHash := []byte("tx_hash1") blkc := &blockchain.BlockChain{ CurrentBlockHeader: &block.Header{ - Nonce: 0, + Nonce: 0, + RandSeed: randSeed, }, } body := make(block.Body, 0) @@ -677,6 +699,7 @@ func TestShardProcessor_ProcessBlockWithErrOnVerifyStateRootCallShouldRevertStat Round: 1, Nonce: 1, PrevHash: []byte(""), + PrevRandSeed: randSeed, Signature: []byte("signature"), PubKeysBitmap: []byte("00110"), ShardId: 0, @@ -720,11 +743,14 @@ func TestShardProcessor_ProcessBlockWithErrOnVerifyStateRootCallShouldRevertStat func TestShardProcessor_ProcessBlockOnlyIntraShardShouldPass(t *testing.T) { t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + randSeed := []byte("rand seed") txHash := []byte("tx_hash1") blkc := &blockchain.BlockChain{ CurrentBlockHeader: &block.Header{ - Nonce: 0, + Nonce: 0, + RandSeed: randSeed, }, } rootHash := []byte("rootHash") @@ -755,6 +781,7 @@ func TestShardProcessor_ProcessBlockOnlyIntraShardShouldPass(t *testing.T) { Round: 1, Nonce: 1, PrevHash: []byte(""), + PrevRandSeed: randSeed, Signature: []byte("signature"), 
PubKeysBitmap: []byte("00110"), ShardId: 0, @@ -789,11 +816,14 @@ func TestShardProcessor_ProcessBlockOnlyIntraShardShouldPass(t *testing.T) { func TestShardProcessor_ProcessBlockCrossShardWithoutMetaShouldFail(t *testing.T) { t.Parallel() + + randSeed := []byte("rand seed") tdp := initDataPool([]byte("tx_hash1")) txHash := []byte("tx_hash1") blkc := &blockchain.BlockChain{ CurrentBlockHeader: &block.Header{ - Nonce: 0, + Nonce: 0, + RandSeed: randSeed, }, } rootHash := []byte("rootHash") @@ -828,6 +858,7 @@ func TestShardProcessor_ProcessBlockCrossShardWithoutMetaShouldFail(t *testing.T Round: 1, Nonce: 1, PrevHash: []byte(""), + PrevRandSeed: randSeed, Signature: []byte("signature"), PubKeysBitmap: []byte("00110"), ShardId: 0, @@ -871,9 +902,10 @@ func TestShardProcessor_ProcessBlockCrossShardWithMetaShouldPass(t *testing.T) { mbHdrs := make([]block.MiniBlockHeader, 0) mbHdrs = append(mbHdrs, mbHdr) + randSeed := []byte("rand seed") lastHdr := blkc.GetCurrentBlockHeader() prevHash, _ := core.CalculateHash(marshalizer, hasher, lastHdr) - hdr := initBlockHeader(prevHash, rootHash, mbHdrs) + hdr := initBlockHeader(prevHash, randSeed, rootHash, mbHdrs) shardMiniBlock := block.ShardMiniBlockHeader{ ReceiverShardId: mbHdr.ReceiverShardID, @@ -892,6 +924,7 @@ func TestShardProcessor_ProcessBlockCrossShardWithMetaShouldPass(t *testing.T) { meta := block.MetaBlock{ Nonce: 1, ShardInfo: shardHdrs, + RandSeed: randSeed, } metaBytes, _ := marshalizer.Marshal(meta) metaHash := hasher.Compute(string(metaBytes)) @@ -899,8 +932,9 @@ func TestShardProcessor_ProcessBlockCrossShardWithMetaShouldPass(t *testing.T) { tdp.MetaBlocks().Put(metaHash, meta) meta = block.MetaBlock{ - Nonce: 2, - ShardInfo: make([]block.ShardData, 0), + Nonce: 2, + ShardInfo: make([]block.ShardData, 0), + PrevRandSeed: randSeed, } metaBytes, _ = marshalizer.Marshal(meta) metaHash = hasher.Compute(string(metaBytes)) @@ -939,9 +973,11 @@ func TestShardProcessor_ProcessBlockHaveTimeLessThanZeroShouldErr(t *testing.T) txHash := []byte("tx_hash1") tdp := initDataPool(txHash) + randSeed := []byte("rand seed") blkc := &blockchain.BlockChain{ CurrentBlockHeader: &block.Header{ - Nonce: 1, + Nonce: 1, + RandSeed: randSeed, }, } rootHash := []byte("rootHash") @@ -974,6 +1010,7 @@ func TestShardProcessor_ProcessBlockHaveTimeLessThanZeroShouldErr(t *testing.T) Round: 2, Nonce: 2, PrevHash: preHash, + PrevRandSeed: randSeed, Signature: []byte("signature"), PubKeysBitmap: []byte("00110"), ShardId: 0, @@ -1003,9 +1040,10 @@ func TestShardProcessor_ProcessBlockWithMissingMetaHdrShouldErr(t *testing.T) { mbHdrs := make([]block.MiniBlockHeader, 0) mbHdrs = append(mbHdrs, mbHdr) + randSeed := []byte("rand seed") lastHdr := blkc.GetCurrentBlockHeader() prevHash, _ := core.CalculateHash(marshalizer, hasher, lastHdr) - hdr := initBlockHeader(prevHash, rootHash, mbHdrs) + hdr := initBlockHeader(prevHash, randSeed, rootHash, mbHdrs) shardMiniBlock := block.ShardMiniBlockHeader{ ReceiverShardId: mbHdr.ReceiverShardID, @@ -1024,6 +1062,7 @@ func TestShardProcessor_ProcessBlockWithMissingMetaHdrShouldErr(t *testing.T) { meta := block.MetaBlock{ Nonce: 1, ShardInfo: shardHdrs, + RandSeed: randSeed, } metaBytes, _ := marshalizer.Marshal(meta) metaHash := hasher.Compute(string(metaBytes)) @@ -1032,8 +1071,9 @@ func TestShardProcessor_ProcessBlockWithMissingMetaHdrShouldErr(t *testing.T) { tdp.MetaBlocks().Put(metaHash, meta) meta = block.MetaBlock{ - Nonce: 2, - ShardInfo: make([]block.ShardData, 0), + Nonce: 2, + ShardInfo: make([]block.ShardData, 0), + 
PrevRandSeed: randSeed, } metaBytes, _ = marshalizer.Marshal(meta) metaHash = hasher.Compute(string(metaBytes)) @@ -1070,9 +1110,11 @@ func TestShardProcessor_ProcessBlockWithWrongMiniBlockHeaderShouldErr(t *testing txHash := []byte("tx_hash1") tdp := initDataPool(txHash) + randSeed := []byte("rand seed") blkc := &blockchain.BlockChain{ CurrentBlockHeader: &block.Header{ - Nonce: 1, + Nonce: 1, + RandSeed: randSeed, }, } rootHash := []byte("rootHash") @@ -1102,7 +1144,7 @@ func TestShardProcessor_ProcessBlockWithWrongMiniBlockHeaderShouldErr(t *testing lastHdr := blkc.GetCurrentBlockHeader() prevHash, _ := core.CalculateHash(marshalizer, hasher, lastHdr) - hdr := initBlockHeader(prevHash, rootHash, mbHdrs) + hdr := initBlockHeader(prevHash, randSeed, rootHash, mbHdrs) rootHashCalled := func() ([]byte, error) { return rootHash, nil @@ -1129,13 +1171,16 @@ func TestShardProcessor_CheckAndRequestIfMetaHeadersMissingShouldErr(t *testing. ReceiverShardID: 0, SenderShardID: 1, TxCount: uint32(len(txHashes)), - Hash: mbHash} + Hash: mbHash, + } mbHdrs := make([]block.MiniBlockHeader, 0) mbHdrs = append(mbHdrs, mbHdr) lastHdr := blkc.GetCurrentBlockHeader() prevHash, _ := core.CalculateHash(marshalizer, hasher, lastHdr) - hdr := initBlockHeader(prevHash, rootHash, mbHdrs) + randSeed := []byte("rand seed") + + hdr := initBlockHeader(prevHash, randSeed, rootHash, mbHdrs) shardMiniBlock := block.ShardMiniBlockHeader{ ReceiverShardId: mbHdr.ReceiverShardID, @@ -1155,6 +1200,7 @@ func TestShardProcessor_CheckAndRequestIfMetaHeadersMissingShouldErr(t *testing. Nonce: 1, ShardInfo: shardHdrs, Round: 1, + RandSeed: randSeed, } metaBytes, _ := marshalizer.Marshal(meta) metaHash := hasher.Compute(string(metaBytes)) @@ -1163,9 +1209,10 @@ func TestShardProcessor_CheckAndRequestIfMetaHeadersMissingShouldErr(t *testing. tdp.MetaBlocks().Put(metaHash, meta) meta = &block.MetaBlock{ - Nonce: 2, - ShardInfo: make([]block.ShardData, 0), - Round: 2, + Nonce: 2, + ShardInfo: make([]block.ShardData, 0), + Round: 2, + PrevRandSeed: randSeed, } metaBytes, _ = marshalizer.Marshal(meta) metaHash = hasher.Compute(string(metaBytes)) @@ -1209,12 +1256,14 @@ func TestShardProcessor_CheckAndRequestIfMetaHeadersMissingShouldErr(t *testing. 
func TestShardProcessor_IsMetaHeaderFinalShouldPass(t *testing.T) { t.Parallel() - tdp := mock.NewPoolsHolderFake() + tdp := mock.NewPoolsHolderMock() txHash := []byte("tx_hash1") + randSeed := []byte("rand seed") tdp.Transactions().AddData(txHash, &transaction.Transaction{}, process.ShardCacherIdentifier(1, 0)) blkc := &blockchain.BlockChain{ CurrentBlockHeader: &block.Header{ - Nonce: 1, + Nonce: 1, + RandSeed: randSeed, }, } rootHash := []byte("rootHash") @@ -1244,7 +1293,7 @@ func TestShardProcessor_IsMetaHeaderFinalShouldPass(t *testing.T) { lastHdr := blkc.GetCurrentBlockHeader() prevHash, _ := core.CalculateHash(marshalizer, hasher, lastHdr) - hdr := initBlockHeader(prevHash, rootHash, mbHdrs) + hdr := initBlockHeader(prevHash, randSeed, rootHash, mbHdrs) shardMiniBlock := block.ShardMiniBlockHeader{ ReceiverShardId: mbHdr.ReceiverShardID, @@ -1264,6 +1313,7 @@ func TestShardProcessor_IsMetaHeaderFinalShouldPass(t *testing.T) { Nonce: 1, ShardInfo: shardHdrs, Round: 1, + RandSeed: randSeed, } metaBytes, _ := marshalizer.Marshal(meta) metaHash := hasher.Compute(string(metaBytes)) @@ -1272,10 +1322,11 @@ func TestShardProcessor_IsMetaHeaderFinalShouldPass(t *testing.T) { tdp.MetaBlocks().Put(metaHash, meta) meta = &block.MetaBlock{ - Nonce: 2, - ShardInfo: make([]block.ShardData, 0), - Round: 2, - PrevHash: metaHash, + Nonce: 2, + ShardInfo: make([]block.ShardData, 0), + Round: 2, + PrevHash: metaHash, + PrevRandSeed: randSeed, } metaBytes, _ = marshalizer.Marshal(meta) metaHash = hasher.Compute(string(metaBytes)) @@ -1295,6 +1346,7 @@ func TestShardProcessor_IsMetaHeaderFinalShouldPass(t *testing.T) { Nonce: 1, ShardInfo: shardHdrs, Round: 1, + RandSeed: randSeed, } ordered, _ := sp.GetOrderedMetaBlocks(3) res = sp.IsMetaHeaderFinal(meta, ordered, 0) @@ -1305,7 +1357,7 @@ func TestShardProcessor_IsMetaHeaderFinalShouldPass(t *testing.T) { func TestShardProcessor_RequestFinalMissingHeaders(t *testing.T) { t.Parallel() - tdp := mock.NewPoolsHolderFake() + tdp := mock.NewPoolsHolderMock() arguments := CreateMockArgumentsMultiShard() arguments.DataPool = tdp sp, _ := blproc.NewShardProcessor(arguments) @@ -1319,7 +1371,7 @@ func TestShardProcessor_RequestFinalMissingHeaders(t *testing.T) { func TestShardProcessor_CheckMetaHeadersValidityAndFinalityShouldPass(t *testing.T) { t.Parallel() - tdp := mock.NewPoolsHolderFake() + tdp := mock.NewPoolsHolderMock() txHash := []byte("tx_hash1") tdp.Transactions().AddData(txHash, &transaction.Transaction{}, process.ShardCacherIdentifier(1, 0)) rootHash := []byte("rootHash") @@ -1349,7 +1401,8 @@ func TestShardProcessor_CheckMetaHeadersValidityAndFinalityShouldPass(t *testing genesisBlocks := createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)) lastHdr := genesisBlocks[0] prevHash, _ := core.CalculateHash(marshalizer, hasher, lastHdr) - hdr := initBlockHeader(prevHash, rootHash, mbHdrs) + randSeed := []byte("rand seed") + hdr := initBlockHeader(prevHash, randSeed, rootHash, mbHdrs) shardMiniBlock := block.ShardMiniBlockHeader{ ReceiverShardId: mbHdr.ReceiverShardID, @@ -1412,13 +1465,15 @@ func TestShardProcessor_CheckMetaHeadersValidityAndFinalityShouldErr(t *testing. 
txHashes := make([][]byte, 0) txHashes = append(txHashes, txHash) - tdp := mock.NewPoolsHolderFake() + tdp := mock.NewPoolsHolderMock() genesisBlocks := createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)) sp, _ := blproc.NewShardProcessorEmptyWith3shards(tdp, genesisBlocks) lastHdr := genesisBlocks[0] prevHash, _ := core.CalculateHash(&mock.MarshalizerMock{}, &mock.HasherMock{}, lastHdr) - hdr := initBlockHeader(prevHash, rootHash, mbHdrs) + + randSeed := []byte("rand seed") + hdr := initBlockHeader(prevHash, randSeed, rootHash, mbHdrs) hdr.MetaBlockHashes = append(hdr.MetaBlockHashes, []byte("meta")) hdr.Round = 0 @@ -1746,6 +1801,8 @@ func TestShardProcessor_CommitBlockNoTxInPoolShouldErr(t *testing.T) { &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) container, _ := factory.Create() @@ -1780,6 +1837,7 @@ func TestShardProcessor_CommitBlockOkValsShouldWork(t *testing.T) { rootHash := []byte("root hash") hdrHash := []byte("header hash") + randSeed := []byte("rand seed") prevHdr := &block.Header{ Nonce: 0, @@ -1788,6 +1846,7 @@ func TestShardProcessor_CommitBlockOkValsShouldWork(t *testing.T) { PrevHash: hdrHash, Signature: rootHash, RootHash: rootHash, + RandSeed: randSeed, } hdr := &block.Header{ @@ -1797,6 +1856,7 @@ func TestShardProcessor_CommitBlockOkValsShouldWork(t *testing.T) { PrevHash: hdrHash, Signature: rootHash, RootHash: rootHash, + PrevRandSeed: randSeed, } mb := block.MiniBlock{ TxHashes: [][]byte{txHash}, @@ -1878,6 +1938,7 @@ func TestShardProcessor_CommitBlockCallsIndexerMethods(t *testing.T) { rootHash := []byte("root hash") hdrHash := []byte("header hash") + randSeed := []byte("rand seed") prevHdr := &block.Header{ Nonce: 0, @@ -1886,6 +1947,7 @@ func TestShardProcessor_CommitBlockCallsIndexerMethods(t *testing.T) { PrevHash: hdrHash, Signature: rootHash, RootHash: rootHash, + RandSeed: randSeed, } hdr := &block.Header{ @@ -1895,6 +1957,7 @@ func TestShardProcessor_CommitBlockCallsIndexerMethods(t *testing.T) { PrevHash: hdrHash, Signature: rootHash, RootHash: rootHash, + PrevRandSeed: randSeed, } mb := block.MiniBlock{ TxHashes: [][]byte{txHash}, @@ -2301,6 +2364,8 @@ func TestShardProcessor_MarshalizedDataToBroadcastShouldWork(t *testing.T) { &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) container, _ := factory.Create() @@ -2401,6 +2466,8 @@ func TestShardProcessor_MarshalizedDataMarshalWithoutSuccess(t *testing.T) { &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) container, _ := factory.Create() @@ -2434,7 +2501,7 @@ func TestShardProcessor_ReceivedMetaBlockShouldRequestMissingMiniBlocks(t *testi hasher := mock.HasherMock{} marshalizer := &mock.MarshalizerMock{} - dataPool := mock.NewPoolsHolderFake() + dataPool := mock.NewPoolsHolderMock() //we will have a metablock that will return 3 miniblock hashes //1 miniblock hash will be in cache @@ -2509,7 +2576,7 @@ func TestShardProcessor_ReceivedMetaBlockNoMissingMiniBlocksShouldPass(t *testin hasher := mock.HasherMock{} marshalizer := &mock.MarshalizerMock{} - dataPool := mock.NewPoolsHolderFake() + dataPool := mock.NewPoolsHolderMock() //we will have a metablock that will return 3 miniblock hashes //1 miniblock hash will be in cache @@ 
-2562,7 +2629,7 @@ func TestShardProcessor_ReceivedMetaBlockNoMissingMiniBlocksShouldPass(t *testin func TestShardProcessor_CreateAndProcessCrossMiniBlocksDstMe(t *testing.T) { t.Parallel() - tdp := mock.NewPoolsHolderFake() + tdp := mock.NewPoolsHolderMock() txHash := []byte("tx_hash1") tdp.Transactions().AddData(txHash, &transaction.Transaction{}, process.ShardCacherIdentifier(1, 0)) body := make(block.Body, 0) @@ -2630,7 +2697,7 @@ func TestShardProcessor_CreateAndProcessCrossMiniBlocksDstMe(t *testing.T) { func TestShardProcessor_NewShardProcessorWrongTypeOfStartHeaderShouldErrWrongTypeAssertion(t *testing.T) { t.Parallel() - tdp := mock.NewPoolsHolderFake() + tdp := mock.NewPoolsHolderMock() txHash := []byte(nil) tdp.Transactions().AddData(txHash, &transaction.Transaction{}, process.ShardCacherIdentifier(1, 0)) @@ -2654,7 +2721,7 @@ func TestShardProcessor_CreateAndProcessCrossMiniBlocksDstMeProcessPartOfMiniBlo haveTimeTrue := func() bool { return true } - tdp := mock.NewPoolsHolderFake() + tdp := mock.NewPoolsHolderMock() destShardId := uint32(2) hasher := &mock.HasherStub{} @@ -2742,7 +2809,7 @@ func TestShardProcessor_CreateMiniBlocksShouldWorkWithIntraShardTxs(t *testing.T hasher := mock.HasherMock{} marshalizer := &mock.MarshalizerMock{} - dataPool := mock.NewPoolsHolderFake() + dataPool := mock.NewPoolsHolderMock() //we will have a 3 txs in pool @@ -2815,6 +2882,8 @@ func TestShardProcessor_CreateMiniBlocksShouldWorkWithIntraShardTxs(t *testing.T txProcessorMock, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) container, _ := factory.Create() @@ -2863,7 +2932,7 @@ func TestShardProcessor_GetProcessedMetaBlockFromPoolShouldWork(t *testing.T) { hasher := mock.HasherMock{} marshalizer := &mock.MarshalizerMock{} - dataPool := mock.NewPoolsHolderFake() + dataPool := mock.NewPoolsHolderMock() miniblockHashes := make([][]byte, 6) @@ -2968,7 +3037,7 @@ func TestShardProcessor_RestoreBlockIntoPoolsShouldWork(t *testing.T) { txHash := []byte("tx hash 1") - dataPool := mock.NewPoolsHolderFake() + dataPool := mock.NewPoolsHolderMock() marshalizerMock := &mock.MarshalizerMock{} hasherMock := &mock.HasherStub{} @@ -3003,6 +3072,8 @@ func TestShardProcessor_RestoreBlockIntoPoolsShouldWork(t *testing.T) { &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) container, _ := factory.Create() @@ -3223,7 +3294,7 @@ func TestShardProcessor_RemoveAndSaveLastNotarizedMetaHdrNoDstMB(t *testing.T) { hasher := mock.HasherMock{} marshalizer := &mock.MarshalizerMock{} - dataPool := mock.NewPoolsHolderFake() + dataPool := mock.NewPoolsHolderMock() forkDetector := &mock.ForkDetectorMock{} highNonce := uint64(500) forkDetector.GetHighestFinalBlockNonceCalled = func() uint64 { @@ -3267,8 +3338,6 @@ func TestShardProcessor_RemoveAndSaveLastNotarizedMetaHdrNoDstMB(t *testing.T) { RandSeed: prevRandSeed} notarizedHdrs[sharding.MetachainShardId] = append(notarizedHdrs[sharding.MetachainShardId], lastHdr) - //put the existing headers inside datapool - //header shard 0 prevHash, _ := sp.ComputeHeaderHash(sp.LastNotarizedHdrForShard(sharding.MetachainShardId).(*block.MetaBlock)) prevHdr := &block.MetaBlock{ @@ -3380,7 +3449,7 @@ func TestShardProcessor_RemoveAndSaveLastNotarizedMetaHdrNotAllMBFinished(t *tes hasher := mock.HasherMock{} marshalizer := &mock.MarshalizerMock{} - dataPool := 
mock.NewPoolsHolderFake() + dataPool := mock.NewPoolsHolderMock() forkDetector := &mock.ForkDetectorMock{} highNonce := uint64(500) forkDetector.GetHighestFinalBlockNonceCalled = func() uint64 { @@ -3516,7 +3585,7 @@ func TestShardProcessor_RemoveAndSaveLastNotarizedMetaHdrAllMBFinished(t *testin hasher := mock.HasherMock{} marshalizer := &mock.MarshalizerMock{} - dataPool := mock.NewPoolsHolderFake() + dataPool := mock.NewPoolsHolderMock() forkDetector := &mock.ForkDetectorMock{} highNonce := uint64(500) forkDetector.GetHighestFinalBlockNonceCalled = func() uint64 { @@ -3761,7 +3830,7 @@ func TestShardProcessor_RestoreMetaBlockIntoPoolShouldPass(t *testing.T) { marshalizer := &mock.MarshalizerMock{} - poolFake := mock.NewPoolsHolderFake() + poolFake := mock.NewPoolsHolderMock() metaBlock := block.MetaBlock{ Nonce: 1, @@ -4119,13 +4188,13 @@ func TestShardProcessor_RestoreMetaBlockIntoPoolVerifyMiniblocks(t *testing.T) { t.Parallel() marshalizer := &mock.MarshalizerMock{} - poolFake := mock.NewPoolsHolderFake() + poolMock := mock.NewPoolsHolderMock() storer := &mock.ChainStorerMock{} shardC := mock.NewMultiShardsCoordinatorMock(3) arguments := CreateMockArgumentsMultiShard() - arguments.DataPool = poolFake + arguments.DataPool = poolMock arguments.Store = storer arguments.ShardCoordinator = shardC arguments.StartHeaders = createGenesisBlocks(shardC) @@ -4163,7 +4232,7 @@ func TestShardProcessor_RestoreMetaBlockIntoPoolVerifyMiniblocks(t *testing.T) { metablockHashes := make([][]byte, 0) metablockHashes = append(metablockHashes, metaHash) - metaBlockRestored, ok := poolFake.MetaBlocks().Get(metaHash) + metaBlockRestored, ok := poolMock.MetaBlocks().Get(metaHash) assert.Equal(t, nil, metaBlockRestored) assert.False(t, ok) @@ -4181,7 +4250,7 @@ func TestShardProcessor_RestoreMetaBlockIntoPoolVerifyMiniblocks(t *testing.T) { err := sp.RestoreMetaBlockIntoPool(miniblockHashes, metablockHashes) - metaBlockRestored, _ = poolFake.MetaBlocks().Get(metaHash) + metaBlockRestored, _ = poolMock.MetaBlocks().Get(metaHash) assert.Equal(t, meta, metaBlockRestored) assert.Nil(t, err) diff --git a/process/constants.go b/process/constants.go index ef837a3b01f..1b7e74f48c2 100644 --- a/process/constants.go +++ b/process/constants.go @@ -24,6 +24,8 @@ const ( SCDeployment // SCInvoking defines ID of a transaction of type smart contract call SCInvoking + // RewardTx defines ID of a reward transaction + RewardTx // InvalidTransaction defines unknown transaction type InvalidTransaction ) diff --git a/process/coordinator/process.go b/process/coordinator/process.go index 0f44376fd3a..9002696b0d2 100644 --- a/process/coordinator/process.go +++ b/process/coordinator/process.go @@ -74,6 +74,7 @@ func NewTransactionCoordinator( if tc.miniBlockPool == nil || tc.miniBlockPool.IsInterfaceNil() { return nil, process.ErrNilMiniBlockPool } + tc.miniBlockPool.RegisterHandler(tc.receivedMiniBlock) tc.onRequestMiniBlock = requestHandler.RequestMiniBlock @@ -206,8 +207,7 @@ func (tc *transactionCoordinator) SaveBlockDataToStorage(body block.Body) error errMutex := sync.Mutex{} wg := sync.WaitGroup{} - // Length of body types + another go routine for the intermediate transactions - wg.Add(len(separatedBodies)) + wg.Add(len(separatedBodies) + len(tc.keysInterimProcs)) for key, value := range separatedBodies { go func(blockType block.Type, blockBody block.Body) { @@ -230,22 +230,29 @@ func (tc *transactionCoordinator) SaveBlockDataToStorage(body block.Body) error }(key, value) } - wg.Wait() + for _, blockType := range 
tc.keysInterimProcs { + go func(blockType block.Type) { + intermediateProc := tc.getInterimProcessor(blockType) + if intermediateProc == nil { + wg.Done() + return + } - intermediatePreproc := tc.getInterimProcessor(block.SmartContractResultBlock) - if intermediatePreproc == nil { - return errFound - } + err := intermediateProc.SaveCurrentIntermediateTxToStorage() + if err != nil { + log.Debug(err.Error()) - err := intermediatePreproc.SaveCurrentIntermediateTxToStorage() - if err != nil { - log.Debug(err.Error()) + errMutex.Lock() + errFound = err + errMutex.Unlock() + } - errMutex.Lock() - errFound = err - errMutex.Unlock() + wg.Done() + }(blockType) } + wg.Wait() + return errFound } @@ -330,10 +337,14 @@ func (tc *transactionCoordinator) RemoveBlockDataFromPool(body block.Body) error func (tc *transactionCoordinator) ProcessBlockTransaction( body block.Body, round uint64, - haveTime func() time.Duration, + timeRemaining func() time.Duration, ) error { - separatedBodies := tc.separateBodyByType(body) + haveTime := func() bool { + return timeRemaining() >= 0 + } + + separatedBodies := tc.separateBodyByType(body) // processing has to be done in order, as the order of different type of transactions over the same account is strict for _, blockType := range tc.keysTxPreProcs { if separatedBodies[blockType] == nil { @@ -439,43 +450,27 @@ func (tc *transactionCoordinator) CreateMbsAndProcessTransactionsFromMe( haveTime func() bool, ) block.MiniBlockSlice { - txPreProc := tc.getPreProcessor(block.TxBlock) - if txPreProc == nil || txPreProc.IsInterfaceNil() { - return nil - } - miniBlocks := make(block.MiniBlockSlice, 0) - txSpaceRemained := int(maxTxSpaceRemained) - - newMBAdded := true - for newMBAdded { - newMBAdded = false + for _, blockType := range tc.keysTxPreProcs { - for shardId := uint32(0); shardId < tc.shardCoordinator.NumberOfShards(); shardId++ { - if txSpaceRemained <= 0 { - break - } + txPreProc := tc.getPreProcessor(blockType) + if txPreProc == nil || txPreProc.IsInterfaceNil() { + return nil + } - mbSpaceRemained := int(maxMbSpaceRemained) - len(miniBlocks) - if mbSpaceRemained <= 0 { - break - } + mbs, err := txPreProc.CreateAndProcessMiniBlocks( + maxTxSpaceRemained, + maxMbSpaceRemained, + round, + haveTime, + ) - miniBlock, err := txPreProc.CreateAndProcessMiniBlock( - tc.shardCoordinator.SelfId(), - shardId, - txSpaceRemained, - haveTime, - round) - if err != nil { - continue - } + if err != nil { + log.Error(err.Error()) + } - if len(miniBlock.TxHashes) > 0 { - txSpaceRemained -= len(miniBlock.TxHashes) - miniBlocks = append(miniBlocks, miniBlock) - newMBAdded = true - } + if len(mbs) > 0 { + miniBlocks = append(miniBlocks, mbs...) 
} } @@ -492,6 +487,11 @@ func (tc *transactionCoordinator) processAddedInterimTransactions() block.MiniBl // processing has to be done in order, as the order of different type of transactions over the same account is strict for _, blockType := range tc.keysInterimProcs { + if blockType == block.RewardsBlock { + // this has to be processed last + continue + } + interimProc := tc.getInterimProcessor(blockType) if interimProc == nil { // this will never be reached as keysInterimProcs are the actual keys from the interimMap @@ -556,6 +556,8 @@ func createBroadcastTopic(shardC sharding.Coordinator, destShId uint32, mbType b baseTopic = factory.PeerChBodyTopic case block.SmartContractResultBlock: baseTopic = factory.UnsignedTransactionTopic + case block.RewardsBlock: + baseTopic = factory.RewardsTransactionTopic default: return "", process.ErrUnknownBlockType } @@ -689,7 +691,7 @@ func (tc *transactionCoordinator) processCompleteMiniBlock( snapshot := tc.accounts.JournalLen() err := preproc.ProcessMiniBlock(miniBlock, haveTime, round) if err != nil { - log.Debug(err.Error()) + log.Error(err.Error()) errAccountState := tc.accounts.RevertToSnapshot(snapshot) if errAccountState != nil { // TODO: evaluate if reloading the trie from disk will might solve the problem @@ -705,14 +707,20 @@ func (tc *transactionCoordinator) processCompleteMiniBlock( // VerifyCreatedBlockTransactions checks whether the created transactions are the same as the one proposed func (tc *transactionCoordinator) VerifyCreatedBlockTransactions(body block.Body) error { tc.mutInterimProcessors.RLock() - + defer tc.mutInterimProcessors.RUnlock() errMutex := sync.Mutex{} var errFound error - + // TODO: think if it is good in parallel or it is needed in sequences wg := sync.WaitGroup{} wg.Add(len(tc.interimProcessors)) - for _, interimProc := range tc.interimProcessors { + for key, interimProc := range tc.interimProcessors { + if key == block.RewardsBlock { + // this has to be processed last + wg.Done() + continue + } + go func(intermediateProcessor process.IntermediateTransactionHandler) { err := intermediateProcessor.VerifyInterMiniBlocks(body) if err != nil { @@ -725,9 +733,17 @@ func (tc *transactionCoordinator) VerifyCreatedBlockTransactions(body block.Body } wg.Wait() - tc.mutInterimProcessors.RUnlock() - return errFound + if errFound != nil { + return errFound + } + + interimProc := tc.getInterimProcessor(block.RewardsBlock) + if interimProc == nil { + return nil + } + + return interimProc.VerifyInterMiniBlocks(body) } // IsInterfaceNil returns true if there is no value under the interface diff --git a/process/coordinator/process_test.go b/process/coordinator/process_test.go index f4f43b6bc2c..9e2f2b5538b 100644 --- a/process/coordinator/process_test.go +++ b/process/coordinator/process_test.go @@ -14,6 +14,7 @@ import ( "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" "github.com/ElrondNetwork/elrond-go/data/smartContractResult" "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/data/transaction" @@ -29,68 +30,55 @@ import ( "github.com/stretchr/testify/assert" ) +func createShardedDataChacherNotifier( + handler data.TransactionHandler, + testHash []byte, +) (func() dataRetriever.ShardedDataCacherNotifier ) { + return func() dataRetriever.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{ + RegisterHandlerCalled: func(i func(key []byte)) {}, + 
ShardDataStoreCalled: func(id string) (c storage.Cacher) { + return &mock.CacherStub{ + PeekCalled: func(key []byte) (value interface{}, ok bool) { + if reflect.DeepEqual(key, testHash) { + return handler, true + } + return nil, false + }, + KeysCalled: func() [][]byte { + return [][]byte{[]byte("key1"), []byte("key2")} + }, + LenCalled: func() int { + return 0 + }, + } + }, + RemoveSetOfDataFromPoolCalled: func(keys [][]byte, id string) {}, + SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { + if reflect.DeepEqual(key, []byte("tx1_hash")) { + return handler, true + } + return nil, false + }, + AddDataCalled: func(key []byte, data interface{}, cacheId string) { + }, + } + } +} + func initDataPool(testHash []byte) *mock.PoolsHolderStub { + tx := &transaction.Transaction{Nonce: 10} + sc := &smartContractResult.SmartContractResult{Nonce: 10, SndAddr: []byte("0"), RcvAddr: []byte("1")} + rTx := &rewardTx.RewardTx{Epoch: 0, Round: 1, RcvAddr: []byte("1")} + + txCalled := createShardedDataChacherNotifier(tx, testHash) + unsignedTxHandler := createShardedDataChacherNotifier(sc, testHash) + rewardTxCalled := createShardedDataChacherNotifier(rTx, testHash) + sdp := &mock.PoolsHolderStub{ - TransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{ - RegisterHandlerCalled: func(i func(key []byte)) {}, - ShardDataStoreCalled: func(id string) (c storage.Cacher) { - return &mock.CacherStub{ - PeekCalled: func(key []byte) (value interface{}, ok bool) { - if reflect.DeepEqual(key, testHash) { - return &transaction.Transaction{Nonce: 10, Data: id}, true - } - return nil, false - }, - KeysCalled: func() [][]byte { - return [][]byte{[]byte("key1"), []byte("key2"), testHash} - }, - LenCalled: func() int { - return 0 - }, - } - }, - RemoveSetOfDataFromPoolCalled: func(keys [][]byte, id string) {}, - SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { - if reflect.DeepEqual(key, testHash) { - return &transaction.Transaction{Nonce: 10}, true - } - return nil, false - }, - AddDataCalled: func(key []byte, data interface{}, cacheId string) { - }, - } - }, - UnsignedTransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{ - RegisterHandlerCalled: func(i func(key []byte)) {}, - ShardDataStoreCalled: func(id string) (c storage.Cacher) { - return &mock.CacherStub{ - PeekCalled: func(key []byte) (value interface{}, ok bool) { - if reflect.DeepEqual(key, testHash) { - return &smartContractResult.SmartContractResult{Nonce: 10, SndAddr: []byte("0"), RcvAddr: []byte("1")}, true - } - return nil, false - }, - KeysCalled: func() [][]byte { - return [][]byte{[]byte("key1"), []byte("key2")} - }, - LenCalled: func() int { - return 0 - }, - } - }, - RemoveSetOfDataFromPoolCalled: func(keys [][]byte, id string) {}, - SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { - if reflect.DeepEqual(key, testHash) { - return &smartContractResult.SmartContractResult{Nonce: 10, SndAddr: []byte("0"), RcvAddr: []byte("1")}, true - } - return nil, false - }, - AddDataCalled: func(key []byte, data interface{}, cacheId string) { - }, - } - }, + TransactionsCalled: txCalled, + UnsignedTransactionsCalled: unsignedTxHandler, + RewardTransactionsCalled: rewardTxCalled, HeadersNoncesCalled: func() dataRetriever.Uint64SyncMapCacher { return &mock.Uint64SyncMapCacherStub{ MergeCalled: func(u uint64, hashMap dataRetriever.ShardIdHashMap) {}, @@ -156,6 +144,7 @@ func initDataPool(testHash []byte) 
*mock.PoolsHolderStub { } return sdp } + func containsHash(txHashes [][]byte, hash []byte) bool { for _, txHash := range txHashes { if bytes.Equal(hash, txHash) { @@ -208,7 +197,7 @@ func TestNewTransactionCoordinator_NilShardCoordinator(t *testing.T) { tc, err := NewTransactionCoordinator( nil, &mock.AccountsStub{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.RequestHandlerMock{}, &mock.PreProcessorContainerMock{}, &mock.InterimProcessorContainerMock{}, @@ -224,7 +213,7 @@ func TestNewTransactionCoordinator_NilAccountsStub(t *testing.T) { tc, err := NewTransactionCoordinator( mock.NewMultiShardsCoordinatorMock(5), nil, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.RequestHandlerMock{}, &mock.PreProcessorContainerMock{}, &mock.InterimProcessorContainerMock{}, @@ -256,7 +245,7 @@ func TestNewTransactionCoordinator_NilRequestHandler(t *testing.T) { tc, err := NewTransactionCoordinator( mock.NewMultiShardsCoordinatorMock(5), &mock.AccountsStub{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), nil, &mock.PreProcessorContainerMock{}, &mock.InterimProcessorContainerMock{}, @@ -272,7 +261,7 @@ func TestNewTransactionCoordinator_NilHasher(t *testing.T) { tc, err := NewTransactionCoordinator( mock.NewMultiShardsCoordinatorMock(5), &mock.AccountsStub{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.RequestHandlerMock{}, nil, &mock.InterimProcessorContainerMock{}, @@ -288,7 +277,7 @@ func TestNewTransactionCoordinator_NilMarshalizer(t *testing.T) { tc, err := NewTransactionCoordinator( mock.NewMultiShardsCoordinatorMock(5), &mock.AccountsStub{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.RequestHandlerMock{}, &mock.PreProcessorContainerMock{}, nil, @@ -304,7 +293,7 @@ func TestNewTransactionCoordinator_OK(t *testing.T) { tc, err := NewTransactionCoordinator( mock.NewMultiShardsCoordinatorMock(5), &mock.AccountsStub{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.RequestHandlerMock{}, &mock.PreProcessorContainerMock{}, &mock.InterimProcessorContainerMock{}, @@ -320,7 +309,7 @@ func TestTransactionCoordinator_SeparateBodyNil(t *testing.T) { tc, err := NewTransactionCoordinator( mock.NewMultiShardsCoordinatorMock(5), &mock.AccountsStub{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.RequestHandlerMock{}, &mock.PreProcessorContainerMock{}, &mock.InterimProcessorContainerMock{}, @@ -338,7 +327,7 @@ func TestTransactionCoordinator_SeparateBody(t *testing.T) { tc, err := NewTransactionCoordinator( mock.NewMultiShardsCoordinatorMock(5), &mock.AccountsStub{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.RequestHandlerMock{}, &mock.PreProcessorContainerMock{}, &mock.InterimProcessorContainerMock{}, @@ -378,6 +367,8 @@ func createPreProcessorContainer() process.PreProcessorsContainer { }, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) container, _ := preFactory.Create() @@ -390,7 +381,9 @@ func createInterimProcessorContainer() process.IntermediateProcessorContainer { &mock.MarshalizerMock{}, &mock.HasherMock{}, &mock.AddressConverterMock{}, + &mock.SpecialAddressHandlerMock{}, initStore(), + initDataPool([]byte("test_hash1")), ) container, _ := preFactory.Create() @@ -414,6 +407,8 @@ func createPreProcessorContainerWithDataPool(dataPool dataRetriever.PoolsHolder) }, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + 
&mock.IntermediateTransactionHandlerMock{}, ) container, _ := preFactory.Create() @@ -426,7 +421,7 @@ func TestTransactionCoordinator_CreateBlockStarted(t *testing.T) { tc, err := NewTransactionCoordinator( mock.NewMultiShardsCoordinatorMock(5), &mock.AccountsStub{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.RequestHandlerMock{}, createPreProcessorContainer(), &mock.InterimProcessorContainerMock{}, @@ -450,7 +445,7 @@ func TestTransactionCoordinator_CreateMarshalizedDataNilBody(t *testing.T) { tc, err := NewTransactionCoordinator( mock.NewMultiShardsCoordinatorMock(5), &mock.AccountsStub{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.RequestHandlerMock{}, createPreProcessorContainer(), &mock.InterimProcessorContainerMock{}, @@ -489,7 +484,7 @@ func TestTransactionCoordinator_CreateMarshalizedData(t *testing.T) { tc, err := NewTransactionCoordinator( mock.NewMultiShardsCoordinatorMock(5), &mock.AccountsStub{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.RequestHandlerMock{}, createPreProcessorContainer(), &mock.InterimProcessorContainerMock{}, @@ -510,7 +505,7 @@ func TestTransactionCoordinator_CreateMarshalizedDataWithTxsAndScr(t *testing.T) tc, err := NewTransactionCoordinator( mock.NewMultiShardsCoordinatorMock(5), &mock.AccountsStub{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.RequestHandlerMock{}, createPreProcessorContainer(), interimContainer, @@ -562,7 +557,7 @@ func TestTransactionCoordinator_CreateMbsAndProcessCrossShardTransactionsDstMeNi tc, err := NewTransactionCoordinator( mock.NewMultiShardsCoordinatorMock(5), &mock.AccountsStub{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.RequestHandlerMock{}, createPreProcessorContainer(), &mock.InterimProcessorContainerMock{}, @@ -610,7 +605,7 @@ func TestTransactionCoordinator_CreateMbsAndProcessCrossShardTransactionsDstMeNo tc, err := NewTransactionCoordinator( mock.NewMultiShardsCoordinatorMock(5), &mock.AccountsStub{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.RequestHandlerMock{}, createPreProcessorContainer(), &mock.InterimProcessorContainerMock{}, @@ -636,7 +631,7 @@ func TestTransactionCoordinator_CreateMbsAndProcessCrossShardTransactionsNothing tc, err := NewTransactionCoordinator( mock.NewMultiShardsCoordinatorMock(5), &mock.AccountsStub{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.RequestHandlerMock{}, createPreProcessorContainer(), &mock.InterimProcessorContainerMock{}, @@ -683,6 +678,8 @@ func TestTransactionCoordinator_CreateMbsAndProcessCrossShardTransactions(t *tes }, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) container, _ := preFactory.Create() @@ -757,6 +754,9 @@ func TestTransactionCoordinator_CreateMbsAndProcessTransactionsFromMeNothingToPr UnsignedTransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { return shardedCacheMock }, + RewardTransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { + return shardedCacheMock + }, }, &mock.AddressConverterMock{}, &mock.AccountsStub{}, @@ -768,13 +768,15 @@ func TestTransactionCoordinator_CreateMbsAndProcessTransactionsFromMeNothingToPr }, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) container, _ := preFactory.Create() tc, err := NewTransactionCoordinator( mock.NewMultiShardsCoordinatorMock(5), 
&mock.AccountsStub{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.RequestHandlerMock{}, container, &mock.InterimProcessorContainerMock{}, @@ -886,12 +888,12 @@ func TestTransactionCoordinator_CreateMbsAndProcessTransactionsFromMe(t *testing func TestTransactionCoordinator_CreateMbsAndProcessTransactionsFromMeMultipleMiniblocks(t *testing.T) { t.Parallel() - txPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache}) + nrShards := uint32(5) + txPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: nrShards}) tdp := initDataPool([]byte("tx_hash1")) tdp.TransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { return txPool } - nrShards := uint32(5) tc, err := NewTransactionCoordinator( mock.NewMultiShardsCoordinatorMock(nrShards), @@ -1074,7 +1076,7 @@ func TestTransactionCoordinator_receivedMiniBlockRequestTxs(t *testing.T) { hasher := mock.HasherMock{} marshalizer := &mock.MarshalizerMock{} - dataPool := mock.NewPoolsHolderFake() + dataPool := mock.NewPoolsHolderMock() //we will have a miniblock that will have 3 tx hashes //1 tx hash will be in cache @@ -1130,6 +1132,8 @@ func TestTransactionCoordinator_receivedMiniBlockRequestTxs(t *testing.T) { &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) container, _ := preFactory.Create() @@ -1282,6 +1286,8 @@ func TestTransactionCoordinator_ProcessBlockTransactionProcessTxError(t *testing }, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) container, _ := preFactory.Create() @@ -1400,6 +1406,8 @@ func TestTransactionCoordinator_RequestMiniblocks(t *testing.T) { }, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) container, _ := preFactory.Create() @@ -1435,7 +1443,7 @@ func TestShardProcessor_ProcessMiniBlockCompleteWithOkTxsShouldExecuteThemAndNot hasher := mock.HasherMock{} marshalizer := &mock.MarshalizerMock{} - dataPool := mock.NewPoolsHolderFake() + dataPool := mock.NewPoolsHolderMock() //we will have a miniblock that will have 3 tx hashes //all txs will be in datapool and none of them will return err when processed @@ -1513,6 +1521,8 @@ func TestShardProcessor_ProcessMiniBlockCompleteWithOkTxsShouldExecuteThemAndNot }, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) container, _ := preFactory.Create() @@ -1544,7 +1554,7 @@ func TestShardProcessor_ProcessMiniBlockCompleteWithErrorWhileProcessShouldCallR hasher := mock.HasherMock{} marshalizer := &mock.MarshalizerMock{} - dataPool := mock.NewPoolsHolderFake() + dataPool := mock.NewPoolsHolderMock() //we will have a miniblock that will have 3 tx hashes //all txs will be in datapool and none of them will return err when processed @@ -1617,6 +1627,8 @@ func TestShardProcessor_ProcessMiniBlockCompleteWithErrorWhileProcessShouldCallR }, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) container, _ := preFactory.Create() @@ -1653,7 +1665,9 @@ func TestTransactionCoordinator_VerifyCreatedBlockTransactionsNilOrMiss(t *testi &mock.MarshalizerMock{}, 
&mock.HasherMock{}, adrConv, + &mock.SpecialAddressHandlerMock{}, &mock.ChainStorerMock{}, + tdp, ) container, _ := preFactory.Create() @@ -1696,7 +1710,9 @@ func TestTransactionCoordinator_VerifyCreatedBlockTransactionsOk(t *testing.T) { &mock.MarshalizerMock{}, &mock.HasherMock{}, adrConv, + &mock.SpecialAddressHandlerMock{}, &mock.ChainStorerMock{}, + tdp, ) container, _ := preFactory.Create() diff --git a/process/coordinator/transactionType.go b/process/coordinator/transactionType.go index b523d7b6dca..933cb6a353a 100644 --- a/process/coordinator/transactionType.go +++ b/process/coordinator/transactionType.go @@ -4,6 +4,7 @@ import ( "bytes" "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/sharding" @@ -47,6 +48,11 @@ func (tc *txTypeHandler) ComputeTransactionType(tx data.TransactionHandler) (pro return process.InvalidTransaction, err } + _, isRewardTx := tx.(*rewardTx.RewardTx) + if isRewardTx { + return process.RewardTx, nil + } + isEmptyAddress := tc.isDestAddressEmpty(tx) if isEmptyAddress { if len(tx.GetData()) > 0 { diff --git a/process/coordinator/transactionType_test.go b/process/coordinator/transactionType_test.go new file mode 100644 index 00000000000..af177bf6cfb --- /dev/null +++ b/process/coordinator/transactionType_test.go @@ -0,0 +1,268 @@ +package coordinator + +import ( + "crypto/rand" + "math/big" + "testing" + + "github.com/ElrondNetwork/elrond-go/data/rewardTx" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/data/transaction" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/mock" + "github.com/stretchr/testify/assert" +) + +func TestNewTxTypeHandler_NilAddrConv(t *testing.T) { + t.Parallel() + + tth, err := NewTxTypeHandler( + nil, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + ) + + assert.Nil(t, tth) + assert.Equal(t, process.ErrNilAddressConverter, err) +} + +func TestNewTxTypeHandler_NilShardCoord(t *testing.T) { + t.Parallel() + + tth, err := NewTxTypeHandler( + &mock.AddressConverterMock{}, + nil, + &mock.AccountsStub{}, + ) + + assert.Nil(t, tth) + assert.Equal(t, process.ErrNilShardCoordinator, err) +} + +func TestNewTxTypeHandler_NilAccounts(t *testing.T) { + t.Parallel() + + tth, err := NewTxTypeHandler( + &mock.AddressConverterMock{}, + mock.NewMultiShardsCoordinatorMock(3), + nil, + ) + + assert.Nil(t, tth) + assert.Equal(t, process.ErrNilAccountsAdapter, err) +} + +func TestNewTxTypeHandler_ValsOk(t *testing.T) { + t.Parallel() + + tth, err := NewTxTypeHandler( + &mock.AddressConverterMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + ) + + assert.NotNil(t, tth) + assert.Nil(t, err) +} + +func generateRandomByteSlice(size int) []byte { + buff := make([]byte, size) + _, _ = rand.Reader.Read(buff) + + return buff +} + +func createAccounts(tx *transaction.Transaction) (state.AccountHandler, state.AccountHandler) { + journalizeCalled := 0 + saveAccountCalled := 0 + tracker := &mock.AccountTrackerStub{ + JournalizeCalled: func(entry state.JournalEntry) { + journalizeCalled++ + }, + SaveAccountCalled: func(accountHandler state.AccountHandler) error { + saveAccountCalled++ + return nil + }, + } + + acntSrc, _ := state.NewAccount(mock.NewAddressMock(tx.SndAddr), tracker) + acntSrc.Balance = acntSrc.Balance.Add(acntSrc.Balance, tx.Value) + totalFee := 
big.NewInt(0) + totalFee = totalFee.Mul(big.NewInt(int64(tx.GasLimit)), big.NewInt(int64(tx.GasPrice))) + acntSrc.Balance = acntSrc.Balance.Add(acntSrc.Balance, totalFee) + + acntDst, _ := state.NewAccount(mock.NewAddressMock(tx.RcvAddr), tracker) + + return acntSrc, acntDst +} + +func TestTxTypeHandler_ComputeTransactionTypeNil(t *testing.T) { + t.Parallel() + + tth, err := NewTxTypeHandler( + &mock.AddressConverterMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + ) + + assert.NotNil(t, tth) + assert.Nil(t, err) + + _, err = tth.ComputeTransactionType(nil) + assert.Equal(t, process.ErrNilTransaction, err) +} + +func TestTxTypeHandler_ComputeTransactionTypeNilTx(t *testing.T) { + t.Parallel() + + tth, err := NewTxTypeHandler( + &mock.AddressConverterMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + ) + + assert.NotNil(t, tth) + assert.Nil(t, err) + + tx := &transaction.Transaction{} + tx.Nonce = 0 + tx.SndAddr = []byte("SRC") + tx.RcvAddr = []byte("DST") + tx.Value = big.NewInt(45) + + tx = nil + _, err = tth.ComputeTransactionType(tx) + assert.Equal(t, process.ErrNilTransaction, err) +} + +func TestTxTypeHandler_ComputeTransactionTypeErrWrongTransaction(t *testing.T) { + t.Parallel() + + tth, err := NewTxTypeHandler( + &mock.AddressConverterMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + ) + + assert.NotNil(t, tth) + assert.Nil(t, err) + + tx := &transaction.Transaction{} + tx.Nonce = 0 + tx.SndAddr = []byte("SRC") + tx.RcvAddr = nil + tx.Value = big.NewInt(45) + + _, err = tth.ComputeTransactionType(tx) + assert.Equal(t, process.ErrWrongTransaction, err) +} + +func TestTxTypeHandler_ComputeTransactionTypeScDeployment(t *testing.T) { + t.Parallel() + + addressConverter := &mock.AddressConverterMock{} + tth, err := NewTxTypeHandler( + addressConverter, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + ) + + assert.NotNil(t, tth) + assert.Nil(t, err) + + tx := &transaction.Transaction{} + tx.Nonce = 0 + tx.SndAddr = []byte("SRC") + tx.RcvAddr = make([]byte, addressConverter.AddressLen()) + tx.Data = "data" + tx.Value = big.NewInt(45) + + txType, err := tth.ComputeTransactionType(tx) + assert.Nil(t, err) + assert.Equal(t, process.SCDeployment, txType) +} + +func TestTxTypeHandler_ComputeTransactionTypeScInvoking(t *testing.T) { + t.Parallel() + + addrConverter := &mock.AddressConverterMock{} + tx := &transaction.Transaction{} + tx.Nonce = 0 + tx.SndAddr = []byte("SRC") + tx.RcvAddr = generateRandomByteSlice(addrConverter.AddressLen()) + tx.Data = "data" + tx.Value = big.NewInt(45) + + _, acntDst := createAccounts(tx) + acntDst.SetCode([]byte("code")) + + addressConverter := &mock.AddressConverterMock{} + tth, err := NewTxTypeHandler( + addressConverter, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{GetAccountWithJournalCalled: func(addressContainer state.AddressContainer) (handler state.AccountHandler, e error) { + return acntDst, nil + }}, + ) + + assert.NotNil(t, tth) + assert.Nil(t, err) + + txType, err := tth.ComputeTransactionType(tx) + assert.Nil(t, err) + assert.Equal(t, process.SCInvoking, txType) +} + +func TestTxTypeHandler_ComputeTransactionTypeMoveBalance(t *testing.T) { + t.Parallel() + + addrConverter := &mock.AddressConverterMock{} + tx := &transaction.Transaction{} + tx.Nonce = 0 + tx.SndAddr = []byte("SRC") + tx.RcvAddr = generateRandomByteSlice(addrConverter.AddressLen()) + tx.Data = "data" + tx.Value = big.NewInt(45) + + _, acntDst := createAccounts(tx) + + 
addressConverter := &mock.AddressConverterMock{}
+ tth, err := NewTxTypeHandler(
+ addressConverter,
+ mock.NewMultiShardsCoordinatorMock(3),
+ &mock.AccountsStub{GetAccountWithJournalCalled: func(addressContainer state.AddressContainer) (handler state.AccountHandler, e error) {
+ return acntDst, nil
+ }},
+ )
+
+ assert.NotNil(t, tth)
+ assert.Nil(t, err)
+
+ txType, err := tth.ComputeTransactionType(tx)
+ assert.Nil(t, err)
+ assert.Equal(t, process.MoveBalance, txType)
+}
+
+func TestTxTypeHandler_ComputeTransactionTypeRewardTx(t *testing.T) {
+ t.Parallel()
+
+ addrConv := &mock.AddressConverterMock{}
+ tth, err := NewTxTypeHandler(
+ addrConv,
+ mock.NewMultiShardsCoordinatorMock(3),
+ &mock.AccountsStub{},
+ )
+
+ assert.NotNil(t, tth)
+ assert.Nil(t, err)
+
+ tx := &rewardTx.RewardTx{RcvAddr: []byte("leader")}
+ txType, err := tth.ComputeTransactionType(tx)
+ assert.Equal(t, process.ErrWrongTransaction, err)
+ assert.Equal(t, process.InvalidTransaction, txType)
+
+ tx = &rewardTx.RewardTx{RcvAddr: generateRandomByteSlice(addrConv.AddressLen())}
+ txType, err = tth.ComputeTransactionType(tx)
+ assert.Nil(t, err)
+ assert.Equal(t, process.RewardTx, txType)
+}
diff --git a/process/errors.go b/process/errors.go
index d6833eebfcd..72db2088f8a 100644
--- a/process/errors.go
+++ b/process/errors.go
@@ -175,6 +175,9 @@ var ErrNegativeValue = errors.New("negative value")
 // ErrNilShardCoordinator signals that an operation has been attempted to or with a nil shard coordinator
 var ErrNilShardCoordinator = errors.New("nil shard coordinator")

+// ErrNilNodesCoordinator signals that an operation has been attempted to or with a nil nodes coordinator
+var ErrNilNodesCoordinator = errors.New("nil nodes coordinator")
+
 // ErrInvalidRcvAddr signals that an operation has been attempted to or with an invalid receiver address
 var ErrInvalidRcvAddr = errors.New("invalid receiver address")

@@ -187,6 +190,9 @@ var ErrNilKeyGen = errors.New("nil key generator")
 // ErrNilSingleSigner signals that a nil single signer is used
 var ErrNilSingleSigner = errors.New("nil single signer")

+// ErrBlockProposerSignatureMissing signals that the block proposer's signature is missing from the block's aggregated signature
+var ErrBlockProposerSignatureMissing = errors.New("block proposer signature is missing")
+
 // ErrNilMultiSigVerifier signals that a nil multi-signature verifier is used
 var ErrNilMultiSigVerifier = errors.New("nil multi-signature verifier")

@@ -265,6 +271,9 @@ var ErrNilResolverContainer = errors.New("nil resolver container")
 // ErrNilRequestHandler signals that a nil request handler interface was provided
 var ErrNilRequestHandler = errors.New("nil request handler")

+// ErrNilInternalTransactionProducer signals that a nil system transactions producer was provided
+var ErrNilInternalTransactionProducer = errors.New("nil internal transaction producer")
+
 // ErrNilHaveTimeHandler signals that a nil have time handler func was provided
 var ErrNilHaveTimeHandler = errors.New("nil have time handler")

@@ -289,9 +298,6 @@ var ErrNoTransactionInMessage = errors.New("no transaction found in received mes
 // ErrNilBuffer signals that a provided byte buffer is nil
 var ErrNilBuffer = errors.New("provided byte buffer is nil")

-// ErrNilChronologyValidator signals that a nil chronology validator has been provided
-var ErrNilChronologyValidator = errors.New("provided chronology validator object is nil")
-
 // ErrNilRandSeed signals that a nil rand seed has been provided
 var ErrNilRandSeed = errors.New("provided rand seed is nil")

@@ -364,6 +370,9 @@ var ErrNilVMOutput = errors.New("nil vm output")
 // ErrNilBalanceFromSC signals that balance is nil
 var ErrNilBalanceFromSC = errors.New("output balance from VM is nil")

+// ErrNilValueFromRewardTransaction signals that the transferred value is nil
+var ErrNilValueFromRewardTransaction = errors.New("transferred value is nil in reward transaction")
+
 // ErrNilTemporaryAccountsHandler signals that temporary accounts handler is nil
 var ErrNilTemporaryAccountsHandler = errors.New("temporary accounts handler is nil")

@@ -373,18 +382,33 @@ var ErrNotEnoughValidBlocksInStorage = errors.New("not enough valid blocks in st
 // ErrNilSmartContractResult signals that the smart contract result is nil
 var ErrNilSmartContractResult = errors.New("smart contract result is nil")

+// ErrNilRewardTransaction signals that the reward transaction is nil
+var ErrNilRewardTransaction = errors.New("reward transaction is nil")
+
+// ErrRewardTransactionNotFound is raised when a reward transaction should be present but was not found
+var ErrRewardTransactionNotFound = errors.New("reward transaction not found")
+
 // ErrInvalidDataInput signals that the data input is invalid for parsing
 var ErrInvalidDataInput = errors.New("data input is invalid to create key, value storage output")

 // ErrNoUnsignedTransactionInMessage signals that message does not contain required data
 var ErrNoUnsignedTransactionInMessage = errors.New("no unsigned transactions in message")

+// ErrNoRewardTransactionInMessage signals that message does not contain required data
+var ErrNoRewardTransactionInMessage = errors.New("no reward transactions in message")
+
 // ErrNilUTxDataPool signals that unsigned transaction pool is nil
 var ErrNilUTxDataPool = errors.New("unsigned transactions pool is nil")

+// ErrNilRewardTxDataPool signals that the reward transactions pool is nil
+var ErrNilRewardTxDataPool = errors.New("reward transactions pool is nil")
+
 // ErrNilUTxStorage signals that unsigned transaction storage is nil
 var ErrNilUTxStorage = errors.New("unsigned transactions storage is nil")

+// ErrNilRewardsTxStorage signals that rewards transaction storage is nil
+var ErrNilRewardsTxStorage = errors.New("reward transactions storage is nil")
+
 // ErrNilScAddress signals that a nil smart contract address has been provided
 var ErrNilScAddress = errors.New("nil SC address")

@@ -409,6 +433,9 @@ var ErrNilUint64Converter = errors.New("unit64converter is nil")
 // ErrNilSmartContractResultProcessor signals that smart contract result processor is nil
 var ErrNilSmartContractResultProcessor = errors.New("nil smart contract result processor")

+// ErrNilRewardsTxProcessor signals that the rewards transaction processor is nil
+var ErrNilRewardsTxProcessor = errors.New("nil rewards transaction processor")
+
 // ErrNilIntermediateProcessorContainer signals that intermediate processors container is nil
 var ErrNilIntermediateProcessorContainer = errors.New("intermediate processor container is nil")

@@ -430,6 +457,27 @@ var ErrNilHeaderHandlerValidator = errors.New("nil header handler validator prov
 // ErrNilAppStatusHandler defines the error for setting a nil AppStatusHandler
 var ErrNilAppStatusHandler = errors.New("nil AppStatusHandler")

+// ErrNotEnoughFeeInTransactions signals that the transaction does not have enough fee
+var ErrNotEnoughFeeInTransactions = errors.New("transaction fee is not enough")
+
+// ErrNilUnsignedTxHandler signals that the unsigned tx handler is nil
+var ErrNilUnsignedTxHandler = errors.New("nil unsigned tx handler")
+
+// 
ErrRewardTxsDoNotMatch signals that reward txs do not match +var ErrRewardTxsDoNotMatch = errors.New("calculated reward tx with block reward tx does not match") + +// ErrRewardTxNotFound signals that the reward transaction was not found +var ErrRewardTxNotFound = errors.New("reward transaction not found") + +// ErrRewardTxsMismatchCreatedReceived signals a mismatch between the nb of created and received reward transactions +var ErrRewardTxsMismatchCreatedReceived = errors.New("mismatch between created and received reward transactions") + +// ErrNilTxTypeHandler signals that tx type handler is nil +var ErrNilTxTypeHandler = errors.New("nil tx type handler") + +// ErrNilSpecialAddressHandler signals that special address handler is nil +var ErrNilSpecialAddressHandler = errors.New("nil special address handler") + // ErrNotEnoughArgumentsToDeploy signals that there are not enough arguments to deploy the smart contract var ErrNotEnoughArgumentsToDeploy = errors.New("not enough arguments to deploy the smart contract") diff --git a/process/factory/factory.go b/process/factory/factory.go index d65c656aca7..5a5fa359840 100644 --- a/process/factory/factory.go +++ b/process/factory/factory.go @@ -5,6 +5,8 @@ const ( TransactionTopic = "transactions" // UnsignedTransactionTopic is the topic used for sharing unsigned transactions UnsignedTransactionTopic = "unsignedTransactions" + // RewardsTransactionTopic is the topic used for sharing fee transactions + RewardsTransactionTopic = "rewardsTransactions" // HeadersTopic is the topic used for sharing block headers HeadersTopic = "headers" // MiniBlocksTopic is the topic used for sharing mini blocks diff --git a/process/factory/metachain/interceptorsContainerFactory.go b/process/factory/metachain/interceptorsContainerFactory.go index 80338b1ddd4..b07dd0f535d 100644 --- a/process/factory/metachain/interceptorsContainerFactory.go +++ b/process/factory/metachain/interceptorsContainerFactory.go @@ -15,32 +15,35 @@ import ( ) type interceptorsContainerFactory struct { - marshalizer marshal.Marshalizer - hasher hashing.Hasher - store dataRetriever.StorageService - dataPool dataRetriever.MetaPoolsHolder - shardCoordinator sharding.Coordinator - messenger process.TopicHandler - multiSigner crypto.MultiSigner - chronologyValidator process.ChronologyValidator - tpsBenchmark *statistics.TpsBenchmark + marshalizer marshal.Marshalizer + hasher hashing.Hasher + store dataRetriever.StorageService + dataPool dataRetriever.MetaPoolsHolder + shardCoordinator sharding.Coordinator + nodesCoordinator sharding.NodesCoordinator + messenger process.TopicHandler + multiSigner crypto.MultiSigner + tpsBenchmark *statistics.TpsBenchmark } // NewInterceptorsContainerFactory is responsible for creating a new interceptors factory object func NewInterceptorsContainerFactory( shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, messenger process.TopicHandler, store dataRetriever.StorageService, marshalizer marshal.Marshalizer, hasher hashing.Hasher, multiSigner crypto.MultiSigner, dataPool dataRetriever.MetaPoolsHolder, - chronologyValidator process.ChronologyValidator, ) (*interceptorsContainerFactory, error) { if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { return nil, process.ErrNilShardCoordinator } + if nodesCoordinator == nil || nodesCoordinator.IsInterfaceNil() { + return nil, process.ErrNilNodesCoordinator + } if messenger == nil { return nil, process.ErrNilMessenger } @@ -59,19 +62,16 @@ func NewInterceptorsContainerFactory( if 
dataPool == nil || dataPool.IsInterfaceNil() { return nil, process.ErrNilDataPoolHolder } - if chronologyValidator == nil || chronologyValidator.IsInterfaceNil() { - return nil, process.ErrNilChronologyValidator - } return &interceptorsContainerFactory{ - shardCoordinator: shardCoordinator, - messenger: messenger, - store: store, - marshalizer: marshalizer, - hasher: hasher, - multiSigner: multiSigner, - dataPool: dataPool, - chronologyValidator: chronologyValidator, + shardCoordinator: shardCoordinator, + nodesCoordinator: nodesCoordinator, + messenger: messenger, + store: store, + marshalizer: marshalizer, + hasher: hasher, + multiSigner: multiSigner, + dataPool: dataPool, }, nil } @@ -134,7 +134,7 @@ func (icf *interceptorsContainerFactory) generateMetablockInterceptor() ([]strin icf.multiSigner, icf.hasher, icf.shardCoordinator, - icf.chronologyValidator, + icf.nodesCoordinator, ) if err != nil { return nil, nil, err @@ -187,7 +187,7 @@ func (icf *interceptorsContainerFactory) createOneShardHeaderInterceptor(identif icf.multiSigner, icf.hasher, icf.shardCoordinator, - icf.chronologyValidator, + icf.nodesCoordinator, ) if err != nil { return nil, err diff --git a/process/factory/metachain/interceptorsContainerFactory_test.go b/process/factory/metachain/interceptorsContainerFactory_test.go index b08e783cd8e..6cca5fc63f8 100644 --- a/process/factory/metachain/interceptorsContainerFactory_test.go +++ b/process/factory/metachain/interceptorsContainerFactory_test.go @@ -78,31 +78,49 @@ func TestNewInterceptorsContainerFactory_NilShardCoordinatorShouldErr(t *testing icf, err := metachain.NewInterceptorsContainerFactory( nil, + mock.NewNodesCoordinatorMock(), &mock.TopicHandlerStub{}, createStore(), &mock.MarshalizerMock{}, &mock.HasherMock{}, mock.NewMultiSigner(), createDataPools(), - &mock.ChronologyValidatorStub{}, ) assert.Nil(t, icf) assert.Equal(t, process.ErrNilShardCoordinator, err) } +func TestNewInterceptorsContainerFactory_NilNodesCoordinatorShouldErr(t *testing.T) { + t.Parallel() + + icf, err := metachain.NewInterceptorsContainerFactory( + mock.NewOneShardCoordinatorMock(), + nil, + &mock.TopicHandlerStub{}, + createStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + mock.NewMultiSigner(), + createDataPools(), + ) + + assert.Nil(t, icf) + assert.Equal(t, process.ErrNilNodesCoordinator, err) +} + func TestNewInterceptorsContainerFactory_NilTopicHandlerShouldErr(t *testing.T) { t.Parallel() icf, err := metachain.NewInterceptorsContainerFactory( mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), nil, createStore(), &mock.MarshalizerMock{}, &mock.HasherMock{}, mock.NewMultiSigner(), createDataPools(), - &mock.ChronologyValidatorStub{}, ) assert.Nil(t, icf) @@ -114,13 +132,13 @@ func TestNewInterceptorsContainerFactory_NilBlockchainShouldErr(t *testing.T) { icf, err := metachain.NewInterceptorsContainerFactory( mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), &mock.TopicHandlerStub{}, nil, &mock.MarshalizerMock{}, &mock.HasherMock{}, mock.NewMultiSigner(), createDataPools(), - &mock.ChronologyValidatorStub{}, ) assert.Nil(t, icf) @@ -132,13 +150,13 @@ func TestNewInterceptorsContainerFactory_NilMarshalizerShouldErr(t *testing.T) { icf, err := metachain.NewInterceptorsContainerFactory( mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), &mock.TopicHandlerStub{}, createStore(), nil, &mock.HasherMock{}, mock.NewMultiSigner(), createDataPools(), - &mock.ChronologyValidatorStub{}, ) assert.Nil(t, icf) @@ -150,13 +168,13 @@ func 
TestNewInterceptorsContainerFactory_NilHasherShouldErr(t *testing.T) { icf, err := metachain.NewInterceptorsContainerFactory( mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), &mock.TopicHandlerStub{}, createStore(), &mock.MarshalizerMock{}, nil, mock.NewMultiSigner(), createDataPools(), - &mock.ChronologyValidatorStub{}, ) assert.Nil(t, icf) @@ -168,13 +186,13 @@ func TestNewInterceptorsContainerFactory_NilMultiSignerShouldErr(t *testing.T) { icf, err := metachain.NewInterceptorsContainerFactory( mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), &mock.TopicHandlerStub{}, createStore(), &mock.MarshalizerMock{}, &mock.HasherMock{}, nil, createDataPools(), - &mock.ChronologyValidatorStub{}, ) assert.Nil(t, icf) @@ -186,13 +204,13 @@ func TestNewInterceptorsContainerFactory_NilDataPoolShouldErr(t *testing.T) { icf, err := metachain.NewInterceptorsContainerFactory( mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), &mock.TopicHandlerStub{}, createStore(), &mock.MarshalizerMock{}, &mock.HasherMock{}, mock.NewMultiSigner(), nil, - &mock.ChronologyValidatorStub{}, ) assert.Nil(t, icf) @@ -204,13 +222,13 @@ func TestNewInterceptorsContainerFactory_ShouldWork(t *testing.T) { icf, err := metachain.NewInterceptorsContainerFactory( mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), &mock.TopicHandlerStub{}, createStore(), &mock.MarshalizerMock{}, &mock.HasherMock{}, mock.NewMultiSigner(), createDataPools(), - &mock.ChronologyValidatorStub{}, ) assert.NotNil(t, icf) @@ -224,13 +242,13 @@ func TestInterceptorsContainerFactory_CreateTopicMetablocksFailsShouldErr(t *tes icf, _ := metachain.NewInterceptorsContainerFactory( mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), createStubTopicHandler(factory.MetachainBlocksTopic, ""), createStore(), &mock.MarshalizerMock{}, &mock.HasherMock{}, mock.NewMultiSigner(), createDataPools(), - &mock.ChronologyValidatorStub{}, ) container, err := icf.Create() @@ -244,13 +262,13 @@ func TestInterceptorsContainerFactory_CreateTopicShardHeadersForMetachainFailsSh icf, _ := metachain.NewInterceptorsContainerFactory( mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), createStubTopicHandler(factory.ShardHeadersForMetachainTopic, ""), createStore(), &mock.MarshalizerMock{}, &mock.HasherMock{}, mock.NewMultiSigner(), createDataPools(), - &mock.ChronologyValidatorStub{}, ) container, err := icf.Create() @@ -264,13 +282,13 @@ func TestInterceptorsContainerFactory_CreateRegisterForMetablocksFailsShouldErr( icf, _ := metachain.NewInterceptorsContainerFactory( mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), createStubTopicHandler("", factory.MetachainBlocksTopic), createStore(), &mock.MarshalizerMock{}, &mock.HasherMock{}, mock.NewMultiSigner(), createDataPools(), - &mock.ChronologyValidatorStub{}, ) container, err := icf.Create() @@ -284,13 +302,13 @@ func TestInterceptorsContainerFactory_CreateRegisterShardHeadersForMetachainFail icf, _ := metachain.NewInterceptorsContainerFactory( mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), createStubTopicHandler("", factory.ShardHeadersForMetachainTopic), createStore(), &mock.MarshalizerMock{}, &mock.HasherMock{}, mock.NewMultiSigner(), createDataPools(), - &mock.ChronologyValidatorStub{}, ) container, err := icf.Create() @@ -304,6 +322,7 @@ func TestInterceptorsContainerFactory_CreateShouldWork(t *testing.T) { icf, _ := metachain.NewInterceptorsContainerFactory( 
mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), &mock.TopicHandlerStub{ CreateTopicCalled: func(name string, createChannelForTopic bool) error { return nil @@ -317,7 +336,6 @@ func TestInterceptorsContainerFactory_CreateShouldWork(t *testing.T) { &mock.HasherMock{}, mock.NewMultiSigner(), createDataPools(), - &mock.ChronologyValidatorStub{}, ) container, err := icf.Create() @@ -335,8 +353,16 @@ func TestInterceptorsContainerFactory_With4ShardsShouldWork(t *testing.T) { shardCoordinator.SetNoShards(uint32(noOfShards)) shardCoordinator.CurrentShard = 1 + nodesCoordinator := &mock.NodesCoordinatorMock{ + ShardConsensusSize: 1, + MetaConsensusSize: 1, + NbShards: uint32(noOfShards), + ShardId: 1, + } + icf, _ := metachain.NewInterceptorsContainerFactory( shardCoordinator, + nodesCoordinator, &mock.TopicHandlerStub{ CreateTopicCalled: func(name string, createChannelForTopic bool) error { return nil @@ -350,7 +376,6 @@ func TestInterceptorsContainerFactory_With4ShardsShouldWork(t *testing.T) { &mock.HasherMock{}, mock.NewMultiSigner(), createDataPools(), - &mock.ChronologyValidatorStub{}, ) container, _ := icf.Create() diff --git a/process/factory/shard/interceptorsContainerFactory.go b/process/factory/shard/interceptorsContainerFactory.go index 302eed9633b..66b029ced3c 100644 --- a/process/factory/shard/interceptorsContainerFactory.go +++ b/process/factory/shard/interceptorsContainerFactory.go @@ -11,6 +11,7 @@ import ( "github.com/ElrondNetwork/elrond-go/process/dataValidators" "github.com/ElrondNetwork/elrond-go/process/factory" "github.com/ElrondNetwork/elrond-go/process/factory/containers" + "github.com/ElrondNetwork/elrond-go/process/rewardTransaction" "github.com/ElrondNetwork/elrond-go/process/transaction" "github.com/ElrondNetwork/elrond-go/process/unsigned" "github.com/ElrondNetwork/elrond-go/sharding" @@ -18,23 +19,24 @@ import ( type interceptorsContainerFactory struct { accounts state.AccountsAdapter - shardCoordinator sharding.Coordinator - messenger process.TopicHandler - store dataRetriever.StorageService - marshalizer marshal.Marshalizer - hasher hashing.Hasher - keyGen crypto.KeyGenerator - singleSigner crypto.SingleSigner - multiSigner crypto.MultiSigner - dataPool dataRetriever.PoolsHolder - addrConverter state.AddressConverter - chronologyValidator process.ChronologyValidator + shardCoordinator sharding.Coordinator + messenger process.TopicHandler + store dataRetriever.StorageService + marshalizer marshal.Marshalizer + hasher hashing.Hasher + keyGen crypto.KeyGenerator + singleSigner crypto.SingleSigner + multiSigner crypto.MultiSigner + dataPool dataRetriever.PoolsHolder + addrConverter state.AddressConverter + nodesCoordinator sharding.NodesCoordinator } // NewInterceptorsContainerFactory is responsible for creating a new interceptors factory object func NewInterceptorsContainerFactory( accounts state.AccountsAdapter, shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, messenger process.TopicHandler, store dataRetriever.StorageService, marshalizer marshal.Marshalizer, @@ -44,7 +46,6 @@ func NewInterceptorsContainerFactory( multiSigner crypto.MultiSigner, dataPool dataRetriever.PoolsHolder, addrConverter state.AddressConverter, - chronologyValidator process.ChronologyValidator, ) (*interceptorsContainerFactory, error) { if accounts == nil || accounts.IsInterfaceNil() { return nil, process.ErrNilAccountsAdapter @@ -79,23 +80,23 @@ func NewInterceptorsContainerFactory( if addrConverter == nil || 
addrConverter.IsInterfaceNil() { return nil, process.ErrNilAddressConverter } - if chronologyValidator == nil || chronologyValidator.IsInterfaceNil() { - return nil, process.ErrNilChronologyValidator + if nodesCoordinator == nil || nodesCoordinator.IsInterfaceNil() { + return nil, process.ErrNilNodesCoordinator } return &interceptorsContainerFactory{ accounts: accounts, - shardCoordinator: shardCoordinator, - messenger: messenger, - store: store, - marshalizer: marshalizer, - hasher: hasher, - keyGen: keyGen, - singleSigner: singleSigner, - multiSigner: multiSigner, - dataPool: dataPool, - addrConverter: addrConverter, - chronologyValidator: chronologyValidator, + shardCoordinator: shardCoordinator, + nodesCoordinator: nodesCoordinator, + messenger: messenger, + store: store, + marshalizer: marshalizer, + hasher: hasher, + keyGen: keyGen, + singleSigner: singleSigner, + multiSigner: multiSigner, + dataPool: dataPool, + addrConverter: addrConverter, }, nil } @@ -123,6 +124,16 @@ func (icf *interceptorsContainerFactory) Create() (process.InterceptorsContainer return nil, err } + keys, interceptorSlice, err = icf.generateRewardTxInterceptors() + if err != nil { + return nil, err + } + + err = container.AddMultiple(keys, interceptorSlice) + if err != nil { + return nil, err + } + keys, interceptorSlice, err = icf.generateHdrInterceptor() if err != nil { return nil, err @@ -238,6 +249,60 @@ func (icf *interceptorsContainerFactory) createOneTxInterceptor(identifier strin return icf.createTopicAndAssignHandler(identifier, interceptor, true) } +//------- Reward transactions interceptors + +func (icf *interceptorsContainerFactory) generateRewardTxInterceptors() ([]string, []process.Interceptor, error) { + shardC := icf.shardCoordinator + + noOfShards := shardC.NumberOfShards() + + keys := make([]string, noOfShards) + interceptorSlice := make([]process.Interceptor, noOfShards) + + for idx := uint32(0); idx < noOfShards; idx++ { + identifierScr := factory.RewardsTransactionTopic + shardC.CommunicationIdentifier(idx) + + interceptor, err := icf.createOneRewardTxInterceptor(identifierScr) + if err != nil { + return nil, nil, err + } + + keys[int(idx)] = identifierScr + interceptorSlice[int(idx)] = interceptor + } + + identifierTx := factory.RewardsTransactionTopic + shardC.CommunicationIdentifier(sharding.MetachainShardId) + + interceptor, err := icf.createOneRewardTxInterceptor(identifierTx) + if err != nil { + return nil, nil, err + } + + keys = append(keys, identifierTx) + interceptorSlice = append(interceptorSlice, interceptor) + + return keys, interceptorSlice, nil +} + +func (icf *interceptorsContainerFactory) createOneRewardTxInterceptor(identifier string) (process.Interceptor, error) { + rewardTxStorer := icf.store.GetStorer(dataRetriever.RewardTransactionUnit) + + interceptor, err := rewardTransaction.NewRewardTxInterceptor( + icf.marshalizer, + icf.dataPool.RewardTransactions(), + rewardTxStorer, + icf.addrConverter, + icf.hasher, + icf.shardCoordinator, + ) + + if err != nil { + return nil, err + } + + return icf.createTopicAndAssignHandler(identifier, interceptor, true) +} + //------- Unsigned transactions interceptors func (icf *interceptorsContainerFactory) generateUnsignedTxsInterceptors() ([]string, []process.Interceptor, error) { @@ -311,7 +376,7 @@ func (icf *interceptorsContainerFactory) generateHdrInterceptor() ([]string, []p icf.multiSigner, icf.hasher, icf.shardCoordinator, - icf.chronologyValidator, + icf.nodesCoordinator, ) if err != nil { return nil, nil, err @@ -411,7 +476,7 
@@ func (icf *interceptorsContainerFactory) generateMetachainHeaderInterceptor() ([ icf.multiSigner, icf.hasher, icf.shardCoordinator, - icf.chronologyValidator, + icf.nodesCoordinator, ) if err != nil { return nil, nil, err diff --git a/process/factory/shard/interceptorsContainerFactory_test.go b/process/factory/shard/interceptorsContainerFactory_test.go index 19efae2c75d..173402887fc 100644 --- a/process/factory/shard/interceptorsContainerFactory_test.go +++ b/process/factory/shard/interceptorsContainerFactory_test.go @@ -65,6 +65,9 @@ func createDataPools() dataRetriever.PoolsHolder { pools.UnsignedTransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { return &mock.ShardedDataStub{} } + pools.RewardTransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{} + } return pools } @@ -83,6 +86,7 @@ func TestNewInterceptorsContainerFactory_NilAccountsAdapter(t *testing.T) { icf, err := shard.NewInterceptorsContainerFactory( nil, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), &mock.TopicHandlerStub{}, createStore(), &mock.MarshalizerMock{}, @@ -92,7 +96,6 @@ func TestNewInterceptorsContainerFactory_NilAccountsAdapter(t *testing.T) { mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, ) assert.Nil(t, icf) @@ -105,6 +108,7 @@ func TestNewInterceptorsContainerFactory_NilShardCoordinatorShouldErr(t *testing icf, err := shard.NewInterceptorsContainerFactory( &mock.AccountsStub{}, nil, + mock.NewNodesCoordinatorMock(), &mock.TopicHandlerStub{}, createStore(), &mock.MarshalizerMock{}, @@ -114,19 +118,41 @@ func TestNewInterceptorsContainerFactory_NilShardCoordinatorShouldErr(t *testing mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, ) assert.Nil(t, icf) assert.Equal(t, process.ErrNilShardCoordinator, err) } +func TestNewInterceptorsContainerFactory_NilNodesCoordinatorShouldErr(t *testing.T) { + t.Parallel() + + icf, err := shard.NewInterceptorsContainerFactory( + &mock.AccountsStub{}, + mock.NewOneShardCoordinatorMock(), + nil, + &mock.TopicHandlerStub{}, + createStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + &mock.SingleSignKeyGenMock{}, + &mock.SignerMock{}, + mock.NewMultiSigner(), + createDataPools(), + &mock.AddressConverterMock{}, + ) + + assert.Nil(t, icf) + assert.Equal(t, process.ErrNilNodesCoordinator, err) +} + func TestNewInterceptorsContainerFactory_NilTopicHandlerShouldErr(t *testing.T) { t.Parallel() icf, err := shard.NewInterceptorsContainerFactory( &mock.AccountsStub{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), nil, createStore(), &mock.MarshalizerMock{}, @@ -136,7 +162,6 @@ func TestNewInterceptorsContainerFactory_NilTopicHandlerShouldErr(t *testing.T) mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, ) assert.Nil(t, icf) @@ -149,6 +174,7 @@ func TestNewInterceptorsContainerFactory_NilBlockchainShouldErr(t *testing.T) { icf, err := shard.NewInterceptorsContainerFactory( &mock.AccountsStub{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), &mock.TopicHandlerStub{}, nil, &mock.MarshalizerMock{}, @@ -158,7 +184,6 @@ func TestNewInterceptorsContainerFactory_NilBlockchainShouldErr(t *testing.T) { mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, ) assert.Nil(t, icf) @@ -171,6 +196,7 @@ func 
TestNewInterceptorsContainerFactory_NilMarshalizerShouldErr(t *testing.T) { icf, err := shard.NewInterceptorsContainerFactory( &mock.AccountsStub{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), &mock.TopicHandlerStub{}, createStore(), nil, @@ -180,7 +206,6 @@ func TestNewInterceptorsContainerFactory_NilMarshalizerShouldErr(t *testing.T) { mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, ) assert.Nil(t, icf) @@ -193,6 +218,7 @@ func TestNewInterceptorsContainerFactory_NilHasherShouldErr(t *testing.T) { icf, err := shard.NewInterceptorsContainerFactory( &mock.AccountsStub{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), &mock.TopicHandlerStub{}, createStore(), &mock.MarshalizerMock{}, @@ -202,7 +228,6 @@ func TestNewInterceptorsContainerFactory_NilHasherShouldErr(t *testing.T) { mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, ) assert.Nil(t, icf) @@ -215,6 +240,7 @@ func TestNewInterceptorsContainerFactory_NilKeyGenShouldErr(t *testing.T) { icf, err := shard.NewInterceptorsContainerFactory( &mock.AccountsStub{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), &mock.TopicHandlerStub{}, createStore(), &mock.MarshalizerMock{}, @@ -224,7 +250,6 @@ func TestNewInterceptorsContainerFactory_NilKeyGenShouldErr(t *testing.T) { mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, ) assert.Nil(t, icf) @@ -237,6 +262,7 @@ func TestNewInterceptorsContainerFactory_NilSingleSignerShouldErr(t *testing.T) icf, err := shard.NewInterceptorsContainerFactory( &mock.AccountsStub{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), &mock.TopicHandlerStub{}, createStore(), &mock.MarshalizerMock{}, @@ -246,7 +272,6 @@ func TestNewInterceptorsContainerFactory_NilSingleSignerShouldErr(t *testing.T) mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, ) assert.Nil(t, icf) @@ -259,6 +284,7 @@ func TestNewInterceptorsContainerFactory_NilMultiSignerShouldErr(t *testing.T) { icf, err := shard.NewInterceptorsContainerFactory( &mock.AccountsStub{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), &mock.TopicHandlerStub{}, createStore(), &mock.MarshalizerMock{}, @@ -268,7 +294,6 @@ func TestNewInterceptorsContainerFactory_NilMultiSignerShouldErr(t *testing.T) { nil, createDataPools(), &mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, ) assert.Nil(t, icf) @@ -281,6 +306,7 @@ func TestNewInterceptorsContainerFactory_NilDataPoolShouldErr(t *testing.T) { icf, err := shard.NewInterceptorsContainerFactory( &mock.AccountsStub{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), &mock.TopicHandlerStub{}, createStore(), &mock.MarshalizerMock{}, @@ -290,7 +316,6 @@ func TestNewInterceptorsContainerFactory_NilDataPoolShouldErr(t *testing.T) { mock.NewMultiSigner(), nil, &mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, ) assert.Nil(t, icf) @@ -303,6 +328,7 @@ func TestNewInterceptorsContainerFactory_NilAddrConverterShouldErr(t *testing.T) icf, err := shard.NewInterceptorsContainerFactory( &mock.AccountsStub{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), &mock.TopicHandlerStub{}, createStore(), &mock.MarshalizerMock{}, @@ -312,7 +338,6 @@ func TestNewInterceptorsContainerFactory_NilAddrConverterShouldErr(t *testing.T) 
mock.NewMultiSigner(), createDataPools(), nil, - &mock.ChronologyValidatorStub{}, ) assert.Nil(t, icf) @@ -325,6 +350,7 @@ func TestNewInterceptorsContainerFactory_ShouldWork(t *testing.T) { icf, err := shard.NewInterceptorsContainerFactory( &mock.AccountsStub{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), &mock.TopicHandlerStub{}, createStore(), &mock.MarshalizerMock{}, @@ -334,7 +360,6 @@ func TestNewInterceptorsContainerFactory_ShouldWork(t *testing.T) { mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, ) assert.NotNil(t, icf) @@ -349,6 +374,7 @@ func TestInterceptorsContainerFactory_CreateTopicCreationTxFailsShouldErr(t *tes icf, _ := shard.NewInterceptorsContainerFactory( &mock.AccountsStub{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), createStubTopicHandler(factory.TransactionTopic, ""), createStore(), &mock.MarshalizerMock{}, @@ -358,7 +384,6 @@ func TestInterceptorsContainerFactory_CreateTopicCreationTxFailsShouldErr(t *tes mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, ) container, err := icf.Create() @@ -373,6 +398,7 @@ func TestInterceptorsContainerFactory_CreateTopicCreationHdrFailsShouldErr(t *te icf, _ := shard.NewInterceptorsContainerFactory( &mock.AccountsStub{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), createStubTopicHandler(factory.HeadersTopic, ""), createStore(), &mock.MarshalizerMock{}, @@ -382,7 +408,6 @@ func TestInterceptorsContainerFactory_CreateTopicCreationHdrFailsShouldErr(t *te mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, ) container, err := icf.Create() @@ -397,6 +422,7 @@ func TestInterceptorsContainerFactory_CreateTopicCreationMiniBlocksFailsShouldEr icf, _ := shard.NewInterceptorsContainerFactory( &mock.AccountsStub{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), createStubTopicHandler(factory.MiniBlocksTopic, ""), createStore(), &mock.MarshalizerMock{}, @@ -406,7 +432,6 @@ func TestInterceptorsContainerFactory_CreateTopicCreationMiniBlocksFailsShouldEr mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, ) container, err := icf.Create() @@ -421,6 +446,7 @@ func TestInterceptorsContainerFactory_CreateTopicCreationPeerChBlocksFailsShould icf, _ := shard.NewInterceptorsContainerFactory( &mock.AccountsStub{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), createStubTopicHandler(factory.PeerChBodyTopic, ""), createStore(), &mock.MarshalizerMock{}, @@ -430,7 +456,6 @@ func TestInterceptorsContainerFactory_CreateTopicCreationPeerChBlocksFailsShould mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, ) container, err := icf.Create() @@ -445,6 +470,7 @@ func TestInterceptorsContainerFactory_CreateTopicCreationMetachainHeadersFailsSh icf, _ := shard.NewInterceptorsContainerFactory( &mock.AccountsStub{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), createStubTopicHandler(factory.MetachainBlocksTopic, ""), createStore(), &mock.MarshalizerMock{}, @@ -454,7 +480,6 @@ func TestInterceptorsContainerFactory_CreateTopicCreationMetachainHeadersFailsSh mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, ) container, err := icf.Create() @@ -469,6 +494,7 @@ func 
TestInterceptorsContainerFactory_CreateRegisterTxFailsShouldErr(t *testing. icf, _ := shard.NewInterceptorsContainerFactory( &mock.AccountsStub{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), createStubTopicHandler("", factory.TransactionTopic), createStore(), &mock.MarshalizerMock{}, @@ -478,7 +504,6 @@ func TestInterceptorsContainerFactory_CreateRegisterTxFailsShouldErr(t *testing. mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, ) container, err := icf.Create() @@ -493,6 +518,7 @@ func TestInterceptorsContainerFactory_CreateRegisterHdrFailsShouldErr(t *testing icf, _ := shard.NewInterceptorsContainerFactory( &mock.AccountsStub{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), createStubTopicHandler("", factory.HeadersTopic), createStore(), &mock.MarshalizerMock{}, @@ -502,7 +528,6 @@ func TestInterceptorsContainerFactory_CreateRegisterHdrFailsShouldErr(t *testing mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, ) container, err := icf.Create() @@ -517,6 +542,7 @@ func TestInterceptorsContainerFactory_CreateRegisterMiniBlocksFailsShouldErr(t * icf, _ := shard.NewInterceptorsContainerFactory( &mock.AccountsStub{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), createStubTopicHandler("", factory.MiniBlocksTopic), createStore(), &mock.MarshalizerMock{}, @@ -526,7 +552,6 @@ func TestInterceptorsContainerFactory_CreateRegisterMiniBlocksFailsShouldErr(t * mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, ) container, err := icf.Create() @@ -541,6 +566,7 @@ func TestInterceptorsContainerFactory_CreateRegisterPeerChBlocksFailsShouldErr(t icf, _ := shard.NewInterceptorsContainerFactory( &mock.AccountsStub{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), createStubTopicHandler("", factory.PeerChBodyTopic), createStore(), &mock.MarshalizerMock{}, @@ -550,7 +576,6 @@ func TestInterceptorsContainerFactory_CreateRegisterPeerChBlocksFailsShouldErr(t mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, ) container, err := icf.Create() @@ -565,6 +590,7 @@ func TestInterceptorsContainerFactory_CreateRegisterMetachainHeadersShouldErr(t icf, _ := shard.NewInterceptorsContainerFactory( &mock.AccountsStub{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), createStubTopicHandler("", factory.MetachainBlocksTopic), createStore(), &mock.MarshalizerMock{}, @@ -574,7 +600,6 @@ func TestInterceptorsContainerFactory_CreateRegisterMetachainHeadersShouldErr(t mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, ) container, err := icf.Create() @@ -589,6 +614,7 @@ func TestInterceptorsContainerFactory_CreateShouldWork(t *testing.T) { icf, _ := shard.NewInterceptorsContainerFactory( &mock.AccountsStub{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), &mock.TopicHandlerStub{ CreateTopicCalled: func(name string, createChannelForTopic bool) error { return nil @@ -605,7 +631,6 @@ func TestInterceptorsContainerFactory_CreateShouldWork(t *testing.T) { mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, ) container, err := icf.Create() @@ -623,9 +648,17 @@ func TestInterceptorsContainerFactory_With4ShardsShouldWork(t *testing.T) { 
shardCoordinator.SetNoShards(uint32(noOfShards)) shardCoordinator.CurrentShard = 1 + nodesCoordinator := &mock.NodesCoordinatorMock{ + ShardId: 1, + ShardConsensusSize: 1, + MetaConsensusSize: 1, + NbShards: uint32(noOfShards), + } + icf, _ := shard.NewInterceptorsContainerFactory( &mock.AccountsStub{}, shardCoordinator, + nodesCoordinator, &mock.TopicHandlerStub{ CreateTopicCalled: func(name string, createChannelForTopic bool) error { return nil @@ -642,18 +675,20 @@ func TestInterceptorsContainerFactory_With4ShardsShouldWork(t *testing.T) { mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, ) container, _ := icf.Create() numInterceptorTxs := noOfShards + 1 + numInterceptorsUnsignedTxs := numInterceptorTxs + numInterceptorsRewardTxs := numInterceptorTxs numInterceptorHeaders := 1 numInterceptorMiniBlocks := noOfShards numInterceptorPeerChanges := 1 numInterceptorMetachainHeaders := 1 totalInterceptors := numInterceptorTxs + numInterceptorHeaders + numInterceptorMiniBlocks + - numInterceptorPeerChanges + numInterceptorMetachainHeaders + numInterceptorTxs + numInterceptorPeerChanges + numInterceptorMetachainHeaders + numInterceptorsUnsignedTxs + + numInterceptorsRewardTxs assert.Equal(t, totalInterceptors, container.Len()) } diff --git a/process/factory/shard/intermediateProcessorsContainerFactory.go b/process/factory/shard/intermediateProcessorsContainerFactory.go index 87d27a55ffa..fc70456c858 100644 --- a/process/factory/shard/intermediateProcessorsContainerFactory.go +++ b/process/factory/shard/intermediateProcessorsContainerFactory.go @@ -13,11 +13,13 @@ import ( ) type intermediateProcessorsContainerFactory struct { - shardCoordinator sharding.Coordinator - marshalizer marshal.Marshalizer - hasher hashing.Hasher - addrConverter state.AddressConverter - store dataRetriever.StorageService + shardCoordinator sharding.Coordinator + marshalizer marshal.Marshalizer + hasher hashing.Hasher + addrConverter state.AddressConverter + specialAddressHandler process.SpecialAddressHandler + store dataRetriever.StorageService + poolsHolder dataRetriever.PoolsHolder } // NewIntermediateProcessorsContainerFactory is responsible for creating a new intermediate processors factory object @@ -26,7 +28,9 @@ func NewIntermediateProcessorsContainerFactory( marshalizer marshal.Marshalizer, hasher hashing.Hasher, addrConverter state.AddressConverter, + specialAddressHandler process.SpecialAddressHandler, store dataRetriever.StorageService, + poolsHolder dataRetriever.PoolsHolder, ) (*intermediateProcessorsContainerFactory, error) { if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { @@ -41,16 +45,24 @@ func NewIntermediateProcessorsContainerFactory( if addrConverter == nil || addrConverter.IsInterfaceNil() { return nil, process.ErrNilAddressConverter } + if specialAddressHandler == nil || specialAddressHandler.IsInterfaceNil() { + return nil, process.ErrNilSpecialAddressHandler + } if store == nil || store.IsInterfaceNil() { return nil, process.ErrNilStorage } + if poolsHolder == nil || poolsHolder.IsInterfaceNil() { + return nil, process.ErrNilPoolsHolder + } return &intermediateProcessorsContainerFactory{ - shardCoordinator: shardCoordinator, - marshalizer: marshalizer, - hasher: hasher, - addrConverter: addrConverter, - store: store, + shardCoordinator: shardCoordinator, + marshalizer: marshalizer, + hasher: hasher, + addrConverter: addrConverter, + specialAddressHandler: specialAddressHandler, + store: store, + poolsHolder: poolsHolder, 
}, nil } @@ -68,6 +80,16 @@ func (ppcm *intermediateProcessorsContainerFactory) Create() (process.Intermedia return nil, err } + interproc, err = ppcm.createRewardsTxIntermediateProcessor() + if err != nil { + return nil, err + } + + err = container.Add(block.RewardsBlock, interproc) + if err != nil { + return nil, err + } + return container, nil } @@ -84,6 +106,20 @@ func (ppcm *intermediateProcessorsContainerFactory) createSmartContractResultsIn return irp, err } +func (ppcm *intermediateProcessorsContainerFactory) createRewardsTxIntermediateProcessor() (process.IntermediateTransactionHandler, error) { + irp, err := preprocess.NewRewardTxHandler( + ppcm.specialAddressHandler, + ppcm.hasher, + ppcm.marshalizer, + ppcm.shardCoordinator, + ppcm.addrConverter, + ppcm.store, + ppcm.poolsHolder.RewardTransactions(), + ) + + return irp, err +} + // IsInterfaceNil returns true if there is no value under the interface func (ppcm *intermediateProcessorsContainerFactory) IsInterfaceNil() bool { if ppcm == nil { diff --git a/process/factory/shard/intermediateProcessorsContainerFactory_test.go b/process/factory/shard/intermediateProcessorsContainerFactory_test.go index e49063ee632..3c596728885 100644 --- a/process/factory/shard/intermediateProcessorsContainerFactory_test.go +++ b/process/factory/shard/intermediateProcessorsContainerFactory_test.go @@ -1,7 +1,8 @@ -package shard +package shard_test import ( "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/factory/shard" "github.com/ElrondNetwork/elrond-go/process/mock" "github.com/stretchr/testify/assert" "testing" @@ -10,12 +11,15 @@ import ( func TestNewIntermediateProcessorsContainerFactory_NilShardCoord(t *testing.T) { t.Parallel() - ipcf, err := NewIntermediateProcessorsContainerFactory( + dPool := createDataPools() + ipcf, err := shard.NewIntermediateProcessorsContainerFactory( nil, &mock.MarshalizerMock{}, &mock.HasherMock{}, &mock.AddressConverterMock{}, + &mock.SpecialAddressHandlerMock{}, &mock.ChainStorerMock{}, + dPool, ) assert.Nil(t, ipcf) @@ -25,12 +29,15 @@ func TestNewIntermediateProcessorsContainerFactory_NilShardCoord(t *testing.T) { func TestNewIntermediateProcessorsContainerFactory_NilMarshalizer(t *testing.T) { t.Parallel() - ipcf, err := NewIntermediateProcessorsContainerFactory( + dPool := createDataPools() + ipcf, err := shard.NewIntermediateProcessorsContainerFactory( mock.NewMultiShardsCoordinatorMock(3), nil, &mock.HasherMock{}, &mock.AddressConverterMock{}, + &mock.SpecialAddressHandlerMock{}, &mock.ChainStorerMock{}, + dPool, ) assert.Nil(t, ipcf) @@ -40,12 +47,15 @@ func TestNewIntermediateProcessorsContainerFactory_NilMarshalizer(t *testing.T) func TestNewIntermediateProcessorsContainerFactory_NilHasher(t *testing.T) { t.Parallel() - ipcf, err := NewIntermediateProcessorsContainerFactory( + dPool := createDataPools() + ipcf, err := shard.NewIntermediateProcessorsContainerFactory( mock.NewMultiShardsCoordinatorMock(3), &mock.MarshalizerMock{}, nil, &mock.AddressConverterMock{}, + &mock.SpecialAddressHandlerMock{}, &mock.ChainStorerMock{}, + dPool, ) assert.Nil(t, ipcf) @@ -55,12 +65,15 @@ func TestNewIntermediateProcessorsContainerFactory_NilHasher(t *testing.T) { func TestNewIntermediateProcessorsContainerFactory_NilAdrConv(t *testing.T) { t.Parallel() - ipcf, err := NewIntermediateProcessorsContainerFactory( + dPool := createDataPools() + ipcf, err := shard.NewIntermediateProcessorsContainerFactory( mock.NewMultiShardsCoordinatorMock(3), &mock.MarshalizerMock{}, 
&mock.HasherMock{}, nil, + &mock.SpecialAddressHandlerMock{}, &mock.ChainStorerMock{}, + dPool, ) assert.Nil(t, ipcf) @@ -70,12 +83,15 @@ func TestNewIntermediateProcessorsContainerFactory_NilAdrConv(t *testing.T) { func TestNewIntermediateProcessorsContainerFactory_NilStorer(t *testing.T) { t.Parallel() - ipcf, err := NewIntermediateProcessorsContainerFactory( + dPool := createDataPools() + ipcf, err := shard.NewIntermediateProcessorsContainerFactory( mock.NewMultiShardsCoordinatorMock(3), &mock.MarshalizerMock{}, &mock.HasherMock{}, &mock.AddressConverterMock{}, + &mock.SpecialAddressHandlerMock{}, nil, + dPool, ) assert.Nil(t, ipcf) @@ -85,12 +101,15 @@ func TestNewIntermediateProcessorsContainerFactory_NilStorer(t *testing.T) { func TestNewIntermediateProcessorsContainerFactory(t *testing.T) { t.Parallel() - ipcf, err := NewIntermediateProcessorsContainerFactory( + dPool := createDataPools() + ipcf, err := shard.NewIntermediateProcessorsContainerFactory( mock.NewMultiShardsCoordinatorMock(3), &mock.MarshalizerMock{}, &mock.HasherMock{}, &mock.AddressConverterMock{}, + &mock.SpecialAddressHandlerMock{}, &mock.ChainStorerMock{}, + dPool, ) assert.Nil(t, err) @@ -100,12 +119,15 @@ func TestNewIntermediateProcessorsContainerFactory(t *testing.T) { func TestIntermediateProcessorsContainerFactory_Create(t *testing.T) { t.Parallel() - ipcf, err := NewIntermediateProcessorsContainerFactory( + dPool := createDataPools() + ipcf, err := shard.NewIntermediateProcessorsContainerFactory( mock.NewMultiShardsCoordinatorMock(3), &mock.MarshalizerMock{}, &mock.HasherMock{}, &mock.AddressConverterMock{}, + &mock.SpecialAddressHandlerMock{}, &mock.ChainStorerMock{}, + dPool, ) assert.Nil(t, err) @@ -113,5 +135,5 @@ func TestIntermediateProcessorsContainerFactory_Create(t *testing.T) { container, err := ipcf.Create() assert.Nil(t, err) - assert.Equal(t, 1, container.Len()) + assert.Equal(t, 2, container.Len()) } diff --git a/process/factory/shard/preProcessorsContainerFactory.go b/process/factory/shard/preProcessorsContainerFactory.go index 1c4049089b4..280cf080667 100644 --- a/process/factory/shard/preProcessorsContainerFactory.go +++ b/process/factory/shard/preProcessorsContainerFactory.go @@ -13,17 +13,19 @@ import ( ) type preProcessorsContainerFactory struct { - shardCoordinator sharding.Coordinator - store dataRetriever.StorageService - marshalizer marshal.Marshalizer - hasher hashing.Hasher - dataPool dataRetriever.PoolsHolder - addrConverter state.AddressConverter - txProcessor process.TransactionProcessor - scProcessor process.SmartContractProcessor - scResultProcessor process.SmartContractResultProcessor - accounts state.AccountsAdapter - requestHandler process.RequestHandler + shardCoordinator sharding.Coordinator + store dataRetriever.StorageService + marshalizer marshal.Marshalizer + hasher hashing.Hasher + dataPool dataRetriever.PoolsHolder + addrConverter state.AddressConverter + txProcessor process.TransactionProcessor + scProcessor process.SmartContractProcessor + scResultProcessor process.SmartContractResultProcessor + rewardsTxProcessor process.RewardTransactionProcessor + accounts state.AccountsAdapter + requestHandler process.RequestHandler + rewardsProducer process.InternalTransactionProducer } // NewPreProcessorsContainerFactory is responsible for creating a new preProcessors factory object @@ -39,6 +41,8 @@ func NewPreProcessorsContainerFactory( txProcessor process.TransactionProcessor, scProcessor process.SmartContractProcessor, scResultProcessor 
process.SmartContractResultProcessor, + rewardsTxProcessor process.RewardTransactionProcessor, + rewardsProducer process.InternalTransactionProducer, ) (*preProcessorsContainerFactory, error) { if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { @@ -71,22 +75,30 @@ func NewPreProcessorsContainerFactory( if scResultProcessor == nil || scResultProcessor.IsInterfaceNil() { return nil, process.ErrNilSmartContractResultProcessor } + if rewardsTxProcessor == nil || rewardsTxProcessor.IsInterfaceNil() { + return nil, process.ErrNilRewardsTxProcessor + } if requestHandler == nil || requestHandler.IsInterfaceNil() { return nil, process.ErrNilRequestHandler } + if rewardsProducer == nil || rewardsProducer.IsInterfaceNil() { + return nil, process.ErrNilInternalTransactionProducer + } return &preProcessorsContainerFactory{ - shardCoordinator: shardCoordinator, - store: store, - marshalizer: marshalizer, - hasher: hasher, - dataPool: dataPool, - addrConverter: addrConverter, - txProcessor: txProcessor, - accounts: accounts, - scProcessor: scProcessor, - scResultProcessor: scResultProcessor, - requestHandler: requestHandler, + shardCoordinator: shardCoordinator, + store: store, + marshalizer: marshalizer, + hasher: hasher, + dataPool: dataPool, + addrConverter: addrConverter, + txProcessor: txProcessor, + accounts: accounts, + scProcessor: scProcessor, + scResultProcessor: scResultProcessor, + rewardsTxProcessor: rewardsTxProcessor, + requestHandler: requestHandler, + rewardsProducer: rewardsProducer, }, nil } @@ -114,6 +126,16 @@ func (ppcm *preProcessorsContainerFactory) Create() (process.PreProcessorsContai return nil, err } + preproc, err = ppcm.createRewardsTransactionPreProcessor() + if err != nil { + return nil, err + } + + err = container.Add(block.RewardsBlock, preproc) + if err != nil { + return nil, err + } + return container, nil } @@ -147,6 +169,22 @@ func (ppcm *preProcessorsContainerFactory) createSmartContractResultPreProcessor return scrPreprocessor, err } +func (ppcm *preProcessorsContainerFactory) createRewardsTransactionPreProcessor() (process.PreProcessor, error) { + rewardTxPreprocessor, err := preprocess.NewRewardTxPreprocessor( + ppcm.dataPool.RewardTransactions(), + ppcm.store, + ppcm.hasher, + ppcm.marshalizer, + ppcm.rewardsTxProcessor, + ppcm.rewardsProducer, + ppcm.shardCoordinator, + ppcm.accounts, + ppcm.requestHandler.RequestRewardTransactions, + ) + + return rewardTxPreprocessor, err +} + // IsInterfaceNil returns true if there is no value under the interface func (ppcm *preProcessorsContainerFactory) IsInterfaceNil() bool { if ppcm == nil { diff --git a/process/factory/shard/preProcessorsContainerFactory_test.go b/process/factory/shard/preProcessorsContainerFactory_test.go index 9a21fb18740..7d25e5015a6 100644 --- a/process/factory/shard/preProcessorsContainerFactory_test.go +++ b/process/factory/shard/preProcessorsContainerFactory_test.go @@ -16,13 +16,15 @@ func TestNewPreProcessorsContainerFactory_NilShardCoordinator(t *testing.T) { &mock.ChainStorerMock{}, &mock.MarshalizerMock{}, &mock.HasherMock{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.AddressConverterMock{}, &mock.AccountsStub{}, &mock.RequestHandlerMock{}, &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) assert.Equal(t, process.ErrNilShardCoordinator, err) @@ -37,13 +39,15 @@ func TestNewPreProcessorsContainerFactory_NilStore(t *testing.T) { nil, 
&mock.MarshalizerMock{}, &mock.HasherMock{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.AddressConverterMock{}, &mock.AccountsStub{}, &mock.RequestHandlerMock{}, &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) assert.Equal(t, process.ErrNilStore, err) @@ -58,13 +62,15 @@ func TestNewPreProcessorsContainerFactory_NilMarshalizer(t *testing.T) { &mock.ChainStorerMock{}, nil, &mock.HasherMock{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.AddressConverterMock{}, &mock.AccountsStub{}, &mock.RequestHandlerMock{}, &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) assert.Equal(t, process.ErrNilMarshalizer, err) @@ -79,13 +85,15 @@ func TestNewPreProcessorsContainerFactory_NilHasher(t *testing.T) { &mock.ChainStorerMock{}, &mock.MarshalizerMock{}, nil, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.AddressConverterMock{}, &mock.AccountsStub{}, &mock.RequestHandlerMock{}, &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) assert.Equal(t, process.ErrNilHasher, err) @@ -107,6 +115,8 @@ func TestNewPreProcessorsContainerFactory_NilDataPool(t *testing.T) { &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) assert.Equal(t, process.ErrNilDataPoolHolder, err) @@ -121,13 +131,15 @@ func TestNewPreProcessorsContainerFactory_NilAddrConv(t *testing.T) { &mock.ChainStorerMock{}, &mock.MarshalizerMock{}, &mock.HasherMock{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), nil, &mock.AccountsStub{}, &mock.RequestHandlerMock{}, &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) assert.Equal(t, process.ErrNilAddressConverter, err) @@ -142,13 +154,15 @@ func TestNewPreProcessorsContainerFactory_NilAccounts(t *testing.T) { &mock.ChainStorerMock{}, &mock.MarshalizerMock{}, &mock.HasherMock{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.AddressConverterMock{}, nil, &mock.RequestHandlerMock{}, &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) assert.Equal(t, process.ErrNilAccountsAdapter, err) @@ -163,13 +177,15 @@ func TestNewPreProcessorsContainerFactory_NilTxProcessor(t *testing.T) { &mock.ChainStorerMock{}, &mock.MarshalizerMock{}, &mock.HasherMock{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.AddressConverterMock{}, &mock.AccountsStub{}, &mock.RequestHandlerMock{}, nil, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) assert.Equal(t, process.ErrNilTxProcessor, err) @@ -184,13 +200,15 @@ func TestNewPreProcessorsContainerFactory_NilSCProcessor(t *testing.T) { &mock.ChainStorerMock{}, &mock.MarshalizerMock{}, &mock.HasherMock{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.AddressConverterMock{}, &mock.AccountsStub{}, &mock.RequestHandlerMock{}, 
&mock.TxProcessorMock{}, nil, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) assert.Equal(t, process.ErrNilSmartContractProcessor, err) @@ -205,19 +223,44 @@ func TestNewPreProcessorsContainerFactory_NilSCR(t *testing.T) { &mock.ChainStorerMock{}, &mock.MarshalizerMock{}, &mock.HasherMock{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.AddressConverterMock{}, &mock.AccountsStub{}, &mock.RequestHandlerMock{}, &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, nil, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) assert.Equal(t, process.ErrNilSmartContractResultProcessor, err) assert.Nil(t, ppcm) } +func TestNewPreProcessorsContainerFactory_NilRewardTxProcessor(t *testing.T) { + t.Parallel() + + ppcm, err := NewPreProcessorsContainerFactory( + mock.NewMultiShardsCoordinatorMock(3), + &mock.ChainStorerMock{}, + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + mock.NewPoolsHolderMock(), + &mock.AddressConverterMock{}, + &mock.AccountsStub{}, + &mock.RequestHandlerMock{}, + &mock.TxProcessorMock{}, + &mock.SCProcessorMock{}, + &mock.SmartContractResultsProcessorMock{}, + nil, + &mock.IntermediateTransactionHandlerMock{}, + ) + + assert.Equal(t, process.ErrNilRewardsTxProcessor, err) + assert.Nil(t, ppcm) +} + func TestNewPreProcessorsContainerFactory_NilRequestHandler(t *testing.T) { t.Parallel() @@ -226,13 +269,15 @@ func TestNewPreProcessorsContainerFactory_NilRequestHandler(t *testing.T) { &mock.ChainStorerMock{}, &mock.MarshalizerMock{}, &mock.HasherMock{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.AddressConverterMock{}, &mock.AccountsStub{}, nil, &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) assert.Equal(t, process.ErrNilRequestHandler, err) @@ -247,13 +292,15 @@ func TestNewPreProcessorsContainerFactory(t *testing.T) { &mock.ChainStorerMock{}, &mock.MarshalizerMock{}, &mock.HasherMock{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.AddressConverterMock{}, &mock.AccountsStub{}, &mock.RequestHandlerMock{}, &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) assert.Nil(t, err) @@ -279,6 +326,8 @@ func TestPreProcessorsContainerFactory_CreateErrTxPreproc(t *testing.T) { &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) assert.Nil(t, err) @@ -314,6 +363,8 @@ func TestPreProcessorsContainerFactory_CreateErrScrPreproc(t *testing.T) { &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) assert.Nil(t, err) @@ -339,6 +390,12 @@ func TestPreProcessorsContainerFactory_Create(t *testing.T) { }, } } + dataPool.RewardTransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{ + RegisterHandlerCalled: func(i func(key []byte)) { + }, + } + } ppcm, err := NewPreProcessorsContainerFactory( mock.NewMultiShardsCoordinatorMock(3), @@ -352,12 +409,14 @@ func TestPreProcessorsContainerFactory_Create(t *testing.T) { &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + 
&mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) assert.Nil(t, err) assert.NotNil(t, ppcm) container, err := ppcm.Create() - assert.Equal(t, 2, container.Len()) + assert.Equal(t, 3, container.Len()) assert.Nil(t, err) } diff --git a/process/interface.go b/process/interface.go index c4abb19cb6a..d4c2aa377cb 100644 --- a/process/interface.go +++ b/process/interface.go @@ -7,6 +7,7 @@ import ( "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" "github.com/ElrondNetwork/elrond-go/data/smartContractResult" "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/data/transaction" @@ -23,6 +24,18 @@ type TransactionProcessor interface { IsInterfaceNil() bool } +// RewardTransactionProcessor is the interface for reward transaction execution engine +type RewardTransactionProcessor interface { + ProcessRewardTransaction(rewardTx *rewardTx.RewardTx) error + IsInterfaceNil() bool +} + +// RewardTransactionPreProcessor prepares the processing of reward transactions +type RewardTransactionPreProcessor interface { + AddComputedRewardMiniBlocks(computedRewardMiniblocks block.MiniBlockSlice) + IsInterfaceNil() bool +} + // SmartContractResultProcessor is the main interface for smart contract result execution engine type SmartContractResultProcessor interface { ProcessSmartContractResult(scr *smartContractResult.SmartContractResult) error @@ -92,6 +105,40 @@ type IntermediateTransactionHandler interface { IsInterfaceNil() bool } +// InternalTransactionProducer creates system transactions (e.g. rewards) +type InternalTransactionProducer interface { + CreateAllInterMiniBlocks() map[uint32]*block.MiniBlock + IsInterfaceNil() bool +} + +// TransactionVerifier interface validates if the transaction is good and if it should be processed +type TransactionVerifier interface { + IsTransactionValid(tx data.TransactionHandler) error +} + +// TransactionFeeHandler processes the transaction fee +type TransactionFeeHandler interface { + ProcessTransactionFee(cost *big.Int) + IsInterfaceNil() bool +} + +// SpecialAddressHandler responds with needed special addresses +type SpecialAddressHandler interface { + SetShardConsensusData(randomness []byte, round uint64, epoch uint32, shardID uint32) error + SetMetaConsensusData(randomness []byte, round uint64, epoch uint32) error + ConsensusShardRewardData() *data.ConsensusRewardData + ConsensusMetaRewardData() []*data.ConsensusRewardData + ClearMetaConsensusData() + ElrondCommunityAddress() []byte + LeaderAddress() []byte + BurnAddress() []byte + SetElrondCommunityAddress(elrond []byte) + ShardIdForAddress([]byte) (uint32, error) + Epoch() uint32 + Round() uint64 + IsInterfaceNil() bool +} + // PreProcessor is an interface used to prepare and process transaction data type PreProcessor interface { CreateBlockStarted() @@ -101,7 +148,7 @@ type PreProcessor interface { RestoreTxBlockIntoPools(body block.Body, miniBlockPool storage.Cacher) (int, error) SaveTxBlockToStorage(body block.Body) error - ProcessBlockTransactions(body block.Body, round uint64, haveTime func() time.Duration) error + ProcessBlockTransactions(body block.Body, round uint64, haveTime func() bool) error RequestBlockTransactions(body block.Body) int CreateMarshalizedData(txHashes [][]byte) ([][]byte, error) @@ -109,6 +156,7 @@ type PreProcessor interface { RequestTransactionsForMiniBlock(mb block.MiniBlock) int 
ProcessMiniBlock(miniBlock *block.MiniBlock, haveTime func() bool, round uint64) error CreateAndProcessMiniBlock(sndShardId, dstShardId uint32, spaceRemained int, haveTime func() bool, round uint64) (*block.MiniBlock, error) + CreateAndProcessMiniBlocks(maxTxSpaceRemained uint32, maxMbSpaceRemained uint32, round uint64, haveTime func() bool) (block.MiniBlockSlice, error) GetAllCurrentUsedTxs() map[string]data.TransactionHandler IsInterfaceNil() bool @@ -126,6 +174,7 @@ type BlockProcessor interface { DecodeBlockBody(dta []byte) data.BodyHandler DecodeBlockHeader(dta []byte) data.HeaderHandler AddLastNotarizedHdr(shardId uint32, processedHdr data.HeaderHandler) + SetConsensusData(randomness []byte, round uint64, epoch uint32, shardId uint32) IsInterfaceNil() bool } @@ -286,13 +335,6 @@ type TopicMessageHandler interface { TopicHandler } -// ChronologyValidator defines the functionality needed to validate a received header block (shard or metachain) -// from chronology point of view -type ChronologyValidator interface { - ValidateReceivedBlock(shardID uint32, epoch uint32, nonce uint64, round uint64) error - IsInterfaceNil() bool -} - // DataPacker can split a large slice of byte slices in smaller packets type DataPacker interface { PackDataInChunks(data [][]byte, limit int) ([][]byte, error) @@ -314,6 +356,7 @@ type RequestHandler interface { RequestHeaderByNonce(shardId uint32, nonce uint64) RequestTransaction(shardId uint32, txHashes [][]byte) RequestUnsignedTransactions(destShardID uint32, scrHashes [][]byte) + RequestRewardTransactions(destShardID uint32, txHashes [][]byte) RequestMiniBlock(shardId uint32, miniblockHash []byte) RequestHeader(shardId uint32, hash []byte) IsInterfaceNil() bool diff --git a/process/mock/blockProcessorMock.go b/process/mock/blockProcessorMock.go index 9c764a288e0..100e94f5e9b 100644 --- a/process/mock/blockProcessorMock.go +++ b/process/mock/blockProcessorMock.go @@ -67,6 +67,10 @@ func (blProcMock BlockProcessorMock) AddLastNotarizedHdr(shardId uint32, process blProcMock.AddLastNotarizedHdrCalled(shardId, processedHdr) } +func (blProcMock BlockProcessorMock) SetConsensusData(randomness []byte, round uint64, epoch uint32, shardId uint32) { + panic("implement me") +} + // IsInterfaceNil returns true if there is no value under the interface func (blProcMock *BlockProcessorMock) IsInterfaceNil() bool { if blProcMock == nil { diff --git a/process/mock/chronologyValidatorStub.go b/process/mock/chronologyValidatorStub.go deleted file mode 100644 index fbb258ae219..00000000000 --- a/process/mock/chronologyValidatorStub.go +++ /dev/null @@ -1,17 +0,0 @@ -package mock - -type ChronologyValidatorStub struct { - ValidateReceivedBlockCalled func(shardID uint32, epoch uint32, nonce uint64, round uint64) error -} - -func (cvs *ChronologyValidatorStub) ValidateReceivedBlock(shardID uint32, epoch uint32, nonce uint64, round uint64) error { - return cvs.ValidateReceivedBlockCalled(shardID, epoch, nonce, round) -} - -// IsInterfaceNil returns true if there is no value under the interface -func (cvs *ChronologyValidatorStub) IsInterfaceNil() bool { - if cvs == nil { - return true - } - return false -} diff --git a/process/mock/multiSigMock.go b/process/mock/multiSigMock.go index 453660e848f..7b3c2bc8633 100644 --- a/process/mock/multiSigMock.go +++ b/process/mock/multiSigMock.go @@ -1,6 +1,8 @@ package mock import ( + "bytes" + "github.com/ElrondNetwork/elrond-go/crypto" "github.com/ElrondNetwork/elrond-go/hashing" ) @@ -77,13 +79,28 @@ func (bnm *BelNevMock) 
SetAggregatedSig(aggSig []byte) error {
 
 // Verify returns nil if the aggregated signature is verified for the given public keys
 func (bnm *BelNevMock) Verify(msg []byte, bitmap []byte) error {
-	return bnm.VerifyMock(msg, bitmap)
+	if bnm.VerifyMock != nil {
+		return bnm.VerifyMock(msg, bitmap)
+	}
+
+	if msg == nil {
+		return crypto.ErrNilMessage
+	}
+
+	if bitmap == nil {
+		return crypto.ErrNilBitmap
+	}
+
+	return nil
 }
 
 // CreateCommitment creates a secret commitment and the corresponding public commitment point
 func (bnm *BelNevMock) CreateCommitment() (commSecret []byte, commitment []byte) {
+	if bnm.CreateCommitmentMock != nil {
+		return bnm.CreateCommitmentMock()
+	}
 
-	return bnm.CreateCommitmentMock()
+	return []byte("commitment secret"), []byte("commitment")
 }
 
 // StoreCommitmentHash adds a commitment hash to the list on the specified position
@@ -92,18 +109,18 @@ func (bnm *BelNevMock) StoreCommitmentHash(index uint16, commHash []byte) error
 		bnm.commHash = commHash
 
 		return nil
-	} else {
-		return bnm.StoreCommitmentHashMock(index, commHash)
 	}
+
+	return bnm.StoreCommitmentHashMock(index, commHash)
 }
 
 // CommitmentHash returns the commitment hash from the list on the specified position
 func (bnm *BelNevMock) CommitmentHash(index uint16) ([]byte, error) {
 	if bnm.CommitmentHashMock == nil {
 		return bnm.commHash, nil
-	} else {
-		return bnm.CommitmentHashMock(index)
 	}
+
+	return bnm.CommitmentHashMock(index)
 }
 
 // StoreCommitment adds a commitment to the list on the specified position
@@ -116,9 +133,9 @@ func (bnm *BelNevMock) StoreCommitment(index uint16, value []byte) error {
 		bnm.commitments[index] = value
 
 		return nil
-	} else {
-		return bnm.StoreCommitmentMock(index, value)
 	}
+
+	return bnm.StoreCommitmentMock(index, value)
 }
 
 // Commitment returns the commitment from the list with the specified position
@@ -129,19 +146,27 @@ func (bnm *BelNevMock) Commitment(index uint16) ([]byte, error) {
 	}
 
 	return bnm.commitments[index], nil
-	} else {
-		return bnm.CommitmentMock(index)
 	}
+
+	return bnm.CommitmentMock(index)
 }
 
 // AggregateCommitments aggregates the list of commitments
 func (bnm *BelNevMock) AggregateCommitments(bitmap []byte) error {
-	return bnm.AggregateCommitmentsMock(bitmap)
+	if bnm.AggregateCommitmentsMock != nil {
+		return bnm.AggregateCommitmentsMock(bitmap)
+	}
+
+	return nil
 }
 
 // CreateSignatureShare creates a partial signature
 func (bnm *BelNevMock) CreateSignatureShare(msg []byte, bitmap []byte) ([]byte, error) {
-	return bnm.CreateSignatureShareMock(msg, bitmap)
+	if bnm.CreateSignatureShareMock != nil {
+		return bnm.CreateSignatureShareMock(msg, bitmap)
+	}
+
+	return []byte("signature share"), nil
 }
 
 // StoreSignatureShare adds the partial signature of the signer with specified position
@@ -156,12 +181,28 @@ func (bnm *BelNevMock) StoreSignatureShare(index uint16, sig []byte) error {
 
 // VerifySignatureShare verifies the partial signature of the signer with specified position
 func (bnm *BelNevMock) VerifySignatureShare(index uint16, sig []byte, msg []byte, bitmap []byte) error {
-	return bnm.VerifySignatureShareMock(index, sig, msg, bitmap)
+	if bnm.VerifySignatureShareMock != nil {
+		return bnm.VerifySignatureShareMock(index, sig, msg, bitmap)
+	}
+
+	if bytes.Equal([]byte("signature share"), sig) {
+		return nil
+	}
+
+	return crypto.ErrSigNotValid
 }
 
 // AggregateSigs aggregates all collected partial signatures
 func (bnm *BelNevMock) AggregateSigs(bitmap []byte) ([]byte, error) {
-	return bnm.AggregateSigsMock(bitmap)
+	if bnm.AggregateSigsMock != 
nil { + return bnm.AggregateSigsMock(bitmap) + } + + if bitmap == nil { + return nil, crypto.ErrNilBitmap + } + + return []byte("aggregated signature"), nil } // SignatureShare diff --git a/process/mock/nodesCoordinatorMock.go b/process/mock/nodesCoordinatorMock.go new file mode 100644 index 00000000000..c9bc56f6276 --- /dev/null +++ b/process/mock/nodesCoordinatorMock.go @@ -0,0 +1,185 @@ +package mock + +import ( + "bytes" + "fmt" + "math/big" + + "github.com/ElrondNetwork/elrond-go/sharding" +) + +// NodesCoordinator defines the behaviour of a struct able to do validator group selection +type NodesCoordinatorMock struct { + Validators map[uint32][]sharding.Validator + ShardConsensusSize uint32 + MetaConsensusSize uint32 + ShardId uint32 + NbShards uint32 + GetSelectedPublicKeysCalled func(selection []byte, shardId uint32) (publicKeys []string, err error) + GetValidatorsPublicKeysCalled func(randomness []byte, round uint64, shardId uint32) ([]string, error) + GetValidatorsRewardsAddressesCalled func(randomness []byte, round uint64, shardId uint32) ([]string, error) + LoadNodesPerShardsCalled func(nodes map[uint32][]sharding.Validator) error + ComputeValidatorsGroupCalled func(randomness []byte, round uint64, shardId uint32) (validatorsGroup []sharding.Validator, err error) + GetValidatorWithPublicKeyCalled func(publicKey []byte) (validator sharding.Validator, shardId uint32, err error) +} + +func NewNodesCoordinatorMock() *NodesCoordinatorMock { + nbShards := uint32(1) + nodesPerShard := 2 + validatorsMap := make(map[uint32][]sharding.Validator) + + for sh := uint32(0); sh < nbShards; sh++ { + validatorsList := make([]sharding.Validator, nodesPerShard) + for v := 0; v < nodesPerShard; v++ { + validatorsList[v], _ = sharding.NewValidator( + big.NewInt(10), + 1, + []byte(fmt.Sprintf("pubKey%d%d", sh, v)), + []byte(fmt.Sprintf("address%d%d", sh, v)), + ) + } + validatorsMap[sh] = validatorsList + } + + return &NodesCoordinatorMock{ + ShardConsensusSize: 1, + MetaConsensusSize: 1, + ShardId: 0, + NbShards: nbShards, + Validators: validatorsMap, + } +} + +func (ncm *NodesCoordinatorMock) GetSelectedPublicKeys(selection []byte, shardId uint32) (publicKeys []string, err error) { + if ncm.GetSelectedPublicKeysCalled != nil { + return ncm.GetSelectedPublicKeysCalled(selection, shardId) + } + + if len(ncm.Validators) == 0 { + return nil, sharding.ErrNilInputNodesMap + } + + pubKeys := make([]string, 0) + + for _, v := range ncm.Validators[shardId] { + pubKeys = append(pubKeys, string(v.PubKey())) + } + + return pubKeys, nil +} + +func (ncm *NodesCoordinatorMock) GetValidatorsPublicKeys( + randomness []byte, + round uint64, + shardId uint32, +) ([]string, error) { + if ncm.GetValidatorsPublicKeysCalled != nil { + return ncm.GetValidatorsPublicKeysCalled(randomness, round, shardId) + } + + validators, err := ncm.ComputeValidatorsGroup(randomness, round, shardId) + if err != nil { + return nil, err + } + + valGrStr := make([]string, 0) + + for _, v := range validators { + valGrStr = append(valGrStr, string(v.PubKey())) + } + + return valGrStr, nil +} + +func (ncm *NodesCoordinatorMock) GetValidatorsRewardsAddresses( + randomness []byte, + round uint64, + shardId uint32, +) ([]string, error) { + if ncm.GetValidatorsPublicKeysCalled != nil { + return ncm.GetValidatorsRewardsAddressesCalled(randomness, round, shardId) + } + + validators, err := ncm.ComputeValidatorsGroup(randomness, round, shardId) + if err != nil { + return nil, err + } + + addresses := make([]string, 0) + for _, v := range validators 
{ + addresses = append(addresses, string(v.Address())) + } + + return addresses, nil +} + +func (ncm *NodesCoordinatorMock) SetNodesPerShards(nodes map[uint32][]sharding.Validator) error { + if ncm.LoadNodesPerShardsCalled != nil { + return ncm.LoadNodesPerShardsCalled(nodes) + } + + if nodes == nil { + return sharding.ErrNilInputNodesMap + } + + ncm.Validators = nodes + + return nil +} + +func (ncm *NodesCoordinatorMock) ComputeValidatorsGroup( + randomess []byte, + round uint64, + shardId uint32, +) ([]sharding.Validator, error) { + var consensusSize uint32 + + if ncm.ComputeValidatorsGroupCalled != nil { + return ncm.ComputeValidatorsGroupCalled(randomess, round, shardId) + } + + if ncm.ShardId == sharding.MetachainShardId { + consensusSize = ncm.MetaConsensusSize + } else { + consensusSize = ncm.ShardConsensusSize + } + + if randomess == nil { + return nil, sharding.ErrNilRandomness + } + + validatorsGroup := make([]sharding.Validator, 0) + + for i := uint32(0); i < consensusSize; i++ { + validatorsGroup = append(validatorsGroup, ncm.Validators[shardId][i]) + } + + return validatorsGroup, nil +} + +func (ncm *NodesCoordinatorMock) GetValidatorWithPublicKey(publicKey []byte) (sharding.Validator, uint32, error) { + if ncm.GetValidatorWithPublicKeyCalled != nil { + return ncm.GetValidatorWithPublicKeyCalled(publicKey) + } + + if publicKey == nil { + return nil, 0, sharding.ErrNilPubKey + } + + for shardId, shardEligible := range ncm.Validators { + for i := 0; i < len(shardEligible); i++ { + if bytes.Equal(publicKey, shardEligible[i].PubKey()) { + return shardEligible[i], shardId, nil + } + } + } + + return nil, 0, sharding.ErrValidatorNotFound +} + +func (ncm *NodesCoordinatorMock) IsInterfaceNil() bool { + if ncm == nil { + return true + } + return false +} diff --git a/process/mock/poolsHolderFake.go b/process/mock/poolsHolderMock.go similarity index 56% rename from process/mock/poolsHolderFake.go rename to process/mock/poolsHolderMock.go index 273a9d0f9c6..f3f9a30c576 100644 --- a/process/mock/poolsHolderFake.go +++ b/process/mock/poolsHolderMock.go @@ -9,9 +9,10 @@ import ( "github.com/ElrondNetwork/elrond-go/storage/storageUnit" ) -type PoolsHolderFake struct { +type PoolsHolderMock struct { transactions dataRetriever.ShardedDataCacherNotifier - unsignedtransactions dataRetriever.ShardedDataCacherNotifier + unsignedTransactions dataRetriever.ShardedDataCacherNotifier + rewardTransactions dataRetriever.ShardedDataCacherNotifier headers storage.Cacher metaBlocks storage.Cacher hdrNonces dataRetriever.Uint64SyncMapCacher @@ -20,10 +21,11 @@ type PoolsHolderFake struct { metaHdrNonces dataRetriever.Uint64SyncMapCacher } -func NewPoolsHolderFake() *PoolsHolderFake { - phf := &PoolsHolderFake{} +func NewPoolsHolderMock() *PoolsHolderMock { + phf := &PoolsHolderMock{} phf.transactions, _ = shardedData.NewShardedData(storageUnit.CacheConfig{Size: 10000, Type: storageUnit.LRUCache}) - phf.unsignedtransactions, _ = shardedData.NewShardedData(storageUnit.CacheConfig{Size: 10000, Type: storageUnit.LRUCache}) + phf.unsignedTransactions, _ = shardedData.NewShardedData(storageUnit.CacheConfig{Size: 10000, Type: storageUnit.LRUCache}) + phf.rewardTransactions, _ = shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache}) phf.headers, _ = storageUnit.NewCache(storageUnit.LRUCache, 10000, 1) phf.metaBlocks, _ = storageUnit.NewCache(storageUnit.LRUCache, 10000, 1) cacheHdrNonces, _ := storageUnit.NewCache(storageUnit.LRUCache, 10000, 1) @@ -41,48 +43,52 @@ func 
NewPoolsHolderFake() *PoolsHolderFake { return phf } -func (phf *PoolsHolderFake) Transactions() dataRetriever.ShardedDataCacherNotifier { - return phf.transactions +func (phm *PoolsHolderMock) Transactions() dataRetriever.ShardedDataCacherNotifier { + return phm.transactions } -func (phf *PoolsHolderFake) UnsignedTransactions() dataRetriever.ShardedDataCacherNotifier { - return phf.unsignedtransactions +func (phm *PoolsHolderMock) UnsignedTransactions() dataRetriever.ShardedDataCacherNotifier { + return phm.unsignedTransactions } -func (phf *PoolsHolderFake) Headers() storage.Cacher { - return phf.headers +func (phm *PoolsHolderMock) RewardTransactions() dataRetriever.ShardedDataCacherNotifier { + return phm.rewardTransactions } -func (phf *PoolsHolderFake) HeadersNonces() dataRetriever.Uint64SyncMapCacher { - return phf.hdrNonces +func (phm *PoolsHolderMock) Headers() storage.Cacher { + return phm.headers } -func (phf *PoolsHolderFake) MiniBlocks() storage.Cacher { - return phf.miniBlocks +func (phm *PoolsHolderMock) HeadersNonces() dataRetriever.Uint64SyncMapCacher { + return phm.hdrNonces } -func (phf *PoolsHolderFake) PeerChangesBlocks() storage.Cacher { - return phf.peerChangesBlocks +func (phm *PoolsHolderMock) MiniBlocks() storage.Cacher { + return phm.miniBlocks } -func (phf *PoolsHolderFake) MetaBlocks() storage.Cacher { - return phf.metaBlocks +func (phm *PoolsHolderMock) PeerChangesBlocks() storage.Cacher { + return phm.peerChangesBlocks } -func (phf *PoolsHolderFake) MetaHeadersNonces() dataRetriever.Uint64SyncMapCacher { - return phf.metaHdrNonces +func (phm *PoolsHolderMock) MetaBlocks() storage.Cacher { + return phm.metaBlocks } -func (phf *PoolsHolderFake) SetTransactions(transactions dataRetriever.ShardedDataCacherNotifier) { - phf.transactions = transactions +func (phm *PoolsHolderMock) MetaHeadersNonces() dataRetriever.Uint64SyncMapCacher { + return phm.metaHdrNonces } -func (phf *PoolsHolderFake) SetUnsignedTransactions(scrs dataRetriever.ShardedDataCacherNotifier) { - phf.unsignedtransactions = scrs +func (phm *PoolsHolderMock) SetTransactions(transactions dataRetriever.ShardedDataCacherNotifier) { + phm.transactions = transactions +} + +func (phm *PoolsHolderMock) SetUnsignedTransactions(scrs dataRetriever.ShardedDataCacherNotifier) { + phm.unsignedTransactions = scrs } // IsInterfaceNil returns true if there is no value under the interface -func (phf *PoolsHolderFake) IsInterfaceNil() bool { +func (phf *PoolsHolderMock) IsInterfaceNil() bool { if phf == nil { return true } diff --git a/process/mock/poolsHolderStub.go b/process/mock/poolsHolderStub.go index 43599982ea8..d189b57d055 100644 --- a/process/mock/poolsHolderStub.go +++ b/process/mock/poolsHolderStub.go @@ -11,6 +11,7 @@ type PoolsHolderStub struct { PeerChangesBlocksCalled func() storage.Cacher TransactionsCalled func() dataRetriever.ShardedDataCacherNotifier UnsignedTransactionsCalled func() dataRetriever.ShardedDataCacherNotifier + RewardTransactionsCalled func() dataRetriever.ShardedDataCacherNotifier MiniBlocksCalled func() storage.Cacher MetaBlocksCalled func() storage.Cacher } @@ -43,6 +44,10 @@ func (phs *PoolsHolderStub) UnsignedTransactions() dataRetriever.ShardedDataCach return phs.UnsignedTransactionsCalled() } +func (phs *PoolsHolderStub) RewardTransactions() dataRetriever.ShardedDataCacherNotifier { + return phs.RewardTransactionsCalled() +} + // IsInterfaceNil returns true if there is no value under the interface func (phs *PoolsHolderStub) IsInterfaceNil() bool { if phs == nil { diff --git 
a/process/mock/preprocessorMock.go b/process/mock/preprocessorMock.go index f3a1484430a..ab03b54b001 100644 --- a/process/mock/preprocessorMock.go +++ b/process/mock/preprocessorMock.go @@ -14,11 +14,12 @@ type PreProcessorMock struct { RemoveTxBlockFromPoolsCalled func(body block.Body, miniBlockPool storage.Cacher) error RestoreTxBlockIntoPoolsCalled func(body block.Body, miniBlockPool storage.Cacher) (int, error) SaveTxBlockToStorageCalled func(body block.Body) error - ProcessBlockTransactionsCalled func(body block.Body, round uint64, haveTime func() time.Duration) error + ProcessBlockTransactionsCalled func(body block.Body, round uint64, haveTime func() bool) error RequestBlockTransactionsCalled func(body block.Body) int CreateMarshalizedDataCalled func(txHashes [][]byte) ([][]byte, error) RequestTransactionsForMiniBlockCalled func(mb block.MiniBlock) int ProcessMiniBlockCalled func(miniBlock *block.MiniBlock, haveTime func() bool, round uint64) error + CreateAndProcessMiniBlocksCalled func(maxTxSpaceRemained uint32, maxMbSpaceRemained uint32, round uint64, haveTime func() bool) (block.MiniBlockSlice, error) CreateAndProcessMiniBlockCalled func(sndShardId, dstShardId uint32, spaceRemained int, haveTime func() bool, round uint64) (*block.MiniBlock, error) GetAllCurrentUsedTxsCalled func() map[string]data.TransactionHandler } @@ -58,7 +59,7 @@ func (ppm *PreProcessorMock) SaveTxBlockToStorage(body block.Body) error { return ppm.SaveTxBlockToStorageCalled(body) } -func (ppm *PreProcessorMock) ProcessBlockTransactions(body block.Body, round uint64, haveTime func() time.Duration) error { +func (ppm *PreProcessorMock) ProcessBlockTransactions(body block.Body, round uint64, haveTime func() bool) error { if ppm.ProcessBlockTransactionsCalled == nil { return nil } @@ -93,6 +94,20 @@ func (ppm *PreProcessorMock) ProcessMiniBlock(miniBlock *block.MiniBlock, haveTi return ppm.ProcessMiniBlockCalled(miniBlock, haveTime, round) } +// CreateAndProcessMiniBlocks creates miniblocks from storage and processes the reward transactions added into the miniblocks +// as long as it has time +func (ppm *PreProcessorMock) CreateAndProcessMiniBlocks( + maxTxSpaceRemained uint32, + maxMbSpaceRemained uint32, + round uint64, + haveTime func() bool, +) (block.MiniBlockSlice, error) { + if ppm.CreateAndProcessMiniBlocksCalled == nil { + return nil, nil + } + return ppm.CreateAndProcessMiniBlocksCalled(maxTxSpaceRemained, maxMbSpaceRemained, round, haveTime) +} + func (ppm *PreProcessorMock) CreateAndProcessMiniBlock(sndShardId, dstShardId uint32, spaceRemained int, haveTime func() bool, round uint64) (*block.MiniBlock, error) { if ppm.CreateAndProcessMiniBlockCalled == nil { return nil, nil diff --git a/process/mock/requestHandlerMock.go b/process/mock/requestHandlerMock.go index 8f6f016ecd5..0ebe0e160d0 100644 --- a/process/mock/requestHandlerMock.go +++ b/process/mock/requestHandlerMock.go @@ -3,6 +3,7 @@ package mock type RequestHandlerMock struct { RequestTransactionHandlerCalled func(destShardID uint32, txHashes [][]byte) RequestScrHandlerCalled func(destShardID uint32, txHashes [][]byte) + RequestRewardTxHandlerCalled func(destShardID uint32, txHashes [][]byte) RequestMiniBlockHandlerCalled func(destShardID uint32, miniblockHash []byte) RequestHeaderHandlerCalled func(destShardID uint32, hash []byte) RequestHeaderHandlerByNonceCalled func(destShardID uint32, nonce uint64) @@ -22,6 +23,13 @@ func (rrh *RequestHandlerMock) RequestUnsignedTransactions(destShardID uint32, t rrh.RequestScrHandlerCalled(destShardID, 
txHashes) } +func (rrh *RequestHandlerMock) RequestRewardTransactions(destShardID uint32, txHashes [][]byte) { + if rrh.RequestRewardTxHandlerCalled == nil { + return + } + rrh.RequestRewardTxHandlerCalled(destShardID, txHashes) +} + func (rrh *RequestHandlerMock) RequestMiniBlock(shardId uint32, miniblockHash []byte) { if rrh.RequestMiniBlockHandlerCalled == nil { return diff --git a/process/mock/rewardTxProcessorMock.go b/process/mock/rewardTxProcessorMock.go new file mode 100644 index 00000000000..883879e6b56 --- /dev/null +++ b/process/mock/rewardTxProcessorMock.go @@ -0,0 +1,24 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go/data/rewardTx" +) + +type RewardTxProcessorMock struct { + ProcessRewardTransactionCalled func(rTx *rewardTx.RewardTx) error +} + +func (scrp *RewardTxProcessorMock) ProcessRewardTransaction(rTx *rewardTx.RewardTx) error { + if scrp.ProcessRewardTransactionCalled == nil { + return nil + } + + return scrp.ProcessRewardTransactionCalled(rTx) +} + +func (scrp *RewardTxProcessorMock) IsInterfaceNil() bool { + if scrp == nil { + return true + } + return false +} diff --git a/process/mock/specialAddressHandlerMock.go b/process/mock/specialAddressHandlerMock.go new file mode 100644 index 00000000000..661756611ef --- /dev/null +++ b/process/mock/specialAddressHandlerMock.go @@ -0,0 +1,148 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/sharding" +) + +type SpecialAddressHandlerMock struct { + ElrondCommunityAddressCalled func() []byte + LeaderAddressCalled func() []byte + BurnAddressCalled func() []byte + ShardIdForAddressCalled func([]byte) (uint32, error) + AdrConv state.AddressConverter + ShardCoordinator sharding.Coordinator + NodesCoordinator sharding.NodesCoordinator + + shardConsensusData *data.ConsensusRewardData + metaConsensusData []*data.ConsensusRewardData +} + +func NewSpecialAddressHandlerMock( + addrConv state.AddressConverter, + shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, +) *SpecialAddressHandlerMock { + return &SpecialAddressHandlerMock{ + ElrondCommunityAddressCalled: nil, + LeaderAddressCalled: nil, + BurnAddressCalled: nil, + ShardIdForAddressCalled: nil, + AdrConv: addrConv, + ShardCoordinator: shardCoordinator, + NodesCoordinator: nodesCoordinator, + shardConsensusData: &data.ConsensusRewardData{ + Round: 0, + Epoch: 0, + Addresses: nil, + }, + metaConsensusData: make([]*data.ConsensusRewardData, 0), + } +} + +func (sh *SpecialAddressHandlerMock) SetElrondCommunityAddress(elrond []byte) { +} + +func (sh *SpecialAddressHandlerMock) SetShardConsensusData(randomness []byte, round uint64, epoch uint32, shardId uint32) error { + addresses, err := sh.NodesCoordinator.GetValidatorsRewardsAddresses(randomness, round, shardId) + if err != nil { + return err + } + + sh.shardConsensusData = &data.ConsensusRewardData{ + Round: round, + Epoch: epoch, + Addresses: addresses, + } + + return nil +} + +func (sh *SpecialAddressHandlerMock) ConsensusShardRewardData() *data.ConsensusRewardData { + return sh.shardConsensusData +} + +func (sh *SpecialAddressHandlerMock) SetMetaConsensusData(randomness []byte, round uint64, epoch uint32) error { + if sh.metaConsensusData == nil { + sh.metaConsensusData = make([]*data.ConsensusRewardData, 0) + } + + addresses, err := sh.NodesCoordinator.GetValidatorsRewardsAddresses(randomness, round, sharding.MetachainShardId) + if err != nil { + return err + } + + 
sh.metaConsensusData = append(sh.metaConsensusData, &data.ConsensusRewardData{ + Round: round, + Epoch: epoch, + Addresses: addresses, + }) + + return nil +} + +func (sh *SpecialAddressHandlerMock) ClearMetaConsensusData() { + sh.metaConsensusData = make([]*data.ConsensusRewardData, 0) +} + +func (sh *SpecialAddressHandlerMock) ConsensusMetaRewardData() []*data.ConsensusRewardData { + return sh.metaConsensusData +} + +func (sh *SpecialAddressHandlerMock) BurnAddress() []byte { + if sh.BurnAddressCalled == nil { + return []byte("burn") + } + + return sh.BurnAddressCalled() +} + +func (sh *SpecialAddressHandlerMock) ElrondCommunityAddress() []byte { + if sh.ElrondCommunityAddressCalled == nil { + return []byte("elrond") + } + + return sh.ElrondCommunityAddressCalled() +} + +func (sh *SpecialAddressHandlerMock) Round() uint64 { + if sh.shardConsensusData == nil { + return 0 + } + + return sh.shardConsensusData.Round +} + +func (sh *SpecialAddressHandlerMock) Epoch() uint32 { + if sh.shardConsensusData == nil { + return 0 + } + + return sh.shardConsensusData.Epoch +} + +func (sh *SpecialAddressHandlerMock) LeaderAddress() []byte { + if sh.LeaderAddressCalled == nil { + return []byte("leader") + } + + return sh.LeaderAddressCalled() +} + +func (sh *SpecialAddressHandlerMock) ShardIdForAddress(addr []byte) (uint32, error) { + convAdr, err := sh.AdrConv.CreateAddressFromPublicKeyBytes(addr) + if err != nil { + return 0, err + } + + return sh.ShardCoordinator.ComputeId(convAdr), nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (sh *SpecialAddressHandlerMock) IsInterfaceNil() bool { + if sh == nil { + return true + } + return false +} diff --git a/process/mock/txTypeHandlerMock.go b/process/mock/txTypeHandlerMock.go new file mode 100644 index 00000000000..76cedc35360 --- /dev/null +++ b/process/mock/txTypeHandlerMock.go @@ -0,0 +1,26 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/process" +) + +type TxTypeHandlerMock struct { + ComputeTransactionTypeCalled func(tx data.TransactionHandler) (process.TransactionType, error) +} + +func (th *TxTypeHandlerMock) ComputeTransactionType(tx data.TransactionHandler) (process.TransactionType, error) { + if th.ComputeTransactionTypeCalled == nil { + return process.MoveBalance, nil + } + + return th.ComputeTransactionTypeCalled(tx) +} + +// IsInterfaceNil returns true if there is no value under the interface +func (th *TxTypeHandlerMock) IsInterfaceNil() bool { + if th == nil { + return true + } + return false +} diff --git a/process/mock/unsignedTxHandlerMock.go b/process/mock/unsignedTxHandlerMock.go new file mode 100644 index 00000000000..7e7175bdbff --- /dev/null +++ b/process/mock/unsignedTxHandlerMock.go @@ -0,0 +1,61 @@ +package mock + +import ( + "math/big" + + "github.com/ElrondNetwork/elrond-go/data" +) + +type UnsignedTxHandlerMock struct { + CleanProcessedUtxsCalled func() + ProcessTransactionFeeCalled func(cost *big.Int) + CreateAllUTxsCalled func() []data.TransactionHandler + VerifyCreatedUTxsCalled func() error + AddTxFeeFromBlockCalled func(tx data.TransactionHandler) +} + +func (ut *UnsignedTxHandlerMock) AddRewardTxFromBlock(tx data.TransactionHandler) { + if ut.AddTxFeeFromBlockCalled == nil { + return + } + + ut.AddTxFeeFromBlockCalled(tx) +} + +func (ut *UnsignedTxHandlerMock) CleanProcessedUTxs() { + if ut.CleanProcessedUtxsCalled == nil { + return + } + + ut.CleanProcessedUtxsCalled() +} + +func (ut *UnsignedTxHandlerMock) 
ProcessTransactionFee(cost *big.Int) { + if ut.ProcessTransactionFeeCalled == nil { + return + } + + ut.ProcessTransactionFeeCalled(cost) +} + +func (ut *UnsignedTxHandlerMock) CreateAllUTxs() []data.TransactionHandler { + if ut.CreateAllUTxsCalled == nil { + return nil + } + return ut.CreateAllUTxsCalled() +} + +func (ut *UnsignedTxHandlerMock) VerifyCreatedUTxs() error { + if ut.VerifyCreatedUTxsCalled == nil { + return nil + } + return ut.VerifyCreatedUTxsCalled() +} + +// IsInterfaceNil returns true if there is no value under the interface +func (ut *UnsignedTxHandlerMock) IsInterfaceNil() bool { + if ut == nil { + return true + } + return false +} diff --git a/process/rewardTransaction/export_test.go b/process/rewardTransaction/export_test.go new file mode 100644 index 00000000000..301fd02f5f8 --- /dev/null +++ b/process/rewardTransaction/export_test.go @@ -0,0 +1,21 @@ +package rewardTransaction + +import ( + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" +) + +// Hasher will return the hasher of InterceptedRewardTransaction for using in test files +func (irt *InterceptedRewardTransaction) Hasher() hashing.Hasher { + return irt.hasher +} + +// Marshalizer will return the marshalizer of RewardTxInterceptor for using in test files +func (rti *RewardTxInterceptor) Marshalizer() marshal.Marshalizer { + return rti.marshalizer +} + +// BroadcastCallbackHandler will call the broadcast callback handler of RewardTxInterceptor for using in test files +func (rti *RewardTxInterceptor) BroadcastCallbackHandler(buffToSend []byte) { + rti.broadcastCallbackHandler(buffToSend) +} diff --git a/process/rewardTransaction/interceptedRewardTransaction.go b/process/rewardTransaction/interceptedRewardTransaction.go new file mode 100644 index 00000000000..eb4cc1157df --- /dev/null +++ b/process/rewardTransaction/interceptedRewardTransaction.go @@ -0,0 +1,149 @@ +package rewardTransaction + +import ( + "math/big" + + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/sharding" +) + +// InterceptedRewardTransaction holds and manages a transaction based struct with extended functionality +type InterceptedRewardTransaction struct { + rTx *rewardTx.RewardTx + marshalizer marshal.Marshalizer + hasher hashing.Hasher + addrConv state.AddressConverter + coordinator sharding.Coordinator + hash []byte + rcvShard uint32 + sndShard uint32 + isAddressedToOtherShards bool +} + +// NewInterceptedRewardTransaction returns a new instance of InterceptedRewardTransaction +func NewInterceptedRewardTransaction( + rewardTxBuff []byte, + marshalizer marshal.Marshalizer, + hasher hashing.Hasher, + addrConv state.AddressConverter, + coordinator sharding.Coordinator, +) (*InterceptedRewardTransaction, error) { + + if rewardTxBuff == nil { + return nil, process.ErrNilBuffer + } + if marshalizer == nil || marshalizer.IsInterfaceNil() { + return nil, process.ErrNilMarshalizer + } + if hasher == nil || hasher.IsInterfaceNil() { + return nil, process.ErrNilHasher + } + if addrConv == nil || addrConv.IsInterfaceNil() { + return nil, process.ErrNilAddressConverter + } + if coordinator == nil || coordinator.IsInterfaceNil() { + return nil, process.ErrNilShardCoordinator + } + + rTx := &rewardTx.RewardTx{} + err := 
marshalizer.Unmarshal(rTx, rewardTxBuff) + if err != nil { + return nil, err + } + + inRewardTx := &InterceptedRewardTransaction{ + rTx: rTx, + marshalizer: marshalizer, + hasher: hasher, + addrConv: addrConv, + coordinator: coordinator, + } + + err = inRewardTx.processFields(rewardTxBuff) + if err != nil { + return nil, err + } + + err = inRewardTx.integrity() + if err != nil { + return nil, err + } + + err = inRewardTx.verifyIfNotarized(inRewardTx.hash) + if err != nil { + return nil, err + } + + return inRewardTx, nil +} + +func (inRTx *InterceptedRewardTransaction) processFields(rewardTxBuff []byte) error { + inRTx.hash = inRTx.hasher.Compute(string(rewardTxBuff)) + + rcvAddr, err := inRTx.addrConv.CreateAddressFromPublicKeyBytes(inRTx.rTx.RcvAddr) + if err != nil { + return process.ErrInvalidRcvAddr + } + + inRTx.rcvShard = inRTx.coordinator.ComputeId(rcvAddr) + inRTx.sndShard = inRTx.rTx.ShardId + + inRTx.isAddressedToOtherShards = inRTx.rcvShard != inRTx.coordinator.SelfId() && + inRTx.sndShard != inRTx.coordinator.SelfId() + + return nil +} + +// integrity checks for not nil fields and negative value +func (inRTx *InterceptedRewardTransaction) integrity() error { + if len(inRTx.rTx.RcvAddr) == 0 { + return process.ErrNilRcvAddr + } + + if inRTx.rTx.Value == nil { + return process.ErrNilValue + } + + if inRTx.rTx.Value.Cmp(big.NewInt(0)) < 0 { + return process.ErrNegativeValue + } + + return nil +} + +// verifyIfNotarized checks if the rewardTx was already notarized +func (inRTx *InterceptedRewardTransaction) verifyIfNotarized(rTxBuff []byte) error { + // TODO: implement this for flood protection purposes + // could verify if the epoch/round is behind last committed metachain block + return nil +} + +// RcvShard returns the receiver shard +func (inRTx *InterceptedRewardTransaction) RcvShard() uint32 { + return inRTx.rcvShard +} + +// SndShard returns the sender shard +func (inRTx *InterceptedRewardTransaction) SndShard() uint32 { + return inRTx.sndShard +} + +// IsAddressedToOtherShards returns true if this transaction is not meant to be processed by the node from this shard +func (inRTx *InterceptedRewardTransaction) IsAddressedToOtherShards() bool { + return inRTx.isAddressedToOtherShards +} + +// RewardTransaction returns the reward transaction pointer that actually holds the data +func (inRTx *InterceptedRewardTransaction) RewardTransaction() data.TransactionHandler { + return inRTx.rTx +} + +// Hash gets the hash of this transaction +func (inRTx *InterceptedRewardTransaction) Hash() []byte { + return inRTx.hash +} diff --git a/process/rewardTransaction/interceptedRewardTransaction_test.go b/process/rewardTransaction/interceptedRewardTransaction_test.go new file mode 100644 index 00000000000..ea1858af2c5 --- /dev/null +++ b/process/rewardTransaction/interceptedRewardTransaction_test.go @@ -0,0 +1,149 @@ +package rewardTransaction_test + +import ( + "math/big" + "testing" + + "github.com/ElrondNetwork/elrond-go/data/rewardTx" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/mock" + "github.com/ElrondNetwork/elrond-go/process/rewardTransaction" + "github.com/stretchr/testify/assert" +) + +func TestNewInterceptedRewardTransaction_NilTxBuffShouldErr(t *testing.T) { + t.Parallel() + + irt, err := rewardTransaction.NewInterceptedRewardTransaction( + nil, + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + &mock.AddressConverterMock{}, + mock.NewMultiShardsCoordinatorMock(3)) + + assert.Nil(t, 
irt) + assert.Equal(t, process.ErrNilBuffer, err) +} + +func TestNewInterceptedRewardTransaction_NilMarshalizerShouldErr(t *testing.T) { + t.Parallel() + + txBuff := []byte("tx") + irt, err := rewardTransaction.NewInterceptedRewardTransaction( + txBuff, + nil, + &mock.HasherMock{}, + &mock.AddressConverterMock{}, + mock.NewMultiShardsCoordinatorMock(3)) + + assert.Nil(t, irt) + assert.Equal(t, process.ErrNilMarshalizer, err) +} + +func TestNewInterceptedRewardTransaction_NilHasherShouldErr(t *testing.T) { + t.Parallel() + + txBuff := []byte("tx") + irt, err := rewardTransaction.NewInterceptedRewardTransaction( + txBuff, + &mock.MarshalizerMock{}, + nil, + &mock.AddressConverterMock{}, + mock.NewMultiShardsCoordinatorMock(3)) + + assert.Nil(t, irt) + assert.Equal(t, process.ErrNilHasher, err) +} + +func TestNewInterceptedRewardTransaction_NilAddressConverterShouldErr(t *testing.T) { + t.Parallel() + + txBuff := []byte("tx") + irt, err := rewardTransaction.NewInterceptedRewardTransaction( + txBuff, + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + nil, + mock.NewMultiShardsCoordinatorMock(3)) + + assert.Nil(t, irt) + assert.Equal(t, process.ErrNilAddressConverter, err) +} + +func TestNewInterceptedRewardTransaction_NilShardCoordinatorShouldErr(t *testing.T) { + t.Parallel() + + txBuff := []byte("tx") + irt, err := rewardTransaction.NewInterceptedRewardTransaction( + txBuff, + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + &mock.AddressConverterMock{}, + nil) + + assert.Nil(t, irt) + assert.Equal(t, process.ErrNilShardCoordinator, err) +} + +func TestNewInterceptedRewardTransaction_OkValsShouldWork(t *testing.T) { + t.Parallel() + + rewTx := rewardTx.RewardTx{ + Round: 0, + Epoch: 0, + Value: new(big.Int).SetInt64(100), + RcvAddr: []byte("receiver"), + ShardId: 0, + } + + marshalizer := &mock.MarshalizerMock{} + txBuff, _ := marshalizer.Marshal(rewTx) + irt, err := rewardTransaction.NewInterceptedRewardTransaction( + txBuff, + marshalizer, + &mock.HasherMock{}, + &mock.AddressConverterMock{}, + mock.NewMultiShardsCoordinatorMock(3)) + + assert.NotNil(t, irt) + assert.Nil(t, err) +} + +func TestNewInterceptedRewardTransaction_TestGetters(t *testing.T) { + t.Parallel() + + shardId := uint32(0) + rewTx := rewardTx.RewardTx{ + Round: 0, + Epoch: 0, + Value: new(big.Int).SetInt64(100), + RcvAddr: []byte("receiver"), + ShardId: shardId, + } + + marshalizer := &mock.MarshalizerMock{} + shardCoord := mock.NewMultiShardsCoordinatorMock(3) + shardCoord.ComputeIdCalled = func(address state.AddressContainer) uint32 { + return shardId + } + + txBuff, _ := marshalizer.Marshal(rewTx) + irt, err := rewardTransaction.NewInterceptedRewardTransaction( + txBuff, + marshalizer, + &mock.HasherMock{}, + &mock.AddressConverterMock{}, + shardCoord) + + assert.NotNil(t, irt) + assert.Nil(t, err) + + assert.Equal(t, shardId, irt.RcvShard()) + assert.Equal(t, shardId, irt.SndShard()) + assert.Equal(t, &rewTx, irt.RewardTransaction()) + assert.False(t, irt.IsAddressedToOtherShards()) + + txHash := irt.Hasher().Compute(string(txBuff)) + assert.Equal(t, txHash, irt.Hash()) +} diff --git a/process/rewardTransaction/interceptor.go b/process/rewardTransaction/interceptor.go new file mode 100644 index 00000000000..2707c4ca34d --- /dev/null +++ b/process/rewardTransaction/interceptor.go @@ -0,0 +1,151 @@ +package rewardTransaction + +import ( + "github.com/ElrondNetwork/elrond-go/core/logger" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + 
"github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/p2p" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/storage" +) + +var log = logger.DefaultLogger() + +// RewardTxInterceptor is used for intercepting reward transactions and storing them into a datapool +type RewardTxInterceptor struct { + marshalizer marshal.Marshalizer + rewardTxPool dataRetriever.ShardedDataCacherNotifier + rewardTxStorer storage.Storer + addrConverter state.AddressConverter + hasher hashing.Hasher + shardCoordinator sharding.Coordinator + broadcastCallbackHandler func(buffToSend []byte) +} + +// NewRewardTxInterceptor hooks a new interceptor for reward transactions +func NewRewardTxInterceptor( + marshalizer marshal.Marshalizer, + rewardTxPool dataRetriever.ShardedDataCacherNotifier, + rewardTxStorer storage.Storer, + addrConverter state.AddressConverter, + hasher hashing.Hasher, + shardCoordinator sharding.Coordinator, +) (*RewardTxInterceptor, error) { + + if marshalizer == nil || marshalizer.IsInterfaceNil() { + return nil, process.ErrNilMarshalizer + } + if rewardTxPool == nil || rewardTxPool.IsInterfaceNil() { + return nil, process.ErrNilRewardTxDataPool + } + if rewardTxStorer == nil || rewardTxStorer.IsInterfaceNil() { + return nil, process.ErrNilRewardsTxStorage + } + if addrConverter == nil || addrConverter.IsInterfaceNil() { + return nil, process.ErrNilAddressConverter + } + if hasher == nil || hasher.IsInterfaceNil() { + return nil, process.ErrNilHasher + } + if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { + return nil, process.ErrNilShardCoordinator + } + + rewardTxIntercept := &RewardTxInterceptor{ + marshalizer: marshalizer, + rewardTxPool: rewardTxPool, + rewardTxStorer: rewardTxStorer, + hasher: hasher, + addrConverter: addrConverter, + shardCoordinator: shardCoordinator, + } + + return rewardTxIntercept, nil +} + +// ProcessReceivedMessage will be the callback func from the p2p.Messenger and will be called each time a new message was received +// (for the topic this validator was registered to) +func (rti *RewardTxInterceptor) ProcessReceivedMessage(message p2p.MessageP2P) error { + if message == nil || message.IsInterfaceNil() { + return process.ErrNilMessage + } + + if message.Data() == nil { + return process.ErrNilDataToProcess + } + + rewardTxsBuff := make([][]byte, 0) + err := rti.marshalizer.Unmarshal(&rewardTxsBuff, message.Data()) + if err != nil { + return err + } + if len(rewardTxsBuff) == 0 { + return process.ErrNoRewardTransactionInMessage + } + + filteredRTxBuffs := make([][]byte, 0) + lastErrEncountered := error(nil) + for _, rewardTxBuff := range rewardTxsBuff { + rewardTxIntercepted, err := NewInterceptedRewardTransaction( + rewardTxBuff, + rti.marshalizer, + rti.hasher, + rti.addrConverter, + rti.shardCoordinator) + + if err != nil { + lastErrEncountered = err + continue + } + + //reward tx is validated, add it to filtered out reward txs + filteredRTxBuffs = append(filteredRTxBuffs, rewardTxBuff) + if rewardTxIntercepted.IsAddressedToOtherShards() { + log.Debug("intercepted reward transaction is for other shards") + + continue + } + + go rti.processRewardTransaction(rewardTxIntercepted) + } + + var buffToSend []byte + filteredOutRTxsNeedToBeSend := len(filteredRTxBuffs) > 0 && lastErrEncountered != nil + if filteredOutRTxsNeedToBeSend { + buffToSend, err = rti.marshalizer.Marshal(filteredRTxBuffs) + if 
err != nil { + return err + } + } + + if rti.broadcastCallbackHandler != nil { + rti.broadcastCallbackHandler(buffToSend) + } + + return lastErrEncountered +} + +// SetBroadcastCallback sets the callback method to send filtered out message +func (rti *RewardTxInterceptor) SetBroadcastCallback(callback func(buffToSend []byte)) { + rti.broadcastCallbackHandler = callback +} + +func (rti *RewardTxInterceptor) processRewardTransaction(rTx *InterceptedRewardTransaction) { + cacherIdentifier := process.ShardCacherIdentifier(rTx.SndShard(), rTx.RcvShard()) + rti.rewardTxPool.AddData( + rTx.Hash(), + rTx.RewardTransaction(), + cacherIdentifier, + ) +} + +// IsInterfaceNil returns true if there is no value under the interface +func (rti *RewardTxInterceptor) IsInterfaceNil() bool { + if rti == nil { + return true + } + return false +} diff --git a/process/rewardTransaction/interceptor_test.go b/process/rewardTransaction/interceptor_test.go new file mode 100644 index 00000000000..5374d7cc68c --- /dev/null +++ b/process/rewardTransaction/interceptor_test.go @@ -0,0 +1,280 @@ +package rewardTransaction_test + +import ( + "encoding/json" + "math/big" + "sync/atomic" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go/data/rewardTx" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/mock" + "github.com/ElrondNetwork/elrond-go/process/rewardTransaction" + "github.com/stretchr/testify/assert" +) + +func TestNewRewardTxInterceptor_NilMarshalizerShouldErr(t *testing.T) { + t.Parallel() + + rti, err := rewardTransaction.NewRewardTxInterceptor( + nil, + &mock.ShardedDataStub{}, + &mock.StorerStub{}, + &mock.AddressConverterMock{}, + &mock.HasherMock{}, + mock.NewMultiShardsCoordinatorMock(3)) + + assert.Nil(t, rti) + assert.Equal(t, process.ErrNilMarshalizer, err) +} + +func TestNewRewardTxInterceptor_NilRewardTxPoolShouldErr(t *testing.T) { + t.Parallel() + + rti, err := rewardTransaction.NewRewardTxInterceptor( + &mock.MarshalizerMock{}, + nil, + &mock.StorerStub{}, + &mock.AddressConverterMock{}, + &mock.HasherMock{}, + mock.NewMultiShardsCoordinatorMock(3)) + + assert.Nil(t, rti) + assert.Equal(t, process.ErrNilRewardTxDataPool, err) +} + +func TestNewRewardTxInterceptor_NilRewardTxStorerShouldErr(t *testing.T) { + t.Parallel() + + rti, err := rewardTransaction.NewRewardTxInterceptor( + &mock.MarshalizerMock{}, + &mock.ShardedDataStub{}, + nil, + &mock.AddressConverterMock{}, + &mock.HasherMock{}, + mock.NewMultiShardsCoordinatorMock(3)) + + assert.Nil(t, rti) + assert.Equal(t, process.ErrNilRewardsTxStorage, err) +} + +func TestNewRewardTxInterceptor_NilAddressConverterShouldErr(t *testing.T) { + t.Parallel() + + rti, err := rewardTransaction.NewRewardTxInterceptor( + &mock.MarshalizerMock{}, + &mock.ShardedDataStub{}, + &mock.StorerStub{}, + nil, + &mock.HasherMock{}, + mock.NewMultiShardsCoordinatorMock(3)) + + assert.Nil(t, rti) + assert.Equal(t, process.ErrNilAddressConverter, err) +} + +func TestNewRewardTxInterceptor_NilHasherShouldErr(t *testing.T) { + t.Parallel() + + rti, err := rewardTransaction.NewRewardTxInterceptor( + &mock.MarshalizerMock{}, + &mock.ShardedDataStub{}, + &mock.StorerStub{}, + &mock.AddressConverterMock{}, + nil, + mock.NewMultiShardsCoordinatorMock(3)) + + assert.Nil(t, rti) + assert.Equal(t, process.ErrNilHasher, err) +} + +func TestNewRewardTxInterceptor_NilShardCoordinatorShouldErr(t *testing.T) { + t.Parallel() + + rti, err := rewardTransaction.NewRewardTxInterceptor( + 
&mock.MarshalizerMock{}, + &mock.ShardedDataStub{}, + &mock.StorerStub{}, + &mock.AddressConverterMock{}, + &mock.HasherMock{}, + nil) + + assert.Nil(t, rti) + assert.Equal(t, process.ErrNilShardCoordinator, err) +} + +func TestNewRewardTxInterceptor_OkValsShouldWork(t *testing.T) { + t.Parallel() + + rti, err := rewardTransaction.NewRewardTxInterceptor( + &mock.MarshalizerMock{}, + &mock.ShardedDataStub{}, + &mock.StorerStub{}, + &mock.AddressConverterMock{}, + &mock.HasherMock{}, + mock.NewMultiShardsCoordinatorMock(3)) + + assert.NotNil(t, rti) + assert.Nil(t, err) + assert.False(t, rti.IsInterfaceNil()) +} + +func TestRewardTxInterceptor_ProcessReceivedMessageNilMessageShouldErr(t *testing.T) { + t.Parallel() + + rti, _ := rewardTransaction.NewRewardTxInterceptor( + &mock.MarshalizerMock{}, + &mock.ShardedDataStub{}, + &mock.StorerStub{}, + &mock.AddressConverterMock{}, + &mock.HasherMock{}, + mock.NewMultiShardsCoordinatorMock(3)) + + err := rti.ProcessReceivedMessage(nil) + assert.Equal(t, process.ErrNilMessage, err) +} + +func TestRewardTxInterceptor_ProcessReceivedMessageNilDataShouldErr(t *testing.T) { + t.Parallel() + + rti, _ := rewardTransaction.NewRewardTxInterceptor( + &mock.MarshalizerMock{}, + &mock.ShardedDataStub{}, + &mock.StorerStub{}, + &mock.AddressConverterMock{}, + &mock.HasherMock{}, + mock.NewMultiShardsCoordinatorMock(3)) + + message := &mock.P2PMessageMock{ + DataField: nil, + } + + err := rti.ProcessReceivedMessage(message) + assert.Equal(t, process.ErrNilDataToProcess, err) +} + +func TestRewardTxInterceptor_ProcessReceivedMessageIntraShardShouldWork(t *testing.T) { + t.Parallel() + + wasCalled := int32(0) + rti, _ := rewardTransaction.NewRewardTxInterceptor( + &mock.MarshalizerMock{}, + &mock.ShardedDataStub{ + AddDataCalled: func(key []byte, data interface{}, cacheId string) { + atomic.StoreInt32(&wasCalled, 1) + }, + }, + &mock.StorerStub{}, + &mock.AddressConverterMock{}, + &mock.HasherMock{}, + mock.NewMultiShardsCoordinatorMock(3)) + + rewardTx1 := rewardTx.RewardTx{ + Round: 1, + Epoch: 0, + Value: new(big.Int).SetInt64(157), + RcvAddr: []byte("rcvr1"), + ShardId: 0, + } + rewardTxBytes1, _ := rti.Marshalizer().Marshal(rewardTx1) + + rewardTx2 := rewardTx.RewardTx{ + Round: 0, + Epoch: 1, + Value: new(big.Int).SetInt64(157), + RcvAddr: []byte("rcvr2"), + ShardId: 0, + } + rewardTxBytes2, _ := rti.Marshalizer().Marshal(rewardTx2) + + var rewardTxsSlice [][]byte + rewardTxsSlice = append(rewardTxsSlice, rewardTxBytes1, rewardTxBytes2) + rewardTxsBuff, _ := json.Marshal(rewardTxsSlice) + + message := &mock.P2PMessageMock{ + DataField: rewardTxsBuff, + } + + err := rti.ProcessReceivedMessage(message) + time.Sleep(20 * time.Millisecond) + + assert.Nil(t, err) + assert.Equal(t, int32(1), atomic.LoadInt32(&wasCalled)) +} + +func TestRewardTxInterceptor_ProcessReceivedMessageCrossShardShouldNotAdd(t *testing.T) { + t.Parallel() + + wasCalled := int32(0) + shardCoord := mock.NewMultiShardsCoordinatorMock(3) + shardCoord.ComputeIdCalled = func(address state.AddressContainer) uint32 { + return uint32(1) + } + rti, _ := rewardTransaction.NewRewardTxInterceptor( + &mock.MarshalizerMock{}, + &mock.ShardedDataStub{ + AddDataCalled: func(key []byte, data interface{}, cacheId string) { + atomic.StoreInt32(&wasCalled, 1) + }, + }, + &mock.StorerStub{}, + &mock.AddressConverterMock{}, + &mock.HasherMock{}, + shardCoord) + + rewardTx1 := rewardTx.RewardTx{ + Round: 1, + Epoch: 0, + Value: new(big.Int).SetInt64(157), + RcvAddr: []byte("rcvr1"), + ShardId: 1, + } + 
rewardTxBytes1, _ := rti.Marshalizer().Marshal(rewardTx1) + + rewardTx2 := rewardTx.RewardTx{ + Round: 0, + Epoch: 1, + Value: new(big.Int).SetInt64(157), + RcvAddr: []byte("rcvr2"), + ShardId: 1, + } + rewardTxBytes2, _ := rti.Marshalizer().Marshal(rewardTx2) + + var rewardTxsSlice [][]byte + rewardTxsSlice = append(rewardTxsSlice, rewardTxBytes1, rewardTxBytes2) + rewardTxsBuff, _ := json.Marshal(rewardTxsSlice) + + message := &mock.P2PMessageMock{ + DataField: rewardTxsBuff, + } + + err := rti.ProcessReceivedMessage(message) + time.Sleep(20 * time.Millisecond) + assert.Nil(t, err) + // check that AddData was not called, as tx is cross shard + assert.Equal(t, int32(0), atomic.LoadInt32(&wasCalled)) +} + +func TestRewardTxInterceptor_SetBroadcastCallback(t *testing.T) { + t.Parallel() + + rti, _ := rewardTransaction.NewRewardTxInterceptor( + &mock.MarshalizerMock{}, + &mock.ShardedDataStub{}, + &mock.StorerStub{}, + &mock.AddressConverterMock{}, + &mock.HasherMock{}, + mock.NewMultiShardsCoordinatorMock(3)) + + bytesToSend := []byte("test") + var bytesToReceive []byte + rti.SetBroadcastCallback(func(buffToSend []byte) { + bytesToReceive = buffToSend + return + }) + + rti.BroadcastCallbackHandler(bytesToSend) + assert.Equal(t, bytesToSend, bytesToReceive) +} diff --git a/process/rewardTransaction/process.go b/process/rewardTransaction/process.go new file mode 100644 index 00000000000..9f0002826ff --- /dev/null +++ b/process/rewardTransaction/process.go @@ -0,0 +1,115 @@ +package rewardTransaction + +import ( + "math/big" + "sync" + + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/sharding" +) + +type rewardTxProcessor struct { + accounts state.AccountsAdapter + adrConv state.AddressConverter + shardCoordinator sharding.Coordinator + + mutRewardsForwarder sync.Mutex + rewardTxForwarder process.IntermediateTransactionHandler +} + +// NewRewardTxProcessor creates a rewardTxProcessor instance +func NewRewardTxProcessor( + accountsDB state.AccountsAdapter, + adrConv state.AddressConverter, + coordinator sharding.Coordinator, + rewardTxForwarder process.IntermediateTransactionHandler, +) (*rewardTxProcessor, error) { + if accountsDB == nil { + return nil, process.ErrNilAccountsAdapter + } + if adrConv == nil { + return nil, process.ErrNilAddressConverter + } + if coordinator == nil { + return nil, process.ErrNilShardCoordinator + } + if rewardTxForwarder == nil { + return nil, process.ErrNilIntermediateTransactionHandler + } + + return &rewardTxProcessor{ + accounts: accountsDB, + adrConv: adrConv, + shardCoordinator: coordinator, + rewardTxForwarder: rewardTxForwarder, + }, nil +} + +func (rtp *rewardTxProcessor) getAccountFromAddress(address []byte) (state.AccountHandler, error) { + addr, err := rtp.adrConv.CreateAddressFromPublicKeyBytes(address) + if err != nil { + return nil, err + } + + shardForCurrentNode := rtp.shardCoordinator.SelfId() + shardForAddr := rtp.shardCoordinator.ComputeId(addr) + if shardForCurrentNode != shardForAddr { + return nil, nil + } + + acnt, err := rtp.accounts.GetAccountWithJournal(addr) + if err != nil { + return nil, err + } + + return acnt, nil +} + +// ProcessRewardTransaction updates the account state from the reward transaction +func (rtp *rewardTxProcessor) ProcessRewardTransaction(rTx *rewardTx.RewardTx) error { + if rTx == nil { + return process.ErrNilRewardTransaction + } + 
if rTx.Value == nil { + return process.ErrNilValueFromRewardTransaction + } + + rtp.mutRewardsForwarder.Lock() + err := rtp.rewardTxForwarder.AddIntermediateTransactions([]data.TransactionHandler{rTx}) + rtp.mutRewardsForwarder.Unlock() + if err != nil { + return err + } + + accHandler, err := rtp.getAccountFromAddress(rTx.RcvAddr) + if err != nil { + return err + } + + if accHandler == nil || accHandler.IsInterfaceNil() { + // address from different shard + return nil + } + + rewardAcc, ok := accHandler.(*state.Account) + if !ok { + return process.ErrWrongTypeAssertion + } + + operation := big.NewInt(0) + operation = operation.Add(rTx.Value, rewardAcc.Balance) + err = rewardAcc.SetBalanceWithJournal(operation) + + return err +} + +// IsInterfaceNil returns true if there is no value under the interface +func (rtp *rewardTxProcessor) IsInterfaceNil() bool { + if rtp == nil { + return true + } + return false +} diff --git a/process/rewardTransaction/process_test.go b/process/rewardTransaction/process_test.go new file mode 100644 index 00000000000..800940e1431 --- /dev/null +++ b/process/rewardTransaction/process_test.go @@ -0,0 +1,287 @@ +package rewardTransaction_test + +import ( + "errors" + "math/big" + "testing" + + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/mock" + "github.com/ElrondNetwork/elrond-go/process/rewardTransaction" + "github.com/stretchr/testify/assert" +) + +func TestNewRewardTxProcessor_NilAccountsDbShouldErr(t *testing.T) { + t.Parallel() + + rtp, err := rewardTransaction.NewRewardTxProcessor( + nil, + &mock.AddressConverterMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.IntermediateTransactionHandlerMock{}) + + assert.Nil(t, rtp) + assert.Equal(t, process.ErrNilAccountsAdapter, err) +} + +func TestNewRewardTxProcessor_NilAddressConverterShouldErr(t *testing.T) { + t.Parallel() + + rtp, err := rewardTransaction.NewRewardTxProcessor( + &mock.AccountsStub{}, + nil, + mock.NewMultiShardsCoordinatorMock(3), + &mock.IntermediateTransactionHandlerMock{}) + + assert.Nil(t, rtp) + assert.Equal(t, process.ErrNilAddressConverter, err) +} + +func TestNewRewardTxProcessor_NilShardCoordinatorShouldErr(t *testing.T) { + t.Parallel() + + rtp, err := rewardTransaction.NewRewardTxProcessor( + &mock.AccountsStub{}, + &mock.AddressConverterMock{}, + nil, + &mock.IntermediateTransactionHandlerMock{}) + + assert.Nil(t, rtp) + assert.Equal(t, process.ErrNilShardCoordinator, err) +} + +func TestNewRewardTxProcessor_NilRewardTxForwarderShouldErr(t *testing.T) { + t.Parallel() + + rtp, err := rewardTransaction.NewRewardTxProcessor( + &mock.AccountsStub{}, + &mock.AddressConverterMock{}, + mock.NewMultiShardsCoordinatorMock(3), + nil) + + assert.Nil(t, rtp) + assert.Equal(t, process.ErrNilIntermediateTransactionHandler, err) +} + +func TestNewRewardTxProcessor_OkValsShouldWork(t *testing.T) { + t.Parallel() + + rtp, err := rewardTransaction.NewRewardTxProcessor( + &mock.AccountsStub{}, + &mock.AddressConverterMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.IntermediateTransactionHandlerMock{}) + + assert.NotNil(t, rtp) + assert.Nil(t, err) + assert.False(t, rtp.IsInterfaceNil()) +} + +func TestRewardTxProcessor_ProcessRewardTransactionNilTxShouldErr(t *testing.T) { + t.Parallel() + + rtp, _ := rewardTransaction.NewRewardTxProcessor( + &mock.AccountsStub{}, + &mock.AddressConverterMock{}, 
+ mock.NewMultiShardsCoordinatorMock(3), + &mock.IntermediateTransactionHandlerMock{}) + + err := rtp.ProcessRewardTransaction(nil) + assert.Equal(t, process.ErrNilRewardTransaction, err) +} + +func TestRewardTxProcessor_ProcessRewardTransactionNilTxValueShouldErr(t *testing.T) { + t.Parallel() + + rtp, _ := rewardTransaction.NewRewardTxProcessor( + &mock.AccountsStub{}, + &mock.AddressConverterMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.IntermediateTransactionHandlerMock{}) + + rwdTx := rewardTx.RewardTx{Value: nil} + err := rtp.ProcessRewardTransaction(&rwdTx) + assert.Equal(t, process.ErrNilValueFromRewardTransaction, err) +} + +func TestRewardTxProcessor_ProcessRewardTransactionCannotCreateAddressShouldErr(t *testing.T) { + t.Parallel() + + expectedErr := errors.New("cannot create address") + rtp, _ := rewardTransaction.NewRewardTxProcessor( + &mock.AccountsStub{}, + &mock.AddressConverterStub{ + CreateAddressFromPublicKeyBytesCalled: func(pubKey []byte) (state.AddressContainer, error) { + return nil, expectedErr + }, + }, + mock.NewMultiShardsCoordinatorMock(3), + &mock.IntermediateTransactionHandlerMock{}) + + rwdTx := rewardTx.RewardTx{ + Round: 0, + Epoch: 0, + Value: new(big.Int).SetInt64(100), + RcvAddr: []byte("rcvr"), + ShardId: 0, + } + + err := rtp.ProcessRewardTransaction(&rwdTx) + assert.Equal(t, expectedErr, err) +} + +func TestRewardTxProcessor_ProcessRewardTransactionAddressNotInNodesShardShouldNotExecute(t *testing.T) { + t.Parallel() + + getAccountWithJournalWasCalled := false + shardCoord := mock.NewMultiShardsCoordinatorMock(3) + shardCoord.ComputeIdCalled = func(address state.AddressContainer) uint32 { + return uint32(5) + } + rtp, _ := rewardTransaction.NewRewardTxProcessor( + &mock.AccountsStub{ + GetAccountWithJournalCalled: func(addressContainer state.AddressContainer) (state.AccountHandler, error) { + getAccountWithJournalWasCalled = true + return nil, nil + }, + }, + &mock.AddressConverterMock{}, + shardCoord, + &mock.IntermediateTransactionHandlerMock{}) + + rwdTx := rewardTx.RewardTx{ + Round: 0, + Epoch: 0, + Value: new(big.Int).SetInt64(100), + RcvAddr: []byte("rcvr"), + ShardId: 0, + } + + err := rtp.ProcessRewardTransaction(&rwdTx) + assert.Nil(t, err) + // account should not be requested as the address is not in node's shard + assert.False(t, getAccountWithJournalWasCalled) +} + +func TestRewardTxProcessor_ProcessRewardTransactionCannotGetAccountShouldErr(t *testing.T) { + t.Parallel() + + expectedErr := errors.New("cannot get account") + rtp, _ := rewardTransaction.NewRewardTxProcessor( + &mock.AccountsStub{ + GetAccountWithJournalCalled: func(addressContainer state.AddressContainer) (state.AccountHandler, error) { + return nil, expectedErr + }, + }, + &mock.AddressConverterMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.IntermediateTransactionHandlerMock{}) + + rwdTx := rewardTx.RewardTx{ + Round: 0, + Epoch: 0, + Value: new(big.Int).SetInt64(100), + RcvAddr: []byte("rcvr"), + ShardId: 0, + } + + err := rtp.ProcessRewardTransaction(&rwdTx) + assert.Equal(t, expectedErr, err) +} + +func TestRewardTxProcessor_ProcessRewardTransactionCannotAddIntermediateTxsShouldErr(t *testing.T) { + t.Parallel() + + expectedErr := errors.New("cannot add intermediate transactions") + rtp, _ := rewardTransaction.NewRewardTxProcessor( + &mock.AccountsStub{}, + &mock.AddressConverterMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.IntermediateTransactionHandlerMock{ + AddIntermediateTransactionsCalled: func(txs []data.TransactionHandler) error 
{ + return expectedErr + }, + }) + + rwdTx := rewardTx.RewardTx{ + Round: 0, + Epoch: 0, + Value: new(big.Int).SetInt64(100), + RcvAddr: []byte("rcvr"), + ShardId: 0, + } + + err := rtp.ProcessRewardTransaction(&rwdTx) + assert.Equal(t, expectedErr, err) +} + +func TestRewardTxProcessor_ProcessRewardTransactionWrongTypeAssertionAccountHolderShouldErr(t *testing.T) { + t.Parallel() + + accountsDb := &mock.AccountsStub{ + GetAccountWithJournalCalled: func(addressContainer state.AddressContainer) (state.AccountHandler, error) { + return mock.NewAccountWrapMock(addressContainer, &mock.AccountTrackerStub{}), nil + }, + } + + rtp, _ := rewardTransaction.NewRewardTxProcessor( + accountsDb, + &mock.AddressConverterMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.IntermediateTransactionHandlerMock{}) + + rwdTx := rewardTx.RewardTx{ + Round: 0, + Epoch: 0, + Value: new(big.Int).SetInt64(100), + RcvAddr: []byte("rcvr"), + ShardId: 0, + } + + err := rtp.ProcessRewardTransaction(&rwdTx) + assert.Equal(t, process.ErrWrongTypeAssertion, err) +} + +func TestRewardTxProcessor_ProcessRewardTransactionShouldWork(t *testing.T) { + t.Parallel() + + journalizeWasCalled := false + saveAccountWasCalled := false + + accountsDb := &mock.AccountsStub{ + GetAccountWithJournalCalled: func(addressContainer state.AddressContainer) (state.AccountHandler, error) { + ats := &mock.AccountTrackerStub{ + JournalizeCalled: func(entry state.JournalEntry) { + journalizeWasCalled = true + }, + SaveAccountCalled: func(accountHandler state.AccountHandler) error { + saveAccountWasCalled = true + return nil + }, + } + return state.NewAccount(addressContainer, ats) + }, + } + + rtp, _ := rewardTransaction.NewRewardTxProcessor( + accountsDb, + &mock.AddressConverterMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.IntermediateTransactionHandlerMock{}) + + rwdTx := rewardTx.RewardTx{ + Round: 0, + Epoch: 0, + Value: new(big.Int).SetInt64(100), + RcvAddr: []byte("rcvr"), + ShardId: 0, + } + + err := rtp.ProcessRewardTransaction(&rwdTx) + assert.Nil(t, err) + assert.True(t, journalizeWasCalled) + assert.True(t, saveAccountWasCalled) +} diff --git a/process/smartContract/export_test.go b/process/smartContract/export_test.go index 5ea1d1bcad3..d22eaef1f23 100644 --- a/process/smartContract/export_test.go +++ b/process/smartContract/export_test.go @@ -22,11 +22,21 @@ func (sc *scProcessor) CreateVMInput(tx *transaction.Transaction) (*vmcommon.VMI return sc.createVMInput(tx) } -func (sc *scProcessor) ProcessVMOutput(vmOutput *vmcommon.VMOutput, tx *transaction.Transaction, acntSnd state.AccountHandler, round uint64) ([]data.TransactionHandler, error) { +func (sc *scProcessor) ProcessVMOutput( + vmOutput *vmcommon.VMOutput, + tx *transaction.Transaction, + acntSnd state.AccountHandler, + round uint64, +) ([]data.TransactionHandler, *big.Int, error) { return sc.processVMOutput(vmOutput, tx, acntSnd, round) } -func (sc *scProcessor) RefundGasToSender(gasRefund *big.Int, tx *transaction.Transaction, txHash []byte, acntSnd state.AccountHandler) (*smartContractResult.SmartContractResult, error) { +func (sc *scProcessor) RefundGasToSender( + gasRefund *big.Int, + tx *transaction.Transaction, + txHash []byte, + acntSnd state.AccountHandler, +) (*smartContractResult.SmartContractResult, *big.Int, error) { return sc.refundGasToSender(gasRefund, tx, txHash, acntSnd) } diff --git a/process/smartContract/process.go b/process/smartContract/process.go index 2dc1d262b3a..8506fada4f0 100644 --- a/process/smartContract/process.go +++ 
b/process/smartContract/process.go @@ -17,7 +17,7 @@ import ( "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/smartContract/hooks" "github.com/ElrondNetwork/elrond-go/sharding" - "github.com/ElrondNetwork/elrond-vm-common" + vmcommon "github.com/ElrondNetwork/elrond-vm-common" ) type scExecutionState struct { @@ -41,6 +41,7 @@ type scProcessor struct { mapExecState map[uint64]scExecutionState scrForwarder process.IntermediateTransactionHandler + txFeeHandler process.TransactionFeeHandler } var log = logger.DefaultLogger() @@ -56,6 +57,7 @@ func NewSmartContractProcessor( adrConv state.AddressConverter, coordinator sharding.Coordinator, scrForwarder process.IntermediateTransactionHandler, + txFeeHandler process.TransactionFeeHandler, ) (*scProcessor, error) { if vmContainer == nil || vmContainer.IsInterfaceNil() { return nil, process.ErrNoVM @@ -84,6 +86,9 @@ func NewSmartContractProcessor( if scrForwarder == nil || scrForwarder.IsInterfaceNil() { return nil, process.ErrNilIntermediateTransactionHandler } + if txFeeHandler == nil { + return nil, process.ErrNilUnsignedTxHandler + } return &scProcessor{ vmContainer: vmContainer, @@ -95,6 +100,7 @@ func NewSmartContractProcessor( adrConv: adrConv, shardCoordinator: coordinator, scrForwarder: scrForwarder, + txFeeHandler: txFeeHandler, mapExecState: make(map[uint64]scExecutionState)}, nil } @@ -186,7 +192,7 @@ func (sc *scProcessor) ExecuteSmartContractTransaction( } // VM is formally verified and the output is correct - crossTxs, err := sc.processVMOutput(vmOutput, tx, acntSnd, round) + crossTxs, consumedFee, err := sc.processVMOutput(vmOutput, tx, acntSnd, round) if err != nil { return err } @@ -196,6 +202,8 @@ func (sc *scProcessor) ExecuteSmartContractTransaction( return err } + sc.txFeeHandler.ProcessTransactionFee(consumedFee) + return nil } @@ -281,7 +289,7 @@ func (sc *scProcessor) DeploySmartContract( return err } - crossTxs, err := sc.processVMOutput(vmOutput, tx, acntSnd, round) + crossTxs, consumedFee, err := sc.processVMOutput(vmOutput, tx, acntSnd, round) if err != nil { return err } @@ -291,6 +299,8 @@ func (sc *scProcessor) DeploySmartContract( return err } + sc.txFeeHandler.ProcessTransactionFee(consumedFee) + return nil } @@ -397,6 +407,10 @@ func (sc *scProcessor) processSCPayment(tx *transaction.Transaction, acntSnd sta return process.ErrWrongTypeAssertion } + if stAcc.Balance.Cmp(cost) < 0 { + return process.ErrInsufficientFunds + } + totalCost := big.NewInt(0) err = stAcc.SetBalanceWithJournal(totalCost.Sub(stAcc.Balance, cost)) if err != nil { @@ -411,23 +425,23 @@ func (sc *scProcessor) processVMOutput( tx *transaction.Transaction, acntSnd state.AccountHandler, round uint64, -) ([]data.TransactionHandler, error) { +) ([]data.TransactionHandler, *big.Int, error) { if vmOutput == nil { - return nil, process.ErrNilVMOutput + return nil, nil, process.ErrNilVMOutput } if tx == nil { - return nil, process.ErrNilTransaction + return nil, nil, process.ErrNilTransaction } txBytes, err := sc.marshalizer.Marshal(tx) if err != nil { - return nil, err + return nil, nil, err } txHash := sc.hasher.Compute(string(txBytes)) err = sc.saveSCOutputToCurrentState(vmOutput, round, txHash) if err != nil { - return nil, err + return nil, nil, err } if vmOutput.ReturnCode != vmcommon.Ok { @@ -439,38 +453,38 @@ func (sc *scProcessor) processVMOutput( stAcc, ok := acntSnd.(*state.Account) if !ok { - return nil, process.ErrWrongTypeAssertion + return nil, nil, process.ErrWrongTypeAssertion } totalCost := 
big.NewInt(0) err = stAcc.SetBalanceWithJournal(totalCost.Add(stAcc.Balance, tx.Value)) if err != nil { - return nil, err + return nil, nil, err } - return nil, nil + return nil, nil, nil } err = sc.processSCOutputAccounts(vmOutput.OutputAccounts, tx) if err != nil { - return nil, err + return nil, nil, err } scrTxs, err := sc.createSCRTransactions(vmOutput.OutputAccounts, tx, txHash) if err != nil { - return nil, err + return nil, nil, err } acntSnd, err = sc.reloadLocalSndAccount(acntSnd) if err != nil { - return nil, err + return nil, nil, err } totalGasRefund := big.NewInt(0) totalGasRefund = totalGasRefund.Add(vmOutput.GasRefund, vmOutput.GasRemaining) - scrRefund, err := sc.refundGasToSender(totalGasRefund, tx, txHash, acntSnd) + scrRefund, consumedFee, err := sc.refundGasToSender(totalGasRefund, tx, txHash, acntSnd) if err != nil { - return nil, err + return nil, nil, err } if scrRefund != nil { @@ -479,15 +493,15 @@ func (sc *scProcessor) processVMOutput( err = sc.deleteAccounts(vmOutput.DeletedAccounts) if err != nil { - return nil, err + return nil, nil, err } err = sc.processTouchedAccounts(vmOutput.TouchedAccounts) if err != nil { - return nil, err + return nil, nil, err } - return scrTxs, nil + return scrTxs, consumedFee, nil } // reloadLocalSndAccount will reload from current account state the sender account @@ -545,13 +559,16 @@ func (sc *scProcessor) refundGasToSender( tx *transaction.Transaction, txHash []byte, acntSnd state.AccountHandler, -) (*smartContractResult.SmartContractResult, error) { +) (*smartContractResult.SmartContractResult, *big.Int, error) { + consumedFee := big.NewInt(0) + consumedFee = consumedFee.Mul(big.NewInt(0).SetUint64(tx.GasPrice), big.NewInt(0).SetUint64(tx.GasLimit)) if gasRefund == nil || gasRefund.Cmp(big.NewInt(0)) <= 0 { - return nil, nil + return nil, consumedFee, nil } refundErd := big.NewInt(0) refundErd = refundErd.Mul(gasRefund, big.NewInt(int64(tx.GasPrice))) + consumedFee = consumedFee.Sub(consumedFee, refundErd) scTx := &smartContractResult.SmartContractResult{} scTx.Value = refundErd @@ -561,21 +578,21 @@ func (sc *scProcessor) refundGasToSender( scTx.TxHash = txHash if acntSnd == nil || acntSnd.IsInterfaceNil() { - return scTx, nil + return scTx, consumedFee, nil } stAcc, ok := acntSnd.(*state.Account) if !ok { - return nil, process.ErrWrongTypeAssertion + return nil, nil, process.ErrWrongTypeAssertion } newBalance := big.NewInt(0).Add(stAcc.Balance, refundErd) err := stAcc.SetBalanceWithJournal(newBalance) if err != nil { - return nil, err + return nil, nil, err } - return scTx, nil + return scTx, consumedFee, nil } // save account changes in state from vmOutput - protected by VM - every output can be treated as is. 
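Note on the fee accounting introduced in the processVMOutput/refundGasToSender changes above: the sender is effectively charged gasPrice*gasLimit up front, and the ERD value of the refunded gas (gasRefund + gasRemaining) is credited back, so the consumedFee handed to txFeeHandler.ProcessTransactionFee is the difference. The following is a minimal standalone sketch of that arithmetic only; the function and variable names are illustrative and are not part of this patch.

package main

import (
	"fmt"
	"math/big"
)

// computeConsumedFee mirrors the fee arithmetic sketched above: charge
// gasPrice*gasLimit up front, give back gasRefund*gasPrice, and report the
// remainder as the consumed fee. Illustrative helper, not code from the patch.
func computeConsumedFee(gasPrice uint64, gasLimit uint64, gasRefund *big.Int) (*big.Int, *big.Int) {
	consumedFee := big.NewInt(0).Mul(
		big.NewInt(0).SetUint64(gasPrice),
		big.NewInt(0).SetUint64(gasLimit),
	)

	refundErd := big.NewInt(0)
	if gasRefund == nil || gasRefund.Sign() <= 0 {
		// nothing to refund: the whole up-front charge is kept as fee
		return consumedFee, refundErd
	}

	// refunded gas converted back to value at the transaction's gas price
	refundErd = refundErd.Mul(gasRefund, big.NewInt(0).SetUint64(gasPrice))
	consumedFee = consumedFee.Sub(consumedFee, refundErd)

	return consumedFee, refundErd
}

func main() {
	// e.g. gasPrice=10, gasLimit=100, 40 units of gas refunded:
	// charged 1000, refunded 400, consumed fee 600
	consumed, refund := computeConsumedFee(10, 100, big.NewInt(40))
	fmt.Println(consumed, refund) // 600 400
}

With gasPrice=10, gasLimit=100 and 40 gas refunded, the sender is charged 1000, receives 400 back, and 600 is reported as the consumed fee, matching the subtraction performed in refundGasToSender.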
diff --git a/process/smartContract/process_test.go b/process/smartContract/process_test.go index 66c947ca098..652a3bda2a2 100644 --- a/process/smartContract/process_test.go +++ b/process/smartContract/process_test.go @@ -10,6 +10,7 @@ import ( "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/data/transaction" "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/coordinator" "github.com/ElrondNetwork/elrond-go/process/mock" "github.com/ElrondNetwork/elrond-vm-common" "github.com/pkg/errors" @@ -65,7 +66,9 @@ func TestNewSmartContractProcessorNilVM(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.Nil(t, sc) assert.Equal(t, process.ErrNoVM, err) @@ -83,7 +86,9 @@ func TestNewSmartContractProcessorNilArgsParser(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.Nil(t, sc) assert.Equal(t, process.ErrNilArgumentParser, err) @@ -101,7 +106,9 @@ func TestNewSmartContractProcessorNilHasher(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.Nil(t, sc) assert.Equal(t, process.ErrNilHasher, err) @@ -119,7 +126,9 @@ func TestNewSmartContractProcessorNilMarshalizer(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.Nil(t, sc) assert.Equal(t, process.ErrNilMarshalizer, err) @@ -137,7 +146,9 @@ func TestNewSmartContractProcessorNilAccountsDB(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.Nil(t, sc) assert.Equal(t, process.ErrNilAccountsAdapter, err) @@ -155,7 +166,9 @@ func TestNewSmartContractProcessorNilAdrConv(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, nil, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.Nil(t, sc) assert.Equal(t, process.ErrNilAddressConverter, err) @@ -173,7 +186,9 @@ func TestNewSmartContractProcessorNilShardCoordinator(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, nil, - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.Nil(t, sc) assert.Equal(t, process.ErrNilShardCoordinator, err) @@ -191,7 +206,9 @@ func TestNewSmartContractProcessorNilFakeAccountsHandler(t *testing.T) { nil, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.Nil(t, sc) assert.Equal(t, 
process.ErrNilTemporaryAccountsHandler, err) @@ -209,7 +226,9 @@ func TestNewSmartContractProcessor_NilIntermediateMock(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - nil) + nil, + &mock.UnsignedTxHandlerMock{}, + ) assert.Nil(t, sc) assert.Equal(t, process.ErrNilIntermediateTransactionHandler, err) @@ -227,7 +246,9 @@ func TestNewSmartContractProcessor(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -245,7 +266,9 @@ func TestScProcessor_ComputeTransactionTypeNil(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -266,7 +289,9 @@ func TestScProcessor_ComputeTransactionTypeNilTx(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -294,7 +319,9 @@ func TestScProcessor_ComputeTransactionTypeErrWrongTransaction(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -313,18 +340,18 @@ func TestScProcessor_ComputeTransactionTypeScDeployment(t *testing.T) { t.Parallel() addressConverter := &mock.AddressConverterMock{} - sc, err := NewSmartContractProcessor( - &mock.VMContainerMock{}, - &mock.ArgumentParserMock{}, - &mock.HasherMock{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{}, - &mock.TemporaryAccountsHandlerMock{}, + + txTypeHandler, err := coordinator.NewTxTypeHandler( addressConverter, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.AccountsStub{ + GetAccountWithJournalCalled: func(addressContainer state.AddressContainer) (handler state.AccountHandler, e error) { + return nil, nil + }, + }, + ) - assert.NotNil(t, sc) + assert.NotNil(t, txTypeHandler) assert.Nil(t, err) tx := &transaction.Transaction{} @@ -334,7 +361,7 @@ func TestScProcessor_ComputeTransactionTypeScDeployment(t *testing.T) { tx.Data = "data" tx.Value = big.NewInt(45) - txType, err := sc.ComputeTransactionType(tx) + txType, err := txTypeHandler.ComputeTransactionType(tx) assert.Nil(t, err) assert.Equal(t, process.SCDeployment, txType) } @@ -353,23 +380,20 @@ func TestScProcessor_ComputeTransactionTypeScInvoking(t *testing.T) { _, acntDst := createAccounts(tx) acntDst.SetCode([]byte("code")) - sc, err := NewSmartContractProcessor( - &mock.VMContainerMock{}, - &mock.ArgumentParserMock{}, - &mock.HasherMock{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{GetAccountWithJournalCalled: func(addressContainer state.AddressContainer) (handler state.AccountHandler, e error) { - return acntDst, nil - }}, - &mock.TemporaryAccountsHandlerMock{}, + txTypeHandler, err := coordinator.NewTxTypeHandler( addrConverter, mock.NewMultiShardsCoordinatorMock(5), - 
&mock.IntermediateTransactionHandlerMock{}) + &mock.AccountsStub{ + GetAccountWithJournalCalled: func(addressContainer state.AddressContainer) (handler state.AccountHandler, e error) { + return acntDst, nil + }, + }, + ) - assert.NotNil(t, sc) + assert.NotNil(t, txTypeHandler) assert.Nil(t, err) - txType, err := sc.ComputeTransactionType(tx) + txType, err := txTypeHandler.ComputeTransactionType(tx) assert.Nil(t, err) assert.Equal(t, process.SCInvoking, txType) } @@ -387,23 +411,20 @@ func TestScProcessor_ComputeTransactionTypeMoveBalance(t *testing.T) { _, acntDst := createAccounts(tx) - sc, err := NewSmartContractProcessor( - &mock.VMContainerMock{}, - &mock.ArgumentParserMock{}, - &mock.HasherMock{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{GetAccountWithJournalCalled: func(addressContainer state.AddressContainer) (handler state.AccountHandler, e error) { - return acntDst, nil - }}, - &mock.TemporaryAccountsHandlerMock{}, + txTypeHandler, err := coordinator.NewTxTypeHandler( addrConverter, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.AccountsStub{ + GetAccountWithJournalCalled: func(addressContainer state.AddressContainer) (handler state.AccountHandler, e error) { + return acntDst, nil + }, + }, + ) - assert.NotNil(t, sc) + assert.NotNil(t, txTypeHandler) assert.Nil(t, err) - txType, err := sc.ComputeTransactionType(tx) + txType, err := txTypeHandler.ComputeTransactionType(tx) assert.Nil(t, err) assert.Equal(t, process.MoveBalance, txType) } @@ -423,7 +444,9 @@ func TestScProcessor_DeploySmartContractBadParse(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, addrConverter, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -458,7 +481,9 @@ func TestScProcessor_DeploySmartContractRunError(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, addrConverter, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -505,7 +530,9 @@ func TestScProcessor_DeploySmartContractWrongTx(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -537,7 +564,9 @@ func TestScProcessor_DeploySmartContract(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, addrConverter, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -578,7 +607,9 @@ func TestScProcessor_ExecuteSmartContractTransactionNilTx(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -608,7 +639,9 @@ func TestScProcessor_ExecuteSmartContractTransactionNilAccount(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + 
&mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -646,7 +679,9 @@ func TestScProcessor_ExecuteSmartContractTransactionBadParser(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -681,7 +716,9 @@ func TestScProcessor_ExecuteSmartContractTransactionVMRunError(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -722,7 +759,9 @@ func TestScProcessor_ExecuteSmartContractTransaction(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -757,7 +796,9 @@ func TestScProcessor_CreateVMCallInputWrongCode(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -791,7 +832,9 @@ func TestScProcessor_CreateVMCallInput(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -821,7 +864,9 @@ func TestScProcessor_CreateVMDeployInputBadFunction(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -863,7 +908,9 @@ func TestScProcessor_CreateVMDeployInput(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -901,7 +948,9 @@ func TestScProcessor_CreateVMDeployInputNotEnoughArguments(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -932,7 +981,9 @@ func TestScProcessor_CreateVMInputWrongArgument(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -966,7 +1017,9 @@ func TestScProcessor_CreateVMInput(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, 
mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1009,13 +1062,15 @@ func TestScProcessor_processVMOutputNilVMOutput(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) acntSrc, _, tx := createAccountsAndTransaction() - _, err = sc.processVMOutput(nil, tx, acntSrc, 10) + _, _, err = sc.processVMOutput(nil, tx, acntSrc, 10) assert.Equal(t, process.ErrNilVMOutput, err) } @@ -1033,14 +1088,16 @@ func TestScProcessor_processVMOutputNilTx(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) acntSrc, _, _ := createAccountsAndTransaction() vmOutput := &vmcommon.VMOutput{} - _, err = sc.processVMOutput(vmOutput, nil, acntSrc, 10) + _, _, err = sc.processVMOutput(vmOutput, nil, acntSrc, 10) assert.Equal(t, process.ErrNilTransaction, err) } @@ -1058,7 +1115,9 @@ func TestScProcessor_processVMOutputNilSndAcc(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1068,7 +1127,7 @@ func TestScProcessor_processVMOutputNilSndAcc(t *testing.T) { GasRefund: big.NewInt(0), GasRemaining: big.NewInt(0), } - _, err = sc.processVMOutput(vmOutput, tx, nil, 10) + _, _, err = sc.processVMOutput(vmOutput, tx, nil, 10) assert.Nil(t, err) } @@ -1087,7 +1146,9 @@ func TestScProcessor_processVMOutputNilDstAcc(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1103,7 +1164,7 @@ func TestScProcessor_processVMOutputNilDstAcc(t *testing.T) { } tx.Value = big.NewInt(0) - _, err = sc.processVMOutput(vmOutput, tx, acntSnd, 10) + _, _, err = sc.processVMOutput(vmOutput, tx, acntSnd, 10) assert.Nil(t, err) } @@ -1132,7 +1193,9 @@ func TestScProcessor_GetAccountFromAddressAccNotFound(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, addrConv, shardCoordinator, - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1170,7 +1233,9 @@ func TestScProcessor_GetAccountFromAddrFaildAddressConv(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, addrConv, shardCoordinator, - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1208,7 +1273,9 @@ func TestScProcessor_GetAccountFromAddrFailedGetExistingAccount(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, addrConv, shardCoordinator, - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + 
&mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1246,7 +1313,9 @@ func TestScProcessor_GetAccountFromAddrAccNotInShard(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, addrConv, shardCoordinator, - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1285,7 +1354,9 @@ func TestScProcessor_GetAccountFromAddr(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, addrConv, shardCoordinator, - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1325,7 +1396,9 @@ func TestScProcessor_DeleteAccountsFailedAtRemove(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, addrConv, shardCoordinator, - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1365,7 +1438,9 @@ func TestScProcessor_DeleteAccountsNotInShard(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, addrConv, shardCoordinator, - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1410,7 +1485,9 @@ func TestScProcessor_DeleteAccountsInShard(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, addrConv, shardCoordinator, - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1434,7 +1511,9 @@ func TestScProcessor_ProcessSCPaymentAccNotInShardShouldNotReturnError(t *testin &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1464,7 +1543,9 @@ func TestScProcessor_ProcessSCPaymentWrongTypeAssertion(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1486,6 +1567,45 @@ func TestScProcessor_ProcessSCPaymentWrongTypeAssertion(t *testing.T) { assert.Equal(t, process.ErrWrongTypeAssertion, err) } +func TestScProcessor_ProcessSCPaymentNotEnoughBalance(t *testing.T) { + t.Parallel() + + sc, err := NewSmartContractProcessor( + &mock.VMContainerMock{}, + &mock.ArgumentParserMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.AccountsStub{}, + &mock.TemporaryAccountsHandlerMock{}, + &mock.AddressConverterMock{}, + mock.NewMultiShardsCoordinatorMock(5), + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) + + assert.NotNil(t, sc) + assert.Nil(t, err) + + tx := &transaction.Transaction{} + tx.Nonce = 1 + tx.SndAddr = []byte("SRC") + tx.RcvAddr = []byte("DST") + + tx.Value = big.NewInt(45) + tx.GasPrice = 10 + tx.GasLimit = 15 + + acntSrc, _ := createAccounts(tx) + stAcc, _ := acntSrc.(*state.Account) + stAcc.Balance = big.NewInt(45) + + currBalance := acntSrc.(*state.Account).Balance.Uint64() + + err = sc.ProcessSCPayment(tx, acntSrc) + assert.Equal(t, process.ErrInsufficientFunds, err) + assert.Equal(t, currBalance, 
acntSrc.(*state.Account).Balance.Uint64()) +} + func TestScProcessor_ProcessSCPayment(t *testing.T) { t.Parallel() @@ -1498,7 +1618,9 @@ func TestScProcessor_ProcessSCPayment(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1533,7 +1655,9 @@ func TestScProcessor_RefundGasToSenderNilAndZeroRefund(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1552,11 +1676,11 @@ func TestScProcessor_RefundGasToSenderNilAndZeroRefund(t *testing.T) { acntSrc, _ := createAccounts(tx) currBalance := acntSrc.(*state.Account).Balance.Uint64() - _, err = sc.refundGasToSender(nil, tx, txHash, acntSrc) + _, _, err = sc.refundGasToSender(nil, tx, txHash, acntSrc) assert.Nil(t, err) assert.Equal(t, currBalance, acntSrc.(*state.Account).Balance.Uint64()) - _, err = sc.refundGasToSender(big.NewInt(0), tx, txHash, acntSrc) + _, _, err = sc.refundGasToSender(big.NewInt(0), tx, txHash, acntSrc) assert.Nil(t, err) assert.Equal(t, currBalance, acntSrc.(*state.Account).Balance.Uint64()) } @@ -1573,7 +1697,9 @@ func TestScProcessor_RefundGasToSenderAccNotInShard(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1589,17 +1715,19 @@ func TestScProcessor_RefundGasToSenderAccNotInShard(t *testing.T) { txHash := []byte("txHash") acntSrc, _ := createAccounts(tx) - sctx, err := sc.refundGasToSender(big.NewInt(10), tx, txHash, nil) + sctx, consumed, err := sc.refundGasToSender(big.NewInt(10), tx, txHash, nil) assert.Nil(t, err) assert.NotNil(t, sctx) + assert.Equal(t, 0, consumed.Cmp(big.NewInt(0))) acntSrc = nil - sctx, err = sc.refundGasToSender(big.NewInt(10), tx, txHash, acntSrc) + sctx, consumed, err = sc.refundGasToSender(big.NewInt(10), tx, txHash, acntSrc) assert.Nil(t, err) assert.NotNil(t, sctx) + assert.Equal(t, 0, consumed.Cmp(big.NewInt(0))) badAcc := &mock.AccountWrapMock{} - sctx, err = sc.refundGasToSender(big.NewInt(10), tx, txHash, badAcc) + sctx, consumed, err = sc.refundGasToSender(big.NewInt(10), tx, txHash, badAcc) assert.Equal(t, process.ErrWrongTypeAssertion, err) assert.Nil(t, sctx) } @@ -1616,7 +1744,9 @@ func TestScProcessor_RefundGasToSender(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1634,7 +1764,7 @@ func TestScProcessor_RefundGasToSender(t *testing.T) { currBalance := acntSrc.(*state.Account).Balance.Uint64() refundGas := big.NewInt(10) - _, err = sc.refundGasToSender(refundGas, tx, txHash, acntSrc) + _, _, err = sc.refundGasToSender(refundGas, tx, txHash, acntSrc) assert.Nil(t, err) totalRefund := refundGas.Uint64() * tx.GasPrice @@ -1656,11 +1786,13 @@ func TestScProcessor_processVMOutputNilOutput(t *testing.T) { 
&mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) - _, err = sc.ProcessVMOutput(nil, tx, acntSrc, round) + _, _, err = sc.ProcessVMOutput(nil, tx, acntSrc, round) assert.Equal(t, process.ErrNilVMOutput, err) } @@ -1680,12 +1812,14 @@ func TestScProcessor_processVMOutputNilTransaction(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) vmOutput := &vmcommon.VMOutput{} - _, err = sc.ProcessVMOutput(vmOutput, nil, acntSrc, round) + _, _, err = sc.ProcessVMOutput(vmOutput, nil, acntSrc, round) assert.Equal(t, process.ErrNilTransaction, err) } @@ -1706,7 +1840,9 @@ func TestScProcessor_processVMOutput(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1720,7 +1856,7 @@ func TestScProcessor_processVMOutput(t *testing.T) { } tx.Value = big.NewInt(0) - _, err = sc.ProcessVMOutput(vmOutput, tx, acntSrc, round) + _, _, err = sc.ProcessVMOutput(vmOutput, tx, acntSrc, round) assert.Nil(t, err) } @@ -1740,7 +1876,9 @@ func TestScProcessor_processSCOutputAccounts(t *testing.T) { fakeAccountsHandler, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1819,7 +1957,9 @@ func TestScProcessor_processSCOutputAccountsNotInShard(t *testing.T) { fakeAccountsHandler, &mock.AddressConverterMock{}, shardCoordinator, - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1858,7 +1998,9 @@ func TestScProcessor_CreateCrossShardTransactions(t *testing.T) { fakeAccountsHandler, &mock.AddressConverterMock{}, shardCoordinator, - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1901,7 +2043,9 @@ func TestScProcessor_ProcessSmartContractResultNilScr(t *testing.T) { fakeAccountsHandler, &mock.AddressConverterMock{}, shardCoordinator, - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1927,7 +2071,9 @@ func TestScProcessor_ProcessSmartContractResultErrGetAccount(t *testing.T) { fakeAccountsHandler, &mock.AddressConverterMock{}, shardCoordinator, - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1951,7 +2097,9 @@ func TestScProcessor_ProcessSmartContractResultAccNotInShard(t *testing.T) { fakeAccountsHandler, &mock.AddressConverterMock{}, shardCoordinator, - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + 
&mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1980,7 +2128,9 @@ func TestScProcessor_ProcessSmartContractResultBadAccType(t *testing.T) { fakeAccountsHandler, &mock.AddressConverterMock{}, shardCoordinator, - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -2012,7 +2162,9 @@ func TestScProcessor_ProcessSmartContractResultOutputBalanceNil(t *testing.T) { fakeAccountsHandler, &mock.AddressConverterMock{}, shardCoordinator, - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -2050,7 +2202,9 @@ func TestScProcessor_ProcessSmartContractResultWithCode(t *testing.T) { fakeAccountsHandler, &mock.AddressConverterMock{}, shardCoordinator, - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -2092,7 +2246,9 @@ func TestScProcessor_ProcessSmartContractResultWithData(t *testing.T) { fakeAccountsHandler, &mock.AddressConverterMock{}, shardCoordinator, - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) diff --git a/process/sync/baseSync.go index dc801fb3c3a..11a219970d0 100644 --- a/process/sync/baseSync.go +++ b/process/sync/baseSync.go @@ -74,6 +74,7 @@ type baseBootstrap struct { chStopSync chan bool waitTime time.Duration + mutNodeSynched sync.RWMutex isNodeSynchronized bool hasLastBlock bool roundIndex int64 @@ -432,6 +433,9 @@ func (boot *baseBootstrap) waitForHeaderHash() error { // is not synchronized yet and it has to continue the bootstrapping mechanism, otherwise the node is already // synched and it can participate to the consensus, if it is in the jobDone group of this rounder func (boot *baseBootstrap) ShouldSync() bool { + boot.mutNodeSynched.Lock() + defer boot.mutNodeSynched.Unlock() + isNodeSynchronizedInCurrentRound := boot.roundIndex == boot.rounder.Index() && boot.isNodeSynchronized if isNodeSynchronizedInCurrentRound { return false diff --git a/process/transaction/export_test.go index edd3309d0bc..8593e051f1f 100644 --- a/process/transaction/export_test.go +++ b/process/transaction/export_test.go @@ -29,3 +29,15 @@ func (txProc *txProcessor) MoveBalances(acntSrc, acntDst *state.Account, value * func (txProc *txProcessor) IncreaseNonce(acntSrc *state.Account) error { return txProc.increaseNonce(acntSrc) } + +func (txProc *txProcessor) SetMinTxFee(fee uint64) { + mutTxFee.Lock() + minTxFee = fee + mutTxFee.Unlock() +} + +func (txProc *txProcessor) SetMinGasPrice(gasPrice uint64) { + mutTxFee.Lock() + minGasPrice = gasPrice + mutTxFee.Unlock() +} diff --git a/process/transaction/process.go index f7ebc49c800..9a6ca6971ec 100644 --- a/process/transaction/process.go +++ b/process/transaction/process.go @@ -3,6 +3,7 @@ package transaction import ( "bytes" "math/big" + "sync" "github.com/ElrondNetwork/elrond-go/core/logger" "github.com/ElrondNetwork/elrond-go/data/state" @@ -15,6 +16,14 @@ import ( var log = logger.DefaultLogger() +// minGasPrice is the minimal gas price to be paid for any transaction + // TODO: Set minGasPrice and minTxFee to some positive value (TBD) +var 
minGasPrice = uint64(0) + +// minTxFee is the minimal fee to be paid for any transaction +var minTxFee = uint64(0) +var mutTxFee sync.RWMutex + // txProcessor implements TransactionProcessor interface and can modify account states according to a transaction type txProcessor struct { accounts state.AccountsAdapter @@ -22,7 +31,9 @@ type txProcessor struct { hasher hashing.Hasher scProcessor process.SmartContractProcessor marshalizer marshal.Marshalizer + txFeeHandler process.TransactionFeeHandler shardCoordinator sharding.Coordinator + txTypeHandler process.TxTypeHandler } // NewTxProcessor creates a new txProcessor engine @@ -33,6 +44,8 @@ func NewTxProcessor( marshalizer marshal.Marshalizer, shardCoordinator sharding.Coordinator, scProcessor process.SmartContractProcessor, + txFeeHandler process.TransactionFeeHandler, + txTypeHandler process.TxTypeHandler, ) (*txProcessor, error) { if accounts == nil || accounts.IsInterfaceNil() { @@ -53,6 +66,12 @@ func NewTxProcessor( if scProcessor == nil || scProcessor.IsInterfaceNil() { return nil, process.ErrNilSmartContractProcessor } + if txFeeHandler == nil || txFeeHandler.IsInterfaceNil() { + return nil, process.ErrNilUnsignedTxHandler + } + if txTypeHandler == nil || txTypeHandler.IsInterfaceNil() { + return nil, process.ErrNilTxTypeHandler + } return &txProcessor{ accounts: accounts, @@ -61,12 +80,14 @@ func NewTxProcessor( marshalizer: marshalizer, shardCoordinator: shardCoordinator, scProcessor: scProcessor, + txFeeHandler: txFeeHandler, + txTypeHandler: txTypeHandler, }, nil } // ProcessTransaction modifies the account states in respect with the transaction data func (txProc *txProcessor) ProcessTransaction(tx *transaction.Transaction, roundIndex uint64) error { - if tx == nil { + if tx == nil || tx.IsInterfaceNil() { return process.ErrNilTransaction } @@ -85,7 +106,7 @@ func (txProc *txProcessor) ProcessTransaction(tx *transaction.Transaction, round return err } - txType, err := txProc.scProcessor.ComputeTransactionType(tx) + txType, err := txProc.txTypeHandler.ComputeTransactionType(tx) if err != nil { return err } @@ -102,6 +123,38 @@ func (txProc *txProcessor) ProcessTransaction(tx *transaction.Transaction, round return process.ErrWrongTransaction } +func (txProc *txProcessor) processTxFee(tx *transaction.Transaction, acntSnd *state.Account) (*big.Int, error) { + if acntSnd == nil { + return nil, nil + } + + cost := big.NewInt(0) + cost = cost.Mul(big.NewInt(0).SetUint64(tx.GasPrice), big.NewInt(0).SetUint64(tx.GasLimit)) + + txDataLen := int64(len(tx.Data)) + mutTxFee.RLock() + minFee := big.NewInt(0) + minFee = minFee.Mul(big.NewInt(txDataLen), big.NewInt(0).SetUint64(minGasPrice)) + minFee = minFee.Add(minFee, big.NewInt(0).SetUint64(minTxFee)) + mutTxFee.RUnlock() + + if minFee.Cmp(cost) > 0 { + return nil, process.ErrNotEnoughFeeInTransactions + } + + if acntSnd.Balance.Cmp(cost) < 0 { + return nil, process.ErrInsufficientFunds + } + + operation := big.NewInt(0) + err := acntSnd.SetBalanceWithJournal(operation.Sub(acntSnd.Balance, cost)) + if err != nil { + return nil, err + } + + return cost, nil +} + func (txProc *txProcessor) processMoveBalance( tx *transaction.Transaction, adrSrc, adrDst state.AddressContainer, @@ -114,6 +167,11 @@ func (txProc *txProcessor) processMoveBalance( return err } + txFee, err := txProc.processTxFee(tx, acntSrc) + if err != nil { + return err + } + value := tx.Value err = txProc.moveBalances(acntSrc, acntDst, value) @@ -129,6 +187,8 @@ func (txProc *txProcessor) processMoveBalance( } } + 
txProc.txFeeHandler.ProcessTransactionFee(txFee) + return nil } diff --git a/process/transaction/process_test.go b/process/transaction/process_test.go index 7f745b6ec49..014c039646c 100644 --- a/process/transaction/process_test.go +++ b/process/transaction/process_test.go @@ -7,11 +7,12 @@ import ( "math/big" "testing" + "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/data/transaction" "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/coordinator" "github.com/ElrondNetwork/elrond-go/process/mock" - "github.com/ElrondNetwork/elrond-go/process/smartContract" txproc "github.com/ElrondNetwork/elrond-go/process/transaction" "github.com/stretchr/testify/assert" ) @@ -51,6 +52,8 @@ func createTxProcessor() txproc.TxProcessor { &mock.MarshalizerMock{}, mock.NewOneShardCoordinatorMock(), &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, ) return txProc @@ -68,6 +71,8 @@ func TestNewTxProcessor_NilAccountsShouldErr(t *testing.T) { &mock.MarshalizerMock{}, mock.NewOneShardCoordinatorMock(), &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, ) assert.Equal(t, process.ErrNilAccountsAdapter, err) @@ -84,6 +89,8 @@ func TestNewTxProcessor_NilHasherShouldErr(t *testing.T) { &mock.MarshalizerMock{}, mock.NewOneShardCoordinatorMock(), &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, ) assert.Equal(t, process.ErrNilHasher, err) @@ -100,6 +107,8 @@ func TestNewTxProcessor_NilAddressConverterMockShouldErr(t *testing.T) { &mock.MarshalizerMock{}, mock.NewOneShardCoordinatorMock(), &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, ) assert.Equal(t, process.ErrNilAddressConverter, err) @@ -116,6 +125,8 @@ func TestNewTxProcessor_NilMarshalizerMockShouldErr(t *testing.T) { nil, mock.NewOneShardCoordinatorMock(), &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, ) assert.Equal(t, process.ErrNilMarshalizer, err) @@ -132,6 +143,8 @@ func TestNewTxProcessor_NilShardCoordinatorMockShouldErr(t *testing.T) { &mock.MarshalizerMock{}, nil, &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, ) assert.Equal(t, process.ErrNilShardCoordinator, err) @@ -148,12 +161,32 @@ func TestNewTxProcessor_NilSCProcessorShouldErr(t *testing.T) { &mock.MarshalizerMock{}, mock.NewOneShardCoordinatorMock(), nil, + &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, ) assert.Equal(t, process.ErrNilSmartContractProcessor, err) assert.Nil(t, txProc) } +func TestNewTxProcessor_NilTxFeeHandlerShouldErr(t *testing.T) { + t.Parallel() + + txProc, err := txproc.NewTxProcessor( + &mock.AccountsStub{}, + mock.HasherMock{}, + &mock.AddressConverterMock{}, + &mock.MarshalizerMock{}, + mock.NewOneShardCoordinatorMock(), + &mock.SCProcessorMock{}, + nil, + &mock.TxTypeHandlerMock{}, + ) + + assert.Equal(t, process.ErrNilUnsignedTxHandler, err) + assert.Nil(t, txProc) +} + func TestNewTxProcessor_OkValsShouldWork(t *testing.T) { t.Parallel() @@ -164,6 +197,8 @@ func TestNewTxProcessor_OkValsShouldWork(t *testing.T) { &mock.MarshalizerMock{}, mock.NewOneShardCoordinatorMock(), &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, ) assert.Nil(t, err) @@ -184,6 +219,8 @@ func TestTxProcessor_GetAddressErrAddressConvShouldErr(t *testing.T) { &mock.MarshalizerMock{}, 
mock.NewOneShardCoordinatorMock(), &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, ) addressConv.Fail = true @@ -223,6 +260,8 @@ func TestTxProcessor_GetAccountsShouldErrNilAddressContainer(t *testing.T) { &mock.MarshalizerMock{}, mock.NewOneShardCoordinatorMock(), &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, ) adr1 := mock.NewAddressMock([]byte{65}) @@ -247,6 +286,8 @@ func TestTxProcessor_GetAccountsMalfunctionAccountsShouldErr(t *testing.T) { &mock.MarshalizerMock{}, mock.NewOneShardCoordinatorMock(), &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, ) adr1 := mock.NewAddressMock([]byte{65}) @@ -288,6 +329,8 @@ func TestTxProcessor_GetAccountsOkValsSrcShouldWork(t *testing.T) { &mock.MarshalizerMock{}, shardCoordinator, &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, ) shardCoordinator.ComputeIdCalled = func(container state.AddressContainer) uint32 { @@ -338,6 +381,8 @@ func TestTxProcessor_GetAccountsOkValsDsthouldWork(t *testing.T) { &mock.MarshalizerMock{}, shardCoordinator, &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, ) shardCoordinator.ComputeIdCalled = func(container state.AddressContainer) uint32 { @@ -373,6 +418,8 @@ func TestTxProcessor_GetAccountsOkValsShouldWork(t *testing.T) { &mock.MarshalizerMock{}, mock.NewOneShardCoordinatorMock(), &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, ) a1, a2, err := execTx.GetAccounts(adr1, adr2) @@ -399,6 +446,8 @@ func TestTxProcessor_GetSameAccountShouldWork(t *testing.T) { &mock.MarshalizerMock{}, mock.NewOneShardCoordinatorMock(), &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, ) a1, a2, err := execTx.GetAccounts(adr1, adr1) @@ -639,6 +688,8 @@ func TestTxProcessor_ProcessTransactionErrAddressConvShouldErr(t *testing.T) { &mock.MarshalizerMock{}, mock.NewOneShardCoordinatorMock(), &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, ) addressConv.Fail = true @@ -659,6 +710,8 @@ func TestTxProcessor_ProcessTransactionMalfunctionAccountsShouldErr(t *testing.T &mock.MarshalizerMock{}, mock.NewOneShardCoordinatorMock(), &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, ) tx := transaction.Transaction{} @@ -695,6 +748,8 @@ func TestTxProcessor_ProcessCheckNotPassShouldErr(t *testing.T) { &mock.MarshalizerMock{}, mock.NewOneShardCoordinatorMock(), &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, ) err = execTx.ProcessTransaction(&tx, 4) @@ -746,6 +801,8 @@ func TestTxProcessor_ProcessCheckShouldPassWhenAdrSrcIsNotInNodeShard(t *testing &mock.MarshalizerMock{}, shardCoordinator, &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, ) err = execTx.ProcessTransaction(&tx, 4) @@ -789,12 +846,16 @@ func TestTxProcessor_ProcessMoveBalancesShouldWork(t *testing.T) { &mock.MarshalizerMock{}, mock.NewOneShardCoordinatorMock(), &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, ) + execTx.SetMinTxFee(0) + execTx.SetMinGasPrice(0) err = execTx.ProcessTransaction(&tx, 4) assert.Nil(t, err) - assert.Equal(t, 3, journalizeCalled) - assert.Equal(t, 3, saveAccountCalled) + assert.Equal(t, 4, journalizeCalled) + assert.Equal(t, 4, saveAccountCalled) } func 
TestTxProcessor_ProcessMoveBalancesShouldPassWhenAdrSrcIsNotInNodeShard(t *testing.T) { @@ -842,8 +903,12 @@ func TestTxProcessor_ProcessMoveBalancesShouldPassWhenAdrSrcIsNotInNodeShard(t * &mock.MarshalizerMock{}, shardCoordinator, &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, ) + execTx.SetMinTxFee(0) + execTx.SetMinGasPrice(0) err = execTx.ProcessTransaction(&tx, 4) assert.Nil(t, err) assert.Equal(t, 1, journalizeCalled) @@ -895,6 +960,8 @@ func TestTxProcessor_ProcessIncreaseNonceShouldPassWhenAdrSrcIsNotInNodeShard(t &mock.MarshalizerMock{}, shardCoordinator, &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, ) err = execTx.ProcessTransaction(&tx, 4) @@ -942,15 +1009,72 @@ func TestTxProcessor_ProcessOkValsShouldWork(t *testing.T) { &mock.MarshalizerMock{}, mock.NewOneShardCoordinatorMock(), &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, ) + execTx.SetMinTxFee(0) + execTx.SetMinGasPrice(0) + err = execTx.ProcessTransaction(&tx, 4) assert.Nil(t, err) assert.Equal(t, uint64(5), acntSrc.Nonce) assert.Equal(t, big.NewInt(29), acntSrc.Balance) assert.Equal(t, big.NewInt(71), acntDst.Balance) - assert.Equal(t, 3, journalizeCalled) - assert.Equal(t, 3, saveAccountCalled) + assert.Equal(t, 4, journalizeCalled) + assert.Equal(t, 4, saveAccountCalled) +} + +func TestTxProcessor_MoveBalanceWithFeesShouldWork(t *testing.T) { + journalizeCalled := 0 + saveAccountCalled := 0 + tracker := &mock.AccountTrackerStub{ + JournalizeCalled: func(entry state.JournalEntry) { + journalizeCalled++ + }, + SaveAccountCalled: func(accountHandler state.AccountHandler) error { + saveAccountCalled++ + return nil + }, + } + + tx := transaction.Transaction{} + tx.Nonce = 4 + tx.SndAddr = []byte("SRC") + tx.RcvAddr = []byte("DST") + tx.Value = big.NewInt(61) + tx.GasPrice = 2 + tx.GasLimit = 2 + + acntSrc, err := state.NewAccount(mock.NewAddressMock(tx.SndAddr), tracker) + assert.Nil(t, err) + acntDst, err := state.NewAccount(mock.NewAddressMock(tx.RcvAddr), tracker) + assert.Nil(t, err) + + acntSrc.Nonce = 4 + acntSrc.Balance = big.NewInt(90) + acntDst.Balance = big.NewInt(10) + + accounts := createAccountStub(tx.SndAddr, tx.RcvAddr, acntSrc, acntDst) + + execTx, _ := txproc.NewTxProcessor( + accounts, + mock.HasherMock{}, + &mock.AddressConverterMock{}, + &mock.MarshalizerMock{}, + mock.NewOneShardCoordinatorMock(), + &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, + ) + + err = execTx.ProcessTransaction(&tx, 4) + assert.Nil(t, err) + assert.Equal(t, uint64(5), acntSrc.Nonce) + assert.Equal(t, big.NewInt(25), acntSrc.Balance) + assert.Equal(t, big.NewInt(71), acntDst.Balance) + assert.Equal(t, 4, journalizeCalled) + assert.Equal(t, 4, saveAccountCalled) } func TestTxProcessor_ProcessTransactionScTxShouldWork(t *testing.T) { @@ -975,6 +1099,8 @@ func TestTxProcessor_ProcessTransactionScTxShouldWork(t *testing.T) { tx.SndAddr = []byte("SRC") tx.RcvAddr = generateRandomByteSlice(addrConverter.AddressLen()) tx.Value = big.NewInt(45) + tx.GasPrice = 1 + tx.GasLimit = 1 acntSrc, err := state.NewAccount(mock.NewAddressMock(tx.SndAddr), tracker) assert.Nil(t, err) @@ -982,25 +1108,12 @@ func TestTxProcessor_ProcessTransactionScTxShouldWork(t *testing.T) { acntDst, err := state.NewAccount(mock.NewAddressMock(tx.RcvAddr), tracker) assert.Nil(t, err) - acntSrc.Balance = big.NewInt(45) + acntSrc.Balance = big.NewInt(46) acntDst.SetCode([]byte{65}) accounts := 
createAccountStub(tx.SndAddr, tx.RcvAddr, acntSrc, acntDst) - - scProcessor, err := smartContract.NewSmartContractProcessor( - &mock.VMContainerMock{}, - &mock.ArgumentParserMock{}, - mock.HasherMock{}, - &mock.MarshalizerMock{}, - accounts, - &mock.TemporaryAccountsHandlerMock{}, - addrConverter, - mock.NewOneShardCoordinatorMock(), - &mock.IntermediateTransactionHandlerMock{}, - ) - scProcessorMock := &mock.SCProcessorMock{} - scProcessorMock.ComputeTransactionTypeCalled = scProcessor.ComputeTransactionType + wasCalled := false scProcessorMock.ExecuteSmartContractTransactionCalled = func(tx *transaction.Transaction, acntSrc, acntDst state.AccountHandler, round uint64) error { wasCalled = true @@ -1014,6 +1127,12 @@ func TestTxProcessor_ProcessTransactionScTxShouldWork(t *testing.T) { &mock.MarshalizerMock{}, mock.NewOneShardCoordinatorMock(), scProcessorMock, + &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{ + ComputeTransactionTypeCalled: func(tx data.TransactionHandler) (transactionType process.TransactionType, e error) { + return process.SCInvoking, nil + }, + }, ) err = execTx.ProcessTransaction(&tx, 4) @@ -1055,19 +1174,8 @@ func TestTxProcessor_ProcessTransactionScTxShouldReturnErrWhenExecutionFails(t * accounts := createAccountStub(tx.SndAddr, tx.RcvAddr, acntSrc, acntDst) - scProcessor, err := smartContract.NewSmartContractProcessor( - &mock.VMContainerMock{}, - &mock.ArgumentParserMock{}, - mock.HasherMock{}, - &mock.MarshalizerMock{}, - accounts, - &mock.TemporaryAccountsHandlerMock{}, - addrConverter, - mock.NewOneShardCoordinatorMock(), - &mock.IntermediateTransactionHandlerMock{}) scProcessorMock := &mock.SCProcessorMock{} - scProcessorMock.ComputeTransactionTypeCalled = scProcessor.ComputeTransactionType wasCalled := false scProcessorMock.ExecuteSmartContractTransactionCalled = func(tx *transaction.Transaction, acntSrc, acntDst state.AccountHandler, round uint64) error { wasCalled = true @@ -1081,6 +1189,10 @@ func TestTxProcessor_ProcessTransactionScTxShouldReturnErrWhenExecutionFails(t * &mock.MarshalizerMock{}, mock.NewOneShardCoordinatorMock(), scProcessorMock, + &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{ComputeTransactionTypeCalled: func(tx data.TransactionHandler) (transactionType process.TransactionType, e error) { + return process.SCInvoking, nil + }}, ) err = execTx.ProcessTransaction(&tx, 4) @@ -1132,24 +1244,18 @@ func TestTxProcessor_ProcessTransactionScTxShouldNotBeCalledWhenAdrDstIsNotInNod accounts := createAccountStub(tx.SndAddr, tx.RcvAddr, acntSrc, acntDst) - scProcessor, err := smartContract.NewSmartContractProcessor( - &mock.VMContainerMock{}, - &mock.ArgumentParserMock{}, - mock.HasherMock{}, - &mock.MarshalizerMock{}, - accounts, - &mock.TemporaryAccountsHandlerMock{}, - addrConverter, - shardCoordinator, - &mock.IntermediateTransactionHandlerMock{}) scProcessorMock := &mock.SCProcessorMock{} - scProcessorMock.ComputeTransactionTypeCalled = scProcessor.ComputeTransactionType wasCalled := false scProcessorMock.ExecuteSmartContractTransactionCalled = func(tx *transaction.Transaction, acntSrc, acntDst state.AccountHandler, round uint64) error { wasCalled = true return process.ErrNoVM } + computeType, _ := coordinator.NewTxTypeHandler( + &mock.AddressConverterMock{}, + shardCoordinator, + accounts) + execTx, _ := txproc.NewTxProcessor( accounts, mock.HasherMock{}, @@ -1157,11 +1263,16 @@ func TestTxProcessor_ProcessTransactionScTxShouldNotBeCalledWhenAdrDstIsNotInNod &mock.MarshalizerMock{}, shardCoordinator, scProcessorMock, + 
&mock.UnsignedTxHandlerMock{}, + computeType, ) + execTx.SetMinTxFee(0) + execTx.SetMinGasPrice(0) + err = execTx.ProcessTransaction(&tx, 4) assert.Nil(t, err) assert.False(t, wasCalled) - assert.Equal(t, 2, journalizeCalled) - assert.Equal(t, 2, saveAccountCalled) + assert.Equal(t, 3, journalizeCalled) + assert.Equal(t, 3, saveAccountCalled) } diff --git a/sharding/errors.go b/sharding/errors.go index 5405d196ef2..71820dfa26a 100644 --- a/sharding/errors.go +++ b/sharding/errors.go @@ -13,6 +13,9 @@ var ErrInvalidShardId = errors.New("shard id must be smaller than the total numb // ErrShardIdOutOfRange signals an error when shard id is out of range var ErrShardIdOutOfRange = errors.New("shard id out of range") +// ErrNilPubKey signals that the public key is nil +var ErrNilPubKey = errors.New("nil public key") + // ErrNoPubKeys signals an error when public keys are missing var ErrNoPubKeys = errors.New("no public keys defined") @@ -28,6 +31,9 @@ var ErrNilAddressConverter = errors.New("trying to set nil address converter") // ErrCouldNotParsePubKey signals that a given public key could not be parsed var ErrCouldNotParsePubKey = errors.New("could not parse node's public key") +// ErrCouldNotParseAddress signals that a given address could not be parsed +var ErrCouldNotParseAddress = errors.New("could not parse node's address") + // ErrNegativeOrZeroConsensusGroupSize signals that an invalid consensus group size has been provided var ErrNegativeOrZeroConsensusGroupSize = errors.New("negative or zero consensus group size") @@ -36,3 +42,42 @@ var ErrMinNodesPerShardSmallerThanConsensusSize = errors.New("minimum nodes per // ErrNodesSizeSmallerThanMinNoOfNodes signals that there are not enough nodes defined in genesis file var ErrNodesSizeSmallerThanMinNoOfNodes = errors.New("length of nodes defined is smaller than min nodes per shard required") + +// ErrNilInputNodesMap signals that a nil nodes map was provided +var ErrNilInputNodesMap = errors.New("nil input nodes map") + +// ErrSmallShardEligibleListSize signals that the eligible validators list's size is less than the consensus size +var ErrSmallShardEligibleListSize = errors.New("small shard eligible list size") + +// ErrSmallMetachainEligibleListSize signals that the eligible validators list's size is less than the consensus size +var ErrSmallMetachainEligibleListSize = errors.New("small metachain eligible list size") + +// ErrInvalidConsensusGroupSize signals that the consensus size is invalid (e.g. 
value is negative) +var ErrInvalidConsensusGroupSize = errors.New("invalid consensus group size") + +// ErrEligibleSelectionMismatch signals a mismatch between the eligible list and the group selection bitmap +var ErrEligibleSelectionMismatch = errors.New("invalid eligible validator selection") + +// ErrEligibleTooManySelections signals an invalid selection for consensus group +var ErrEligibleTooManySelections = errors.New("too many selections for consensus group") + +// ErrEligibleTooFewSelections signals an invalid selection for consensus group +var ErrEligibleTooFewSelections = errors.New("too few selections for consensus group") + +// ErrNilRandomness signals that a nil randomness source has been provided +var ErrNilRandomness = errors.New("nil randomness source") + +// ErrNilHasher signals that a nil hasher has been provided +var ErrNilHasher = errors.New("nil hasher") + +// ErrNilStake signals that a nil stake structure has been provided +var ErrNilStake = errors.New("nil stake") + +// ErrNegativeStake signals that the stake is negative +var ErrNegativeStake = errors.New("negative stake") + +// ErrNilAddress signals that the address is nil +var ErrNilAddress = errors.New("nil address") + +// ErrValidatorNotFound signals that the validator has not been found +var ErrValidatorNotFound = errors.New("validator not found") diff --git a/sharding/export_test.go b/sharding/export_test.go index 4d6cd6c7e5e..cf6427cb891 100644 --- a/sharding/export_test.go +++ b/sharding/export_test.go @@ -24,10 +24,14 @@ func (ns *NodesSetup) ProcessMetaChainAssigment() { ns.processMetaChainAssigment() } -func (ns *NodesSetup) CreateInitialNodesPubKeys() { - ns.createInitialNodesPubKeys() +func (ns *NodesSetup) CreateInitialNodesInfo() { + ns.createInitialNodesInfo() } func CommunicationIdentifierBetweenShards(shardId1 uint32, shardId2 uint32) string { return communicationIdentifierBetweenShards(shardId1, shardId2) } + +func (ihgs *indexHashedNodesCoordinator) EligibleList() []Validator { + return ihgs.nodesMap[ihgs.shardId] +} diff --git a/sharding/indexHashedNodesCoordinator.go b/sharding/indexHashedNodesCoordinator.go new file mode 100644 index 00000000000..68c0a5ce50a --- /dev/null +++ b/sharding/indexHashedNodesCoordinator.go @@ -0,0 +1,296 @@ +package sharding + +import ( + "bytes" + "encoding/binary" + "fmt" + "math/big" + + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/hashing" +) + +type indexHashedNodesCoordinator struct { + nbShards uint32 + shardId uint32 + hasher hashing.Hasher + nodesMap map[uint32][]Validator + shardConsensusGroupSize int + metaConsensusGroupSize int +} + +// NewIndexHashedNodesCoordinator creates a new index hashed group selector +func NewIndexHashedNodesCoordinator( + shardConsensusGroupSize int, + metaConsensusGroupSize int, + hasher hashing.Hasher, + shardId uint32, + nbShards uint32, + nodes map[uint32][]Validator, +) (*indexHashedNodesCoordinator, error) { + if shardConsensusGroupSize < 1 || metaConsensusGroupSize < 1 { + return nil, ErrInvalidConsensusGroupSize + } + + if nbShards < 1 { + return nil, ErrInvalidNumberOfShards + } + + if shardId >= nbShards && shardId != MetachainShardId { + return nil, ErrInvalidShardId + } + + if hasher == nil { + return nil, ErrNilHasher + } + + ihgs := &indexHashedNodesCoordinator{ + nbShards: nbShards, + shardId: shardId, + hasher: hasher, + nodesMap: make(map[uint32][]Validator), + shardConsensusGroupSize: shardConsensusGroupSize, + metaConsensusGroupSize: metaConsensusGroupSize, + } + + err := 
ihgs.SetNodesPerShards(nodes)
+	if err != nil {
+		return nil, err
+	}
+
+	return ihgs, nil
+}
+
+// SetNodesPerShards loads the distribution of nodes per shard into the nodes management component
+func (ihgs *indexHashedNodesCoordinator) SetNodesPerShards(nodes map[uint32][]Validator) error {
+	if nodes == nil {
+		return ErrNilInputNodesMap
+	}
+
+	nodesList, ok := nodes[MetachainShardId]
+	if ok && len(nodesList) < ihgs.metaConsensusGroupSize {
+		return ErrSmallMetachainEligibleListSize
+	}
+
+	for shardId := uint32(0); shardId < ihgs.nbShards; shardId++ {
+		nbNodesShard := len(nodes[shardId])
+		if nbNodesShard < ihgs.shardConsensusGroupSize {
+			return ErrSmallShardEligibleListSize
+		}
+	}
+
+	ihgs.nodesMap = nodes
+
+	return nil
+}
+
+// ComputeValidatorsGroup will generate a list of validators based on the eligible list,
+// consensus group size and a randomness source
+// Steps:
+// 1. generate expanded eligible list by multiplying entries from shards' eligible list according to stake and rating -> TODO
+// 2. for each value in [0, consensusGroupSize), compute proposedIndex = Hash( [index as string] CONCAT randomness) % len(eligible list)
+// 3. if the proposed index is already in the temp validator list, then proposedIndex++ (and then % len(eligible list) so as not to
+// exceed the maximum index value permitted by the validator list), and then recheck against the temp validator list until
+// the item at the new proposed index is not found in the list. This new proposed index will be called the checked index
+// 4. the item at the checked index is appended to the temp validator list
+func (ihgs *indexHashedNodesCoordinator) ComputeValidatorsGroup(
+	randomness []byte,
+	round uint64,
+	shardId uint32,
+) (validatorsGroup []Validator, err error) {
+	if randomness == nil {
+		return nil, ErrNilRandomness
+	}
+
+	if shardId >= ihgs.nbShards && shardId != MetachainShardId {
+		return nil, ErrInvalidShardId
+	}
+
+	if ihgs == nil {
+		return nil, ErrNilRandomness
+	}
+
+	tempList := make([]Validator, 0)
+	consensusSize := ihgs.consensusGroupSize(shardId)
+	randomness = []byte(fmt.Sprintf("%d-%s", round, core.ToB64(randomness)))
+
+	// TODO: pre-compute eligible list and update only on rating change.
+ expandedList := ihgs.expandEligibleList(shardId) + lenExpandedList := len(expandedList) + + for startIdx := 0; startIdx < consensusSize; startIdx++ { + proposedIndex := ihgs.computeListIndex(startIdx, lenExpandedList, string(randomness)) + checkedIndex := ihgs.checkIndex(proposedIndex, expandedList, tempList) + tempList = append(tempList, expandedList[checkedIndex]) + } + + return tempList, nil +} + +// GetValidatorWithPublicKey gets the validator with the given public key +func (ihgs *indexHashedNodesCoordinator) GetValidatorWithPublicKey(publicKey []byte) (Validator, uint32, error) { + if publicKey == nil { + return nil, 0, ErrNilPubKey + } + + for shardId, shardEligible := range ihgs.nodesMap { + for i := 0; i < len(shardEligible); i++ { + if bytes.Equal(publicKey, shardEligible[i].PubKey()) { + return shardEligible[i], shardId, nil + } + } + } + + return nil, 0, ErrValidatorNotFound +} + +// GetValidatorsPublicKeys calculates the validators consensus group for a specific shard, randomness and round number, +// returning their public keys +func (ihgs *indexHashedNodesCoordinator) GetValidatorsPublicKeys( + randomness []byte, + round uint64, + shardId uint32, +) ([]string, error) { + consensusNodes, err := ihgs.ComputeValidatorsGroup(randomness, round, shardId) + if err != nil { + return nil, err + } + + pubKeys := make([]string, 0) + + for _, v := range consensusNodes { + pubKeys = append(pubKeys, string(v.PubKey())) + } + + return pubKeys, nil +} + +// GetValidatorsRewardsAddresses calculates the validator consensus group for a specific shard, randomness and round +// number, returning their staking/rewards addresses +func (ihgs *indexHashedNodesCoordinator) GetValidatorsRewardsAddresses( + randomness []byte, + round uint64, + shardId uint32, +) ([]string, error) { + consensusNodes, err := ihgs.ComputeValidatorsGroup(randomness, round, shardId) + if err != nil { + return nil, err + } + + addresses := make([]string, len(consensusNodes)) + for i, v := range consensusNodes { + addresses[i] = string(v.Address()) + } + + return addresses, nil +} + +// GetSelectedPublicKeys returns the stringified public keys of the marked validators in the selection bitmap +// TODO: This function needs to be revised when the requirements are clarified +func (ihgs *indexHashedNodesCoordinator) GetSelectedPublicKeys(selection []byte, shardId uint32) (publicKeys []string, err error) { + if shardId >= ihgs.nbShards && shardId != MetachainShardId { + return nil, ErrInvalidShardId + } + + selectionLen := uint16(len(selection) * 8) // 8 selection bits in each byte + shardEligibleLen := uint16(len(ihgs.nodesMap[shardId])) + invalidSelection := selectionLen < shardEligibleLen + + if invalidSelection { + return nil, ErrEligibleSelectionMismatch + } + + consensusSize := ihgs.consensusGroupSize(shardId) + publicKeys = make([]string, consensusSize) + cnt := 0 + + for i := uint16(0); i < shardEligibleLen; i++ { + isSelected := (selection[i/8] & (1 << (i % 8))) != 0 + + if !isSelected { + continue + } + + publicKeys[cnt] = string(ihgs.nodesMap[shardId][i].PubKey()) + cnt++ + + if cnt > consensusSize { + return nil, ErrEligibleTooManySelections + } + } + + if cnt < consensusSize { + return nil, ErrEligibleTooFewSelections + } + + return publicKeys, nil +} + +func (ihgs *indexHashedNodesCoordinator) expandEligibleList(shardId uint32) []Validator { + //TODO implement an expand eligible list variant + return ihgs.nodesMap[shardId] +} + +// computeListIndex computes a proposed index from expanded eligible list +func (ihgs 
*indexHashedNodesCoordinator) computeListIndex(currentIndex int, lenList int, randomSource string) int { + buffCurrentIndex := make([]byte, 8) + binary.BigEndian.PutUint64(buffCurrentIndex, uint64(currentIndex)) + + indexHash := ihgs.hasher.Compute(string(buffCurrentIndex) + randomSource) + + computedLargeIndex := big.NewInt(0) + computedLargeIndex.SetBytes(indexHash) + lenExpandedEligibleList := big.NewInt(int64(lenList)) + + // computedListIndex = computedLargeIndex % len(expandedEligibleList) + computedListIndex := big.NewInt(0).Mod(computedLargeIndex, lenExpandedEligibleList).Int64() + + return int(computedListIndex) +} + +// checkIndex returns a checked index starting from a proposed index +func (ihgs *indexHashedNodesCoordinator) checkIndex( + proposedIndex int, + eligibleList []Validator, + selectedList []Validator, +) int { + + for { + v := eligibleList[proposedIndex] + + if ihgs.validatorIsInList(v, selectedList) { + proposedIndex++ + proposedIndex = proposedIndex % len(eligibleList) + continue + } + + return proposedIndex + } +} + +// validatorIsInList returns true if a validator has been found in provided list +func (ihgs *indexHashedNodesCoordinator) validatorIsInList(v Validator, list []Validator) bool { + for i := 0; i < len(list); i++ { + if bytes.Equal(v.PubKey(), list[i].PubKey()) { + return true + } + } + + return false +} + +func (ihgs *indexHashedNodesCoordinator) consensusGroupSize(shardId uint32) int { + if shardId == MetachainShardId { + return ihgs.metaConsensusGroupSize + } + + return ihgs.shardConsensusGroupSize +} + +// IsInterfaceNil returns true if there is no value under the interface +func (ihgs *indexHashedNodesCoordinator) IsInterfaceNil() bool { + if ihgs == nil { + return true + } + return false +} diff --git a/sharding/indexHashedNodesCoordinator_test.go b/sharding/indexHashedNodesCoordinator_test.go new file mode 100644 index 00000000000..44cc4f0f587 --- /dev/null +++ b/sharding/indexHashedNodesCoordinator_test.go @@ -0,0 +1,573 @@ +package sharding_test + +import ( + "encoding/binary" + "fmt" + "github.com/ElrondNetwork/elrond-go/core" + "math/big" + "strconv" + "testing" + + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/sharding/mock" + "github.com/stretchr/testify/assert" +) + +func convertBigIntToBytes(value *big.Int) []byte { + return value.Bytes() +} + +func uint64ToBytes(value uint64) []byte { + buff := make([]byte, 8) + + binary.BigEndian.PutUint64(buff, value) + return buff +} + +func createDummyNodesMap() map[uint32][]sharding.Validator { + list := []sharding.Validator{ + mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0"), []byte("addr0")), + mock.NewValidatorMock(big.NewInt(2), 3, []byte("pk1"), []byte("addr1")), + } + + listMeta := []sharding.Validator{ + mock.NewValidatorMock(big.NewInt(1), 1, []byte("pkMeta1"), []byte("addrMeta1")), + mock.NewValidatorMock(big.NewInt(1), 2, []byte("pkMeta2"), []byte("addrMeta2")), + } + + nodesMap := make(map[uint32][]sharding.Validator) + nodesMap[0] = list + nodesMap[sharding.MetachainShardId] = listMeta + + return nodesMap +} + +func genRandSource(round uint64, randomness string) string { + return fmt.Sprintf("%d-%s", round, core.ToB64([]byte(randomness))) +} + +//------- NewIndexHashedNodesCoordinator + +func TestNewIndexHashedGroupSelector_NilHasherShouldErr(t *testing.T) { + t.Parallel() + + nodesMap := createDummyNodesMap() + + ihgs, err := sharding.NewIndexHashedNodesCoordinator( + 1, + 1, + nil, + 0, + 1, + nodesMap, + ) + + assert.Nil(t, ihgs) + 
assert.Equal(t, sharding.ErrNilHasher, err) +} + +func TestNewIndexHashedGroupSelector_InvalidConsensusGroupSizeShouldErr(t *testing.T) { + t.Parallel() + + nodesMap := createDummyNodesMap() + ihgs, err := sharding.NewIndexHashedNodesCoordinator( + 0, + 1, + &mock.HasherMock{}, + 0, + 1, + nodesMap, + ) + + assert.Nil(t, ihgs) + assert.Equal(t, sharding.ErrInvalidConsensusGroupSize, err) +} + +func TestNewIndexHashedGroupSelector_OkValsShouldWork(t *testing.T) { + t.Parallel() + + nodesMap := createDummyNodesMap() + ihgs, err := sharding.NewIndexHashedNodesCoordinator( + 1, + 1, + &mock.HasherMock{}, + 0, + 1, + nodesMap, + ) + + assert.NotNil(t, ihgs) + assert.Nil(t, err) +} + +//------- LoadEligibleList + +func TestIndexHashedGroupSelector_SetNilNodesMapShouldErr(t *testing.T) { + t.Parallel() + + nodesMap := createDummyNodesMap() + ihgs, _ := sharding.NewIndexHashedNodesCoordinator( + 2, + 1, + &mock.HasherMock{}, + 0, + 1, + nodesMap, + ) + + assert.Equal(t, sharding.ErrNilInputNodesMap, ihgs.SetNodesPerShards(nil)) +} + +func TestIndexHashedGroupSelector_OkValShouldWork(t *testing.T) { + t.Parallel() + + nodesMap := createDummyNodesMap() + ihgs, err := sharding.NewIndexHashedNodesCoordinator( + 2, + 1, + &mock.HasherMock{}, + 0, + 1, + nodesMap, + ) + + assert.Nil(t, err) + assert.Equal(t, nodesMap[0], ihgs.EligibleList()) +} + +//------- ComputeValidatorsGroup + +func TestIndexHashedGroupSelector_NewCoordinatorGroup0SizeShouldErr(t *testing.T) { + t.Parallel() + + nodesMap := createDummyNodesMap() + ihgs, err := sharding.NewIndexHashedNodesCoordinator( + 0, + 1, + &mock.HasherMock{}, + 0, + 1, + nodesMap, + ) + + assert.Nil(t, ihgs) + assert.Equal(t, sharding.ErrInvalidConsensusGroupSize, err) +} + +func TestIndexHashedGroupSelector_NewCoordinatorTooFewNodesShouldErr(t *testing.T) { + t.Parallel() + + nodesMap := createDummyNodesMap() + ihgs, err := sharding.NewIndexHashedNodesCoordinator( + 10, + 1, + &mock.HasherMock{}, + 0, + 1, + nodesMap, + ) + + assert.Nil(t, ihgs) + assert.Equal(t, sharding.ErrSmallShardEligibleListSize, err) +} + +func TestIndexHashedGroupSelector_ComputeValidatorsGroupNilRandomnessShouldErr(t *testing.T) { + t.Parallel() + + nodesMap := createDummyNodesMap() + ihgs, _ := sharding.NewIndexHashedNodesCoordinator( + 2, + 1, + &mock.HasherMock{}, + 0, + 1, + nodesMap, + ) + + list2, err := ihgs.ComputeValidatorsGroup(nil, 0, 0) + + assert.Nil(t, list2) + assert.Equal(t, sharding.ErrNilRandomness, err) +} + +func TestIndexHashedGroupSelector_ComputeValidatorsGroupInvalidShardIdShouldErr(t *testing.T) { + t.Parallel() + + nodesMap := createDummyNodesMap() + ihgs, _ := sharding.NewIndexHashedNodesCoordinator( + 2, + 1, + &mock.HasherMock{}, + 0, + 1, + nodesMap, + ) + + list2, err := ihgs.ComputeValidatorsGroup([]byte("radomness"), 0, 5) + + assert.Nil(t, list2) + assert.Equal(t, sharding.ErrInvalidShardId, err) +} + +//------- functionality tests + +func TestIndexHashedGroupSelector_ComputeValidatorsGroup1ValidatorShouldReturnSame(t *testing.T) { + t.Parallel() + + list := []sharding.Validator{ + mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0"), []byte("addr0")), + } + + nodesMap := make(map[uint32][]sharding.Validator) + nodesMap[0] = list + ihgs, _ := sharding.NewIndexHashedNodesCoordinator( + 1, + 1, + &mock.HasherMock{}, + 0, + 1, + nodesMap, + ) + + list2, err := ihgs.ComputeValidatorsGroup([]byte("randomness"), 0, 0) + + assert.Nil(t, err) + assert.Equal(t, list, list2) +} + +func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest2Validators(t 
*testing.T) { + t.Parallel() + + hasher := &mock.HasherStub{} + + randomness := "randomness" + + //this will return the list in order: + //element 0 will be first element + //element 1 will be the second + hasher.ComputeCalled = func(s string) []byte { + if string(uint64ToBytes(0))+randomness == s { + return convertBigIntToBytes(big.NewInt(0)) + } + + if string(uint64ToBytes(1))+randomness == s { + return convertBigIntToBytes(big.NewInt(1)) + } + + return nil + } + + nodesMap := createDummyNodesMap() + ihgs, _ := sharding.NewIndexHashedNodesCoordinator( + 2, + 1, + hasher, + 0, + 1, + nodesMap) + + list2, err := ihgs.ComputeValidatorsGroup([]byte(randomness), 0, 0) + + assert.Nil(t, err) + assert.Equal(t, nodesMap[0], list2) +} + +func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest2ValidatorsRevertOrder(t *testing.T) { + t.Parallel() + + hasher := &mock.HasherStub{} + + randomness := "randomness" + randSource := genRandSource(0, randomness) + + //this will return the list in reverse order: + //element 0 will be the second + //element 1 will be the first + hasher.ComputeCalled = func(s string) []byte { + if string(uint64ToBytes(0))+randSource == s { + return convertBigIntToBytes(big.NewInt(1)) + } + + if string(uint64ToBytes(1))+randSource == s { + return convertBigIntToBytes(big.NewInt(0)) + } + + return nil + } + + validator0 := mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0"), []byte("addr0")) + validator1 := mock.NewValidatorMock(big.NewInt(2), 3, []byte("pk1"), []byte("addr1")) + + list := []sharding.Validator{ + validator0, + validator1, + } + + nodesMap := make(map[uint32][]sharding.Validator) + nodesMap[0] = list + metaNode, _ := sharding.NewValidator(big.NewInt(1), 1, []byte("pubKeyMeta"), []byte("addressMeta")) + nodesMap[sharding.MetachainShardId] = []sharding.Validator{metaNode} + ihgs, _ := sharding.NewIndexHashedNodesCoordinator( + 2, + 1, + hasher, + 0, + 1, + nodesMap, + ) + + list2, err := ihgs.ComputeValidatorsGroup([]byte(randomness), 0, 0) + + assert.Nil(t, err) + assert.Equal(t, validator0, list2[1]) + assert.Equal(t, validator1, list2[0]) +} + +func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest2ValidatorsSameIndex(t *testing.T) { + t.Parallel() + + hasher := &mock.HasherStub{} + + randomness := "randomness" + + //this will return the list in order: + //element 0 will be the first + //element 1 will be the second as the same index is being returned and 0 is already in list + hasher.ComputeCalled = func(s string) []byte { + if string(uint64ToBytes(0))+randomness == s { + return convertBigIntToBytes(big.NewInt(0)) + } + + if string(uint64ToBytes(1))+randomness == s { + return convertBigIntToBytes(big.NewInt(0)) + } + + return nil + } + + nodesMap := createDummyNodesMap() + ihgs, _ := sharding.NewIndexHashedNodesCoordinator( + 2, + 1, + hasher, + 0, + 1, + nodesMap, + ) + + list2, err := ihgs.ComputeValidatorsGroup([]byte(randomness), 0, 0) + + assert.Nil(t, err) + assert.Equal(t, nodesMap[0], list2) +} + +func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest6From10ValidatorsShouldWork(t *testing.T) { + t.Parallel() + + hasher := &mock.HasherStub{} + + randomness := "randomness" + randomnessWithRound := genRandSource(0, randomness) + + //script: + // for index 0, hasher will return 11 which will translate to 1, so 1 is the first element + // for index 1, hasher will return 1 which will translate to 1, 1 is already picked, try the next, 2 is the second element + // for index 2, hasher will return 9 which will translate to 9, 9 is the 3-rd 
element + // for index 3, hasher will return 9 which will translate to 9, 9 is already picked, try the next one, 0 is the 4-th element + // for index 4, hasher will return 0 which will translate to 0, 0 is already picked, 1 is already picked, 2 is already picked, + // 3 is the 4-th element + // for index 5, hasher will return 9 which will translate to 9, so 9, 0, 1, 2, 3 are already picked, 4 is the 5-th element + script := make(map[string]*big.Int) + + script[string(uint64ToBytes(0))+randomnessWithRound] = big.NewInt(11) //will translate to 1, add 1 + script[string(uint64ToBytes(1))+randomnessWithRound] = big.NewInt(1) //will translate to 1, add 2 + script[string(uint64ToBytes(2))+randomnessWithRound] = big.NewInt(9) //will translate to 9, add 9 + script[string(uint64ToBytes(3))+randomnessWithRound] = big.NewInt(9) //will translate to 9, add 0 + script[string(uint64ToBytes(4))+randomnessWithRound] = big.NewInt(0) //will translate to 0, add 3 + script[string(uint64ToBytes(5))+randomnessWithRound] = big.NewInt(9) //will translate to 9, add 4 + + hasher.ComputeCalled = func(s string) []byte { + val, ok := script[s] + + if !ok { + assert.Fail(t, "should have not got here") + } + + return convertBigIntToBytes(val) + } + + validator0 := mock.NewValidatorMock(big.NewInt(1), 1, []byte("pk0"), []byte("addr0")) + validator1 := mock.NewValidatorMock(big.NewInt(2), 2, []byte("pk1"), []byte("addr1")) + validator2 := mock.NewValidatorMock(big.NewInt(3), 3, []byte("pk2"), []byte("addr2")) + validator3 := mock.NewValidatorMock(big.NewInt(4), 4, []byte("pk3"), []byte("addr3")) + validator4 := mock.NewValidatorMock(big.NewInt(5), 5, []byte("pk4"), []byte("addr4")) + validator5 := mock.NewValidatorMock(big.NewInt(6), 6, []byte("pk5"), []byte("addr5")) + validator6 := mock.NewValidatorMock(big.NewInt(7), 7, []byte("pk6"), []byte("addr6")) + validator7 := mock.NewValidatorMock(big.NewInt(8), 8, []byte("pk7"), []byte("addr7")) + validator8 := mock.NewValidatorMock(big.NewInt(9), 9, []byte("pk8"), []byte("addr8")) + validator9 := mock.NewValidatorMock(big.NewInt(10), 10, []byte("pk9"), []byte("addr9")) + + list := []sharding.Validator{ + validator0, + validator1, + validator2, + validator3, + validator4, + validator5, + validator6, + validator7, + validator8, + validator9, + } + + nodesMap := make(map[uint32][]sharding.Validator) + nodesMap[0] = list + validatorMeta, _ := sharding.NewValidator(big.NewInt(1), 1, []byte("pubKeyMeta"), []byte("addressMeta")) + nodesMap[sharding.MetachainShardId] = []sharding.Validator{validatorMeta} + ihgs, _ := sharding.NewIndexHashedNodesCoordinator( + 6, + 1, + hasher, + 0, + 1, + nodesMap, + ) + + list2, err := ihgs.ComputeValidatorsGroup([]byte(randomness), 0, 0) + + assert.Nil(t, err) + assert.Equal(t, 6, len(list2)) + //check order as described in script + assert.Equal(t, validator1, list2[0]) + assert.Equal(t, validator2, list2[1]) + assert.Equal(t, validator9, list2[2]) + assert.Equal(t, validator0, list2[3]) + assert.Equal(t, validator3, list2[4]) + assert.Equal(t, validator4, list2[5]) +} + +func BenchmarkIndexHashedGroupSelector_ComputeValidatorsGroup21of400(b *testing.B) { + consensusGroupSize := 21 + list := make([]sharding.Validator, 0) + + //generate 400 validators + for i := 0; i < 400; i++ { + list = append(list, mock.NewValidatorMock(big.NewInt(0), 0, []byte("pk"+strconv.Itoa(i)), []byte("addr"+strconv.Itoa(i)))) + } + + nodesMap := make(map[uint32][]sharding.Validator) + nodesMap[0] = list + + ihgs, _ := sharding.NewIndexHashedNodesCoordinator( + 
consensusGroupSize, + 1, + &mock.HasherMock{}, + 0, + 1, + nodesMap, + ) + + b.ResetTimer() + + for i := 0; i < b.N; i++ { + randomness := strconv.Itoa(i) + list2, _ := ihgs.ComputeValidatorsGroup([]byte(randomness), 0, 0) + + assert.Equal(b, consensusGroupSize, len(list2)) + } +} + +func TestIndexHashedGroupSelector_GetValidatorWithPublicKeyShouldReturnErrNilPubKey(t *testing.T) { + t.Parallel() + + list := []sharding.Validator{ + mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0"), []byte("addr0")), + } + + nodesMap := make(map[uint32][]sharding.Validator) + nodesMap[0] = list + ihgs, _ := sharding.NewIndexHashedNodesCoordinator( + 1, + 1, + &mock.HasherMock{}, + 0, + 1, + nodesMap, + ) + + _, _, err := ihgs.GetValidatorWithPublicKey(nil) + + assert.Equal(t, sharding.ErrNilPubKey, err) +} + +func TestIndexHashedGroupSelector_GetValidatorWithPublicKeyShouldReturnErrValidatorNotFound(t *testing.T) { + t.Parallel() + + list := []sharding.Validator{ + mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0"), []byte("addr0")), + } + + nodesMap := make(map[uint32][]sharding.Validator) + nodesMap[0] = list + ihgs, _ := sharding.NewIndexHashedNodesCoordinator( + 1, + 1, + &mock.HasherMock{}, + 0, + 1, + nodesMap, + ) + + _, _, err := ihgs.GetValidatorWithPublicKey([]byte("pk1")) + + assert.Equal(t, sharding.ErrValidatorNotFound, err) +} + +func TestIndexHashedGroupSelector_GetValidatorWithPublicKeyShouldWork(t *testing.T) { + t.Parallel() + + list_meta := []sharding.Validator{ + mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0_meta"), []byte("addr0_meta")), + mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk1_meta"), []byte("addr1_meta")), + mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk2_meta"), []byte("addr2_meta")), + } + list_shard0 := []sharding.Validator{ + mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0_shard0"), []byte("addr0_shard0")), + mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk1_shard0"), []byte("addr1_shard0")), + mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk2_shard0"), []byte("addr2_shard0")), + } + list_shard1 := []sharding.Validator{ + mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0_shard1"), []byte("addr0_shard1")), + mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk1_shard1"), []byte("addr1_shard1")), + mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk2_shard1"), []byte("addr2_shard1")), + } + + nodesMap := make(map[uint32][]sharding.Validator) + nodesMap[sharding.MetachainShardId] = list_meta + nodesMap[0] = list_shard0 + nodesMap[1] = list_shard1 + + ihgs, _ := sharding.NewIndexHashedNodesCoordinator( + 1, + 1, + &mock.HasherMock{}, + 0, + 2, + nodesMap, + ) + + validator, shardId, err := ihgs.GetValidatorWithPublicKey([]byte("pk0_meta")) + assert.Nil(t, err) + assert.Equal(t, sharding.MetachainShardId, shardId) + assert.Equal(t, []byte("addr0_meta"), validator.Address()) + + validator, shardId, err = ihgs.GetValidatorWithPublicKey([]byte("pk1_shard0")) + assert.Nil(t, err) + assert.Equal(t, uint32(0), shardId) + assert.Equal(t, []byte("addr1_shard0"), validator.Address()) + + validator, shardId, err = ihgs.GetValidatorWithPublicKey([]byte("pk2_shard1")) + assert.Nil(t, err) + assert.Equal(t, uint32(1), shardId) + assert.Equal(t, []byte("addr2_shard1"), validator.Address()) +} diff --git a/sharding/interface.go b/sharding/interface.go new file mode 100644 index 00000000000..fe4a1753597 --- /dev/null +++ b/sharding/interface.go @@ -0,0 +1,45 @@ +package sharding + +import ( + "math/big" + + "github.com/ElrondNetwork/elrond-go/data/state" +) + +// 
MetachainShardId will be used to identify a shard ID as metachain +const MetachainShardId = uint32(0xFFFFFFFF) + +// Coordinator defines what a shard state coordinator should hold +type Coordinator interface { + NumberOfShards() uint32 + ComputeId(address state.AddressContainer) uint32 + SelfId() uint32 + SameShard(firstAddress, secondAddress state.AddressContainer) bool + CommunicationIdentifier(destShardID uint32) string + IsInterfaceNil() bool +} + +// Validator defines a node that can be allocated to a shard for participation in a consensus group as validator +// or block proposer +type Validator interface { + Stake() *big.Int + Rating() int32 + PubKey() []byte + Address() []byte +} + +// NodesCoordinator defines the behaviour of a struct able to do validator group selection +type NodesCoordinator interface { + PublicKeysSelector + SetNodesPerShards(nodes map[uint32][]Validator) error + ComputeValidatorsGroup(randomness []byte, round uint64, shardId uint32) (validatorsGroup []Validator, err error) + GetValidatorWithPublicKey(publicKey []byte) (validator Validator, shardId uint32, err error) + IsInterfaceNil() bool +} + +// PublicKeysSelector allows retrieval of eligible validators public keys +type PublicKeysSelector interface { + GetSelectedPublicKeys(selection []byte, shardId uint32) (publicKeys []string, err error) + GetValidatorsPublicKeys(randomness []byte, round uint64, shardId uint32) ([]string, error) + GetValidatorsRewardsAddresses(randomness []byte, round uint64, shardId uint32) ([]string, error) +} diff --git a/sharding/mock/hasherMock.go b/sharding/mock/hasherMock.go new file mode 100644 index 00000000000..0218936b5c0 --- /dev/null +++ b/sharding/mock/hasherMock.go @@ -0,0 +1,37 @@ +package mock + +import "crypto/sha256" + +var sha256EmptyHash []byte + +// HasherMock that will be used for testing +type HasherMock struct { +} + +// Compute will output the SHA's equivalent of the input string +func (sha *HasherMock) Compute(s string) []byte { + h := sha256.New() + h.Write([]byte(s)) + return h.Sum(nil) +} + +// EmptyHash will return the equivalent of empty string SHA's +func (sha *HasherMock) EmptyHash() []byte { + if len(sha256EmptyHash) == 0 { + sha256EmptyHash = sha.Compute("") + } + return sha256EmptyHash +} + +// Size return the required size in bytes +func (sha *HasherMock) Size() int { + return sha256.Size +} + +// IsInterfaceNil returns true if there is no value under the interface +func (sha *HasherMock) IsInterfaceNil() bool { + if sha == nil { + return true + } + return false +} diff --git a/sharding/mock/hasherStub.go b/sharding/mock/hasherStub.go new file mode 100644 index 00000000000..216fc9d9909 --- /dev/null +++ b/sharding/mock/hasherStub.go @@ -0,0 +1,30 @@ +package mock + +type HasherStub struct { + ComputeCalled func(s string) []byte + EmptyHashCalled func() []byte + SizeCalled func() int +} + +// Compute will output the SHA's equivalent of the input string +func (hs *HasherStub) Compute(s string) []byte { + return hs.ComputeCalled(s) +} + +// EmptyHash will return the equivalent of empty string SHA's +func (hs *HasherStub) EmptyHash() []byte { + return hs.EmptyHashCalled() +} + +// Size returns the required size in bytes +func (hs *HasherStub) Size() int { + return hs.SizeCalled() +} + +// IsInterfaceNil returns true if there is no value under the interface +func (hs *HasherStub) IsInterfaceNil() bool { + if hs == nil { + return true + } + return false +} diff --git a/sharding/mock/invalidNodesSetupMock.json b/sharding/mock/invalidNodesSetupMock.json index 
1da83d4d669..67458949a71 100644 --- a/sharding/mock/invalidNodesSetupMock.json +++ b/sharding/mock/invalidNodesSetupMock.json @@ -8,10 +8,12 @@ "metaChainMinNodes" : 1, "initialNodes": [ { - "pubkey": "41378f754e2c7b2745208c3ed21b151d297acdc84c3aca00b9e292cf28ec2d444771070157ea7760ed83c26f4fed387d0077e00b563a95825dac2cbc349fc0025ccf774e37b0a98ad9724d30e90f8c29b4091ccb738ed9ffc0573df776ee9ea30b3c038b55e532760ea4a8f152f2a52848020e5cee1cc537f2c2323399723081" + "pubkey": "41378f754e2c7b2745208c3ed21b151d297acdc84c3aca00b9e292cf28ec2d444771070157ea7760ed83c26f4fed387d0077e00b563a95825dac2cbc349fc0025ccf774e37b0a98ad9724d30e90f8c29b4091ccb738ed9ffc0573df776ee9ea30b3c038b55e532760ea4a8f152f2a52848020e5cee1cc537f2c2323399723081", + "address": "9e95a4e46da335a96845b4316251fc1bb197e1b8136d96ecc62bf6604eca9e49" }, { - "pubkey": "52f3bf5c01771f601ec2137e267319ab6716ef6ff5dfddaea48b42d955f631167f2ce19296a202bb8fd174f4e94f8c85f619df85a7f9f8de0f3768e5e6d8c48187b767deccf9829be246aa331aa86d182eb8fa28ea8a3e45d357ed1647a9be020a5569d686253a6f89e9123c7f21f302e82f67d3e3cd69cf267b9910a663ef32" + "pubkey": "52f3bf5c01771f601ec2137e267319ab6716ef6ff5dfddaea48b42d955f631167f2ce19296a202bb8fd174f4e94f8c85f619df85a7f9f8de0f3768e5e6d8c48187b767deccf9829be246aa331aa86d182eb8fa28ea8a3e45d357ed1647a9be020a5569d686253a6f89e9123c7f21f302e82f67d3e3cd69cf267b9910a663ef32", + "address": "7a330039e77ca06bc127319fd707cc4911a80db489a39fcfb746283a05f61836" } ] } diff --git a/sharding/mock/nodesCoordinatorMock.go b/sharding/mock/nodesCoordinatorMock.go new file mode 100644 index 00000000000..9ad5515d724 --- /dev/null +++ b/sharding/mock/nodesCoordinatorMock.go @@ -0,0 +1,71 @@ +package mock + +import ( + "math/big" + + "github.com/ElrondNetwork/elrond-go/sharding" +) + +type NodesCoordinatorMock struct { + ComputeValidatorsGroupCalled func([]byte) ([]sharding.Validator, error) + GetValidatorsPublicKeysCalled func(randomness []byte) ([]string, error) +} + +func (ncm NodesCoordinatorMock) ComputeValidatorsGroup(randomness []byte) (validatorsGroup []sharding.Validator, err error) { + if ncm.ComputeValidatorsGroupCalled != nil { + return ncm.ComputeValidatorsGroupCalled(randomness) + } + + list := []sharding.Validator{ + NewValidatorMock(big.NewInt(0), 0, []byte("A"), []byte("AA")), + NewValidatorMock(big.NewInt(0), 0, []byte("B"), []byte("BB")), + NewValidatorMock(big.NewInt(0), 0, []byte("C"), []byte("CC")), + NewValidatorMock(big.NewInt(0), 0, []byte("D"), []byte("DD")), + NewValidatorMock(big.NewInt(0), 0, []byte("E"), []byte("EE")), + NewValidatorMock(big.NewInt(0), 0, []byte("F"), []byte("FF")), + NewValidatorMock(big.NewInt(0), 0, []byte("G"), []byte("GG")), + NewValidatorMock(big.NewInt(0), 0, []byte("H"), []byte("HH")), + NewValidatorMock(big.NewInt(0), 0, []byte("I"), []byte("II")), + } + + return list, nil +} + +func (ncm NodesCoordinatorMock) GetValidatorsPublicKeys(randomness []byte) ([]string, error) { + if ncm.GetValidatorsPublicKeysCalled != nil { + return ncm.GetValidatorsPublicKeysCalled(randomness) + } + + validators, err := ncm.ComputeValidatorsGroup(randomness) + if err != nil { + return nil, err + } + + pubKeys := make([]string, 0) + + for _, v := range validators { + pubKeys = append(pubKeys, string(v.PubKey())) + } + + return pubKeys, nil +} + +func (ncm NodesCoordinatorMock) ConsensusGroupSize() int { + panic("implement me") +} + +func (ncm NodesCoordinatorMock) SetNodesPerShards(map[uint32][]sharding.Validator) error { + return nil +} + +func (ncm NodesCoordinatorMock) SetConsensusGroupSize(int) error { + 
panic("implement me") +} + +func (ncm NodesCoordinatorMock) GetSelectedPublicKeys(selection []byte) (publicKeys []string, err error) { + panic("implement me") +} + +func (ncm NodesCoordinatorMock) GetValidatorWithPublicKey(publicKey []byte) (sharding.Validator, uint32, error) { + panic("implement me") +} diff --git a/sharding/mock/nodesSetupMock.json b/sharding/mock/nodesSetupMock.json index 78110c8a6b6..17cf384c5b4 100644 --- a/sharding/mock/nodesSetupMock.json +++ b/sharding/mock/nodesSetupMock.json @@ -8,19 +8,24 @@ "metaChainMinNodes" : 1, "initialNodes": [ { - "pubkey": "41378f754e2c7b2745208c3ed21b151d297acdc84c3aca00b9e292cf28ec2d444771070157ea7760ed83c26f4fed387d0077e00b563a95825dac2cbc349fc0025ccf774e37b0a98ad9724d30e90f8c29b4091ccb738ed9ffc0573df776ee9ea30b3c038b55e532760ea4a8f152f2a52848020e5cee1cc537f2c2323399723081" + "pubkey": "41378f754e2c7b2745208c3ed21b151d297acdc84c3aca00b9e292cf28ec2d444771070157ea7760ed83c26f4fed387d0077e00b563a95825dac2cbc349fc0025ccf774e37b0a98ad9724d30e90f8c29b4091ccb738ed9ffc0573df776ee9ea30b3c038b55e532760ea4a8f152f2a52848020e5cee1cc537f2c2323399723081", + "address": "9e95a4e46da335a96845b4316251fc1bb197e1b8136d96ecc62bf6604eca9e49" }, { - "pubkey": "52f3bf5c01771f601ec2137e267319ab6716ef6ff5dfddaea48b42d955f631167f2ce19296a202bb8fd174f4e94f8c85f619df85a7f9f8de0f3768e5e6d8c48187b767deccf9829be246aa331aa86d182eb8fa28ea8a3e45d357ed1647a9be020a5569d686253a6f89e9123c7f21f302e82f67d3e3cd69cf267b9910a663ef32" + "pubkey": "52f3bf5c01771f601ec2137e267319ab6716ef6ff5dfddaea48b42d955f631167f2ce19296a202bb8fd174f4e94f8c85f619df85a7f9f8de0f3768e5e6d8c48187b767deccf9829be246aa331aa86d182eb8fa28ea8a3e45d357ed1647a9be020a5569d686253a6f89e9123c7f21f302e82f67d3e3cd69cf267b9910a663ef32", + "address": "7a330039e77ca06bc127319fd707cc4911a80db489a39fcfb746283a05f61836" }, { - "pubkey": "5e91c426c5c8f5f805f86de1e0653e2ec33853772e583b88e9f0f201089d03d8570759c3c3ab610ce573493c33ba0adf954c8939dba5d5ef7f2be4e87145d8153fc5b4fb91cecb8d9b1f62e080743fbf69c8c3096bf07980bb82cb450ba9b902673373d5b671ea73620cc5bc4d36f7a0f5ca3684d4c8aa5c1b425ab2a8673140" + "pubkey": "5e91c426c5c8f5f805f86de1e0653e2ec33853772e583b88e9f0f201089d03d8570759c3c3ab610ce573493c33ba0adf954c8939dba5d5ef7f2be4e87145d8153fc5b4fb91cecb8d9b1f62e080743fbf69c8c3096bf07980bb82cb450ba9b902673373d5b671ea73620cc5bc4d36f7a0f5ca3684d4c8aa5c1b425ab2a8673140", + "address": "131e2e717f2d33bdf7850c12b03dfe41ea8a5e76fdd6d4f23aebe558603e746f" }, { - "pubkey": "73972bf46dca59fba211c58f11b530f8e9d6392c499655ce760abc6458fd9c6b54b9676ee4b95aa32f6c254c9aad2f63a6195cd65d837a4320d7b8e915ba3a7123c8f4983b201035573c0752bb54e9021eb383b40d302447b62ea7a3790c89c47f5ab81d183f414e87611a31ff635ad22e969495356d5bc44eec7917aaad4c5e" + "pubkey": "73972bf46dca59fba211c58f11b530f8e9d6392c499655ce760abc6458fd9c6b54b9676ee4b95aa32f6c254c9aad2f63a6195cd65d837a4320d7b8e915ba3a7123c8f4983b201035573c0752bb54e9021eb383b40d302447b62ea7a3790c89c47f5ab81d183f414e87611a31ff635ad22e969495356d5bc44eec7917aaad4c5e", + "address": "4c9e66b605882c1099088f26659692f084e41dc0dedfaedf6a6409af21c02aac" }, { - "pubkey": "7391ccce066ab5674304b10220643bc64829afa626a165f1e7a6618e260fa68f8e79018ac5964f7a1b8dd419645049042e34ebe7f2772def71e6176ce9daf50a57c17ee2a7445b908fe47e8f978380fcc2654a19925bf73db2402b09dde515148081f8ca7c331fbedec689de1b7bfce6bf106e4433557c29752c12d0a009f47a" + "pubkey": 
"7391ccce066ab5674304b10220643bc64829afa626a165f1e7a6618e260fa68f8e79018ac5964f7a1b8dd419645049042e34ebe7f2772def71e6176ce9daf50a57c17ee2a7445b908fe47e8f978380fcc2654a19925bf73db2402b09dde515148081f8ca7c331fbedec689de1b7bfce6bf106e4433557c29752c12d0a009f47a", + "address": "90a66900634b206d20627fbaec432ebfbabeaf30b9e338af63191435e2e37022" } ] } diff --git a/sharding/mock/validatorMock.go b/sharding/mock/validatorMock.go new file mode 100644 index 00000000000..e4f9bf01af8 --- /dev/null +++ b/sharding/mock/validatorMock.go @@ -0,0 +1,32 @@ +package mock + +import ( + "math/big" +) + +type ValidatorMock struct { + stake *big.Int + rating int32 + pubKey []byte + address []byte +} + +func NewValidatorMock(stake *big.Int, rating int32, pubKey []byte, address []byte) *ValidatorMock { + return &ValidatorMock{stake: stake, rating: rating, pubKey: pubKey, address: address} +} + +func (vm *ValidatorMock) Stake() *big.Int { + return vm.stake +} + +func (vm *ValidatorMock) Rating() int32 { + return vm.rating +} + +func (vm *ValidatorMock) PubKey() []byte { + return vm.pubKey +} + +func (vm *ValidatorMock) Address() []byte { + return vm.address +} diff --git a/sharding/nodesSetup.go b/sharding/nodesSetup.go index 6d9a1fee4cb..535148e4562 100644 --- a/sharding/nodesSetup.go +++ b/sharding/nodesSetup.go @@ -9,9 +9,31 @@ import ( // InitialNode holds data from json type InitialNode struct { - PubKey string `json:"pubkey"` + PubKey string `json:"pubkey"` + Address string `json:"address"` + NodeInfo +} + +// NodeInfo holds node info +type NodeInfo struct { assignedShard uint32 pubKey []byte + address []byte +} + +// AssignedShard gets the node assigned shard +func (ni *NodeInfo) AssignedShard() uint32 { + return ni.assignedShard +} + +// Address gets the node address +func (ni *NodeInfo) Address() []byte { + return ni.address +} + +// PubKey gets the node public key +func (ni *NodeInfo) PubKey() []byte { + return ni.pubKey } // NodesSetup hold data for decoded data from json file @@ -29,7 +51,7 @@ type NodesSetup struct { nrOfShards uint32 nrOfNodes uint32 nrOfMetaChainNodes uint32 - allNodesPubKeys map[uint32][]string + allNodesInfo map[uint32][]*NodeInfo } // NewNodesSetup creates a new decoded nodes structure from json config file @@ -52,7 +74,7 @@ func NewNodesSetup(nodesFilePath string, numOfNodes uint64) (*NodesSetup, error) nodes.processMetaChainAssigment() nodes.processShardAssignment() - nodes.createInitialNodesPubKeys() + nodes.createInitialNodesInfo() return nodes, nil } @@ -64,6 +86,7 @@ func (ns *NodesSetup) processConfig() error { ns.nrOfMetaChainNodes = 0 for i := 0; i < len(ns.InitialNodes); i++ { ns.InitialNodes[i].pubKey, err = hex.DecodeString(ns.InitialNodes[i].PubKey) + ns.InitialNodes[i].address, err = hex.DecodeString(ns.InitialNodes[i].Address) // decoder treats empty string as correct, it is not allowed to have empty string as public key if ns.InitialNodes[i].PubKey == "" || err != nil { @@ -71,6 +94,12 @@ func (ns *NodesSetup) processConfig() error { return ErrCouldNotParsePubKey } + // decoder treats empty string as correct, it is not allowed to have empty string as address + if ns.InitialNodes[i].Address == "" || err != nil { + ns.InitialNodes[i].address = nil + return ErrCouldNotParseAddress + } + ns.nrOfNodes++ } @@ -133,32 +162,66 @@ func (ns *NodesSetup) processShardAssignment() { } } -func (ns *NodesSetup) createInitialNodesPubKeys() { +func (ns *NodesSetup) createInitialNodesInfo() { nrOfShardAndMeta := ns.nrOfShards + 1 - ns.allNodesPubKeys = make(map[uint32][]string, 
nrOfShardAndMeta) + ns.allNodesInfo = make(map[uint32][]*NodeInfo, nrOfShardAndMeta) for _, in := range ns.InitialNodes { - if in.pubKey != nil { - ns.allNodesPubKeys[in.assignedShard] = append(ns.allNodesPubKeys[in.assignedShard], string(in.pubKey)) + if in.pubKey != nil && in.address != nil { + ns.allNodesInfo[in.assignedShard] = append(ns.allNodesInfo[in.assignedShard], + &NodeInfo{in.assignedShard, in.pubKey, in.address}) } } } -// InitialNodesPubKeys - gets initial public keys +// InitialNodesPubKeys - gets initial nodes public keys func (ns *NodesSetup) InitialNodesPubKeys() map[uint32][]string { - return ns.allNodesPubKeys + allNodesPubKeys := make(map[uint32][]string, 0) + for shardId, nodesInfo := range ns.allNodesInfo { + pubKeys := make([]string, len(nodesInfo)) + for i := 0; i < len(nodesInfo); i++ { + pubKeys[i] = string(nodesInfo[i].pubKey) + } + + allNodesPubKeys[shardId] = pubKeys + } + + return allNodesPubKeys +} + +// InitialNodesInfo - gets initial nodes info +func (ns *NodesSetup) InitialNodesInfo() map[uint32][]*NodeInfo { + return ns.allNodesInfo } -// InitialNodesPubKeysForShard - gets initial public keys +// InitialNodesPubKeysForShard - gets initial nodes public keys for shard func (ns *NodesSetup) InitialNodesPubKeysForShard(shardId uint32) ([]string, error) { - if ns.allNodesPubKeys[shardId] == nil { + if ns.allNodesInfo[shardId] == nil { + return nil, ErrShardIdOutOfRange + } + if len(ns.allNodesInfo[shardId]) == 0 { + return nil, ErrNoPubKeys + } + + nodesInfo := ns.allNodesInfo[shardId] + pubKeys := make([]string, len(nodesInfo)) + for i := 0; i < len(nodesInfo); i++ { + pubKeys[i] = string(nodesInfo[i].pubKey) + } + + return pubKeys, nil +} + +// InitialNodesInfoForShard - gets initial nodes info for shard +func (ns *NodesSetup) InitialNodesInfoForShard(shardId uint32) ([]*NodeInfo, error) { + if ns.allNodesInfo[shardId] == nil { return nil, ErrShardIdOutOfRange } - if len(ns.allNodesPubKeys[shardId]) == 0 { + if len(ns.allNodesInfo[shardId]) == 0 { return nil, ErrNoPubKeys } - return ns.allNodesPubKeys[shardId], nil + return ns.allNodesInfo[shardId], nil } // NumberOfShards returns the calculated number of shards diff --git a/sharding/nodesSetup_test.go b/sharding/nodesSetup_test.go index 3f0c072940f..9ee30a85038 100644 --- a/sharding/nodesSetup_test.go +++ b/sharding/nodesSetup_test.go @@ -8,18 +8,40 @@ import ( "github.com/stretchr/testify/assert" ) +var ( + PubKeys = []string{ + "41378f754e2c7b2745208c3ed21b151d297acdc84c3aca00b9e292cf28ec2d444771070157ea7760ed83c26f4fed387d0077e00b563a95825dac2cbc349fc0025ccf774e37b0a98ad9724d30e90f8c29b4091ccb738ed9ffc0573df776ee9ea30b3c038b55e532760ea4a8f152f2a52848020e5cee1cc537f2c2323399723081", + "52f3bf5c01771f601ec2137e267319ab6716ef6ff5dfddaea48b42d955f631167f2ce19296a202bb8fd174f4e94f8c85f619df85a7f9f8de0f3768e5e6d8c48187b767deccf9829be246aa331aa86d182eb8fa28ea8a3e45d357ed1647a9be020a5569d686253a6f89e9123c7f21f302e82f67d3e3cd69cf267b9910a663ef32", + "5e91c426c5c8f5f805f86de1e0653e2ec33853772e583b88e9f0f201089d03d8570759c3c3ab610ce573493c33ba0adf954c8939dba5d5ef7f2be4e87145d8153fc5b4fb91cecb8d9b1f62e080743fbf69c8c3096bf07980bb82cb450ba9b902673373d5b671ea73620cc5bc4d36f7a0f5ca3684d4c8aa5c1b425ab2a8673140", + "73972bf46dca59fba211c58f11b530f8e9d6392c499655ce760abc6458fd9c6b54b9676ee4b95aa32f6c254c9aad2f63a6195cd65d837a4320d7b8e915ba3a7123c8f4983b201035573c0752bb54e9021eb383b40d302447b62ea7a3790c89c47f5ab81d183f414e87611a31ff635ad22e969495356d5bc44eec7917aaad4c5e", + 
"7391ccce066ab5674304b10220643bc64829afa626a165f1e7a6618e260fa68f8e79018ac5964f7a1b8dd419645049042e34ebe7f2772def71e6176ce9daf50a57c17ee2a7445b908fe47e8f978380fcc2654a19925bf73db2402b09dde515148081f8ca7c331fbedec689de1b7bfce6bf106e4433557c29752c12d0a009f47a", + "24dea9b5c79174c558c38316b2df25b956c53f0d0128b7427d219834867cc1b0868b7faff0205fe23e5ffdf276acfad6423890c782c7be7b98a31d501e4276a015a54d9849109322130fc9a9cb61d183318d50fcde44fabcbf600051c7cb950304b05e82f90f2ac4647016f39439608cd64ccc82fe6e996289bb2150e4e3ab08", + } + + Address = []string{ + "9e95a4e46da335a96845b4316251fc1bb197e1b8136d96ecc62bf6604eca9e49", + "7a330039e77ca06bc127319fd707cc4911a80db489a39fcfb746283a05f61836", + "131e2e717f2d33bdf7850c12b03dfe41ea8a5e76fdd6d4f23aebe558603e746f", + "4c9e66b605882c1099088f26659692f084e41dc0dedfaedf6a6409af21c02aac", + "90a66900634b206d20627fbaec432ebfbabeaf30b9e338af63191435e2e37022", + "63f702e061385324a25dc4f1bcfc7e4f4692bcd80de71bd4dd7d6e2f67f92481", + } +) + func createNodesSetupOneShardOneNodeWithOneMeta() *sharding.NodesSetup { + noOfInitialNodes := 2 ns := &sharding.NodesSetup{} ns.ConsensusGroupSize = 1 ns.MinNodesPerShard = 1 ns.MetaChainConsensusGroupSize = 1 ns.MetaChainMinNodes = 1 - ns.InitialNodes = make([]*sharding.InitialNode, 2) + ns.InitialNodes = make([]*sharding.InitialNode, noOfInitialNodes) ns.InitialNodes[0] = &sharding.InitialNode{} - ns.InitialNodes[0].PubKey = "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7419" + ns.InitialNodes[0].PubKey = PubKeys[0] + ns.InitialNodes[0].Address = Address[0] ns.InitialNodes[1] = &sharding.InitialNode{} - ns.InitialNodes[1].PubKey = "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7418" - + ns.InitialNodes[1].PubKey = PubKeys[1] + ns.InitialNodes[1].Address = Address[1] err := ns.ProcessConfig() if err != nil { return nil @@ -27,32 +49,24 @@ func createNodesSetupOneShardOneNodeWithOneMeta() *sharding.NodesSetup { ns.ProcessMetaChainAssigment() ns.ProcessShardAssignment() - ns.CreateInitialNodesPubKeys() + ns.CreateInitialNodesInfo() return ns } func createNodesSetupTwoShardTwoNodesWithOneMeta() *sharding.NodesSetup { + noOfInitialNodes := 6 ns := &sharding.NodesSetup{} ns.ConsensusGroupSize = 1 ns.MinNodesPerShard = 2 ns.MetaChainConsensusGroupSize = 1 - ns.MetaChainMinNodes = 1 - ns.InitialNodes = make([]*sharding.InitialNode, 5) - ns.InitialNodes[0] = &sharding.InitialNode{ - PubKey: "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7417", - } - ns.InitialNodes[1] = &sharding.InitialNode{ - PubKey: "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7419", - } - ns.InitialNodes[2] = &sharding.InitialNode{ - PubKey: "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7418", - } - ns.InitialNodes[3] = &sharding.InitialNode{ - PubKey: "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7417", - } - ns.InitialNodes[4] = &sharding.InitialNode{ - PubKey: "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7416", + ns.MetaChainMinNodes = 2 + ns.InitialNodes = make([]*sharding.InitialNode, noOfInitialNodes) + + for i := 0; i < noOfInitialNodes; i++ { + ns.InitialNodes[i] = &sharding.InitialNode{} + ns.InitialNodes[i].PubKey = PubKeys[i] + ns.InitialNodes[i].Address = Address[i] } err := ns.ProcessConfig() @@ -62,35 +76,24 @@ func createNodesSetupTwoShardTwoNodesWithOneMeta() *sharding.NodesSetup { ns.ProcessMetaChainAssigment() ns.ProcessShardAssignment() - ns.CreateInitialNodesPubKeys() + ns.CreateInitialNodesInfo() return ns } func 
createNodesSetupTwoShard5NodesWithMeta() *sharding.NodesSetup { + noOfInitialNodes := 5 ns := &sharding.NodesSetup{} ns.ConsensusGroupSize = 1 ns.MinNodesPerShard = 2 - ns.MetaChainMinNodes = 1 ns.MetaChainConsensusGroupSize = 1 - ns.InitialNodes = make([]*sharding.InitialNode, 6) - ns.InitialNodes[0] = &sharding.InitialNode{ - PubKey: "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7410", - } - ns.InitialNodes[1] = &sharding.InitialNode{ - PubKey: "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7419", - } - ns.InitialNodes[2] = &sharding.InitialNode{ - PubKey: "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7418", - } - ns.InitialNodes[3] = &sharding.InitialNode{ - PubKey: "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7417", - } - ns.InitialNodes[4] = &sharding.InitialNode{ - PubKey: "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7416", - } - ns.InitialNodes[5] = &sharding.InitialNode{ - PubKey: "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7411", + ns.MetaChainMinNodes = 1 + ns.InitialNodes = make([]*sharding.InitialNode, noOfInitialNodes) + + for i := 0; i < noOfInitialNodes; i++ { + ns.InitialNodes[i] = &sharding.InitialNode{} + ns.InitialNodes[i].PubKey = PubKeys[i] + ns.InitialNodes[i].Address = Address[i] } err := ns.ProcessConfig() @@ -100,31 +103,25 @@ func createNodesSetupTwoShard5NodesWithMeta() *sharding.NodesSetup { ns.ProcessMetaChainAssigment() ns.ProcessShardAssignment() - ns.CreateInitialNodesPubKeys() + ns.CreateInitialNodesInfo() return ns } func createNodesSetupTwoShard6NodesMeta() *sharding.NodesSetup { + noOfInitialNodes := 6 ns := &sharding.NodesSetup{} ns.ConsensusGroupSize = 1 ns.MinNodesPerShard = 2 ns.MetaChainMinNodes = 2 ns.MetaChainConsensusGroupSize = 2 - ns.InitialNodes = make([]*sharding.InitialNode, 6) - ns.InitialNodes[0] = &sharding.InitialNode{} - ns.InitialNodes[1] = &sharding.InitialNode{} - ns.InitialNodes[2] = &sharding.InitialNode{} - ns.InitialNodes[3] = &sharding.InitialNode{} - ns.InitialNodes[4] = &sharding.InitialNode{} - ns.InitialNodes[5] = &sharding.InitialNode{} - - ns.InitialNodes[0].PubKey = "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7419" - ns.InitialNodes[1].PubKey = "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7418" - ns.InitialNodes[2].PubKey = "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7417" - ns.InitialNodes[3].PubKey = "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7416" - ns.InitialNodes[4].PubKey = "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7411" - ns.InitialNodes[5].PubKey = "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7410" + ns.InitialNodes = make([]*sharding.InitialNode, noOfInitialNodes) + + for i := 0; i < noOfInitialNodes; i++ { + ns.InitialNodes[i] = &sharding.InitialNode{} + ns.InitialNodes[i].PubKey = PubKeys[i] + ns.InitialNodes[i].Address = Address[i] + } err := ns.ProcessConfig() if err != nil { @@ -133,7 +130,7 @@ func createNodesSetupTwoShard6NodesMeta() *sharding.NodesSetup { ns.ProcessMetaChainAssigment() ns.ProcessShardAssignment() - ns.CreateInitialNodesPubKeys() + ns.CreateInitialNodesInfo() return ns } @@ -170,20 +167,23 @@ func TestNodesSetup_NewNodesShouldTrimInitialNodesList(t *testing.T) { func TestNodesSetup_InitialNodesPubKeysFromNil(t *testing.T) { ns := sharding.NodesSetup{} - inPubKeys := ns.InitialNodesPubKeys() + inPubKeys := ns.InitialNodesInfo() assert.NotNil(t, ns) assert.Nil(t, inPubKeys) } func 
TestNodesSetup_ProcessConfigNodesWithIncompleteDataShouldErr(t *testing.T) { + noOfInitialNodes := 2 ns := sharding.NodesSetup{} - ns.InitialNodes = make([]*sharding.InitialNode, 2) + ns.InitialNodes = make([]*sharding.InitialNode, noOfInitialNodes) + ns.InitialNodes[0] = &sharding.InitialNode{} ns.InitialNodes[1] = &sharding.InitialNode{} - ns.InitialNodes[0].PubKey = "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7419" + ns.InitialNodes[0].PubKey = PubKeys[0] + ns.InitialNodes[0].Address = Address[0] err := ns.ProcessConfig() @@ -192,17 +192,19 @@ func TestNodesSetup_ProcessConfigNodesWithIncompleteDataShouldErr(t *testing.T) } func TestNodesSetup_ProcessConfigInvalidConsensusGroupSizeShouldErr(t *testing.T) { + noOfInitialNodes := 2 ns := sharding.NodesSetup{ ConsensusGroupSize: 0, MinNodesPerShard: 0, } - ns.InitialNodes = make([]*sharding.InitialNode, 2) - ns.InitialNodes[0] = &sharding.InitialNode{} - ns.InitialNodes[1] = &sharding.InitialNode{} + ns.InitialNodes = make([]*sharding.InitialNode, noOfInitialNodes) - ns.InitialNodes[0].PubKey = "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7419" - ns.InitialNodes[1].PubKey = "3336b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7418" + for i := 0; i < noOfInitialNodes; i++ { + ns.InitialNodes[i] = &sharding.InitialNode{} + ns.InitialNodes[i].PubKey = PubKeys[i] + ns.InitialNodes[i].Address = Address[i] + } err := ns.ProcessConfig() @@ -211,6 +213,7 @@ func TestNodesSetup_ProcessConfigInvalidConsensusGroupSizeShouldErr(t *testing.T } func TestNodesSetup_ProcessConfigInvalidMetaConsensusGroupSizeShouldErr(t *testing.T) { + noOfInitialNodes := 2 ns := sharding.NodesSetup{ ConsensusGroupSize: 1, MinNodesPerShard: 1, @@ -218,12 +221,13 @@ func TestNodesSetup_ProcessConfigInvalidMetaConsensusGroupSizeShouldErr(t *testi MetaChainMinNodes: 0, } - ns.InitialNodes = make([]*sharding.InitialNode, 2) - ns.InitialNodes[0] = &sharding.InitialNode{} - ns.InitialNodes[1] = &sharding.InitialNode{} + ns.InitialNodes = make([]*sharding.InitialNode, noOfInitialNodes) - ns.InitialNodes[0].PubKey = "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7419" - ns.InitialNodes[1].PubKey = "3336b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7418" + for i := 0; i < noOfInitialNodes; i++ { + ns.InitialNodes[i] = &sharding.InitialNode{} + ns.InitialNodes[i].PubKey = PubKeys[i] + ns.InitialNodes[i].Address = Address[i] + } err := ns.ProcessConfig() @@ -232,17 +236,19 @@ func TestNodesSetup_ProcessConfigInvalidMetaConsensusGroupSizeShouldErr(t *testi } func TestNodesSetup_ProcessConfigInvalidConsensusGroupSizeLargerThanNumOfNodesShouldErr(t *testing.T) { + noOfInitialNodes := 2 ns := sharding.NodesSetup{ ConsensusGroupSize: 2, MinNodesPerShard: 0, } - ns.InitialNodes = make([]*sharding.InitialNode, 2) - ns.InitialNodes[0] = &sharding.InitialNode{} - ns.InitialNodes[1] = &sharding.InitialNode{} + ns.InitialNodes = make([]*sharding.InitialNode, noOfInitialNodes) - ns.InitialNodes[0].PubKey = "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7419" - ns.InitialNodes[1].PubKey = "3336b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7418" + for i := 0; i < noOfInitialNodes; i++ { + ns.InitialNodes[i] = &sharding.InitialNode{} + ns.InitialNodes[i].PubKey = PubKeys[i] + ns.InitialNodes[i].Address = Address[i] + } err := ns.ProcessConfig() @@ -251,6 +257,7 @@ func TestNodesSetup_ProcessConfigInvalidConsensusGroupSizeLargerThanNumOfNodesSh } func 
TestNodesSetup_ProcessConfigInvalidMetaConsensusGroupSizeLargerThanNumOfNodesShouldErr(t *testing.T) { + noOfInitialNodes := 2 ns := sharding.NodesSetup{ ConsensusGroupSize: 1, MinNodesPerShard: 1, @@ -259,11 +266,12 @@ func TestNodesSetup_ProcessConfigInvalidMetaConsensusGroupSizeLargerThanNumOfNod } ns.InitialNodes = make([]*sharding.InitialNode, 2) - ns.InitialNodes[0] = &sharding.InitialNode{} - ns.InitialNodes[1] = &sharding.InitialNode{} - ns.InitialNodes[0].PubKey = "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7419" - ns.InitialNodes[1].PubKey = "3336b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7418" + for i := 0; i < noOfInitialNodes; i++ { + ns.InitialNodes[i] = &sharding.InitialNode{} + ns.InitialNodes[i].PubKey = PubKeys[i] + ns.InitialNodes[i].Address = Address[i] + } err := ns.ProcessConfig() @@ -272,17 +280,19 @@ func TestNodesSetup_ProcessConfigInvalidMetaConsensusGroupSizeLargerThanNumOfNod } func TestNodesSetup_ProcessConfigInvalidMinNodesPerShardShouldErr(t *testing.T) { + noOfInitialNodes := 2 ns := sharding.NodesSetup{ ConsensusGroupSize: 2, MinNodesPerShard: 0, } - ns.InitialNodes = make([]*sharding.InitialNode, 2) - ns.InitialNodes[0] = &sharding.InitialNode{} - ns.InitialNodes[1] = &sharding.InitialNode{} + ns.InitialNodes = make([]*sharding.InitialNode, noOfInitialNodes) - ns.InitialNodes[0].PubKey = "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7419" - ns.InitialNodes[1].PubKey = "3336b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7418" + for i := 0; i < noOfInitialNodes; i++ { + ns.InitialNodes[i] = &sharding.InitialNode{} + ns.InitialNodes[i].PubKey = PubKeys[i] + ns.InitialNodes[i].Address = Address[i] + } err := ns.ProcessConfig() @@ -291,6 +301,7 @@ func TestNodesSetup_ProcessConfigInvalidMinNodesPerShardShouldErr(t *testing.T) } func TestNodesSetup_ProcessConfigInvalidMetaMinNodesPerShardShouldErr(t *testing.T) { + noOfInitialNodes := 1 ns := sharding.NodesSetup{ ConsensusGroupSize: 1, MinNodesPerShard: 1, @@ -298,12 +309,13 @@ func TestNodesSetup_ProcessConfigInvalidMetaMinNodesPerShardShouldErr(t *testing MetaChainMinNodes: 0, } - ns.InitialNodes = make([]*sharding.InitialNode, 2) - ns.InitialNodes[0] = &sharding.InitialNode{} - ns.InitialNodes[1] = &sharding.InitialNode{} + ns.InitialNodes = make([]*sharding.InitialNode, noOfInitialNodes) - ns.InitialNodes[0].PubKey = "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7419" - ns.InitialNodes[1].PubKey = "3336b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7418" + for i := 0; i < noOfInitialNodes; i++ { + ns.InitialNodes[i] = &sharding.InitialNode{} + ns.InitialNodes[i].PubKey = PubKeys[i] + ns.InitialNodes[i].Address = Address[i] + } err := ns.ProcessConfig() @@ -312,17 +324,19 @@ func TestNodesSetup_ProcessConfigInvalidMetaMinNodesPerShardShouldErr(t *testing } func TestNodesSetup_ProcessConfigInvalidNumOfNodesSmallerThanMinNodesPerShardShouldErr(t *testing.T) { + noOfInitialNodes := 2 ns := sharding.NodesSetup{ ConsensusGroupSize: 2, MinNodesPerShard: 3, } - ns.InitialNodes = make([]*sharding.InitialNode, 2) - ns.InitialNodes[0] = &sharding.InitialNode{} - ns.InitialNodes[1] = &sharding.InitialNode{} + ns.InitialNodes = make([]*sharding.InitialNode, noOfInitialNodes) - ns.InitialNodes[0].PubKey = "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7419" - ns.InitialNodes[1].PubKey = "3336b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7418" + for i := 0; i < noOfInitialNodes; i++ { + ns.InitialNodes[i] = 
&sharding.InitialNode{} + ns.InitialNodes[i].PubKey = PubKeys[i] + ns.InitialNodes[i].Address = Address[i] + } err := ns.ProcessConfig() @@ -331,6 +345,7 @@ func TestNodesSetup_ProcessConfigInvalidNumOfNodesSmallerThanMinNodesPerShardSho } func TestNodesSetup_ProcessConfigInvalidMetaNumOfNodesSmallerThanMinNodesPerShardShouldErr(t *testing.T) { + noOfInitialNodes := 3 ns := sharding.NodesSetup{ ConsensusGroupSize: 1, MinNodesPerShard: 1, @@ -339,14 +354,13 @@ func TestNodesSetup_ProcessConfigInvalidMetaNumOfNodesSmallerThanMinNodesPerShar MetaChainMinNodes: 3, } - ns.InitialNodes = make([]*sharding.InitialNode, 3) - ns.InitialNodes[0] = &sharding.InitialNode{} - ns.InitialNodes[1] = &sharding.InitialNode{} - ns.InitialNodes[2] = &sharding.InitialNode{} + ns.InitialNodes = make([]*sharding.InitialNode, noOfInitialNodes) - ns.InitialNodes[0].PubKey = "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7419" - ns.InitialNodes[1].PubKey = "3336b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7418" - ns.InitialNodes[2].PubKey = "3336b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7417" + for i := 0; i < noOfInitialNodes; i++ { + ns.InitialNodes[i] = &sharding.InitialNode{} + ns.InitialNodes[i].PubKey = PubKeys[i] + ns.InitialNodes[i].Address = Address[i] + } err := ns.ProcessConfig() @@ -356,7 +370,7 @@ func TestNodesSetup_ProcessConfigInvalidMetaNumOfNodesSmallerThanMinNodesPerShar func TestNodesSetup_InitialNodesPubKeysForShardNil(t *testing.T) { ns := sharding.NodesSetup{} - inPK, err := ns.InitialNodesPubKeysForShard(0) + inPK, err := ns.InitialNodesInfoForShard(0) assert.NotNil(t, ns) assert.Nil(t, inPK) @@ -365,7 +379,7 @@ func TestNodesSetup_InitialNodesPubKeysForShardNil(t *testing.T) { func TestNodesSetup_InitialNodesPubKeysForShardWrongShard(t *testing.T) { ns := createNodesSetupOneShardOneNodeWithOneMeta() - inPK, err := ns.InitialNodesPubKeysForShard(1) + inPK, err := ns.InitialNodesInfoForShard(1) assert.NotNil(t, ns) assert.Nil(t, inPK) @@ -374,27 +388,27 @@ func TestNodesSetup_InitialNodesPubKeysForShardWrongShard(t *testing.T) { func TestNodesSetup_InitialNodesPubKeysForShardGood(t *testing.T) { ns := createNodesSetupTwoShardTwoNodesWithOneMeta() - inPK, err := ns.InitialNodesPubKeysForShard(1) + inPK, err := ns.InitialNodesInfoForShard(1) assert.NotNil(t, ns) - assert.Equal(t, len(inPK), 2) + assert.Equal(t, 2, len(inPK)) assert.Nil(t, err) } func TestNodesSetup_InitialNodesPubKeysForShardGoodMeta(t *testing.T) { ns := createNodesSetupTwoShard6NodesMeta() metaId := sharding.MetachainShardId - inPK, err := ns.InitialNodesPubKeysForShard(metaId) + inPK, err := ns.InitialNodesInfoForShard(metaId) assert.NotNil(t, ns) - assert.Equal(t, len(inPK), 2) + assert.Equal(t, 2, len(inPK)) assert.Nil(t, err) } func TestNodesSetup_PublicKeyNotGood(t *testing.T) { ns := createNodesSetupTwoShard6NodesMeta() - _, err := ns.GetShardIDForPubKey([]byte("5126b6505a73e59a994caa8f956f8c335d4399229de42102bb4814ca261c7419")) + _, err := ns.GetShardIDForPubKey([]byte(PubKeys[0])) assert.NotNil(t, ns) assert.NotNil(t, err) @@ -402,18 +416,18 @@ func TestNodesSetup_PublicKeyNotGood(t *testing.T) { func TestNodesSetup_PublicKeyGood(t *testing.T) { ns := createNodesSetupTwoShard5NodesWithMeta() - publicKey, err := hex.DecodeString("5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7417") + publicKey, err := hex.DecodeString(PubKeys[2]) selfId, err := ns.GetShardIDForPubKey(publicKey) assert.NotNil(t, ns) assert.Nil(t, err) - assert.Equal(t, uint32(1), selfId) + 
assert.Equal(t, uint32(0), selfId) } func TestNodesSetup_ShardPublicKeyGoodMeta(t *testing.T) { ns := createNodesSetupTwoShard6NodesMeta() - publicKey, err := hex.DecodeString("5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7417") + publicKey, err := hex.DecodeString(PubKeys[2]) selfId, err := ns.GetShardIDForPubKey(publicKey) @@ -425,7 +439,7 @@ func TestNodesSetup_ShardPublicKeyGoodMeta(t *testing.T) { func TestNodesSetup_MetaPublicKeyGoodMeta(t *testing.T) { ns := createNodesSetupTwoShard6NodesMeta() metaId := sharding.MetachainShardId - publicKey, err := hex.DecodeString("5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7418") + publicKey, err := hex.DecodeString(PubKeys[0]) selfId, err := ns.GetShardIDForPubKey(publicKey) diff --git a/sharding/sharding.go b/sharding/sharding.go deleted file mode 100644 index 50712d0ab8f..00000000000 --- a/sharding/sharding.go +++ /dev/null @@ -1,18 +0,0 @@ -package sharding - -import ( - "github.com/ElrondNetwork/elrond-go/data/state" -) - -// MetachainShardId will be used to identify a shard ID as metachain -const MetachainShardId = uint32(0xFFFFFFFF) - -// Coordinator defines what a shard state coordinator should hold -type Coordinator interface { - NumberOfShards() uint32 - ComputeId(address state.AddressContainer) uint32 - SelfId() uint32 - SameShard(firstAddress, secondAddress state.AddressContainer) bool - CommunicationIdentifier(destShardID uint32) string - IsInterfaceNil() bool -} diff --git a/consensus/validators/validator.go b/sharding/validator.go similarity index 64% rename from consensus/validators/validator.go rename to sharding/validator.go index fe80d2c7645..65a6f44d38c 100644 --- a/consensus/validators/validator.go +++ b/sharding/validator.go @@ -1,17 +1,18 @@ -package validators +package sharding import ( "math/big" ) type validator struct { - stake *big.Int - rating int32 - pubKey []byte + stake *big.Int + rating int32 + pubKey []byte + address []byte } // NewValidator creates a new instance of a validator -func NewValidator(stake *big.Int, rating int32, pubKey []byte) (*validator, error) { +func NewValidator(stake *big.Int, rating int32, pubKey []byte, address []byte) (*validator, error) { if stake == nil { return nil, ErrNilStake } @@ -24,10 +25,15 @@ func NewValidator(stake *big.Int, rating int32, pubKey []byte) (*validator, erro return nil, ErrNilPubKey } + if address == nil { + return nil, ErrNilAddress + } + return &validator{ - stake: stake, - rating: rating, - pubKey: pubKey, + stake: stake, + rating: rating, + pubKey: pubKey, + address: address, }, nil } @@ -46,6 +52,11 @@ func (v *validator) PubKey() []byte { return v.pubKey } +// Address returns the validator's address +func (v *validator) Address() []byte { + return v.address +} + // IsInterfaceNil returns true if there is no value under the interface func (v *validator) IsInterfaceNil() bool { if v == nil { diff --git a/sharding/validator_test.go b/sharding/validator_test.go new file mode 100644 index 00000000000..c0f3953e005 --- /dev/null +++ b/sharding/validator_test.go @@ -0,0 +1,78 @@ +package sharding_test + +import ( + "math/big" + "testing" + + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/stretchr/testify/assert" +) + +func TestValidator_NewValidatorShouldFailOnNilStake(t *testing.T) { + t.Parallel() + + validator, err := sharding.NewValidator(nil, 0, []byte("pk1"), []byte("addr1")) + + assert.Nil(t, validator) + assert.Equal(t, sharding.ErrNilStake, err) +} + +func TestValidator_NewValidatorShouldFailOnNegativeStake(t 
*testing.T) { + t.Parallel() + + validator, err := sharding.NewValidator(big.NewInt(-1), 0, []byte("pk1"), []byte("addr1")) + + assert.Nil(t, validator) + assert.Equal(t, sharding.ErrNegativeStake, err) +} + +func TestValidator_NewValidatorShouldFailOnNilPublickKey(t *testing.T) { + t.Parallel() + + validator, err := sharding.NewValidator(big.NewInt(0), 0, nil, []byte("addr1")) + + assert.Nil(t, validator) + assert.Equal(t, sharding.ErrNilPubKey, err) +} + +func TestValidator_NewValidatorShouldFailOnNilAddress(t *testing.T) { + t.Parallel() + + validator, err := sharding.NewValidator(big.NewInt(0), 0, []byte("pk1"), nil) + + assert.Nil(t, validator) + assert.Equal(t, sharding.ErrNilAddress, err) +} + +func TestValidator_NewValidatorShouldWork(t *testing.T) { + t.Parallel() + + validator, err := sharding.NewValidator(big.NewInt(0), 0, []byte("pk1"), []byte("addr1")) + + assert.NotNil(t, validator) + assert.Nil(t, err) +} + +func TestValidator_StakeShouldWork(t *testing.T) { + t.Parallel() + + validator, _ := sharding.NewValidator(big.NewInt(1), 0, []byte("pk1"), []byte("addr1")) + + assert.Equal(t, big.NewInt(1), validator.Stake()) +} + +func TestValidator_PubKeyShouldWork(t *testing.T) { + t.Parallel() + + validator, _ := sharding.NewValidator(big.NewInt(0), 0, []byte("pk1"), []byte("addr1")) + + assert.Equal(t, []byte("pk1"), validator.PubKey()) +} + +func TestValidator_AddressShouldWork(t *testing.T) { + t.Parallel() + + validator, _ := sharding.NewValidator(big.NewInt(0), 0, []byte("pk1"), []byte("addr1")) + + assert.Equal(t, []byte("addr1"), validator.Address()) +} From 886c7ab453777c63b7702a3ab8dafc0c0332ec7d Mon Sep 17 00:00:00 2001 From: miiu96 Date: Tue, 1 Oct 2019 15:23:09 +0300 Subject: [PATCH 168/234] EN-4198 : add new metric in termui for final highest block nonce --- cmd/node/main.go | 1 + core/constants.go | 3 +++ process/block/shardblock.go | 6 ++++-- statusHandler/presenter/blockInfoGetters.go | 5 +++++ statusHandler/presenter/blockInfoGetters_test.go | 11 +++++++++++ statusHandler/view/interface.go | 1 + .../view/termuic/termuiRenders/widgetsRender.go | 6 ++++++ 7 files changed, 31 insertions(+), 2 deletions(-) diff --git a/cmd/node/main.go b/cmd/node/main.go index 21543bd60b1..e9cd54fada9 100644 --- a/cmd/node/main.go +++ b/cmd/node/main.go @@ -750,6 +750,7 @@ func initMetrics( appStatusHandler.SetUInt64Value(core.MetricNumShardHeadersProcessed, initUint) appStatusHandler.SetUInt64Value(core.MetricNumTimesInForkChoice, initUint) appStatusHandler.SetStringValue(core.MetricPublicKeyTxSign, initString) + appStatusHandler.SetUInt64Value(core.MetricHighestFinalBlockInShard, initUint) } func startStatusPolling( diff --git a/core/constants.go b/core/constants.go index a0d6eb352b8..32b962bdf90 100644 --- a/core/constants.go +++ b/core/constants.go @@ -152,3 +152,6 @@ const MetricNumShardHeadersProcessed = "erd_num_shard_headers_processed" // MetricNumTimesInForkChoice is the metric that counts how many time a node was in fork choice const MetricNumTimesInForkChoice = "erd_fork_choice_count" + +//MetricHighestFinalBlockInShard is the metric that store highest nonce block notarized by meta for current shard +const MetricHighestFinalBlockInShard = "erd_highest_notarized_block_by_meta_for_current_shard" diff --git a/process/block/shardblock.go b/process/block/shardblock.go index 8c4b0d66ae7..d05ddca071f 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -733,13 +733,15 @@ func (sp *shardProcessor) CommitBlock( log.Debug(errNotCritical.Error()) } + 
highestFinalBlockNonce := sp.forkDetector.GetHighestFinalBlockNonce() log.Info(fmt.Sprintf("shard block with nonce %d is the highest final block in shard %d\n", - sp.forkDetector.GetHighestFinalBlockNonce(), + highestFinalBlockNonce, sp.shardCoordinator.SelfId())) sp.appStatusHandler.SetStringValue(core.MetricCurrentBlockHash, core.ToB64(headerHash)) + sp.appStatusHandler.SetUInt64Value(core.MetricHighestFinalBlockInShard, highestFinalBlockNonce) - hdrsToAttestPreviousFinal := uint32(header.Nonce-sp.forkDetector.GetHighestFinalBlockNonce()) + 1 + hdrsToAttestPreviousFinal := uint32(header.Nonce-highestFinalBlockNonce) + 1 sp.removeNotarizedHdrsBehindPreviousFinal(hdrsToAttestPreviousFinal) err = chainHandler.SetCurrentBlockBody(body) diff --git a/statusHandler/presenter/blockInfoGetters.go b/statusHandler/presenter/blockInfoGetters.go index f1ef411b034..aa6adcd9d8b 100644 --- a/statusHandler/presenter/blockInfoGetters.go +++ b/statusHandler/presenter/blockInfoGetters.go @@ -44,3 +44,8 @@ func (psh *PresenterStatusHandler) GetBlockSize() uint64 { return miniBlocksSize + headerSize } + +// GetHighestFinalBlockInShard will return highest nonce block notarized by metachain for current shard +func (psh *PresenterStatusHandler) GetHighestFinalBlockInShard() uint64 { + return psh.getFromCacheAsUint64(core.MetricHighestFinalBlockInShard) +} diff --git a/statusHandler/presenter/blockInfoGetters_test.go b/statusHandler/presenter/blockInfoGetters_test.go index 57fca23dfbb..38a3f2816ac 100644 --- a/statusHandler/presenter/blockInfoGetters_test.go +++ b/statusHandler/presenter/blockInfoGetters_test.go @@ -138,3 +138,14 @@ func TestPresenterStatusHandler_GetBlockSize(t *testing.T) { blockExpectedSize := miniBlocksSize + headerSize assert.Equal(t, blockExpectedSize, result) } + +func TestPresenterStatusHandler_GetHighestFinalBlockInShard(t *testing.T) { + t.Parallel() + + highestFinalBlockNonce := uint64(100) + presenterStatusHandler := NewPresenterStatusHandler() + presenterStatusHandler.SetUInt64Value(core.MetricHighestFinalBlockInShard, highestFinalBlockNonce) + result := presenterStatusHandler.GetHighestFinalBlockInShard() + + assert.Equal(t, highestFinalBlockNonce, result) +} diff --git a/statusHandler/view/interface.go b/statusHandler/view/interface.go index a8dc0d965a7..76f27235391 100644 --- a/statusHandler/view/interface.go +++ b/statusHandler/view/interface.go @@ -44,6 +44,7 @@ type Presenter interface { GetBlockSize() uint64 GetNumShardHeadersInPool() uint64 GetNumShardHeadersProcessed() uint64 + GetHighestFinalBlockInShard() uint64 // IsInterfaceNil returns true if there is no value under the interface IsInterfaceNil() bool diff --git a/statusHandler/view/termuic/termuiRenders/widgetsRender.go b/statusHandler/view/termuic/termuiRenders/widgetsRender.go index bc6133c4eaf..1608c51658e 100644 --- a/statusHandler/view/termuic/termuiRenders/widgetsRender.go +++ b/statusHandler/view/termuic/termuiRenders/widgetsRender.go @@ -239,6 +239,12 @@ func (wr *WidgetsRender) prepareBlockInfo() { crossCheckBlockHeight := wr.presenter.GetCrossCheckBlockHeight() rows[4] = []string{fmt.Sprintf("Cross check block height: %s", crossCheckBlockHeight)} + shardId := wr.presenter.GetShardId() + if shardId != uint64(sharding.MetachainShardId) { + highestBlockInShard := wr.presenter.GetHighestFinalBlockInShard() + rows[4][0] += fmt.Sprintf(" ,highest block nonce in shard: %d", highestBlockInShard) + } + consensusState := wr.presenter.GetConsensusState() rows[5] = []string{fmt.Sprintf("Consensus state: %s", 
consensusState)} From 7516f1910c3c04fa96f207cfe5ebea4588bd12b1 Mon Sep 17 00:00:00 2001 From: miiu96 Date: Tue, 1 Oct 2019 15:41:25 +0300 Subject: [PATCH 169/234] EN-4207 : fix after review --- process/common_test.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/process/common_test.go b/process/common_test.go index 6362884b379..c8baa447161 100644 --- a/process/common_test.go +++ b/process/common_test.go @@ -46,10 +46,10 @@ func TestEmptyChannelShouldWorkOnNotBufferedChannel(t *testing.T) { wg := sync.WaitGroup{} wgChanWasWritten := sync.WaitGroup{} - numConcurrentWrites := int32(100) - wg.Add(int(numConcurrentWrites)) - wgChanWasWritten.Add(int(numConcurrentWrites)) - for i := int32(0); i < numConcurrentWrites; i++ { + numConcurrentWrites := 100 + wg.Add(numConcurrentWrites) + wgChanWasWritten.Add(numConcurrentWrites) + for i := 0; i < numConcurrentWrites; i++ { go func() { wg.Done() time.Sleep(time.Millisecond) @@ -62,7 +62,7 @@ func TestEmptyChannelShouldWorkOnNotBufferedChannel(t *testing.T) { wg.Wait() go func() { - for readsCnt < numConcurrentWrites { + for readsCnt < int32(numConcurrentWrites) { atomic.AddInt32(&readsCnt, int32(process.EmptyChannel(ch))) } }() @@ -71,7 +71,7 @@ func TestEmptyChannelShouldWorkOnNotBufferedChannel(t *testing.T) { wgChanWasWritten.Wait() assert.Equal(t, 0, len(ch)) - assert.Equal(t, numConcurrentWrites, atomic.LoadInt32(&readsCnt)) + assert.Equal(t, int32(numConcurrentWrites), atomic.LoadInt32(&readsCnt)) } func TestGetShardHeaderShouldErrNilCacher(t *testing.T) { From 2e7d968cae6e8a05e88100c504659027f689881c Mon Sep 17 00:00:00 2001 From: miiu96 Date: Tue, 1 Oct 2019 16:14:47 +0300 Subject: [PATCH 170/234] EN-4198 : fix after review --- statusHandler/view/termuic/termuiRenders/widgetsRender.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/statusHandler/view/termuic/termuiRenders/widgetsRender.go b/statusHandler/view/termuic/termuiRenders/widgetsRender.go index 1608c51658e..7ad16600884 100644 --- a/statusHandler/view/termuic/termuiRenders/widgetsRender.go +++ b/statusHandler/view/termuic/termuiRenders/widgetsRender.go @@ -241,8 +241,8 @@ func (wr *WidgetsRender) prepareBlockInfo() { shardId := wr.presenter.GetShardId() if shardId != uint64(sharding.MetachainShardId) { - highestBlockInShard := wr.presenter.GetHighestFinalBlockInShard() - rows[4][0] += fmt.Sprintf(" ,highest block nonce in shard: %d", highestBlockInShard) + highestFinalBlockInShard := wr.presenter.GetHighestFinalBlockInShard() + rows[4][0] += fmt.Sprintf(" ,highest final block nonce in shard: %d", highestFinalBlockInShard) } consensusState := wr.presenter.GetConsensusState() From 79f8cd04756d5878bba6d6656c77a5e325ec952f Mon Sep 17 00:00:00 2001 From: Sebastian Marian Date: Tue, 1 Oct 2019 18:29:19 +0300 Subject: [PATCH 171/234] * Improved restoreMetaBlockIntoPool mechanism --- process/block/shardblock.go | 41 ++++++++----------------------------- 1 file changed, 8 insertions(+), 33 deletions(-) diff --git a/process/block/shardblock.go b/process/block/shardblock.go index 6362e474934..d696b7931b5 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -609,32 +609,8 @@ func (sp *shardProcessor) restoreMetaBlockIntoPool(miniBlockHashes map[string]ui } } - for _, metaBlockKey := range metaBlockPool.Keys() { - if len(miniBlockHashes) == 0 { - break - } - metaBlock, ok := metaBlockPool.Peek(metaBlockKey) - if !ok { - log.Error(process.ErrNilMetaBlockHeader.Error()) - continue - } - - hdr, ok := 
metaBlock.(data.HeaderHandler) - if !ok { - metaBlockPool.Remove(metaBlockKey) - log.Error(process.ErrWrongTypeAssertion.Error()) - continue - } - - crossMiniBlockHashes := hdr.GetMiniBlockHeadersWithDst(sp.shardCoordinator.SelfId()) - for key := range miniBlockHashes { - _, ok = crossMiniBlockHashes[key] - if !ok { - continue - } - - sp.removeProcessedMiniBlock(metaBlockKey, []byte(key)) - } + for miniBlockHash := range miniBlockHashes { + sp.removeProcessedMiniBlock([]byte(miniBlockHash)) } return nil @@ -1738,15 +1714,14 @@ func (sp *shardProcessor) addProcessedMiniBlock(metaBlockHash []byte, miniBlockH sp.mutProcessedMiniBlocks.Unlock() } -func (sp *shardProcessor) removeProcessedMiniBlock(metaBlockHash []byte, miniBlockHash []byte) { +func (sp *shardProcessor) removeProcessedMiniBlock(miniBlockHash []byte) { sp.mutProcessedMiniBlocks.Lock() - miniBlocksProcessed, ok := sp.processedMiniBlocks[string(metaBlockHash)] - if !ok { - sp.mutProcessedMiniBlocks.Unlock() - return + for _, miniBlocksProcessed := range sp.processedMiniBlocks { + _, isProcessed := miniBlocksProcessed[string(miniBlockHash)] + if isProcessed { + delete(miniBlocksProcessed, string(miniBlockHash)) + } } - - delete(miniBlocksProcessed, string(miniBlockHash)) sp.mutProcessedMiniBlocks.Unlock() } From 09eeee5bbcba6c3709d4d20c5a30b3df9d0be3e4 Mon Sep 17 00:00:00 2001 From: bogdan-rosianu Date: Tue, 1 Oct 2019 18:37:10 +0300 Subject: [PATCH 172/234] bug fixing related to consensus group size for shards and meta --- cmd/node/main.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/node/main.go b/cmd/node/main.go index ff6809d38f3..425ff909a72 100644 --- a/cmd/node/main.go +++ b/cmd/node/main.go @@ -1052,8 +1052,8 @@ func createNodesCoordinator( } nbShards := nodesConfig.NumberOfShards() - shardConsensusGroupSize := int(nodesConfig.MetaChainConsensusGroupSize) - metaConsensusGroupSize := int(nodesConfig.ConsensusGroupSize) + shardConsensusGroupSize := int(nodesConfig.ConsensusGroupSize) + metaConsensusGroupSize := int(nodesConfig.MetaChainConsensusGroupSize) initNodesInfo := nodesConfig.InitialNodesInfo() initValidators := make(map[uint32][]sharding.Validator) From 8ed2faca4a6df7efd11bbd4cb0c00b44ace06142 Mon Sep 17 00:00:00 2001 From: miiu96 Date: Wed, 2 Oct 2019 08:53:57 +0300 Subject: [PATCH 173/234] EN-3981 : fix after review --- core/indexer/data.go | 2 +- core/indexer/elasticsearch.go | 4 ++-- core/indexer/nilIndexer.go | 3 ++- node/defineOptions_test.go | 2 +- 4 files changed, 6 insertions(+), 5 deletions(-) diff --git a/core/indexer/data.go b/core/indexer/data.go index b6bb7c0812c..ef7011046f4 100644 --- a/core/indexer/data.go +++ b/core/indexer/data.go @@ -45,7 +45,7 @@ type Block struct { PrevHash string `json:"prevHash"` } -//ValidatorsPublicKeys is a structure containing fields for validator public keys +//ValidatorsPublicKeys is a structure containing fields for validators public keys type ValidatorsPublicKeys struct { PublicKeys []string `json:"publicKeys"` } diff --git a/core/indexer/elasticsearch.go b/core/indexer/elasticsearch.go index f1ece77a0dc..6c468d949a2 100644 --- a/core/indexer/elasticsearch.go +++ b/core/indexer/elasticsearch.go @@ -240,7 +240,7 @@ func (ei *elasticIndexer) SaveBlock( } } -// SaveRoundInfo will save data about a round on elastic +// SaveRoundInfo will save data about a round on elastic search func (ei *elasticIndexer) SaveRoundInfo(round int64, shardId uint32, signersIndexes []uint64, blockWasProposed bool) { var buff bytes.Buffer @@ -291,7 +291,7 @@ func 
(ei *elasticIndexer) SaveValidatorsPubKeys(validatorsPubKeys map[uint32][][ } } -// IsNilIndexer will return a bool value that signal if its a nil indexer +// IsNilIndexer will return a bool value that signals if the indexer's implementation is a NilIndexer func (ei *elasticIndexer) IsNilIndexer() bool { return ei.isNilIndexer } diff --git a/core/indexer/nilIndexer.go b/core/indexer/nilIndexer.go index 2ae4278beee..ad252de9f1f 100644 --- a/core/indexer/nilIndexer.go +++ b/core/indexer/nilIndexer.go @@ -5,6 +5,7 @@ import ( "github.com/ElrondNetwork/elrond-go/data" ) +// NilIndexer will be used when an Indexer is required, but another one isn't necessary or available type NilIndexer struct { } @@ -41,7 +42,7 @@ func (ni *NilIndexer) IsInterfaceNil() bool { return false } -// IsNilIndexer return if implementation of indexer is a nil implementation +// IsNilIndexer will return a bool value that signals if the indexer's implementation is a NilIndexer func (ni *NilIndexer) IsNilIndexer() bool { return true } diff --git a/node/defineOptions_test.go b/node/defineOptions_test.go index 408395da9f1..b8c89e35624 100644 --- a/node/defineOptions_test.go +++ b/node/defineOptions_test.go @@ -799,6 +799,6 @@ func TestWithIndexer_ShouldWork(t *testing.T) { opt := WithIndexer(indexer) err := opt(node) - assert.True(t, node.indexer == indexer) + assert.Equal(t, indexer, node.indexer) assert.Nil(t, err) } From 92c4aac09f0e18ce9e06024b69914b420a2e4ff7 Mon Sep 17 00:00:00 2001 From: miiu96 Date: Wed, 2 Oct 2019 09:07:12 +0300 Subject: [PATCH 174/234] EN-4198 : fix after review --- cmd/node/main.go | 2 +- core/constants.go | 4 ++-- statusHandler/view/termuic/termuiRenders/widgetsRender.go | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cmd/node/main.go b/cmd/node/main.go index 261b7006e1b..84fd090cda0 100644 --- a/cmd/node/main.go +++ b/cmd/node/main.go @@ -774,7 +774,7 @@ func initMetrics( appStatusHandler.SetUInt64Value(core.MetricNumMiniBlocks, initUint) appStatusHandler.SetStringValue(core.MetricConsensusState, initString) appStatusHandler.SetStringValue(core.MetricConsensusRoundState, initString) - appStatusHandler.SetStringValue(core.MetricCrossCheckBlockHeight, initString) + appStatusHandler.SetStringValue(core.MetricCrossCheckBlockHeight, "0") appStatusHandler.SetUInt64Value(core.MetricIsSyncing, isSyncing) appStatusHandler.SetStringValue(core.MetricCurrentBlockHash, initString) appStatusHandler.SetUInt64Value(core.MetricNumProcessedTxs, initUint) diff --git a/core/constants.go b/core/constants.go index 32b962bdf90..6763676ed1c 100644 --- a/core/constants.go +++ b/core/constants.go @@ -153,5 +153,5 @@ const MetricNumShardHeadersProcessed = "erd_num_shard_headers_processed" // MetricNumTimesInForkChoice is the metric that counts how many time a node was in fork choice const MetricNumTimesInForkChoice = "erd_fork_choice_count" -//MetricHighestFinalBlockInShard is the metric that store highest nonce block notarized by meta for current shard -const MetricHighestFinalBlockInShard = "erd_highest_notarized_block_by_meta_for_current_shard" +//MetricHighestFinalBlockInShard is the metric that stores the highest nonce block notarized by metachain for current shard +const MetricHighestFinalBlockInShard = "erd_highest_notarized_block_by_metachain_for_current_shard" diff --git a/statusHandler/view/termuic/termuiRenders/widgetsRender.go b/statusHandler/view/termuic/termuiRenders/widgetsRender.go index 7ad16600884..06575201556 100644 --- a/statusHandler/view/termuic/termuiRenders/widgetsRender.go 
+++ b/statusHandler/view/termuic/termuiRenders/widgetsRender.go @@ -242,7 +242,7 @@ func (wr *WidgetsRender) prepareBlockInfo() { shardId := wr.presenter.GetShardId() if shardId != uint64(sharding.MetachainShardId) { highestFinalBlockInShard := wr.presenter.GetHighestFinalBlockInShard() - rows[4][0] += fmt.Sprintf(" ,highest final block nonce in shard: %d", highestFinalBlockInShard) + rows[4][0] += fmt.Sprintf(" , highest final block nonce in shard: %d", highestFinalBlockInShard) } consensusState := wr.presenter.GetConsensusState() From af3ef413f08b1df6be47b49b5073e1f0bdded4a5 Mon Sep 17 00:00:00 2001 From: miiu96 Date: Wed, 2 Oct 2019 10:22:37 +0300 Subject: [PATCH 175/234] EN-4198 : fix after review --- statusHandler/view/termuic/termuiRenders/widgetsRender.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/statusHandler/view/termuic/termuiRenders/widgetsRender.go b/statusHandler/view/termuic/termuiRenders/widgetsRender.go index 06575201556..78406f4a299 100644 --- a/statusHandler/view/termuic/termuiRenders/widgetsRender.go +++ b/statusHandler/view/termuic/termuiRenders/widgetsRender.go @@ -242,7 +242,7 @@ func (wr *WidgetsRender) prepareBlockInfo() { shardId := wr.presenter.GetShardId() if shardId != uint64(sharding.MetachainShardId) { highestFinalBlockInShard := wr.presenter.GetHighestFinalBlockInShard() - rows[4][0] += fmt.Sprintf(" , highest final block nonce in shard: %d", highestFinalBlockInShard) + rows[4][0] += fmt.Sprintf(", highest final block nonce in shard: %d", highestFinalBlockInShard) } consensusState := wr.presenter.GetConsensusState() From f7b40575e2093844a6bb76f1e236ced0a5deeda1 Mon Sep 17 00:00:00 2001 From: Sebastian Marian Date: Wed, 2 Oct 2019 13:33:10 +0300 Subject: [PATCH 176/234] * Improved management of requested/received meta header which are missing --- process/block/baseProcess.go | 8 +- process/block/displayBlock.go | 2 +- process/block/interceptedBlockHeader.go | 10 +- process/block/interceptedMetaBlockHeader.go | 8 +- process/block/metablock.go | 20 +- process/block/metablock_test.go | 28 +-- process/block/preprocess/basePreProcess.go | 13 +- process/block/shardblock.go | 195 +++++++++++--------- process/block/shardblock_test.go | 20 +- 9 files changed, 164 insertions(+), 140 deletions(-) diff --git a/process/block/baseProcess.go b/process/block/baseProcess.go index 8413b239092..17cd7ee2b4f 100644 --- a/process/block/baseProcess.go +++ b/process/block/baseProcess.go @@ -86,7 +86,7 @@ func (bp *baseProcessor) RevertAccountState() { } } -// AddLastNotarizedHdr adds the last notarized header +// AddLastNotarizedHdr adds the last notarized hdr func (bp *baseProcessor) AddLastNotarizedHdr(shardId uint32, processedHdr data.HeaderHandler) { bp.mutNotarizedHdrs.Lock() bp.notarizedHdrs[shardId] = append(bp.notarizedHdrs[shardId], processedHdr) @@ -210,7 +210,7 @@ func (bp *baseProcessor) isHdrConstructionValid(currHdr, prevHdr data.HeaderHand } //TODO: add verification if rand seed was correctly computed add other verification - //TODO: check here if the 2 header blocks were correctly signed and the consensus group was correctly elected + //TODO: check here if the 2 hdr blocks were correctly signed and the consensus group was correctly elected if prevHdr.GetRound() >= currHdr.GetRound() { log.Debug(fmt.Sprintf("round does not match in shard %d: local block round is %d and node received block with round %d\n", currHdr.GetShardID(), prevHdr.GetRound(), currHdr.GetRound())) @@ -355,8 +355,8 @@ func (bp *baseProcessor) getLastNotarizedHdr(shardId 
uint32) (data.HeaderHandler } // SetLastNotarizedHeadersSlice sets the headers blocks in notarizedHdrs for every shard -// This is done when starting a new epoch so metachain can use it when validating next shard header blocks -// and shard can validate the next meta header +// This is done when starting a new epoch so metachain can use it when validating next shard hdr blocks +// and shard can validate the next meta hdr func (bp *baseProcessor) setLastNotarizedHeadersSlice(startHeaders map[uint32]data.HeaderHandler) error { //TODO: protect this to be called only once at genesis time //TODO: do this on constructor as it is a must to for blockprocessor to work diff --git a/process/block/displayBlock.go b/process/block/displayBlock.go index 085189bb40c..bdb48427e4e 100644 --- a/process/block/displayBlock.go +++ b/process/block/displayBlock.go @@ -226,7 +226,7 @@ func DisplayLastNotarized( shardId uint32) { if lastNotarizedHdrForShard == nil || lastNotarizedHdrForShard.IsInterfaceNil() { - log.Error("last notarized header for shard is nil") + log.Error("last notarized hdr for shard is nil") return } diff --git a/process/block/interceptedBlockHeader.go b/process/block/interceptedBlockHeader.go index b1493158f5b..baadd92362d 100644 --- a/process/block/interceptedBlockHeader.go +++ b/process/block/interceptedBlockHeader.go @@ -38,17 +38,17 @@ func NewInterceptedHeader( } } -// SetHash sets the hash of this header. The hash will also be the ID of this object +// SetHash sets the hash of this hdr. The hash will also be the ID of this object func (inHdr *InterceptedHeader) SetHash(hash []byte) { inHdr.hash = hash } -// Hash gets the hash of this header +// Hash gets the hash of this hdr func (inHdr *InterceptedHeader) Hash() []byte { return inHdr.hash } -// Shard returns the shard ID for which this header is addressed +// Shard returns the shard ID for which this hdr is addressed func (inHdr *InterceptedHeader) Shard() uint32 { return inHdr.ShardId } @@ -63,7 +63,7 @@ func (inHdr *InterceptedHeader) GetUnderlyingObject() interface{} { return inHdr.Header } -// IntegrityAndValidity checks the integrity and validity of a block header wrapper +// IntegrityAndValidity checks the integrity and validity of a block hdr wrapper func (inHdr *InterceptedHeader) IntegrityAndValidity(coordinator sharding.Coordinator) error { err := inHdr.Integrity(coordinator) if err != nil { @@ -144,7 +144,7 @@ func (inHdr *InterceptedHeader) VerifySig() error { return err } - // get marshalled block header without signature and bitmap + // get marshalled block hdr without signature and bitmap // as this is the message that was signed headerCopy := *inHdr.Header headerCopy.Signature = nil diff --git a/process/block/interceptedMetaBlockHeader.go b/process/block/interceptedMetaBlockHeader.go index b0fbcdf0227..010606a2858 100644 --- a/process/block/interceptedMetaBlockHeader.go +++ b/process/block/interceptedMetaBlockHeader.go @@ -38,12 +38,12 @@ func NewInterceptedMetaHeader( } } -// SetHash sets the hash of this header. The hash will also be the ID of this object +// SetHash sets the hash of this hdr. 
The hash will also be the ID of this object func (imh *InterceptedMetaHeader) SetHash(hash []byte) { imh.hash = hash } -// Hash gets the hash of this header +// Hash gets the hash of this hdr func (imh *InterceptedMetaHeader) Hash() []byte { return imh.hash } @@ -53,7 +53,7 @@ func (imh *InterceptedMetaHeader) GetMetaHeader() *block.MetaBlock { return imh.MetaBlock } -// IntegrityAndValidity checks the integrity and validity of a block header wrapper +// IntegrityAndValidity checks the integrity and validity of a block hdr wrapper func (imh *InterceptedMetaHeader) IntegrityAndValidity(coordinator sharding.Coordinator) error { err := imh.Integrity(coordinator) if err != nil { @@ -136,7 +136,7 @@ func (imh *InterceptedMetaHeader) VerifySig() error { return err } - // get marshalled block header without signature and bitmap + // get marshalled block hdr without signature and bitmap // as this is the message that was signed headerCopy := *imh.MetaBlock headerCopy.Signature = nil diff --git a/process/block/metablock.go b/process/block/metablock.go index c5c49e6ce24..21178a59570 100644 --- a/process/block/metablock.go +++ b/process/block/metablock.go @@ -147,7 +147,7 @@ func (mp *metaProcessor) ProcessBlock( err := mp.checkBlockValidity(chainHandler, headerHandler, bodyHandler) if err != nil { if err == process.ErrBlockHashDoesNotMatch { - log.Info(fmt.Sprintf("requested missing meta header with hash %s for shard %d\n", + log.Info(fmt.Sprintf("requested missing meta hdr with hash %s for shard %d\n", core.ToB64(headerHandler.GetPrevHash()), headerHandler.GetShardID())) @@ -743,7 +743,7 @@ func (mp *metaProcessor) checkShardHeadersFinality(header *block.MetaBlock, high break } - // found a header with the next nonce + // found a hdr with the next nonce tmpHdr := sortedHdrPerShard[shId][i] if tmpHdr.GetNonce() == lastVerifiedHdr.GetNonce()+1 { err := mp.isHdrConstructionValid(tmpHdr, lastVerifiedHdr) @@ -841,7 +841,7 @@ func (mp *metaProcessor) isShardHeaderValidFinal(currHdr *block.Header, lastHdr return true, hdrIds } - // found a header with the next nonce + // found a hdr with the next nonce tmpHdr := sortedShardHdrs[i] if tmpHdr.GetNonce() == lastVerifiedHdr.GetNonce()+1 { err := mp.isHdrConstructionValid(tmpHdr, lastVerifiedHdr) @@ -862,7 +862,7 @@ func (mp *metaProcessor) isShardHeaderValidFinal(currHdr *block.Header, lastHdr return false, nil } -// receivedHeader is a call back function which is called when a new header +// receivedHeader is a call back function which is called when a new hdr // is added in the headers pool func (mp *metaProcessor) receivedHeader(headerHash []byte) { shardHdrsCache := mp.dataPool.ShardHeaders() @@ -885,7 +885,7 @@ func (mp *metaProcessor) receivedHeader(headerHash []byte) { return } - log.Debug(fmt.Sprintf("received header with hash %s and nonce %d from network\n", + log.Debug(fmt.Sprintf("received hdr with hash %s and nonce %d from network\n", core.ToB64(headerHash), header.GetNonce())) @@ -925,7 +925,7 @@ func (mp *metaProcessor) receivedHeader(headerHash []byte) { } // requestFinalMissingHeaders requests the headers needed to accept the current selected headers for processing the -// current block. It requests the nextKValidity headers greater than the highest shard header, for each shard, related +// current block. 
It requests the nextKValidity headers greater than the highest shard hdr, for each shard, related // to the block which should be processed func (mp *metaProcessor) requestFinalMissingHeaders() uint32 { requestedBlockHeaders := uint32(0) @@ -1178,9 +1178,9 @@ func (mp *metaProcessor) createPeerInfo() ([]block.PeerData, error) { return peerInfo, nil } -// CreateBlockHeader creates a miniblock header list given a block body +// CreateBlockHeader creates a miniblock hdr list given a block body func (mp *metaProcessor) CreateBlockHeader(bodyHandler data.BodyHandler, round uint64, haveTime func() bool) (data.HeaderHandler, error) { - log.Debug(fmt.Sprintf("started creating block header in round %d\n", round)) + log.Debug(fmt.Sprintf("started creating block hdr in round %d\n", round)) // TODO: add PrevRandSeed and RandSeed when BLS signing is completed header := &block.MetaBlock{ ShardInfo: make([]block.ShardData, 0), @@ -1233,7 +1233,7 @@ func (mp *metaProcessor) MarshalizedDataToBroadcast( mrsData := make(map[uint32][]byte) mrsTxs := make(map[string][][]byte) - // send headers which can validate the current header + // send headers which can validate the current hdr return mrsData, mrsTxs, nil } @@ -1357,7 +1357,7 @@ func (mp *metaProcessor) DecodeBlockBody(dta []byte) data.BodyHandler { return &body } -// DecodeBlockHeader method decodes block header from a given byte array +// DecodeBlockHeader method decodes block hdr from a given byte array func (mp *metaProcessor) DecodeBlockHeader(dta []byte) data.HeaderHandler { if dta == nil { return nil diff --git a/process/block/metablock_test.go b/process/block/metablock_test.go index 271413ff620..e614803bf4d 100644 --- a/process/block/metablock_test.go +++ b/process/block/metablock_test.go @@ -901,7 +901,7 @@ func TestMetaProcessor_CommitBlockOkValsShouldWork(t *testing.T) { assert.Nil(t, err) assert.True(t, removeHdrWasCalled) assert.True(t, forkDetectorAddCalled) - //this should sleep as there is an async call to display current header and block in CommitBlock + //this should sleep as there is an async call to display current hdr and block in CommitBlock time.Sleep(time.Second) } @@ -1440,7 +1440,7 @@ func TestMetaProcessor_CreateShardInfoShouldWorkHdrsAdded(t *testing.T) { //put the existing headers inside datapool - //header shard 0 + //hdr shard 0 prevHash, _ := mp.ComputeHeaderHash(mp.LastNotarizedHdrForShard(0).(*block.Header)) headers = append(headers, &block.Header{ Round: 10, @@ -1464,7 +1464,7 @@ func TestMetaProcessor_CreateShardInfoShouldWorkHdrsAdded(t *testing.T) { pool.ShardHeaders().Put(hdrHash1, headers[0]) pool.ShardHeaders().Put(hdrHash11, headers[1]) - // header shard 1 + // hdr shard 1 prevHash, _ = mp.ComputeHeaderHash(mp.LastNotarizedHdrForShard(1).(*block.Header)) headers = append(headers, &block.Header{ Round: 10, @@ -1488,7 +1488,7 @@ func TestMetaProcessor_CreateShardInfoShouldWorkHdrsAdded(t *testing.T) { pool.ShardHeaders().Put(hdrHash2, headers[2]) pool.ShardHeaders().Put(hdrHash22, headers[3]) - // header shard 2 + // hdr shard 2 prevHash, _ = mp.ComputeHeaderHash(mp.LastNotarizedHdrForShard(2).(*block.Header)) headers = append(headers, &block.Header{ Round: 10, @@ -1608,7 +1608,7 @@ func TestMetaProcessor_CreateShardInfoEmptyBlockHDRRoundTooHigh(t *testing.T) { //put the existing headers inside datapool - //header shard 0 + //hdr shard 0 prevHash, _ := mp.ComputeHeaderHash(mp.LastNotarizedHdrForShard(0).(*block.Header)) headers = append(headers, &block.Header{ Round: 10, @@ -1632,7 +1632,7 @@ func 
TestMetaProcessor_CreateShardInfoEmptyBlockHDRRoundTooHigh(t *testing.T) { pool.ShardHeaders().Put(hdrHash1, headers[0]) pool.ShardHeaders().Put(hdrHash11, headers[1]) - // header shard 1 + // hdr shard 1 prevHash, _ = mp.ComputeHeaderHash(mp.LastNotarizedHdrForShard(1).(*block.Header)) headers = append(headers, &block.Header{ Round: 10, @@ -1656,7 +1656,7 @@ func TestMetaProcessor_CreateShardInfoEmptyBlockHDRRoundTooHigh(t *testing.T) { pool.ShardHeaders().Put(hdrHash2, headers[2]) pool.ShardHeaders().Put(hdrHash22, headers[3]) - // header shard 2 + // hdr shard 2 prevHash, _ = mp.ComputeHeaderHash(mp.LastNotarizedHdrForShard(2).(*block.Header)) headers = append(headers, &block.Header{ Round: 10, @@ -1817,7 +1817,7 @@ func TestMetaProcessor_CreateLastNotarizedHdrs(t *testing.T) { //put the existing headers inside datapool - //header shard 0 + //hdr shard 0 prevHash, _ := mp.ComputeHeaderHash(mp.LastNotarizedHdrForShard(0).(*block.Header)) prevHdr := &block.Header{ Round: 10, @@ -1847,13 +1847,13 @@ func TestMetaProcessor_CreateLastNotarizedHdrs(t *testing.T) { shDataPrev := block.ShardData{ShardId: 0, HeaderHash: prevHash} metaHdr.ShardInfo = append(metaHdr.ShardInfo, shDataPrev) - // test header not in pool and defer called + // test hdr not in pool and defer called err := mp.SaveLastNotarizedHeader(metaHdr) assert.Equal(t, process.ErrMissingHeader, err) notarizedHdrs = mp.NotarizedHdrs() assert.Equal(t, firstNonce, mp.LastNotarizedHdrForShard(currHdr.ShardId).GetNonce()) - // wrong header type in pool and defer called + // wrong hdr type in pool and defer called pool.ShardHeaders().Put(currHash, metaHdr) pool.ShardHeaders().Put(prevHash, prevHdr) @@ -1911,7 +1911,7 @@ func TestMetaProcessor_CheckShardHeadersValidity(t *testing.T) { //put the existing headers inside datapool - //header shard 0 + //hdr shard 0 prevHash, _ := mp.ComputeHeaderHash(mp.LastNotarizedHdrForShard(0).(*block.Header)) prevHdr := &block.Header{ Round: 10, @@ -2131,7 +2131,7 @@ func TestMetaProcessor_CheckShardHeadersFinality(t *testing.T) { //put the existing headers inside datapool - //header shard 0 + //hdr shard 0 prevHash, _ := mp.ComputeHeaderHash(mp.LastNotarizedHdrForShard(0).(*block.Header)) prevHdr := &block.Header{ Round: 10, @@ -2245,7 +2245,7 @@ func TestMetaProcessor_IsHdrConstructionValid(t *testing.T) { //put the existing headers inside datapool - //header shard 0 + //hdr shard 0 prevHash, _ := mp.ComputeHeaderHash(mp.LastNotarizedHdrForShard(0).(*block.Header)) prevHdr := &block.Header{ Round: 10, @@ -2354,7 +2354,7 @@ func TestMetaProcessor_IsShardHeaderValidFinal(t *testing.T) { //put the existing headers inside datapool - //header shard 0 + //hdr shard 0 prevHash, _ := mp.ComputeHeaderHash(mp.LastNotarizedHdrForShard(0).(*block.Header)) prevHdr := &block.Header{ Round: 10, diff --git a/process/block/preprocess/basePreProcess.go b/process/block/preprocess/basePreProcess.go index b1355dc0f20..d3b07091c0b 100644 --- a/process/block/preprocess/basePreProcess.go +++ b/process/block/preprocess/basePreProcess.go @@ -163,9 +163,10 @@ func (bpp *basePreProcess) computeExistingAndMissing( currType block.Type, txPool dataRetriever.ShardedDataCacherNotifier, ) map[uint32][]*txsHashesInfo { + missingTxsForShard := make(map[uint32][]*txsHashesInfo, 0) - forBlock.mutTxsForBlock.Lock() + forBlock.mutTxsForBlock.Lock() for i := 0; i < len(body); i++ { miniBlock := body[i] if miniBlock.Type != currType { @@ -177,18 +178,19 @@ func (bpp *basePreProcess) computeExistingAndMissing( for j := 0; j < 
len(miniBlock.TxHashes); j++ { txHash := miniBlock.TxHashes[j] - tx, _ := process.GetTransactionHandlerFromPool( + tx, err := process.GetTransactionHandlerFromPool( miniBlock.SenderShardID, miniBlock.ReceiverShardID, txHash, txPool) - if tx == nil || tx.IsInterfaceNil() { + if err != nil { txHashes = append(txHashes, txHash) forBlock.missingTxs++ - } else { - forBlock.txHashAndInfo[string(txHash)] = &txInfo{tx: tx, txShardInfo: txShardInfo} + continue } + + forBlock.txHashAndInfo[string(txHash)] = &txInfo{tx: tx, txShardInfo: txShardInfo} } if len(txHashes) > 0 { @@ -196,7 +198,6 @@ func (bpp *basePreProcess) computeExistingAndMissing( &txsHashesInfo{txHashes: txHashes, receiverShardID: miniBlock.ReceiverShardID}) } } - forBlock.mutTxsForBlock.Unlock() return missingTxsForShard diff --git a/process/block/shardblock.go b/process/block/shardblock.go index d696b7931b5..b5b1848971b 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -20,6 +20,17 @@ import ( const maxCleanTime = time.Second +type hdrInfo struct { + hdr data.HeaderHandler +} + +type hdrForBlock struct { + missingHdrs uint32 + missingFinalHdrs uint32 + mutHdrsForBlock sync.RWMutex + hdrHashAndInfo map[string]*hdrInfo +} + // shardProcessor implements shardProcessor interface and actually it tries to execute block type shardProcessor struct { *baseProcessor @@ -31,10 +42,8 @@ type shardProcessor struct { mutUsedMetaHdrsHashes sync.Mutex usedMetaHdrsHashes map[uint64][][]byte - mutRequestedMetaHdrsHashes sync.RWMutex - requestedMetaHdrsHashes map[string]bool - currHighestMetaHdrNonce uint64 - allNeededMetaHdrsFound bool + hdrsForCurrBlock hdrForBlock + currHighestMetaHdrNonce uint64 processedMiniBlocks map[string]map[string]struct{} mutProcessedMiniBlocks sync.RWMutex @@ -120,7 +129,7 @@ func NewShardProcessor(arguments ArgShardProcessor) (*shardProcessor, error) { return nil, process.ErrNilTransactionPool } - sp.requestedMetaHdrsHashes = make(map[string]bool) + sp.hdrsForCurrBlock.hdrHashAndInfo = make(map[string]*hdrInfo) sp.usedMetaHdrsHashes = make(map[uint64][][]byte) sp.processedMiniBlocks = make(map[string]map[string]struct{}) @@ -132,7 +141,6 @@ func NewShardProcessor(arguments ArgShardProcessor) (*shardProcessor, error) { sp.onRequestHeaderHandler = arguments.RequestHandler.RequestHeader sp.metaBlockFinality = process.MetaBlockFinality - sp.allNeededMetaHdrsFound = true return &sp, nil } @@ -152,7 +160,7 @@ func (sp *shardProcessor) ProcessBlock( err := sp.checkBlockValidity(chainHandler, headerHandler, bodyHandler) if err != nil { if err == process.ErrBlockHashDoesNotMatch { - log.Info(fmt.Sprintf("requested missing shard header with hash %s for shard %d\n", + log.Info(fmt.Sprintf("requested missing shard hdr with hash %s for shard %d\n", core.ToB64(headerHandler.GetPrevHash()), headerHandler.GetShardID())) @@ -199,7 +207,7 @@ func (sp *shardProcessor) ProcessBlock( return err } - sp.txCoordinator.CreateBlockStarted() + sp.CreateBlockStarted() sp.txCoordinator.RequestBlockTransactions(body) requestedMetaHdrs, requestedFinalMetaHdrs := sp.requestMetaHeaders(header) @@ -215,13 +223,17 @@ func (sp *shardProcessor) ProcessBlock( if requestedMetaHdrs > 0 || requestedFinalMetaHdrs > 0 { log.Info(fmt.Sprintf("requested %d missing meta headers and %d final meta headers\n", requestedMetaHdrs, requestedFinalMetaHdrs)) err = sp.waitForMetaHdrHashes(haveTime()) - sp.mutRequestedMetaHdrsHashes.Lock() - sp.allNeededMetaHdrsFound = true - unreceivedMetaHdrs := len(sp.requestedMetaHdrsHashes) - 
sp.mutRequestedMetaHdrsHashes.Unlock() + + sp.hdrsForCurrBlock.mutHdrsForBlock.Lock() + missingHdrs := sp.hdrsForCurrBlock.missingHdrs + sp.hdrsForCurrBlock.missingHdrs = 0 + sp.hdrsForCurrBlock.missingFinalHdrs = 0 + sp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() + if requestedMetaHdrs > 0 { - log.Info(fmt.Sprintf("received %d missing meta headers\n", int(requestedMetaHdrs)-unreceivedMetaHdrs)) + log.Info(fmt.Sprintf("received %d missing meta headers\n", requestedMetaHdrs-missingHdrs)) } + if err != nil { return err } @@ -282,7 +294,7 @@ func (sp *shardProcessor) ProcessBlock( func (sp *shardProcessor) setMetaConsensusData(finalizedMetaBlocks []data.HeaderHandler) error { sp.specialAddressHandler.ClearMetaConsensusData() - // for every finalized metablock header, reward the metachain consensus group members with accounts in shard + // for every finalized metablock hdr, reward the metachain consensus group members with accounts in shard for _, metaBlock := range finalizedMetaBlocks { round := metaBlock.GetRound() epoch := metaBlock.GetEpoch() @@ -305,30 +317,30 @@ func (sp *shardProcessor) SetConsensusData(randomness []byte, round uint64, epoc // checkMetaHeadersValidity - checks if listed metaheaders are valid as construction func (sp *shardProcessor) checkMetaHeadersValidityAndFinality(header *block.Header) error { - metablockCache := sp.dataPool.MetaBlocks() - if metablockCache == nil { - return process.ErrNilMetaBlockPool - } - tmpNotedHdr, err := sp.getLastNotarizedHdr(sharding.MetachainShardId) if err != nil { return err } currAddedMetaHdrs := make([]*block.MetaBlock, 0) + + sp.hdrsForCurrBlock.mutHdrsForBlock.RLock() for _, metaHash := range header.MetaBlockHashes { - value, ok := metablockCache.Peek(metaHash) + value, ok := sp.hdrsForCurrBlock.hdrHashAndInfo[string(metaHash)] if !ok { + sp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() return process.ErrNilMetaBlockHeader } - metaHdr, ok := value.(*block.MetaBlock) + metaHdr, ok := value.hdr.(*block.MetaBlock) if !ok { + sp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() return process.ErrWrongTypeAssertion } currAddedMetaHdrs = append(currAddedMetaHdrs, metaHdr) } + sp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() if len(currAddedMetaHdrs) == 0 { return nil @@ -373,7 +385,7 @@ func (sp *shardProcessor) checkMetaHdrFinality(header data.HeaderHandler) error break } - // found a header with the next nonce + // found a hdr with the next nonce if tmpHdr.hdr.GetNonce() == lastVerifiedHdr.GetNonce()+1 { err = sp.isHdrConstructionValid(tmpHdr.hdr, lastVerifiedHdr) if err != nil { @@ -441,7 +453,7 @@ func (sp *shardProcessor) getFinalityAttestingHeaders( return orderedMetaBlocks, nil } -// check if header has the same miniblocks as presented in body +// check if hdr has the same miniblocks as presented in body func (sp *shardProcessor) checkHeaderBodyCorrelation(hdr *block.Header, body block.Body) error { mbHashesFromHdr := make(map[string]*block.MiniBlockHeader) for i := 0; i < len(hdr.MiniBlockHeaders); i++ { @@ -806,7 +818,7 @@ func (sp *shardProcessor) cleanTxsPools() { log.Info(fmt.Sprintf("Total txs removed from pools cleaner %d", sp.txsPoolsCleaner.NumRemovedTxs())) } -// getHighestHdrForOwnShardFromMetachain calculates the highest shard header notarized by metachain +// getHighestHdrForOwnShardFromMetachain calculates the highest shard hdr notarized by metachain func (sp *shardProcessor) getHighestHdrForOwnShardFromMetachain( processedHdrs []data.HeaderHandler, ) ([]data.HeaderHandler, [][]byte, error) { @@ -862,7 +874,7 @@ func (sp 
*shardProcessor) getHighestHdrForShardFromMetachain(shardId uint32, hdr if err != nil { go sp.onRequestHeaderHandler(shardInfo.ShardId, shardInfo.HeaderHash) - log.Info(fmt.Sprintf("requested missing shard header with hash %s for shard %d\n", + log.Info(fmt.Sprintf("requested missing shard hdr with hash %s for shard %d\n", core.ToB64(shardInfo.HeaderHash), shardInfo.ShardId)) @@ -898,24 +910,28 @@ func (sp *shardProcessor) getProcessedMetaBlocksFromHeader(header *block.Header) return nil, err } - for _, metaBlockKey := range header.MetaBlockHashes { - obj, ok := sp.dataPool.MetaBlocks().Peek(metaBlockKey) + sp.hdrsForCurrBlock.mutHdrsForBlock.RLock() + for _, metaBlockHash := range header.MetaBlockHashes { + value, ok := sp.hdrsForCurrBlock.hdrHashAndInfo[string(metaBlockHash)] if !ok { + sp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() return nil, process.ErrNilMetaBlockHeader } - metaBlock, ok := obj.(*block.MetaBlock) + metaBlock, ok := value.hdr.(*block.MetaBlock) if !ok { + sp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() return nil, process.ErrWrongTypeAssertion } crossMiniBlockHashes := metaBlock.GetMiniBlockHeadersWithDst(sp.shardCoordinator.SelfId()) for key := range crossMiniBlockHashes { if usedMbs[key] { - sp.addProcessedMiniBlock(metaBlockKey, []byte(key)) + sp.addProcessedMiniBlock(metaBlockHash, []byte(key)) } } } + sp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() return processedMetaHeaders, nil } @@ -926,7 +942,7 @@ func (sp *shardProcessor) getProcessedMetaBlocksFromMiniBlocks( usedMetaBlockHashes [][]byte, ) ([]data.HeaderHandler, error) { if usedMiniBlocks == nil || usedMetaBlockHashes == nil { - // not an error, it can happen that no metablock header or no miniblock is used. + // not an error, it can happen that no metablock hdr or no miniblock is used. 
return make([]data.HeaderHandler, 0), nil } @@ -970,7 +986,7 @@ func (sp *shardProcessor) getProcessedMetaBlocksFromMiniBlockHashes( return nil, nil, process.ErrWrongTypeAssertion } - log.Debug(fmt.Sprintf("meta header nonce: %d\n", metaBlock.Nonce)) + log.Debug(fmt.Sprintf("meta hdr nonce: %d\n", metaBlock.Nonce)) crossMiniBlockHashes := metaBlock.GetMiniBlockHeadersWithDst(sp.shardCoordinator.SelfId()) for hash := range crossMiniBlockHashes { @@ -988,7 +1004,7 @@ func (sp *shardProcessor) getProcessedMetaBlocksFromMiniBlockHashes( delete(miniBlockHashes, key) } - log.Debug(fmt.Sprintf("cross mini blocks in meta header: %d\n", len(crossMiniBlockHashes))) + log.Debug(fmt.Sprintf("cross mini blocks in meta hdr: %d\n", len(crossMiniBlockHashes))) processedAll := true for key := range crossMiniBlockHashes { @@ -1104,38 +1120,38 @@ func (sp *shardProcessor) receivedMetaBlock(metaBlockHash []byte) { core.ToB64(metaBlockHash), metaBlock.GetNonce())) - sp.mutRequestedMetaHdrsHashes.Lock() + sp.hdrsForCurrBlock.mutHdrsForBlock.Lock() - if !sp.allNeededMetaHdrsFound { - if sp.requestedMetaHdrsHashes[string(metaBlockHash)] { - delete(sp.requestedMetaHdrsHashes, string(metaBlockHash)) + if sp.hdrsForCurrBlock.missingHdrs > 0 || sp.hdrsForCurrBlock.missingFinalHdrs > 0 { + hdrInfoForHash := sp.hdrsForCurrBlock.hdrHashAndInfo[string(metaBlockHash)] + if hdrInfoForHash != nil && (hdrInfoForHash.hdr == nil || hdrInfoForHash.hdr.IsInterfaceNil()) { + sp.hdrsForCurrBlock.hdrHashAndInfo[string(metaBlockHash)].hdr = metaBlock + sp.hdrsForCurrBlock.missingHdrs-- if metaBlock.GetNonce() > sp.currHighestMetaHdrNonce { sp.currHighestMetaHdrNonce = metaBlock.GetNonce() } } - lenReqMetaHdrsHashes := len(sp.requestedMetaHdrsHashes) - areFinalAttestingHdrsInCache := false - if lenReqMetaHdrsHashes == 0 { - requestedBlockHeaders := sp.requestFinalMissingHeaders() - if requestedBlockHeaders == 0 { - log.Info(fmt.Sprintf("received all final meta headers\n")) - areFinalAttestingHdrsInCache = true + if sp.hdrsForCurrBlock.missingHdrs == 0 { + missingFinalHdrs := sp.hdrsForCurrBlock.missingFinalHdrs + sp.hdrsForCurrBlock.missingFinalHdrs = sp.requestFinalMissingHeaders() + if sp.hdrsForCurrBlock.missingFinalHdrs == 0 { + log.Info(fmt.Sprintf("received %d missing final meta headers\n", missingFinalHdrs)) } else { - log.Info(fmt.Sprintf("requested %d missing final meta headers\n", requestedBlockHeaders)) + log.Info(fmt.Sprintf("requested %d missing final meta headers\n", sp.hdrsForCurrBlock.missingHdrs)) } } - sp.allNeededMetaHdrsFound = lenReqMetaHdrsHashes == 0 && areFinalAttestingHdrsInCache - - sp.mutRequestedMetaHdrsHashes.Unlock() + missingHdrs := sp.hdrsForCurrBlock.missingHdrs + missingFinalHdrs := sp.hdrsForCurrBlock.missingFinalHdrs + sp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() - if lenReqMetaHdrsHashes == 0 && areFinalAttestingHdrsInCache { + if missingHdrs == 0 || missingFinalHdrs == 0 { sp.chRcvAllMetaHdrs <- true } } else { - sp.mutRequestedMetaHdrsHashes.Unlock() + sp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() } lastNotarizedHdr, err := sp.getLastNotarizedHdr(sharding.MetachainShardId) @@ -1153,7 +1169,7 @@ func (sp *shardProcessor) receivedMetaBlock(metaBlockHash []byte) { } // requestFinalMissingHeaders requests the headers needed to accept the current selected headers for processing the -// current block. It requests the metaBlockFinality headers greater than the highest meta header related to the block +// current block. 
It requests the metaBlockFinality headers greater than the highest meta hdr related to the block // which should be processed func (sp *shardProcessor) requestFinalMissingHeaders() uint32 { requestedBlockHeaders := uint32(0) @@ -1162,14 +1178,18 @@ func (sp *shardProcessor) requestFinalMissingHeaders() uint32 { continue } - _, _, err := process.GetMetaHeaderFromPoolWithNonce( + metaBlock, metaBlockHash, err := process.GetMetaHeaderFromPoolWithNonce( i, sp.dataPool.MetaBlocks(), sp.dataPool.HeadersNonces()) + if err != nil { requestedBlockHeaders++ go sp.onRequestHeaderHandlerByNonce(sharding.MetachainShardId, i) + continue } + + sp.hdrsForCurrBlock.hdrHashAndInfo[string(metaBlockHash)] = &hdrInfo{hdr: metaBlock} } return requestedBlockHeaders @@ -1178,57 +1198,52 @@ func (sp *shardProcessor) requestFinalMissingHeaders() uint32 { func (sp *shardProcessor) requestMetaHeaders(header *block.Header) (uint32, uint32) { _ = process.EmptyChannel(sp.chRcvAllMetaHdrs) - sp.mutRequestedMetaHdrsHashes.Lock() - - sp.requestedMetaHdrsHashes = make(map[string]bool) - sp.allNeededMetaHdrsFound = true - if len(header.MetaBlockHashes) == 0 { - sp.mutRequestedMetaHdrsHashes.Unlock() return 0, 0 } - missingHeaderHashes := sp.computeMissingHeaders(header) + missingHeaderHashes := sp.computeMissingAndExistingMetaHeaders(header) - requestedBlockHeaders := uint32(0) + sp.hdrsForCurrBlock.mutHdrsForBlock.Lock() for _, hash := range missingHeaderHashes { - requestedBlockHeaders++ - sp.requestedMetaHdrsHashes[string(hash)] = true + sp.hdrsForCurrBlock.hdrHashAndInfo[string(hash)] = &hdrInfo{hdr: nil} go sp.onRequestHeaderHandler(sharding.MetachainShardId, hash) } - requestedFinalBlockHeaders := uint32(0) - if requestedBlockHeaders > 0 { - sp.allNeededMetaHdrsFound = false - } else { - requestedFinalBlockHeaders = sp.requestFinalMissingHeaders() - if requestedFinalBlockHeaders > 0 { - sp.allNeededMetaHdrsFound = false - } + if sp.hdrsForCurrBlock.missingHdrs == 0 { + sp.hdrsForCurrBlock.missingFinalHdrs = sp.requestFinalMissingHeaders() } - sp.mutRequestedMetaHdrsHashes.Unlock() + requestedHdrs := sp.hdrsForCurrBlock.missingHdrs + requestedFinalHdrs := sp.hdrsForCurrBlock.missingFinalHdrs + sp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() - return requestedBlockHeaders, requestedFinalBlockHeaders + return requestedHdrs, requestedFinalHdrs } -func (sp *shardProcessor) computeMissingHeaders(header *block.Header) [][]byte { +func (sp *shardProcessor) computeMissingAndExistingMetaHeaders(header *block.Header) [][]byte { missingHeaders := make([][]byte, 0) sp.currHighestMetaHdrNonce = uint64(0) + sp.hdrsForCurrBlock.mutHdrsForBlock.Lock() for i := 0; i < len(header.MetaBlockHashes); i++ { hdr, err := process.GetMetaHeaderFromPool( header.MetaBlockHashes[i], sp.dataPool.MetaBlocks()) + if err != nil { missingHeaders = append(missingHeaders, header.MetaBlockHashes[i]) + sp.hdrsForCurrBlock.missingHdrs++ continue } + sp.hdrsForCurrBlock.hdrHashAndInfo[string(header.MetaBlockHashes[i])] = &hdrInfo{hdr: hdr} + if hdr.Nonce > sp.currHighestMetaHdrNonce { sp.currHighestMetaHdrNonce = hdr.Nonce } } + sp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() return missingHeaders } @@ -1250,24 +1265,21 @@ func (sp *shardProcessor) verifyCrossShardMiniBlockDstMe(hdr *block.Header) erro } func (sp *shardProcessor) getAllMiniBlockDstMeFromMeta(round uint64, metaHashes [][]byte) (map[string][]byte, error) { - metaBlockCache := sp.dataPool.MetaBlocks() - if metaBlockCache == nil { - return nil, process.ErrNilMetaBlockPool - } - lastHdr, err := 
sp.getLastNotarizedHdr(sharding.MetachainShardId) if err != nil { return nil, err } mMiniBlockMeta := make(map[string][]byte) + + sp.hdrsForCurrBlock.mutHdrsForBlock.RLock() for _, metaHash := range metaHashes { - val, _ := metaBlockCache.Peek(metaHash) - if val == nil { + value, ok := sp.hdrsForCurrBlock.hdrHashAndInfo[string(metaHash)] + if !ok { continue } - hdr, ok := val.(*block.MetaBlock) + hdr, ok := value.hdr.(*block.MetaBlock) if !ok { continue } @@ -1287,6 +1299,7 @@ func (sp *shardProcessor) getAllMiniBlockDstMeFromMeta(round uint64, metaHashes mMiniBlockMeta[mbHash] = metaHash } } + sp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() return mMiniBlockMeta, nil } @@ -1354,7 +1367,7 @@ func (sp *shardProcessor) isMetaHeaderFinal(currHdr data.HeaderHandler, sortedHd return true } - // found a header with the next nonce + // found a hdr with the next nonce tmpHdr := sortedHdrs[i].hdr if tmpHdr.GetNonce() == lastVerifiedHdr.GetNonce()+1 { err := sp.isHdrConstructionValid(tmpHdr, lastVerifiedHdr) @@ -1374,7 +1387,7 @@ func (sp *shardProcessor) isMetaHeaderFinal(currHdr data.HeaderHandler, sortedHd return false } -// full verification through metachain header +// full verification through metachain hdr func (sp *shardProcessor) createAndProcessCrossMiniBlocksDstMe( noShards uint32, maxItemsInBlock uint32, @@ -1422,7 +1435,7 @@ func (sp *shardProcessor) createAndProcessCrossMiniBlocksDstMe( itemsAddedInHeader := uint32(len(usedMetaHdrsHashes) + len(miniBlocks)) if itemsAddedInHeader >= maxItemsInBlock { - log.Info(fmt.Sprintf("%d max records allowed to be added in shard header has been reached\n", maxItemsInBlock)) + log.Info(fmt.Sprintf("%d max records allowed to be added in shard hdr has been reached\n", maxItemsInBlock)) break } @@ -1551,9 +1564,9 @@ func (sp *shardProcessor) createMiniBlocks( return miniBlocks, nil } -// CreateBlockHeader creates a miniblock header list given a block body +// CreateBlockHeader creates a miniblock hdr list given a block body func (sp *shardProcessor) CreateBlockHeader(bodyHandler data.BodyHandler, round uint64, haveTime func() bool) (data.HeaderHandler, error) { - log.Debug(fmt.Sprintf("started creating block header in round %d\n", round)) + log.Debug(fmt.Sprintf("started creating block hdr in round %d\n", round)) header := &block.Header{ MiniBlockHeaders: make([]block.MiniBlockHeader, 0), RootHash: sp.getRootHash(), @@ -1674,7 +1687,7 @@ func (sp *shardProcessor) DecodeBlockBody(dta []byte) data.BodyHandler { return body } -// DecodeBlockHeader method decodes block header from a given byte array +// DecodeBlockHeader method decodes block hdr from a given byte array func (sp *shardProcessor) DecodeBlockHeader(dta []byte) data.HeaderHandler { if dta == nil { return nil @@ -1752,3 +1765,13 @@ func (sp *shardProcessor) isMiniBlockProcessed(metaBlockHash []byte, miniBlockHa return isProcessed } + +func (sp *shardProcessor) CreateBlockStarted() { + sp.txCoordinator.CreateBlockStarted() + + sp.hdrsForCurrBlock.mutHdrsForBlock.Lock() + sp.hdrsForCurrBlock.missingHdrs = 0 + sp.hdrsForCurrBlock.missingFinalHdrs = 0 + sp.hdrsForCurrBlock.hdrHashAndInfo = make(map[string]*hdrInfo) + sp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() +} diff --git a/process/block/shardblock_test.go b/process/block/shardblock_test.go index 3cda2c559f5..7aa5d722264 100644 --- a/process/block/shardblock_test.go +++ b/process/block/shardblock_test.go @@ -1755,7 +1755,7 @@ func TestShardProcessor_CommitBlockNoTxInPoolShouldErr(t *testing.T) { txHash := []byte("txHash") rootHash := []byte("root 
hash") - hdrHash := []byte("header hash") + hdrHash := []byte("hdr hash") hdr := &block.Header{ Nonce: 1, Round: 1, @@ -1836,7 +1836,7 @@ func TestShardProcessor_CommitBlockOkValsShouldWork(t *testing.T) { txHash := []byte("tx_hash1") rootHash := []byte("root hash") - hdrHash := []byte("header hash") + hdrHash := []byte("hdr hash") randSeed := []byte("rand seed") prevHdr := &block.Header{ @@ -1927,7 +1927,7 @@ func TestShardProcessor_CommitBlockOkValsShouldWork(t *testing.T) { assert.Nil(t, err) assert.True(t, forkDetectorAddCalled) assert.Equal(t, hdrHash, blkc.GetCurrentBlockHeaderHash()) - //this should sleep as there is an async call to display current header and block in CommitBlock + //this should sleep as there is an async call to display current hdr and block in CommitBlock time.Sleep(time.Second) } @@ -1937,7 +1937,7 @@ func TestShardProcessor_CommitBlockCallsIndexerMethods(t *testing.T) { txHash := []byte("tx_hash1") rootHash := []byte("root hash") - hdrHash := []byte("header hash") + hdrHash := []byte("hdr hash") randSeed := []byte("rand seed") prevHdr := &block.Header{ @@ -3221,7 +3221,7 @@ func TestShardProcessor_IsHdrConstructionValid(t *testing.T) { //put the existing headers inside datapool - //header shard 0 + //hdr shard 0 prevHash, _ := sp.ComputeHeaderHash(sp.LastNotarizedHdrForShard(sharding.MetachainShardId).(*block.MetaBlock)) prevHdr := &block.MetaBlock{ Round: 10, @@ -3338,7 +3338,7 @@ func TestShardProcessor_RemoveAndSaveLastNotarizedMetaHdrNoDstMB(t *testing.T) { RandSeed: prevRandSeed} notarizedHdrs[sharding.MetachainShardId] = append(notarizedHdrs[sharding.MetachainShardId], lastHdr) - //header shard 0 + //hdr shard 0 prevHash, _ := sp.ComputeHeaderHash(sp.LastNotarizedHdrForShard(sharding.MetachainShardId).(*block.MetaBlock)) prevHdr := &block.MetaBlock{ Round: 10, @@ -3363,7 +3363,7 @@ func TestShardProcessor_RemoveAndSaveLastNotarizedMetaHdrNoDstMB(t *testing.T) { mbHeaders := make([]block.MiniBlockHeader, 0) blockHeader := &block.Header{} - // test header not in pool and defer called + // test hdr not in pool and defer called processedMetaHdrs, err := sp.GetProcessedMetaBlocksFromHeader(blockHeader) assert.Nil(t, err) @@ -3378,7 +3378,7 @@ func TestShardProcessor_RemoveAndSaveLastNotarizedMetaHdrNoDstMB(t *testing.T) { assert.Equal(t, firstNonce, sp.LastNotarizedHdrForShard(sharding.MetachainShardId).GetNonce()) assert.Equal(t, 0, len(processedMetaHdrs)) - // wrong header type in pool and defer called + // wrong hdr type in pool and defer called dataPool.MetaBlocks().Put(currHash, shardHdr) hashes := make([][]byte, 0) @@ -3533,7 +3533,7 @@ func TestShardProcessor_RemoveAndSaveLastNotarizedMetaHdrNotAllMBFinished(t *tes miniBlocks := make([]block.MiniBlock, 0) miniBlocks = append(miniBlocks, miniblock1, miniblock2) - //header shard 0 + //hdr shard 0 prevHash, _ := sp.ComputeHeaderHash(sp.LastNotarizedHdrForShard(sharding.MetachainShardId).(*block.MetaBlock)) prevHdr := &block.MetaBlock{ Round: 10, @@ -3673,7 +3673,7 @@ func TestShardProcessor_RemoveAndSaveLastNotarizedMetaHdrAllMBFinished(t *testin miniBlocks := make([]block.MiniBlock, 0) miniBlocks = append(miniBlocks, miniblock1, miniblock2) - //header shard 0 + //hdr shard 0 prevHash, _ := sp.ComputeHeaderHash(sp.LastNotarizedHdrForShard(sharding.MetachainShardId).(*block.MetaBlock)) prevHdr := &block.MetaBlock{ Round: 10, From d6d942de1734f54bb82db59a5db995fd40b1ff8e Mon Sep 17 00:00:00 2001 From: iulianpascalau Date: Wed, 2 Oct 2019 16:22:22 +0300 Subject: [PATCH 177/234] bugfixing transactions flood: 
1. added an allowed window on nonce for received txs 2. added a throttler on tx interceptor 3. increased "seen" messages var from pubsub to 10 minutes --- core/errors.go | 3 + core/throttler/numGoRoutineThrottler.go | 49 ++++ core/throttler/numGoRoutineThrottler_test.go | 93 ++++++++ p2p/libp2p/netMessenger.go | 4 + process/dataValidators/export_test.go | 3 + process/dataValidators/txValidator.go | 6 +- process/dataValidators/txValidator_test.go | 18 ++ process/errors.go | 21 +- .../shard/interceptorsContainerFactory.go | 62 +++-- process/interface.go | 8 + process/mock/interceptorThrottlerStub.go | 36 +++ process/transaction/interceptor.go | 15 +- process/transaction/interceptor_test.go | 215 ++++++++++++++++-- 13 files changed, 472 insertions(+), 61 deletions(-) create mode 100644 core/throttler/numGoRoutineThrottler.go create mode 100644 core/throttler/numGoRoutineThrottler_test.go create mode 100644 process/dataValidators/export_test.go create mode 100644 process/mock/interceptorThrottlerStub.go diff --git a/core/errors.go b/core/errors.go index 87add20851b..dbc2989a41a 100644 --- a/core/errors.go +++ b/core/errors.go @@ -39,3 +39,6 @@ var ErrEmptyFile = errors.New("empty file provided") // ErrInvalidIndex signals that an invalid private key index has been provided var ErrInvalidIndex = errors.New("invalid private key index") + +// ErrNotPositiveValue signals that a 0 or negative value has been provided +var ErrNotPositiveValue = errors.New("the provided value is not positive") diff --git a/core/throttler/numGoRoutineThrottler.go b/core/throttler/numGoRoutineThrottler.go new file mode 100644 index 00000000000..c72d8d06066 --- /dev/null +++ b/core/throttler/numGoRoutineThrottler.go @@ -0,0 +1,49 @@ +package throttler + +import ( + "sync/atomic" + + "github.com/ElrondNetwork/elrond-go/core" +) + +// NumGoRoutineThrottler can limit the number of go routines launched +type NumGoRoutineThrottler struct { + max int32 + counter int32 +} + +// NewNumGoRoutineThrottler creates a new num go routine throttler instance +func NewNumGoRoutineThrottler(max int32) (*NumGoRoutineThrottler, error) { + if max <= 0 { + return nil, core.ErrNotPositiveValue + } + + return &NumGoRoutineThrottler{ + max: max, + }, nil +} + +// CanProcess returns true if current counter is less than max +func (ngrt *NumGoRoutineThrottler) CanProcess() bool { + valCounter := atomic.LoadInt32(&ngrt.counter) + + return valCounter < ngrt.max +} + +// StartProcessing will increment current counter +func (ngrt *NumGoRoutineThrottler) StartProcessing() { + atomic.AddInt32(&ngrt.counter, 1) +} + +// EndProcessing will decrement current counter +func (ngrt *NumGoRoutineThrottler) EndProcessing() { + atomic.AddInt32(&ngrt.counter, -1) +} + +// IsInterfaceNil returns true if there is no value under the interface +func (ngrt *NumGoRoutineThrottler) IsInterfaceNil() bool { + if ngrt == nil { + return true + } + return false +} diff --git a/core/throttler/numGoRoutineThrottler_test.go b/core/throttler/numGoRoutineThrottler_test.go new file mode 100644 index 00000000000..ebd0ac6d606 --- /dev/null +++ b/core/throttler/numGoRoutineThrottler_test.go @@ -0,0 +1,93 @@ +package throttler_test + +import ( + "testing" + + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/core/throttler" + "github.com/stretchr/testify/assert" +) + +func TestNewNumGoRoutineThrottler_WithNegativeShouldError(t *testing.T) { + t.Parallel() + + nt, err := throttler.NewNumGoRoutineThrottler(-1) + + assert.Nil(t, nt) + assert.Equal(t, 
core.ErrNotPositiveValue, err) +} + +func TestNewNumGoRoutineThrottler_WithZeroShouldError(t *testing.T) { + t.Parallel() + + nt, err := throttler.NewNumGoRoutineThrottler(0) + + assert.Nil(t, nt) + assert.Equal(t, core.ErrNotPositiveValue, err) +} + +func TestNewNumGoRoutineThrottler_ShouldWork(t *testing.T) { + t.Parallel() + + nt, err := throttler.NewNumGoRoutineThrottler(1) + + assert.NotNil(t, nt) + assert.Nil(t, err) +} + +func TestNumGoRoutineThrottler_CanProcessMessageWithZeroCounter(t *testing.T) { + t.Parallel() + + nt, _ := throttler.NewNumGoRoutineThrottler(1) + + assert.True(t, nt.CanProcess()) +} + +func TestNumGoRoutineThrottler_CanProcessMessageCounterEqualsMax(t *testing.T) { + t.Parallel() + + nt, _ := throttler.NewNumGoRoutineThrottler(1) + nt.StartProcessing() + + assert.False(t, nt.CanProcess()) +} + +func TestNumGoRoutineThrottler_CanProcessMessageCounterIsMaxLessThanOne(t *testing.T) { + t.Parallel() + + max := int32(45) + nt, _ := throttler.NewNumGoRoutineThrottler(max) + + for i := int32(0); i < max-1; i++ { + nt.StartProcessing() + } + + assert.True(t, nt.CanProcess()) +} + +func TestNumGoRoutineThrottler_CanProcessMessageCounterIsMax(t *testing.T) { + t.Parallel() + + max := int32(45) + nt, _ := throttler.NewNumGoRoutineThrottler(max) + + for i := int32(0); i < max; i++ { + nt.StartProcessing() + } + + assert.False(t, nt.CanProcess()) +} + +func TestNumGoRoutineThrottler_CanProcessMessageCounterIsMaxLessOneFromEndProcessMessage(t *testing.T) { + t.Parallel() + + max := int32(45) + nt, _ := throttler.NewNumGoRoutineThrottler(max) + + for i := int32(0); i < max; i++ { + nt.StartProcessing() + } + nt.EndProcessing() + + assert.True(t, nt.CanProcess()) +} diff --git a/p2p/libp2p/netMessenger.go b/p2p/libp2p/netMessenger.go index 36db5498907..1b570adce01 100644 --- a/p2p/libp2p/netMessenger.go +++ b/p2p/libp2p/netMessenger.go @@ -31,6 +31,8 @@ const DirectSendID = protocol.ID("/directsend/1.0.0") const refreshPeersOnTopic = time.Second * 60 const ttlPeersOnTopic = time.Second * 120 +const pubsubTimeCacheDuration = 10 * time.Minute + //TODO remove the header size of the message when commit d3c5ecd3a3e884206129d9f2a9a4ddfd5e7c8951 from // https://github.com/libp2p/go-libp2p-pubsub/pull/189/commits will be part of a new release var messageHeader = 64 * 1024 //64kB @@ -178,6 +180,8 @@ func createPubSub(ctxProvider *Libp2pContext, withSigning bool) (*pubsub.PubSub, pubsub.WithMessageSigning(withSigning), } + pubsub.TimeCacheDuration = pubsubTimeCacheDuration + ps, err := pubsub.NewGossipSub(ctxProvider.Context(), ctxProvider.Host(), optsPS...) 
if err != nil { return nil, err diff --git a/process/dataValidators/export_test.go b/process/dataValidators/export_test.go new file mode 100644 index 00000000000..29ec2b4464f --- /dev/null +++ b/process/dataValidators/export_test.go @@ -0,0 +1,3 @@ +package dataValidators + +const MaxNonceDeltaAllowed = maxNonceDeltaAllowed diff --git a/process/dataValidators/txValidator.go b/process/dataValidators/txValidator.go index 18dcb188133..3ad8a470bc8 100644 --- a/process/dataValidators/txValidator.go +++ b/process/dataValidators/txValidator.go @@ -12,6 +12,8 @@ import ( var log = logger.DefaultLogger() +const maxNonceDeltaAllowed = 100 + // TxValidator represents a tx handler validator that doesn't check the validity of provided txHandler type TxValidator struct { accounts state.AccountsAdapter @@ -55,7 +57,9 @@ func (tv *TxValidator) IsTxValidForProcessing(interceptedTx process.TxValidatorH accountNonce := accountHandler.GetNonce() txNonce := interceptedTx.Nonce() lowerNonceInTx := txNonce < accountNonce - if lowerNonceInTx { + veryHighNonceInTx := txNonce > accountNonce+maxNonceDeltaAllowed + isTxRejected := lowerNonceInTx || veryHighNonceInTx + if isTxRejected { tv.rejectedTxs++ return false } diff --git a/process/dataValidators/txValidator_test.go b/process/dataValidators/txValidator_test.go index de97fe258da..5d50dea7798 100644 --- a/process/dataValidators/txValidator_test.go +++ b/process/dataValidators/txValidator_test.go @@ -122,6 +122,24 @@ func TestTxValidator_IsTxValidForProcessingAccountNonceIsGreaterThanTxNonceShoul assert.Equal(t, false, result) } +func TestTxValidator_IsTxValidForProcessingTxNonceIsTooHigh(t *testing.T) { + t.Parallel() + + accountNonce := uint64(100) + txNonce := accountNonce + dataValidators.MaxNonceDeltaAllowed + 1 + + accounts := getAccAdapter(accountNonce, big.NewInt(0)) + shardCoordinator := createMockCoordinator("_", 0) + txValidator, err := dataValidators.NewTxValidator(accounts, shardCoordinator) + assert.Nil(t, err) + + addressMock := mock.NewAddressMock([]byte("address")) + txValidatorHandler := getTxValidatorHandler(0, txNonce, addressMock, big.NewInt(0)) + + result := txValidator.IsTxValidForProcessing(txValidatorHandler) + assert.Equal(t, false, result) +} + func TestTxValidator_IsTxValidForProcessingAccountBalanceIsLessThanTxTotalValueShouldReturnFalse(t *testing.T) { t.Parallel() diff --git a/process/errors.go b/process/errors.go index d6833eebfcd..e81978371e0 100644 --- a/process/errors.go +++ b/process/errors.go @@ -91,15 +91,9 @@ var ErrNilRootHash = errors.New("root hash is nil") // ErrWrongNonceInBlock signals the nonce in block is different than expected nonce var ErrWrongNonceInBlock = errors.New("wrong nonce in block") -// ErrWrongNonceInOtherChainBlock signals the nonce in block is different than expected nonce -var ErrWrongNonceInOtherChainBlock = errors.New("wrong nonce in other chain block") - // ErrBlockHashDoesNotMatch signals that header hash does not match with the previous one var ErrBlockHashDoesNotMatch = errors.New("block hash does not match") -// ErrBlockHashDoesNotMatchInOtherChainBlock signals that header hash does not match with the previous one -var ErrBlockHashDoesNotMatchInOtherChainBlock = errors.New("block hash does not match in other chain block") - // ErrMissingTransaction signals that one transaction is missing var ErrMissingTransaction = errors.New("missing transaction") @@ -112,9 +106,6 @@ var ErrUnmarshalWithoutSuccess = errors.New("unmarshal without success") // ErrRootStateDoesNotMatch signals that root state 
does not match var ErrRootStateDoesNotMatch = errors.New("root state does not match") -// ErrRootStateDoesNotMatchInOtherChainBlock signals that root state does not match -var ErrRootStateDoesNotMatchInOtherChainBlock = errors.New("root state does not match in other chain block") - // ErrAccountStateDirty signals that the accounts were modified before starting the current modification var ErrAccountStateDirty = errors.New("accountState was dirty before starting to change") @@ -301,18 +292,12 @@ var ErrNilPrevRandSeed = errors.New("provided previous rand seed is nil") // ErrNilRequestHeaderHandlerByNonce signals that a nil header request handler by nonce func was provided var ErrNilRequestHeaderHandlerByNonce = errors.New("nil request header handler by nonce") -// ErrLowerRoundInOtherChainBlock signals that header round too low for processing it -var ErrLowerRoundInOtherChainBlock = errors.New("header round is lower than last committed in other chain block") - // ErrLowerRoundInBlock signals that a header round is too low for processing it var ErrLowerRoundInBlock = errors.New("header round is lower than last committed") // ErrRandSeedDoesNotMatch signals that random seed does not match with the previous one var ErrRandSeedDoesNotMatch = errors.New("random seed do not match") -// ErrRandSeedDoesNotMatchInOtherChainBlock signals that seed does not match with the previous one -var ErrRandSeedDoesNotMatchInOtherChainBlock = errors.New("random seed does not match in other chain block") - // ErrHeaderNotFinal signals that header is not final and it should be var ErrHeaderNotFinal = errors.New("header in metablock is not final") @@ -444,3 +429,9 @@ var ErrNilTxsPoolsCleaner = errors.New("nil transactions pools cleaner") // ErrZeroMaxCleanTime signal that cleaning time for pools its less or equals with 0 var ErrZeroMaxCleanTime = errors.New("cleaning time is equal or less than zero") + +// ErrNilThrottler signals that a nil throttler has been provided +var ErrNilThrottler = errors.New("nil throttler") + +// ErrSystemBusy signals that the system is busy +var ErrSystemBusy = errors.New("system busy") diff --git a/process/factory/shard/interceptorsContainerFactory.go b/process/factory/shard/interceptorsContainerFactory.go index 302eed9633b..d642fdc3b97 100644 --- a/process/factory/shard/interceptorsContainerFactory.go +++ b/process/factory/shard/interceptorsContainerFactory.go @@ -1,6 +1,7 @@ package shard import ( + "github.com/ElrondNetwork/elrond-go/core/throttler" "github.com/ElrondNetwork/elrond-go/crypto" "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/dataRetriever" @@ -16,19 +17,22 @@ import ( "github.com/ElrondNetwork/elrond-go/sharding" ) +const maxGoRoutinexTxInterceptor = 100 + type interceptorsContainerFactory struct { - accounts state.AccountsAdapter - shardCoordinator sharding.Coordinator - messenger process.TopicHandler - store dataRetriever.StorageService - marshalizer marshal.Marshalizer - hasher hashing.Hasher - keyGen crypto.KeyGenerator - singleSigner crypto.SingleSigner - multiSigner crypto.MultiSigner - dataPool dataRetriever.PoolsHolder - addrConverter state.AddressConverter - chronologyValidator process.ChronologyValidator + accounts state.AccountsAdapter + shardCoordinator sharding.Coordinator + messenger process.TopicHandler + store dataRetriever.StorageService + marshalizer marshal.Marshalizer + hasher hashing.Hasher + keyGen crypto.KeyGenerator + singleSigner crypto.SingleSigner + multiSigner crypto.MultiSigner + dataPool 
dataRetriever.PoolsHolder + addrConverter state.AddressConverter + chronologyValidator process.ChronologyValidator + txInterceptorThrottler process.InterceptorThrottler } // NewInterceptorsContainerFactory is responsible for creating a new interceptors factory object @@ -83,19 +87,25 @@ func NewInterceptorsContainerFactory( return nil, process.ErrNilChronologyValidator } + txInterceptorThrottler, err := throttler.NewNumGoRoutineThrottler(maxGoRoutinexTxInterceptor) + if err != nil { + return nil, err + } + return &interceptorsContainerFactory{ - accounts: accounts, - shardCoordinator: shardCoordinator, - messenger: messenger, - store: store, - marshalizer: marshalizer, - hasher: hasher, - keyGen: keyGen, - singleSigner: singleSigner, - multiSigner: multiSigner, - dataPool: dataPool, - addrConverter: addrConverter, - chronologyValidator: chronologyValidator, + accounts: accounts, + shardCoordinator: shardCoordinator, + messenger: messenger, + store: store, + marshalizer: marshalizer, + hasher: hasher, + keyGen: keyGen, + singleSigner: singleSigner, + multiSigner: multiSigner, + dataPool: dataPool, + addrConverter: addrConverter, + chronologyValidator: chronologyValidator, + txInterceptorThrottler: txInterceptorThrottler, }, nil } @@ -229,7 +239,9 @@ func (icf *interceptorsContainerFactory) createOneTxInterceptor(identifier strin icf.hasher, icf.singleSigner, icf.keyGen, - icf.shardCoordinator) + icf.shardCoordinator, + icf.txInterceptorThrottler, + ) if err != nil { return nil, err diff --git a/process/interface.go b/process/interface.go index c4abb19cb6a..368546322d0 100644 --- a/process/interface.go +++ b/process/interface.go @@ -365,3 +365,11 @@ type PoolsCleaner interface { NumRemovedTxs() uint64 IsInterfaceNil() bool } + +// InterceptorThrottler can determine if the a new joc can or cannot be started +type InterceptorThrottler interface { + CanProcess() bool + StartProcessing() + EndProcessing() + IsInterfaceNil() bool +} diff --git a/process/mock/interceptorThrottlerStub.go b/process/mock/interceptorThrottlerStub.go new file mode 100644 index 00000000000..a2013e23adc --- /dev/null +++ b/process/mock/interceptorThrottlerStub.go @@ -0,0 +1,36 @@ +package mock + +import "sync/atomic" + +type InterceptorThrottlerStub struct { + CanProcessCalled func() bool + startProcessingCount int32 + endProcessingCount int32 +} + +func (it *InterceptorThrottlerStub) CanProcess() bool { + return it.CanProcessCalled() +} + +func (it *InterceptorThrottlerStub) StartProcessing() { + atomic.AddInt32(&it.startProcessingCount, 1) +} + +func (it *InterceptorThrottlerStub) EndProcessing() { + atomic.AddInt32(&it.endProcessingCount, 1) +} + +func (it *InterceptorThrottlerStub) StartProcessingCount() int32 { + return atomic.LoadInt32(&it.startProcessingCount) +} + +func (it *InterceptorThrottlerStub) EndProcessingCount() int32 { + return atomic.LoadInt32(&it.endProcessingCount) +} + +func (it *InterceptorThrottlerStub) IsInterfaceNil() bool { + if it == nil { + return true + } + return false +} diff --git a/process/transaction/interceptor.go b/process/transaction/interceptor.go index c7bfef185bb..5fe01fcf37f 100644 --- a/process/transaction/interceptor.go +++ b/process/transaction/interceptor.go @@ -25,6 +25,7 @@ type TxInterceptor struct { keyGen crypto.KeyGenerator shardCoordinator sharding.Coordinator broadcastCallbackHandler func(buffToSend []byte) + throttler process.InterceptorThrottler } // NewTxInterceptor hooks a new interceptor for transactions @@ -37,6 +38,7 @@ func NewTxInterceptor( singleSigner 
crypto.SingleSigner, keyGen crypto.KeyGenerator, shardCoordinator sharding.Coordinator, + throttler process.InterceptorThrottler, ) (*TxInterceptor, error) { if marshalizer == nil || marshalizer.IsInterfaceNil() { @@ -63,6 +65,9 @@ func NewTxInterceptor( if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { return nil, process.ErrNilShardCoordinator } + if throttler == nil || throttler.IsInterfaceNil() { + return nil, process.ErrNilThrottler + } txIntercept := &TxInterceptor{ marshalizer: marshalizer, @@ -73,6 +78,7 @@ func NewTxInterceptor( singleSigner: singleSigner, keyGen: keyGen, shardCoordinator: shardCoordinator, + throttler: throttler, } return txIntercept, nil @@ -81,10 +87,17 @@ func NewTxInterceptor( // ProcessReceivedMessage will be the callback func from the p2p.Messenger and will be called each time a new message was received // (for the topic this validator was registered to) func (txi *TxInterceptor) ProcessReceivedMessage(message p2p.MessageP2P) error { + canProcess := txi.throttler.CanProcess() + if !canProcess { + return process.ErrSystemBusy + } + + txi.throttler.StartProcessing() + defer txi.throttler.EndProcessing() + if message == nil || message.IsInterfaceNil() { return process.ErrNilMessage } - if message.Data() == nil { return process.ErrNilDataToProcess } diff --git a/process/transaction/interceptor_test.go b/process/transaction/interceptor_test.go index b67e6dee4f8..ac9352aca02 100644 --- a/process/transaction/interceptor_test.go +++ b/process/transaction/interceptor_test.go @@ -37,6 +37,7 @@ func TestNewTxInterceptor_NilMarshalizerShouldErr(t *testing.T) { oneSharder := mock.NewOneShardCoordinatorMock() txValidator := createMockedTxValidator() signer := &mock.SignerMock{} + throttler := &mock.InterceptorThrottlerStub{} txi, err := transaction.NewTxInterceptor( nil, @@ -46,7 +47,9 @@ func TestNewTxInterceptor_NilMarshalizerShouldErr(t *testing.T) { mock.HasherMock{}, signer, keyGen, - oneSharder) + oneSharder, + throttler, + ) assert.Equal(t, process.ErrNilMarshalizer, err) assert.Nil(t, txi) @@ -60,6 +63,7 @@ func TestNewTxInterceptor_NilTransactionPoolShouldErr(t *testing.T) { oneSharder := mock.NewOneShardCoordinatorMock() txValidator := createMockedTxValidator() signer := &mock.SignerMock{} + throttler := &mock.InterceptorThrottlerStub{} txi, err := transaction.NewTxInterceptor( &mock.MarshalizerMock{}, @@ -69,7 +73,9 @@ func TestNewTxInterceptor_NilTransactionPoolShouldErr(t *testing.T) { mock.HasherMock{}, signer, keyGen, - oneSharder) + oneSharder, + throttler, + ) assert.Equal(t, process.ErrNilTxDataPool, err) assert.Nil(t, txi) @@ -83,6 +89,7 @@ func TestNewTxInterceptor_NilTxHandlerValidatorShouldErr(t *testing.T) { keyGen := &mock.SingleSignKeyGenMock{} oneSharder := mock.NewOneShardCoordinatorMock() signer := &mock.SignerMock{} + throttler := &mock.InterceptorThrottlerStub{} txi, err := transaction.NewTxInterceptor( &mock.MarshalizerMock{}, @@ -92,7 +99,9 @@ func TestNewTxInterceptor_NilTxHandlerValidatorShouldErr(t *testing.T) { mock.HasherMock{}, signer, keyGen, - oneSharder) + oneSharder, + throttler, + ) assert.Equal(t, process.ErrNilTxHandlerValidator, err) assert.Nil(t, txi) @@ -106,6 +115,7 @@ func TestNewTxInterceptor_NilAddressConverterShouldErr(t *testing.T) { oneSharder := mock.NewOneShardCoordinatorMock() txValidator := createMockedTxValidator() signer := &mock.SignerMock{} + throttler := &mock.InterceptorThrottlerStub{} txi, err := transaction.NewTxInterceptor( &mock.MarshalizerMock{}, @@ -115,7 +125,9 @@ func 
TestNewTxInterceptor_NilAddressConverterShouldErr(t *testing.T) { mock.HasherMock{}, signer, keyGen, - oneSharder) + oneSharder, + throttler, + ) assert.Equal(t, process.ErrNilAddressConverter, err) assert.Nil(t, txi) @@ -130,6 +142,7 @@ func TestNewTxInterceptor_NilHasherShouldErr(t *testing.T) { oneSharder := mock.NewOneShardCoordinatorMock() txValidator := createMockedTxValidator() signer := &mock.SignerMock{} + throttler := &mock.InterceptorThrottlerStub{} txi, err := transaction.NewTxInterceptor( &mock.MarshalizerMock{}, @@ -139,7 +152,9 @@ func TestNewTxInterceptor_NilHasherShouldErr(t *testing.T) { nil, signer, keyGen, - oneSharder) + oneSharder, + throttler, + ) assert.Equal(t, process.ErrNilHasher, err) assert.Nil(t, txi) @@ -153,6 +168,7 @@ func TestNewTxInterceptor_NilSignerShouldErr(t *testing.T) { keyGen := &mock.SingleSignKeyGenMock{} oneSharder := mock.NewOneShardCoordinatorMock() txValidator := createMockedTxValidator() + throttler := &mock.InterceptorThrottlerStub{} txi, err := transaction.NewTxInterceptor( &mock.MarshalizerMock{}, @@ -162,7 +178,9 @@ func TestNewTxInterceptor_NilSignerShouldErr(t *testing.T) { mock.HasherMock{}, nil, keyGen, - oneSharder) + oneSharder, + throttler, + ) assert.Equal(t, process.ErrNilSingleSigner, err) assert.Nil(t, txi) @@ -176,6 +194,7 @@ func TestNewTxInterceptor_NilKeyGenShouldErr(t *testing.T) { oneSharder := mock.NewOneShardCoordinatorMock() txValidator := createMockedTxValidator() signer := &mock.SignerMock{} + throttler := &mock.InterceptorThrottlerStub{} txi, err := transaction.NewTxInterceptor( &mock.MarshalizerMock{}, @@ -185,7 +204,9 @@ func TestNewTxInterceptor_NilKeyGenShouldErr(t *testing.T) { mock.HasherMock{}, signer, nil, - oneSharder) + oneSharder, + throttler, + ) assert.Equal(t, process.ErrNilKeyGen, err) assert.Nil(t, txi) @@ -199,6 +220,7 @@ func TestNewTxInterceptor_NilShardCoordinatorShouldErr(t *testing.T) { keyGen := &mock.SingleSignKeyGenMock{} txValidator := createMockedTxValidator() signer := &mock.SignerMock{} + throttler := &mock.InterceptorThrottlerStub{} txi, err := transaction.NewTxInterceptor( &mock.MarshalizerMock{}, @@ -208,12 +230,40 @@ func TestNewTxInterceptor_NilShardCoordinatorShouldErr(t *testing.T) { mock.HasherMock{}, signer, keyGen, - nil) + nil, + throttler, + ) assert.Equal(t, process.ErrNilShardCoordinator, err) assert.Nil(t, txi) } +func TestNewTxInterceptor_NilThrottlerShouldErr(t *testing.T) { + t.Parallel() + + txPool := &mock.ShardedDataStub{} + addrConv := &mock.AddressConverterMock{} + keyGen := &mock.SingleSignKeyGenMock{} + txValidator := createMockedTxValidator() + signer := &mock.SignerMock{} + oneSharder := mock.NewOneShardCoordinatorMock() + + txi, err := transaction.NewTxInterceptor( + &mock.MarshalizerMock{}, + txPool, + txValidator, + addrConv, + mock.HasherMock{}, + signer, + keyGen, + oneSharder, + nil, + ) + + assert.Equal(t, process.ErrNilThrottler, err) + assert.Nil(t, txi) +} + func TestNewTxInterceptor_OkValsShouldWork(t *testing.T) { t.Parallel() @@ -223,6 +273,7 @@ func TestNewTxInterceptor_OkValsShouldWork(t *testing.T) { oneSharder := mock.NewOneShardCoordinatorMock() txValidator := createMockedTxValidator() signer := &mock.SignerMock{} + throttler := &mock.InterceptorThrottlerStub{} txi, err := transaction.NewTxInterceptor( &mock.MarshalizerMock{}, @@ -232,7 +283,9 @@ func TestNewTxInterceptor_OkValsShouldWork(t *testing.T) { mock.HasherMock{}, signer, keyGen, - oneSharder) + oneSharder, + throttler, + ) assert.Nil(t, err) assert.NotNil(t, txi) @@ -240,6 +293,40 
@@ func TestNewTxInterceptor_OkValsShouldWork(t *testing.T) { //------- ProcessReceivedMessage +func TestTransactionInterceptor_ProcessReceivedMessageSystemBusyShouldErr(t *testing.T) { + t.Parallel() + + txPool := &mock.ShardedDataStub{} + addrConv := &mock.AddressConverterMock{} + keyGen := &mock.SingleSignKeyGenMock{} + oneSharder := mock.NewOneShardCoordinatorMock() + txValidator := createMockedTxValidator() + signer := &mock.SignerMock{} + throttler := &mock.InterceptorThrottlerStub{ + CanProcessCalled: func() bool { + return false + }, + } + + txi, _ := transaction.NewTxInterceptor( + &mock.MarshalizerMock{}, + txPool, + txValidator, + addrConv, + mock.HasherMock{}, + signer, + keyGen, + oneSharder, + throttler, + ) + + err := txi.ProcessReceivedMessage(nil) + + assert.Equal(t, process.ErrSystemBusy, err) + assert.Equal(t, int32(0), throttler.StartProcessingCount()) + assert.Equal(t, int32(0), throttler.EndProcessingCount()) +} + func TestTransactionInterceptor_ProcessReceivedMessageNilMesssageShouldErr(t *testing.T) { t.Parallel() @@ -249,6 +336,11 @@ func TestTransactionInterceptor_ProcessReceivedMessageNilMesssageShouldErr(t *te oneSharder := mock.NewOneShardCoordinatorMock() txValidator := createMockedTxValidator() signer := &mock.SignerMock{} + throttler := &mock.InterceptorThrottlerStub{ + CanProcessCalled: func() bool { + return true + }, + } txi, _ := transaction.NewTxInterceptor( &mock.MarshalizerMock{}, @@ -258,11 +350,15 @@ func TestTransactionInterceptor_ProcessReceivedMessageNilMesssageShouldErr(t *te mock.HasherMock{}, signer, keyGen, - oneSharder) + oneSharder, + throttler, + ) err := txi.ProcessReceivedMessage(nil) assert.Equal(t, process.ErrNilMessage, err) + assert.Equal(t, int32(1), throttler.EndProcessingCount()) + assert.Equal(t, int32(1), throttler.StartProcessingCount()) } func TestTransactionInterceptor_ProcessReceivedMessageMilMessageDataShouldErr(t *testing.T) { @@ -274,6 +370,11 @@ func TestTransactionInterceptor_ProcessReceivedMessageMilMessageDataShouldErr(t oneSharder := mock.NewOneShardCoordinatorMock() txValidator := createMockedTxValidator() signer := &mock.SignerMock{} + throttler := &mock.InterceptorThrottlerStub{ + CanProcessCalled: func() bool { + return true + }, + } txi, _ := transaction.NewTxInterceptor( &mock.MarshalizerMock{}, @@ -283,13 +384,17 @@ func TestTransactionInterceptor_ProcessReceivedMessageMilMessageDataShouldErr(t mock.HasherMock{}, signer, keyGen, - oneSharder) + oneSharder, + throttler, + ) msg := &mock.P2PMessageMock{} err := txi.ProcessReceivedMessage(msg) assert.Equal(t, process.ErrNilDataToProcess, err) + assert.Equal(t, int32(1), throttler.EndProcessingCount()) + assert.Equal(t, int32(1), throttler.StartProcessingCount()) } func TestTransactionInterceptor_ProcessReceivedMessageMarshalizerFailsAtUnmarshalingShouldErr(t *testing.T) { @@ -303,6 +408,11 @@ func TestTransactionInterceptor_ProcessReceivedMessageMarshalizerFailsAtUnmarsha oneSharder := mock.NewOneShardCoordinatorMock() txValidator := createMockedTxValidator() signer := &mock.SignerMock{} + throttler := &mock.InterceptorThrottlerStub{ + CanProcessCalled: func() bool { + return true + }, + } txi, _ := transaction.NewTxInterceptor( &mock.MarshalizerStub{ @@ -316,7 +426,9 @@ func TestTransactionInterceptor_ProcessReceivedMessageMarshalizerFailsAtUnmarsha mock.HasherMock{}, signer, keyGen, - oneSharder) + oneSharder, + throttler, + ) msg := &mock.P2PMessageMock{ DataField: make([]byte, 0), @@ -325,6 +437,8 @@ func 
TestTransactionInterceptor_ProcessReceivedMessageMarshalizerFailsAtUnmarsha err := txi.ProcessReceivedMessage(msg) assert.Equal(t, errMarshalizer, err) + assert.Equal(t, int32(1), throttler.EndProcessingCount()) + assert.Equal(t, int32(1), throttler.StartProcessingCount()) } func TestTransactionInterceptor_ProcessReceivedMessageNoTransactionInMessageShouldErr(t *testing.T) { @@ -336,6 +450,11 @@ func TestTransactionInterceptor_ProcessReceivedMessageNoTransactionInMessageShou oneSharder := mock.NewOneShardCoordinatorMock() txValidator := createMockedTxValidator() signer := &mock.SignerMock{} + throttler := &mock.InterceptorThrottlerStub{ + CanProcessCalled: func() bool { + return true + }, + } txi, _ := transaction.NewTxInterceptor( &mock.MarshalizerStub{ @@ -352,7 +471,9 @@ func TestTransactionInterceptor_ProcessReceivedMessageNoTransactionInMessageShou mock.HasherMock{}, signer, keyGen, - oneSharder) + oneSharder, + throttler, + ) msg := &mock.P2PMessageMock{ DataField: make([]byte, 0), @@ -361,6 +482,8 @@ func TestTransactionInterceptor_ProcessReceivedMessageNoTransactionInMessageShou err := txi.ProcessReceivedMessage(msg) assert.Equal(t, process.ErrNoTransactionInMessage, err) + assert.Equal(t, int32(1), throttler.EndProcessingCount()) + assert.Equal(t, int32(1), throttler.StartProcessingCount()) } func TestTransactionInterceptor_ProcessReceivedMessageIntegrityFailedShouldErr(t *testing.T) { @@ -374,6 +497,11 @@ func TestTransactionInterceptor_ProcessReceivedMessageIntegrityFailedShouldErr(t oneSharder := mock.NewOneShardCoordinatorMock() txValidator := createMockedTxValidator() signer := &mock.SignerMock{} + throttler := &mock.InterceptorThrottlerStub{ + CanProcessCalled: func() bool { + return true + }, + } txi, _ := transaction.NewTxInterceptor( marshalizer, @@ -383,7 +511,9 @@ func TestTransactionInterceptor_ProcessReceivedMessageIntegrityFailedShouldErr(t mock.HasherMock{}, signer, keyGen, - oneSharder) + oneSharder, + throttler, + ) txNewer := &dataTransaction.Transaction{ Nonce: 1, @@ -405,6 +535,8 @@ func TestTransactionInterceptor_ProcessReceivedMessageIntegrityFailedShouldErr(t err := txi.ProcessReceivedMessage(msg) assert.Equal(t, process.ErrNilSignature, err) + assert.Equal(t, int32(1), throttler.EndProcessingCount()) + assert.Equal(t, int32(1), throttler.StartProcessingCount()) } func TestTransactionInterceptor_ProcessReceivedMessageIntegrityFailedWithTwoTxsShouldErrAndFilter(t *testing.T) { @@ -432,6 +564,11 @@ func TestTransactionInterceptor_ProcessReceivedMessageIntegrityFailedWithTwoTxsS return nil }, } + throttler := &mock.InterceptorThrottlerStub{ + CanProcessCalled: func() bool { + return true + }, + } txi, _ := transaction.NewTxInterceptor( marshalizer, @@ -441,7 +578,9 @@ func TestTransactionInterceptor_ProcessReceivedMessageIntegrityFailedWithTwoTxsS mock.HasherMock{}, signer, keyGen, - oneSharder) + oneSharder, + throttler, + ) tx1 := &dataTransaction.Transaction{ Nonce: 1, @@ -485,6 +624,8 @@ func TestTransactionInterceptor_ProcessReceivedMessageIntegrityFailedWithTwoTxsS txRecovered := &dataTransaction.Transaction{} _ = marshalizer.Unmarshal(txRecovered, txBuffRecovered[0]) assert.Equal(t, tx2, txRecovered) + assert.Equal(t, int32(1), throttler.EndProcessingCount()) + assert.Equal(t, int32(1), throttler.StartProcessingCount()) } func TestTransactionInterceptor_ProcessReceivedMessageVerifySigFailsShouldErr(t *testing.T) { @@ -509,6 +650,11 @@ func TestTransactionInterceptor_ProcessReceivedMessageVerifySigFailsShouldErr(t return errExpected }, } + throttler := 
&mock.InterceptorThrottlerStub{ + CanProcessCalled: func() bool { + return true + }, + } txi, _ := transaction.NewTxInterceptor( marshalizer, @@ -518,7 +664,9 @@ func TestTransactionInterceptor_ProcessReceivedMessageVerifySigFailsShouldErr(t mock.HasherMock{}, signer, keyGen, - oneSharder) + oneSharder, + throttler, + ) txNewer := &dataTransaction.Transaction{ Nonce: 1, @@ -540,6 +688,8 @@ func TestTransactionInterceptor_ProcessReceivedMessageVerifySigFailsShouldErr(t err := txi.ProcessReceivedMessage(msg) assert.Equal(t, errExpected, err) + assert.Equal(t, int32(1), throttler.EndProcessingCount()) + assert.Equal(t, int32(1), throttler.StartProcessingCount()) } func TestTransactionInterceptor_ProcessReceivedMessageOkValsSameShardShouldWork(t *testing.T) { @@ -567,6 +717,11 @@ func TestTransactionInterceptor_ProcessReceivedMessageOkValsSameShardShouldWork( return nil }, } + throttler := &mock.InterceptorThrottlerStub{ + CanProcessCalled: func() bool { + return true + }, + } txi, _ := transaction.NewTxInterceptor( marshalizer, @@ -576,7 +731,9 @@ func TestTransactionInterceptor_ProcessReceivedMessageOkValsSameShardShouldWork( mock.HasherMock{}, signer, keyGen, - oneSharder) + oneSharder, + throttler, + ) txNewer := &dataTransaction.Transaction{ Nonce: 1, @@ -610,6 +767,8 @@ func TestTransactionInterceptor_ProcessReceivedMessageOkValsSameShardShouldWork( case <-time.After(durTimeout): assert.Fail(t, "timeout while waiting for tx to be inserted in the pool") } + assert.Equal(t, int32(1), throttler.EndProcessingCount()) + assert.Equal(t, int32(1), throttler.StartProcessingCount()) } func TestTransactionInterceptor_ProcessReceivedMessageOkValsOtherShardsShouldWork(t *testing.T) { @@ -636,6 +795,11 @@ func TestTransactionInterceptor_ProcessReceivedMessageOkValsOtherShardsShouldWor return nil }, } + throttler := &mock.InterceptorThrottlerStub{ + CanProcessCalled: func() bool { + return true + }, + } txi, _ := transaction.NewTxInterceptor( marshalizer, @@ -645,7 +809,9 @@ func TestTransactionInterceptor_ProcessReceivedMessageOkValsOtherShardsShouldWor mock.HasherMock{}, signer, keyGen, - multiSharder) + multiSharder, + throttler, + ) txNewer := &dataTransaction.Transaction{ Nonce: 1, @@ -678,6 +844,8 @@ func TestTransactionInterceptor_ProcessReceivedMessageOkValsOtherShardsShouldWor assert.Fail(t, "should have not add tx in pool") case <-time.After(durTimeout): } + assert.Equal(t, int32(1), throttler.EndProcessingCount()) + assert.Equal(t, int32(1), throttler.StartProcessingCount()) } func TestTransactionInterceptor_ProcessReceivedMessageTxNotValidShouldNotAdd(t *testing.T) { @@ -716,6 +884,11 @@ func TestTransactionInterceptor_ProcessReceivedMessageTxNotValidShouldNotAdd(t * return nil }, } + throttler := &mock.InterceptorThrottlerStub{ + CanProcessCalled: func() bool { + return true + }, + } txi, _ := transaction.NewTxInterceptor( marshalizer, @@ -725,7 +898,9 @@ func TestTransactionInterceptor_ProcessReceivedMessageTxNotValidShouldNotAdd(t * mock.HasherMock{}, signer, keyGen, - multiSharder) + multiSharder, + throttler, + ) txNewer := &dataTransaction.Transaction{ Nonce: 1, @@ -758,4 +933,6 @@ func TestTransactionInterceptor_ProcessReceivedMessageTxNotValidShouldNotAdd(t * assert.Fail(t, "should have not add tx in pool") case <-time.After(durTimeout): } + assert.Equal(t, int32(1), throttler.EndProcessingCount()) + assert.Equal(t, int32(1), throttler.StartProcessingCount()) } From 0aa1830634f9ecc9d9b0681f1cdbbdcf07aea49c Mon Sep 17 00:00:00 2001 From: iulianpascalau Date: Wed, 2 Oct 2019 16:28:26 
+0300 Subject: [PATCH 178/234] fix after merge --- .../shard/interceptorsContainerFactory.go | 29 +++++++++---------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/process/factory/shard/interceptorsContainerFactory.go b/process/factory/shard/interceptorsContainerFactory.go index b591a916e9d..c48556a7779 100644 --- a/process/factory/shard/interceptorsContainerFactory.go +++ b/process/factory/shard/interceptorsContainerFactory.go @@ -21,19 +21,19 @@ import ( const maxGoRoutinexTxInterceptor = 100 type interceptorsContainerFactory struct { - accounts state.AccountsAdapter - shardCoordinator sharding.Coordinator - messenger process.TopicHandler - store dataRetriever.StorageService - marshalizer marshal.Marshalizer - hasher hashing.Hasher - keyGen crypto.KeyGenerator - singleSigner crypto.SingleSigner - multiSigner crypto.MultiSigner - dataPool dataRetriever.PoolsHolder - addrConverter state.AddressConverter - nodesCoordinator sharding.NodesCoordinator - txInterceptorThrottler process.InterceptorThrottler + accounts state.AccountsAdapter + shardCoordinator sharding.Coordinator + messenger process.TopicHandler + store dataRetriever.StorageService + marshalizer marshal.Marshalizer + hasher hashing.Hasher + keyGen crypto.KeyGenerator + singleSigner crypto.SingleSigner + multiSigner crypto.MultiSigner + dataPool dataRetriever.PoolsHolder + addrConverter state.AddressConverter + nodesCoordinator sharding.NodesCoordinator + txInterceptorThrottler process.InterceptorThrottler } // NewInterceptorsContainerFactory is responsible for creating a new interceptors factory object @@ -96,7 +96,7 @@ func NewInterceptorsContainerFactory( return &interceptorsContainerFactory{ accounts: accounts, shardCoordinator: shardCoordinator, - nodesCoordinator: nodesCoordinator, + nodesCoordinator: nodesCoordinator, messenger: messenger, store: store, marshalizer: marshalizer, @@ -106,7 +106,6 @@ func NewInterceptorsContainerFactory( multiSigner: multiSigner, dataPool: dataPool, addrConverter: addrConverter, - chronologyValidator: chronologyValidator, txInterceptorThrottler: txInterceptorThrottler, }, nil } From 4da4c7cf50ca677d7a492ae7d173f0858e55823a Mon Sep 17 00:00:00 2001 From: miiu96 Date: Wed, 2 Oct 2019 16:30:42 +0300 Subject: [PATCH 179/234] EN-4222 : refactor MetaProcessor move all constructor parameters in a argument struct --- cmd/node/factory/structs.go | 40 +- .../smartContract/testInitilalizer.go | 58 +- integrationTests/testProcessorNode.go | 59 +- integrationTests/testSyncNode.go | 61 +- process/block/argProcessor.go | 7 + process/block/export_test.go | 19 - process/block/metablock.go | 73 +- process/block/metablock_test.go | 1194 +++++------------ 8 files changed, 506 insertions(+), 1005 deletions(-) diff --git a/cmd/node/factory/structs.go b/cmd/node/factory/structs.go index 7badfc223af..2a9da1f3121 100644 --- a/cmd/node/factory/structs.go +++ b/cmd/node/factory/structs.go @@ -518,7 +518,6 @@ func ProcessComponentsFactory(args *processComponentsFactoryArgs) (*Process, err args.state, forkDetector, shardsGenesisBlocks, - args.nodesConfig, args.coreServiceContainer, ) @@ -1441,7 +1440,6 @@ func newBlockProcessorAndTracker( state *State, forkDetector process.ForkDetector, shardsGenesisBlocks map[uint32]data.HeaderHandler, - nodesConfig *sharding.NodesSetup, coreServiceContainer serviceContainer.Core, ) (process.BlockProcessor, process.BlocksTracker, error) { @@ -1470,7 +1468,6 @@ func newBlockProcessorAndTracker( return nil, nil, err } - // TODO: remove nodesConfig as no longer needed with 
nodes coordinator available if shardCoordinator.SelfId() < shardCoordinator.NumberOfShards() { return newShardBlockProcessorAndTracker( resolversFinder, @@ -1482,7 +1479,6 @@ func newBlockProcessorAndTracker( state, forkDetector, shardsGenesisBlocks, - nodesConfig, coreServiceContainer, ) } @@ -1514,7 +1510,6 @@ func newShardBlockProcessorAndTracker( state *State, forkDetector process.ForkDetector, shardsGenesisBlocks map[uint32]data.HeaderHandler, - nodesConfig *sharding.NodesSetup, coreServiceContainer serviceContainer.Core, ) (process.BlockProcessor, process.BlocksTracker, error) { argsParser, err := smartContract.NewAtArgumentParser() @@ -1745,21 +1740,26 @@ func newMetaBlockProcessorAndTracker( return nil, nil, err } - metaProcessor, err := block.NewMetaProcessor( - coreServiceContainer, - state.AccountsAdapter, - data.MetaDatapool, - forkDetector, - shardCoordinator, - nodesCoordinator, - specialAddressHandler, - core.Hasher, - core.Marshalizer, - data.Store, - shardsGenesisBlocks, - requestHandler, - core.Uint64ByteSliceConverter, - ) + argumentsBaseProcessor := block.ArgBaseProcessor{ + Accounts: state.AccountsAdapter, + ForkDetector: forkDetector, + Hasher: core.Hasher, + Marshalizer: core.Marshalizer, + Store: data.Store, + ShardCoordinator: shardCoordinator, + NodesCoordinator: nodesCoordinator, + SpecialAddressHandler: specialAddressHandler, + Uint64Converter: core.Uint64ByteSliceConverter, + StartHeaders: shardsGenesisBlocks, + RequestHandler: requestHandler, + Core: coreServiceContainer, + } + arguments := block.ArgMetaProcessor{ + ArgBaseProcessor: &argumentsBaseProcessor, + DataPool: data.MetaDatapool, + } + + metaProcessor, err := block.NewMetaProcessor(arguments) if err != nil { return nil, nil, errors.New("could not create block processor: " + err.Error()) } diff --git a/integrationTests/multiShard/smartContract/testInitilalizer.go b/integrationTests/multiShard/smartContract/testInitilalizer.go index 90024f575a8..e6764a0509f 100644 --- a/integrationTests/multiShard/smartContract/testInitilalizer.go +++ b/integrationTests/multiShard/smartContract/testInitilalizer.go @@ -760,35 +760,39 @@ func createMetaNetNode( requestHandler, _ := requestHandlers.NewMetaResolverRequestHandler(resolvers, factory.ShardHeadersForMetachainTopic, factory.MetachainBlocksTopic) genesisBlocks := createGenesisBlocks(shardCoordinator) - blkProc, _ := block.NewMetaProcessor( - &mock.ServiceContainerMock{}, - accntAdapter, - dPool, - &mock.ForkDetectorMock{ - AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeaders []data.HeaderHandler, finalHeadersHashes [][]byte) error { - return nil - }, - GetHighestFinalBlockNonceCalled: func() uint64 { - return 0 - }, - ProbableHighestNonceCalled: func() uint64 { - return 0 + + arguments := block.ArgMetaProcessor{ + ArgBaseProcessor: &block.ArgBaseProcessor{ + Accounts: accntAdapter, + ForkDetector: &mock.ForkDetectorMock{ + AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeaders []data.HeaderHandler, finalHeadersHashes [][]byte) error { + return nil + }, + GetHighestFinalBlockNonceCalled: func() uint64 { + return 0 + }, + ProbableHighestNonceCalled: func() uint64 { + return 0 + }, }, + Hasher: testHasher, + Marshalizer: testMarshalizer, + Store: store, + ShardCoordinator: shardCoordinator, + NodesCoordinator: nodesCoordinator, + SpecialAddressHandler: mock.NewSpecialAddressHandlerMock( + testAddressConverter, + shardCoordinator, + nodesCoordinator, + ), + 
Uint64Converter: uint64Converter, + StartHeaders: genesisBlocks, + RequestHandler: requestHandler, + Core: &mock.ServiceContainerMock{}, }, - shardCoordinator, - nodesCoordinator, - mock.NewSpecialAddressHandlerMock( - testAddressConverter, - shardCoordinator, - nodesCoordinator, - ), - testHasher, - testMarshalizer, - store, - genesisBlocks, - requestHandler, - uint64Converter, - ) + DataPool: dPool, + } + blkProc, _ := block.NewMetaProcessor(arguments) _ = tn.blkc.SetGenesisHeader(genesisBlocks[sharding.MetachainShardId]) diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index 860d15a6e0e..4f7b67bc7e0 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -443,42 +443,35 @@ func (tpn *TestProcessorNode) initBlockProcessor() { }, } + argumentsBase := &block.ArgBaseProcessor{ + Accounts: tpn.AccntState, + ForkDetector: tpn.ForkDetector, + Hasher: TestHasher, + Marshalizer: TestMarshalizer, + Store: tpn.Storage, + ShardCoordinator: tpn.ShardCoordinator, + NodesCoordinator: tpn.NodesCoordinator, + SpecialAddressHandler: tpn.SpecialAddressHandler, + Uint64Converter: TestUint64Converter, + StartHeaders: tpn.GenesisBlocks, + RequestHandler: tpn.RequestHandler, + Core: nil, + } + if tpn.ShardCoordinator.SelfId() == sharding.MetachainShardId { - tpn.BlockProcessor, err = block.NewMetaProcessor( - &mock.ServiceContainerMock{}, - tpn.AccntState, - tpn.MetaDataPool, - tpn.ForkDetector, - tpn.ShardCoordinator, - tpn.NodesCoordinator, - tpn.SpecialAddressHandler, - TestHasher, - TestMarshalizer, - tpn.Storage, - tpn.GenesisBlocks, - tpn.RequestHandler, - TestUint64Converter, - ) + arguments := block.ArgMetaProcessor{ + ArgBaseProcessor: argumentsBase, + DataPool: tpn.MetaDataPool, + } + + tpn.BlockProcessor, err = block.NewMetaProcessor(arguments) } else { arguments := block.ArgShardProcessor{ - ArgBaseProcessor: &block.ArgBaseProcessor{ - Accounts: tpn.AccntState, - ForkDetector: tpn.ForkDetector, - Hasher: TestHasher, - Marshalizer: TestMarshalizer, - Store: tpn.Storage, - ShardCoordinator: tpn.ShardCoordinator, - NodesCoordinator: tpn.NodesCoordinator, - SpecialAddressHandler: tpn.SpecialAddressHandler, - Uint64Converter: TestUint64Converter, - StartHeaders: tpn.GenesisBlocks, - RequestHandler: tpn.RequestHandler, - Core: nil, - }, - DataPool: tpn.ShardDataPool, - BlocksTracker: tpn.BlockTracker, - TxCoordinator: tpn.TxCoordinator, - TxsPoolsCleaner: &mock.TxPoolsCleanerMock{}, + ArgBaseProcessor: argumentsBase, + DataPool: tpn.ShardDataPool, + BlocksTracker: tpn.BlockTracker, + TxCoordinator: tpn.TxCoordinator, + TxsPoolsCleaner: &mock.TxPoolsCleanerMock{}, } tpn.BlockProcessor, err = block.NewShardProcessor(arguments) diff --git a/integrationTests/testSyncNode.go b/integrationTests/testSyncNode.go index 3b7cd83e355..fe122974f80 100644 --- a/integrationTests/testSyncNode.go +++ b/integrationTests/testSyncNode.go @@ -91,45 +91,40 @@ func (tpn *TestProcessorNode) initBlockProcessorWithSync() { }, } + argumentsBase := &block.ArgBaseProcessor{ + Accounts: tpn.AccntState, + ForkDetector: nil, + Hasher: TestHasher, + Marshalizer: TestMarshalizer, + Store: tpn.Storage, + ShardCoordinator: tpn.ShardCoordinator, + NodesCoordinator: tpn.NodesCoordinator, + SpecialAddressHandler: tpn.SpecialAddressHandler, + Uint64Converter: TestUint64Converter, + StartHeaders: tpn.GenesisBlocks, + RequestHandler: tpn.RequestHandler, + Core: nil, + } + if tpn.ShardCoordinator.SelfId() == sharding.MetachainShardId { tpn.ForkDetector, _ = 
sync.NewMetaForkDetector(tpn.Rounder) - tpn.BlockProcessor, err = block.NewMetaProcessor( - &mock.ServiceContainerMock{}, - tpn.AccntState, - tpn.MetaDataPool, - tpn.ForkDetector, - tpn.ShardCoordinator, - tpn.NodesCoordinator, - tpn.SpecialAddressHandler, - TestHasher, - TestMarshalizer, - tpn.Storage, - tpn.GenesisBlocks, - tpn.RequestHandler, - TestUint64Converter, - ) + argumentsBase.ForkDetector = tpn.ForkDetector + arguments := block.ArgMetaProcessor{ + ArgBaseProcessor: argumentsBase, + DataPool: tpn.MetaDataPool, + } + + tpn.BlockProcessor, err = block.NewMetaProcessor(arguments) } else { tpn.ForkDetector, _ = sync.NewShardForkDetector(tpn.Rounder) + argumentsBase.ForkDetector = tpn.ForkDetector arguments := block.ArgShardProcessor{ - ArgBaseProcessor: &block.ArgBaseProcessor{ - Accounts: tpn.AccntState, - ForkDetector: tpn.ForkDetector, - Hasher: TestHasher, - Marshalizer: TestMarshalizer, - Store: tpn.Storage, - ShardCoordinator: tpn.ShardCoordinator, - NodesCoordinator: tpn.NodesCoordinator, - SpecialAddressHandler: tpn.SpecialAddressHandler, - Uint64Converter: TestUint64Converter, - StartHeaders: tpn.GenesisBlocks, - RequestHandler: tpn.RequestHandler, - Core: nil, - }, - DataPool: tpn.ShardDataPool, - BlocksTracker: tpn.BlockTracker, - TxCoordinator: tpn.TxCoordinator, - TxsPoolsCleaner: &mock.TxPoolsCleanerMock{}, + ArgBaseProcessor: argumentsBase, + DataPool: tpn.ShardDataPool, + BlocksTracker: tpn.BlockTracker, + TxCoordinator: tpn.TxCoordinator, + TxsPoolsCleaner: &mock.TxPoolsCleanerMock{}, } tpn.BlockProcessor, err = block.NewShardProcessor(arguments) diff --git a/process/block/argProcessor.go b/process/block/argProcessor.go index 604adc57c8d..1464ce29c91 100644 --- a/process/block/argProcessor.go +++ b/process/block/argProcessor.go @@ -38,3 +38,10 @@ type ArgShardProcessor struct { TxCoordinator process.TransactionCoordinator TxsPoolsCleaner process.PoolsCleaner } + +// ArgMetaProcessor holds all dependencies required by the process data factory in order to create +// new instances of meta processor +type ArgMetaProcessor struct { + *ArgBaseProcessor + DataPool dataRetriever.MetaPoolsHolder +} diff --git a/process/block/export_test.go b/process/block/export_test.go index 196de84e15b..ab80bcf0cfb 100644 --- a/process/block/export_test.go +++ b/process/block/export_test.go @@ -84,25 +84,6 @@ func NewShardProcessorEmptyWith3shards(tdp dataRetriever.PoolsHolder, genesisBlo return shardProcessor, err } -func NewMetaProcessorBasicSingleShard(mdp dataRetriever.MetaPoolsHolder, genesisBlocks map[uint32]data.HeaderHandler) (*metaProcessor, error) { - mp, err := NewMetaProcessor( - &mock.ServiceContainerMock{}, - &mock.AccountsStub{}, - mdp, - &mock.ForkDetectorMock{}, - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.ChainStorerMock{}, - genesisBlocks, - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - return mp, err -} - func (mp *metaProcessor) RequestBlockHeaders(header *block.MetaBlock) (uint32, uint32) { return mp.requestShardHeaders(header) } diff --git a/process/block/metablock.go b/process/block/metablock.go index c5c49e6ce24..8e5f18c3a30 100644 --- a/process/block/metablock.go +++ b/process/block/metablock.go @@ -10,15 +10,10 @@ import ( "github.com/ElrondNetwork/elrond-go/core/serviceContainer" "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/block" - "github.com/ElrondNetwork/elrond-go/data/state" 
- "github.com/ElrondNetwork/elrond-go/data/typeConverters" "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool" - "github.com/ElrondNetwork/elrond-go/hashing" - "github.com/ElrondNetwork/elrond-go/marshal" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/throttle" - "github.com/ElrondNetwork/elrond-go/sharding" "github.com/ElrondNetwork/elrond-go/statusHandler" "github.com/ElrondNetwork/elrond-go/storage" ) @@ -44,43 +39,29 @@ type metaProcessor struct { } // NewMetaProcessor creates a new metaProcessor object -func NewMetaProcessor( - core serviceContainer.Core, - accounts state.AccountsAdapter, - dataPool dataRetriever.MetaPoolsHolder, - forkDetector process.ForkDetector, - shardCoordinator sharding.Coordinator, - nodesCoordinator sharding.NodesCoordinator, - specialAddressHandler process.SpecialAddressHandler, - hasher hashing.Hasher, - marshalizer marshal.Marshalizer, - store dataRetriever.StorageService, - startHeaders map[uint32]data.HeaderHandler, - requestHandler process.RequestHandler, - uint64Converter typeConverters.Uint64ByteSliceConverter, -) (*metaProcessor, error) { +func NewMetaProcessor(arguments ArgMetaProcessor) (*metaProcessor, error) { err := checkProcessorNilParameters( - accounts, - forkDetector, - hasher, - marshalizer, - store, - shardCoordinator, - nodesCoordinator, - specialAddressHandler, - uint64Converter) + arguments.Accounts, + arguments.ForkDetector, + arguments.Hasher, + arguments.Marshalizer, + arguments.Store, + arguments.ShardCoordinator, + arguments.NodesCoordinator, + arguments.SpecialAddressHandler, + arguments.Uint64Converter) if err != nil { return nil, err } - if dataPool == nil || dataPool.IsInterfaceNil() { + if arguments.DataPool == nil || arguments.DataPool.IsInterfaceNil() { return nil, process.ErrNilDataPoolHolder } - if dataPool.ShardHeaders() == nil || dataPool.ShardHeaders().IsInterfaceNil() { + if arguments.DataPool.ShardHeaders() == nil || arguments.DataPool.ShardHeaders().IsInterfaceNil() { return nil, process.ErrNilHeadersDataPool } - if requestHandler == nil || requestHandler.IsInterfaceNil() { + if arguments.RequestHandler == nil || arguments.RequestHandler.IsInterfaceNil() { return nil, process.ErrNilRequestHandler } @@ -90,30 +71,30 @@ func NewMetaProcessor( } base := &baseProcessor{ - accounts: accounts, + accounts: arguments.Accounts, blockSizeThrottler: blockSizeThrottler, - forkDetector: forkDetector, - hasher: hasher, - marshalizer: marshalizer, - store: store, - shardCoordinator: shardCoordinator, - nodesCoordinator: nodesCoordinator, - specialAddressHandler: specialAddressHandler, - uint64Converter: uint64Converter, - onRequestHeaderHandler: requestHandler.RequestHeader, - onRequestHeaderHandlerByNonce: requestHandler.RequestHeaderByNonce, + forkDetector: arguments.ForkDetector, + hasher: arguments.Hasher, + marshalizer: arguments.Marshalizer, + store: arguments.Store, + shardCoordinator: arguments.ShardCoordinator, + nodesCoordinator: arguments.NodesCoordinator, + specialAddressHandler: arguments.SpecialAddressHandler, + uint64Converter: arguments.Uint64Converter, + onRequestHeaderHandler: arguments.RequestHandler.RequestHeader, + onRequestHeaderHandlerByNonce: arguments.RequestHandler.RequestHeaderByNonce, appStatusHandler: statusHandler.NewNilStatusHandler(), } - err = base.setLastNotarizedHeadersSlice(startHeaders) + err = base.setLastNotarizedHeadersSlice(arguments.StartHeaders) if err != nil { return nil, err } mp := 
metaProcessor{ - core: core, + core: arguments.Core, baseProcessor: base, - dataPool: dataPool, + dataPool: arguments.DataPool, headersCounter: NewHeaderCounter(), } diff --git a/process/block/metablock_test.go b/process/block/metablock_test.go index 271413ff620..650ffecfb0b 100644 --- a/process/block/metablock_test.go +++ b/process/block/metablock_test.go @@ -20,6 +20,29 @@ import ( "github.com/stretchr/testify/assert" ) +func CreateMockMetaArguments() blproc.ArgMetaProcessor { + mdp := initMetaDataPool() + shardCoordinator := mock.NewOneShardCoordinatorMock() + arguments := blproc.ArgMetaProcessor{ + ArgBaseProcessor: &blproc.ArgBaseProcessor{ + Accounts: &mock.AccountsStub{}, + ForkDetector: &mock.ForkDetectorMock{}, + Hasher: &mock.HasherStub{}, + Marshalizer: &mock.MarshalizerMock{}, + Store: &mock.ChainStorerMock{}, + ShardCoordinator: shardCoordinator, + NodesCoordinator: mock.NewNodesCoordinatorMock(), + SpecialAddressHandler: &mock.SpecialAddressHandlerMock{}, + Uint64Converter: &mock.Uint64ByteSliceConverterMock{}, + StartHeaders: createGenesisBlocks(shardCoordinator), + RequestHandler: &mock.RequestHandlerMock{}, + Core: &mock.ServiceContainerMock{}, + }, + DataPool: mdp, + } + return arguments +} + func createMetaBlockHeader() *block.MetaBlock { hdr := block.MetaBlock{ Nonce: 1, @@ -124,22 +147,10 @@ func setLastNotarizedHdr( func TestNewMetaProcessor_NilAccountsAdapterShouldErr(t *testing.T) { t.Parallel() - mdp := initMetaDataPool() - be, err := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - nil, - mdp, - &mock.ForkDetectorMock{}, - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.ChainStorerMock{}, - createGenesisBlocks(mock.NewOneShardCoordinatorMock()), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + arguments := CreateMockMetaArguments() + arguments.Accounts = nil + + be, err := blproc.NewMetaProcessor(arguments) assert.Equal(t, process.ErrNilAccountsAdapter, err) assert.Nil(t, be) } @@ -147,21 +158,10 @@ func TestNewMetaProcessor_NilAccountsAdapterShouldErr(t *testing.T) { func TestNewMetaProcessor_NilDataPoolShouldErr(t *testing.T) { t.Parallel() - be, err := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - &mock.AccountsStub{}, - nil, - &mock.ForkDetectorMock{}, - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.ChainStorerMock{}, - createGenesisBlocks(mock.NewOneShardCoordinatorMock()), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + arguments := CreateMockMetaArguments() + arguments.DataPool = nil + + be, err := blproc.NewMetaProcessor(arguments) assert.Equal(t, process.ErrNilDataPoolHolder, err) assert.Nil(t, be) } @@ -169,22 +169,10 @@ func TestNewMetaProcessor_NilDataPoolShouldErr(t *testing.T) { func TestNewMetaProcessor_NilForkDetectorShouldErr(t *testing.T) { t.Parallel() - mdp := initMetaDataPool() - be, err := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - &mock.AccountsStub{}, - mdp, - nil, - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.ChainStorerMock{}, - createGenesisBlocks(mock.NewOneShardCoordinatorMock()), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + arguments := CreateMockMetaArguments() + 
arguments.ForkDetector = nil + + be, err := blproc.NewMetaProcessor(arguments) assert.Equal(t, process.ErrNilForkDetector, err) assert.Nil(t, be) } @@ -192,22 +180,10 @@ func TestNewMetaProcessor_NilForkDetectorShouldErr(t *testing.T) { func TestNewMetaProcessor_NilShardCoordinatorShouldErr(t *testing.T) { t.Parallel() - mdp := initMetaDataPool() - be, err := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - &mock.AccountsStub{}, - mdp, - &mock.ForkDetectorMock{}, - nil, - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.ChainStorerMock{}, - createGenesisBlocks(mock.NewOneShardCoordinatorMock()), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + arguments := CreateMockMetaArguments() + arguments.ShardCoordinator = nil + + be, err := blproc.NewMetaProcessor(arguments) assert.Equal(t, process.ErrNilShardCoordinator, err) assert.Nil(t, be) } @@ -215,22 +191,10 @@ func TestNewMetaProcessor_NilShardCoordinatorShouldErr(t *testing.T) { func TestNewMetaProcessor_NilHasherShouldErr(t *testing.T) { t.Parallel() - mdp := initMetaDataPool() - be, err := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - &mock.AccountsStub{}, - mdp, - &mock.ForkDetectorMock{}, - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - nil, - &mock.MarshalizerMock{}, - &mock.ChainStorerMock{}, - createGenesisBlocks(mock.NewOneShardCoordinatorMock()), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + arguments := CreateMockMetaArguments() + arguments.Hasher = nil + + be, err := blproc.NewMetaProcessor(arguments) assert.Equal(t, process.ErrNilHasher, err) assert.Nil(t, be) } @@ -238,22 +202,10 @@ func TestNewMetaProcessor_NilHasherShouldErr(t *testing.T) { func TestNewMetaProcessor_NilMarshalizerShouldErr(t *testing.T) { t.Parallel() - mdp := initMetaDataPool() - be, err := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - &mock.AccountsStub{}, - mdp, - &mock.ForkDetectorMock{}, - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.HasherStub{}, - nil, - &mock.ChainStorerMock{}, - createGenesisBlocks(mock.NewOneShardCoordinatorMock()), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + arguments := CreateMockMetaArguments() + arguments.Marshalizer = nil + + be, err := blproc.NewMetaProcessor(arguments) assert.Equal(t, process.ErrNilMarshalizer, err) assert.Nil(t, be) } @@ -261,22 +213,10 @@ func TestNewMetaProcessor_NilMarshalizerShouldErr(t *testing.T) { func TestNewMetaProcessor_NilChainStorerShouldErr(t *testing.T) { t.Parallel() - mdp := initMetaDataPool() - be, err := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - &mock.AccountsStub{}, - mdp, - &mock.ForkDetectorMock{}, - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - nil, - createGenesisBlocks(mock.NewOneShardCoordinatorMock()), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + arguments := CreateMockMetaArguments() + arguments.Store = nil + + be, err := blproc.NewMetaProcessor(arguments) assert.Equal(t, process.ErrNilStorage, err) assert.Nil(t, be) } @@ -284,22 +224,10 @@ func TestNewMetaProcessor_NilChainStorerShouldErr(t *testing.T) { func TestNewMetaProcessor_NilRequestHeaderHandlerShouldErr(t *testing.T) { t.Parallel() - mdp := 
initMetaDataPool() - be, err := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - &mock.AccountsStub{}, - mdp, - &mock.ForkDetectorMock{}, - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.ChainStorerMock{}, - createGenesisBlocks(mock.NewOneShardCoordinatorMock()), - nil, - &mock.Uint64ByteSliceConverterMock{}, - ) + arguments := CreateMockMetaArguments() + arguments.RequestHandler = nil + + be, err := blproc.NewMetaProcessor(arguments) assert.Equal(t, process.ErrNilRequestHandler, err) assert.Nil(t, be) } @@ -307,9 +235,9 @@ func TestNewMetaProcessor_NilRequestHeaderHandlerShouldErr(t *testing.T) { func TestNewMetaProcessor_OkValsShouldWork(t *testing.T) { t.Parallel() - mdp := initMetaDataPool() - genesisBlocks := createGenesisBlocks(mock.NewOneShardCoordinatorMock()) - mp, err := blproc.NewMetaProcessorBasicSingleShard(mdp, genesisBlocks) + arguments := CreateMockMetaArguments() + + mp, err := blproc.NewMetaProcessor(arguments) assert.Nil(t, err) assert.NotNil(t, mp) } @@ -319,9 +247,8 @@ func TestNewMetaProcessor_OkValsShouldWork(t *testing.T) { func TestMetaProcessor_ProcessBlockWithNilBlockchainShouldErr(t *testing.T) { t.Parallel() - mdp := initMetaDataPool() - genesisBlocks := createGenesisBlocks(mock.NewOneShardCoordinatorMock()) - mp, _ := blproc.NewMetaProcessorBasicSingleShard(mdp, genesisBlocks) + arguments := CreateMockMetaArguments() + mp, _ := blproc.NewMetaProcessor(arguments) blk := &block.MetaBlockBody{} err := mp.ProcessBlock(nil, &block.MetaBlock{}, blk, haveTime) @@ -331,9 +258,9 @@ func TestMetaProcessor_ProcessBlockWithNilBlockchainShouldErr(t *testing.T) { func TestMetaProcessor_ProcessBlockWithNilHeaderShouldErr(t *testing.T) { t.Parallel() - mdp := initMetaDataPool() - genesisBlocks := createGenesisBlocks(mock.NewOneShardCoordinatorMock()) - mp, _ := blproc.NewMetaProcessorBasicSingleShard(mdp, genesisBlocks) + arguments := CreateMockMetaArguments() + + mp, _ := blproc.NewMetaProcessor(arguments) blk := &block.MetaBlockBody{} err := mp.ProcessBlock(&blockchain.MetaChain{}, nil, blk, haveTime) @@ -343,9 +270,8 @@ func TestMetaProcessor_ProcessBlockWithNilHeaderShouldErr(t *testing.T) { func TestMetaProcessor_ProcessBlockWithNilBlockBodyShouldErr(t *testing.T) { t.Parallel() - mdp := initMetaDataPool() - genesisBlocks := createGenesisBlocks(mock.NewOneShardCoordinatorMock()) - mp, _ := blproc.NewMetaProcessorBasicSingleShard(mdp, genesisBlocks) + arguments := CreateMockMetaArguments() + mp, _ := blproc.NewMetaProcessor(arguments) err := mp.ProcessBlock(&blockchain.MetaChain{}, &block.MetaBlock{}, nil, haveTime) assert.Equal(t, process.ErrNilBlockBody, err) @@ -354,9 +280,8 @@ func TestMetaProcessor_ProcessBlockWithNilBlockBodyShouldErr(t *testing.T) { func TestMetaProcessor_ProcessBlockWithNilHaveTimeFuncShouldErr(t *testing.T) { t.Parallel() - mdp := initMetaDataPool() - genesisBlocks := createGenesisBlocks(mock.NewOneShardCoordinatorMock()) - mp, _ := blproc.NewMetaProcessorBasicSingleShard(mdp, genesisBlocks) + arguments := CreateMockMetaArguments() + mp, _ := blproc.NewMetaProcessor(arguments) blk := &block.MetaBlockBody{} err := mp.ProcessBlock(&blockchain.MetaChain{}, &block.MetaBlock{}, blk, nil) @@ -366,7 +291,6 @@ func TestMetaProcessor_ProcessBlockWithNilHaveTimeFuncShouldErr(t *testing.T) { func TestMetaProcessor_ProcessWithDirtyAccountShouldErr(t *testing.T) { t.Parallel() - mdp := initMetaDataPool() // set accounts dirty 
journalLen := func() int { return 3 } revToSnapshot := func(snapshot int) error { return nil } @@ -379,24 +303,13 @@ func TestMetaProcessor_ProcessWithDirtyAccountShouldErr(t *testing.T) { RootHash: []byte("roothash"), } body := &block.MetaBlockBody{} - mp, _ := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - &mock.AccountsStub{ - JournalLenCalled: journalLen, - RevertToSnapshotCalled: revToSnapshot, - }, - mdp, - &mock.ForkDetectorMock{}, - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.ChainStorerMock{}, - createGenesisBlocks(mock.NewOneShardCoordinatorMock()), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + arguments := CreateMockMetaArguments() + arguments.Accounts = &mock.AccountsStub{ + JournalLenCalled: journalLen, + RevertToSnapshotCalled: revToSnapshot, + } + mp, _ := blproc.NewMetaProcessor(arguments) + // should return err err := mp.ProcessBlock(blkc, &hdr, body, haveTime) assert.NotNil(t, err) @@ -406,9 +319,8 @@ func TestMetaProcessor_ProcessWithDirtyAccountShouldErr(t *testing.T) { func TestMetaProcessor_ProcessWithHeaderNotFirstShouldErr(t *testing.T) { t.Parallel() - mdp := initMetaDataPool() - genesisBlocks := createGenesisBlocks(mock.NewOneShardCoordinatorMock()) - mp, _ := blproc.NewMetaProcessorBasicSingleShard(mdp, genesisBlocks) + arguments := CreateMockMetaArguments() + mp, _ := blproc.NewMetaProcessor(arguments) blkc := &blockchain.MetaChain{} hdr := &block.MetaBlock{ @@ -422,9 +334,8 @@ func TestMetaProcessor_ProcessWithHeaderNotFirstShouldErr(t *testing.T) { func TestMetaProcessor_ProcessWithHeaderNotCorrectNonceShouldErr(t *testing.T) { t.Parallel() - mdp := initMetaDataPool() - genesisBlocks := createGenesisBlocks(mock.NewOneShardCoordinatorMock()) - mp, _ := blproc.NewMetaProcessorBasicSingleShard(mdp, genesisBlocks) + arguments := CreateMockMetaArguments() + mp, _ := blproc.NewMetaProcessor(arguments) blkc := &blockchain.MetaChain{ CurrentBlock: &block.MetaBlock{ Round: 1, @@ -436,6 +347,7 @@ func TestMetaProcessor_ProcessWithHeaderNotCorrectNonceShouldErr(t *testing.T) { Nonce: 3, } body := &block.MetaBlockBody{} + err := mp.ProcessBlock(blkc, hdr, body, haveTime) assert.Equal(t, process.ErrWrongNonceInBlock, err) } @@ -443,9 +355,8 @@ func TestMetaProcessor_ProcessWithHeaderNotCorrectNonceShouldErr(t *testing.T) { func TestMetaProcessor_ProcessWithHeaderNotCorrectPrevHashShouldErr(t *testing.T) { t.Parallel() - mdp := initMetaDataPool() - genesisBlocks := createGenesisBlocks(mock.NewOneShardCoordinatorMock()) - mp, _ := blproc.NewMetaProcessorBasicSingleShard(mdp, genesisBlocks) + arguments := CreateMockMetaArguments() + mp, _ := blproc.NewMetaProcessor(arguments) blkc := &blockchain.MetaChain{ CurrentBlock: &block.MetaBlock{ Round: 1, @@ -459,6 +370,7 @@ func TestMetaProcessor_ProcessWithHeaderNotCorrectPrevHashShouldErr(t *testing.T } body := &block.MetaBlockBody{} + err := mp.ProcessBlock(blkc, hdr, body, haveTime) assert.Equal(t, process.ErrBlockHashDoesNotMatch, err) } @@ -466,7 +378,6 @@ func TestMetaProcessor_ProcessWithHeaderNotCorrectPrevHashShouldErr(t *testing.T func TestMetaProcessor_ProcessBlockWithErrOnVerifyStateRootCallShouldRevertState(t *testing.T) { t.Parallel() - mdp := initMetaDataPool() blkc := &blockchain.MetaChain{ CurrentBlock: &block.MetaBlock{ Nonce: 0, @@ -484,25 +395,13 @@ func TestMetaProcessor_ProcessBlockWithErrOnVerifyStateRootCallShouldRevertState rootHashCalled := func() 
([]byte, error) { return []byte("rootHashX"), nil } - mp, _ := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - &mock.AccountsStub{ - JournalLenCalled: journalLen, - RevertToSnapshotCalled: revertToSnapshot, - RootHashCalled: rootHashCalled, - }, - mdp, - &mock.ForkDetectorMock{}, - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.ChainStorerMock{}, - createGenesisBlocks(mock.NewOneShardCoordinatorMock()), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + arguments := CreateMockMetaArguments() + arguments.Accounts = &mock.AccountsStub{ + JournalLenCalled: journalLen, + RevertToSnapshotCalled: revertToSnapshot, + RootHashCalled: rootHashCalled, + } + mp, _ := blproc.NewMetaProcessor(arguments) go func() { mp.ChRcvAllHdrs() <- true @@ -522,26 +421,13 @@ func TestMetaProcessor_ProcessBlockWithErrOnVerifyStateRootCallShouldRevertState func TestMetaProcessor_ProcessBlockHeaderShouldPass(t *testing.T) { t.Parallel() - mdp := initMetaDataPool() - accounts := &mock.AccountsStub{} - accounts.RevertToSnapshotCalled = func(snapshot int) error { - return nil + arguments := CreateMockMetaArguments() + arguments.Accounts = &mock.AccountsStub{ + RevertToSnapshotCalled: func(snapshot int) error { + return nil + }, } - mp, _ := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - accounts, - mdp, - &mock.ForkDetectorMock{}, - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.ChainStorerMock{}, - createGenesisBlocks(mock.NewOneShardCoordinatorMock()), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + mp, _ := blproc.NewMetaProcessor(arguments) txHash := []byte("txhash") txHashes := make([][]byte, 0) @@ -583,21 +469,11 @@ func TestMetaProcessor_RequestFinalMissingHeaderShouldPass(t *testing.T) { accounts.RevertToSnapshotCalled = func(snapshot int) error { return nil } - mp, _ := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - accounts, - mdp, - &mock.ForkDetectorMock{}, - mock.NewMultiShardsCoordinatorMock(3), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.ChainStorerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + arguments := CreateMockMetaArguments() + arguments.ShardCoordinator = mock.NewMultiShardsCoordinatorMock(3) + arguments.StartHeaders = createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)) + arguments.DataPool = mdp + mp, _ := blproc.NewMetaProcessor(arguments) mdp.HeadersNoncesCalled = func() dataRetriever.Uint64SyncMapCacher { cs := &mock.Uint64SyncMapCacherStub{} cs.GetCalled = func(key uint64) (dataRetriever.ShardIdHashMap, bool) { @@ -621,26 +497,13 @@ func TestMetaProcessor_RequestFinalMissingHeaderShouldPass(t *testing.T) { func TestMetaProcessor_CommitBlockNilBlockchainShouldErr(t *testing.T) { t.Parallel() - mdp := initMetaDataPool() - accounts := &mock.AccountsStub{} - accounts.RevertToSnapshotCalled = func(snapshot int) error { - return nil + arguments := CreateMockMetaArguments() + arguments.Accounts = &mock.AccountsStub{ + RevertToSnapshotCalled: func(snapshot int) error { + return nil + }, } - mp, _ := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - accounts, - mdp, - &mock.ForkDetectorMock{}, 
- mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.ChainStorerMock{}, - createGenesisBlocks(mock.NewOneShardCoordinatorMock()), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + mp, _ := blproc.NewMetaProcessor(arguments) blk := &block.MetaBlockBody{} err := mp.CommitBlock(nil, &block.MetaBlock{}, blk) assert.Equal(t, process.ErrNilBlockChain, err) @@ -649,7 +512,6 @@ func TestMetaProcessor_CommitBlockNilBlockchainShouldErr(t *testing.T) { func TestMetaProcessor_CommitBlockMarshalizerFailForHeaderShouldErr(t *testing.T) { t.Parallel() - mdp := initMetaDataPool() accounts := &mock.AccountsStub{ RevertToSnapshotCalled: func(snapshot int) error { return nil @@ -667,21 +529,10 @@ func TestMetaProcessor_CommitBlockMarshalizerFailForHeaderShouldErr(t *testing.T return []byte("obj"), nil }, } - mp, _ := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - accounts, - mdp, - &mock.ForkDetectorMock{}, - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.HasherStub{}, - marshalizer, - &mock.ChainStorerMock{}, - createGenesisBlocks(mock.NewOneShardCoordinatorMock()), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + arguments := CreateMockMetaArguments() + arguments.Accounts = accounts + arguments.Marshalizer = marshalizer + mp, _ := blproc.NewMetaProcessor(arguments) blkc := createTestBlockchain() err := mp.CommitBlock(blkc, hdr, body) assert.Equal(t, errMarshalizer, err) @@ -690,7 +541,6 @@ func TestMetaProcessor_CommitBlockMarshalizerFailForHeaderShouldErr(t *testing.T func TestMetaProcessor_CommitBlockStorageFailsForHeaderShouldErr(t *testing.T) { t.Parallel() - mdp := initMetaDataPool() wasCalled := false errPersister := errors.New("failure") accounts := &mock.AccountsStub{ @@ -709,28 +559,18 @@ func TestMetaProcessor_CommitBlockStorageFailsForHeaderShouldErr(t *testing.T) { store := initStore() store.AddStorer(dataRetriever.MetaBlockUnit, hdrUnit) - mp, _ := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - accounts, - mdp, - &mock.ForkDetectorMock{ - AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeaders []data.HeaderHandler, finalHeadersHashes [][]byte) error { - return nil - }, - GetHighestFinalBlockNonceCalled: func() uint64 { - return 0 - }, + arguments := CreateMockMetaArguments() + arguments.Accounts = accounts + arguments.Store = store + arguments.ForkDetector = &mock.ForkDetectorMock{ + AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeaders []data.HeaderHandler, finalHeadersHashes [][]byte) error { + return nil }, - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - store, - createGenesisBlocks(mock.NewOneShardCoordinatorMock()), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + GetHighestFinalBlockNonceCalled: func() uint64 { + return 0 + }, + } + mp, _ := blproc.NewMetaProcessor(arguments) blkc, _ := blockchain.NewMetaChain( generateTestCache(), @@ -753,21 +593,11 @@ func TestMetaProcessor_CommitBlockNilNoncesDataPoolShouldErr(t *testing.T) { body := &block.MetaBlockBody{} store := initStore() - mp, _ := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - accounts, - mdp, - &mock.ForkDetectorMock{}, - 
mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - store, - createGenesisBlocks(mock.NewOneShardCoordinatorMock()), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + arguments := CreateMockMetaArguments() + arguments.Accounts = accounts + arguments.Store = store + arguments.DataPool = mdp + mp, _ := blproc.NewMetaProcessor(arguments) mdp.HeadersNoncesCalled = func() dataRetriever.Uint64SyncMapCacher { return nil @@ -793,21 +623,13 @@ func TestMetaProcessor_CommitBlockNoTxInPoolShouldErr(t *testing.T) { hasher := &mock.HasherStub{} store := initStore() - mp, _ := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - accounts, - mdp, - fd, - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - hasher, - &mock.MarshalizerMock{}, - store, - createGenesisBlocks(mock.NewOneShardCoordinatorMock()), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + arguments := CreateMockMetaArguments() + arguments.DataPool = mdp + arguments.Accounts = accounts + arguments.ForkDetector = fd + arguments.Store = store + arguments.Hasher = hasher + mp, _ := blproc.NewMetaProcessor(arguments) mdp.ShardHeadersCalled = func() storage.Cacher { return &mock.CacherStub{ @@ -860,21 +682,13 @@ func TestMetaProcessor_CommitBlockOkValsShouldWork(t *testing.T) { store := initStore() store.AddStorer(dataRetriever.BlockHeaderUnit, blockHeaderUnit) - mp, _ := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - accounts, - mdp, - fd, - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - hasher, - &mock.MarshalizerMock{}, - store, - createGenesisBlocks(mock.NewOneShardCoordinatorMock()), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + arguments := CreateMockMetaArguments() + arguments.DataPool = mdp + arguments.Accounts = accounts + arguments.ForkDetector = fd + arguments.Store = store + arguments.Hasher = hasher + mp, _ := blproc.NewMetaProcessor(arguments) removeHdrWasCalled := false mdp.ShardHeadersCalled = func() storage.Cacher { @@ -909,21 +723,12 @@ func TestBlockProc_RequestTransactionFromNetwork(t *testing.T) { t.Parallel() mdp := initMetaDataPool() - mp, _ := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - &mock.AccountsStub{}, - mdp, - &mock.ForkDetectorMock{}, - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.ChainStorerMock{}, - createGenesisBlocks(mock.NewOneShardCoordinatorMock()), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + + arguments := CreateMockMetaArguments() + arguments.DataPool = mdp + arguments.Store = initStore() + mp, _ := blproc.NewMetaProcessor(arguments) + mdp.ShardHeadersCalled = func() storage.Cacher { cs := &mock.CacherStub{} cs.RegisterHandlerCalled = func(i func(key []byte)) { @@ -947,22 +752,11 @@ func TestBlockProc_RequestTransactionFromNetwork(t *testing.T) { func TestMetaProcessor_RemoveBlockInfoFromPoolShouldErrNilMetaBlockHeader(t *testing.T) { t.Parallel() - mdp := initMetaDataPool() - mp, _ := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - &mock.AccountsStub{}, - mdp, - &mock.ForkDetectorMock{}, - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.HasherStub{}, 
- &mock.MarshalizerMock{}, - initStore(), - createGenesisBlocks(mock.NewOneShardCoordinatorMock()), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + arguments := CreateMockMetaArguments() + arguments.DataPool = initMetaDataPool() + arguments.Store = initStore() + mp, _ := blproc.NewMetaProcessor(arguments) + err := mp.RemoveBlockInfoFromPool(nil) assert.NotNil(t, err) assert.Equal(t, err, process.ErrNilMetaBlockHeader) @@ -971,23 +765,12 @@ func TestMetaProcessor_RemoveBlockInfoFromPoolShouldErrNilMetaBlockHeader(t *tes func TestMetaProcessor_RemoveBlockInfoFromPoolShouldWork(t *testing.T) { t.Parallel() - mdp := initMetaDataPool() - mp, _ := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - &mock.AccountsStub{}, - mdp, - &mock.ForkDetectorMock{}, - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - initStore(), - createGenesisBlocks(mock.NewOneShardCoordinatorMock()), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + arguments := CreateMockMetaArguments() + arguments.DataPool = initMetaDataPool() + arguments.Store = initStore() + mp, _ := blproc.NewMetaProcessor(arguments) header := createMetaBlockHeader() + err := mp.RemoveBlockInfoFromPool(header) assert.Nil(t, err) } @@ -995,26 +778,17 @@ func TestMetaProcessor_RemoveBlockInfoFromPoolShouldWork(t *testing.T) { func TestMetaProcessor_CreateBlockHeaderShouldNotReturnNilWhenCreateShardInfoFail(t *testing.T) { t.Parallel() - mp, _ := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - &mock.AccountsStub{ - JournalLenCalled: func() int { - return 1 - }, + arguments := CreateMockMetaArguments() + arguments.Accounts = &mock.AccountsStub{ + JournalLenCalled: func() int { + return 1 }, - initMetaDataPool(), - &mock.ForkDetectorMock{}, - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - initStore(), - createGenesisBlocks(mock.NewOneShardCoordinatorMock()), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + } + arguments.DataPool = initMetaDataPool() + arguments.Store = initStore() + mp, _ := blproc.NewMetaProcessor(arguments) haveTime := func() bool { return true } + hdr, err := mp.CreateBlockHeader(nil, 0, haveTime) assert.NotNil(t, err) assert.Nil(t, hdr) @@ -1023,30 +797,20 @@ func TestMetaProcessor_CreateBlockHeaderShouldNotReturnNilWhenCreateShardInfoFai func TestMetaProcessor_CreateBlockHeaderShouldWork(t *testing.T) { t.Parallel() - mp, _ := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - &mock.AccountsStub{ - JournalLenCalled: func() int { - return 0 - }, - RootHashCalled: func() ([]byte, error) { - return []byte("root"), nil - }, + arguments := CreateMockMetaArguments() + arguments.Accounts = &mock.AccountsStub{ + JournalLenCalled: func() int { + return 0 }, - initMetaDataPool(), - &mock.ForkDetectorMock{}, - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - initStore(), - createGenesisBlocks(mock.NewOneShardCoordinatorMock()), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - + RootHashCalled: func() ([]byte, error) { + return []byte("root"), nil + }, + } + arguments.DataPool = initMetaDataPool() + arguments.Store = initStore() + mp, _ := blproc.NewMetaProcessor(arguments) haveTime := func() 
bool { return true } + hdr, err := mp.CreateBlockHeader(nil, 0, haveTime) assert.Nil(t, err) assert.NotNil(t, hdr) @@ -1061,23 +825,15 @@ func TestMetaProcessor_CommitBlockShouldRevertAccountStateWhenErr(t *testing.T) journalEntries = 0 return nil } - mp, _ := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - &mock.AccountsStub{ - RevertToSnapshotCalled: revToSnapshot, - }, - initMetaDataPool(), - &mock.ForkDetectorMock{}, - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - initStore(), - createGenesisBlocks(mock.NewOneShardCoordinatorMock()), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + + arguments := CreateMockMetaArguments() + arguments.Accounts = &mock.AccountsStub{ + RevertToSnapshotCalled: revToSnapshot, + } + arguments.DataPool = initMetaDataPool() + arguments.Store = initStore() + mp, _ := blproc.NewMetaProcessor(arguments) + err := mp.CommitBlock(nil, nil, nil) assert.NotNil(t, err) assert.Equal(t, 0, journalEntries) @@ -1086,22 +842,9 @@ func TestMetaProcessor_CommitBlockShouldRevertAccountStateWhenErr(t *testing.T) func TestMetaProcessor_MarshalizedDataToBroadcastShouldWork(t *testing.T) { t.Parallel() - mdp := initMetaDataPool() - mp, _ := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - &mock.AccountsStub{}, - mdp, - &mock.ForkDetectorMock{}, - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - initStore(), - createGenesisBlocks(mock.NewOneShardCoordinatorMock()), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + arguments := CreateMockMetaArguments() + arguments.Store = initStore() + mp, _ := blproc.NewMetaProcessor(arguments) msh, mstx, err := mp.MarshalizedDataToBroadcast(&block.MetaBlock{}, &block.MetaBlockBody{}) assert.Nil(t, err) @@ -1114,25 +857,11 @@ func TestMetaProcessor_MarshalizedDataToBroadcastShouldWork(t *testing.T) { func TestMetaProcessor_ReceivedHeaderShouldEraseRequested(t *testing.T) { t.Parallel() - hasher := mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} pool := mock.NewMetaPoolsHolderFake() - - mp, _ := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - &mock.AccountsStub{}, - pool, - &mock.ForkDetectorMock{}, - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - hasher, - marshalizer, - initStore(), - createGenesisBlocks(mock.NewOneShardCoordinatorMock()), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + arguments := CreateMockMetaArguments() + arguments.DataPool = pool + arguments.Store = initStore() + mp, _ := blproc.NewMetaProcessor(arguments) //add 3 tx hashes on requested list hdrHash1 := []byte("hdr hash 1") @@ -1158,12 +887,8 @@ func TestMetaProcessor_ReceivedHeaderShouldEraseRequested(t *testing.T) { func TestMetaProcessor_CreateShardInfoShouldWorkNoHdrAddedNotValid(t *testing.T) { t.Parallel() - hasher := mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} pool := mock.NewMetaPoolsHolderFake() - //we will have a 3 hdrs in pool - hdrHash1 := []byte("hdr hash 1") hdrHash2 := []byte("hdr hash 2") hdrHash3 := []byte("hdr hash 3") @@ -1205,29 +930,22 @@ func TestMetaProcessor_CreateShardInfoShouldWorkNoHdrAddedNotValid(t *testing.T) ShardId: 2, MiniBlockHeaders: miniBlockHeaders3}) - mp, _ := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - 
&mock.AccountsStub{ - RevertToSnapshotCalled: func(snapshot int) error { - assert.Fail(t, "revert should have not been called") - return nil - }, - JournalLenCalled: func() int { - return 0 - }, + noOfShards := uint32(5) + arguments := CreateMockMetaArguments() + arguments.Accounts = &mock.AccountsStub{ + RevertToSnapshotCalled: func(snapshot int) error { + assert.Fail(t, "revert should have not been called") + return nil }, - pool, - &mock.ForkDetectorMock{}, - mock.NewMultiShardsCoordinatorMock(5), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - hasher, - marshalizer, - initStore(), - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(5)), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + JournalLenCalled: func() int { + return 0 + }, + } + arguments.DataPool = pool + arguments.ShardCoordinator = mock.NewMultiShardsCoordinatorMock(noOfShards) + arguments.StartHeaders = createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(noOfShards)) + arguments.Store = initStore() + mp, _ := blproc.NewMetaProcessor(arguments) haveTime := func() bool { return true } round := uint64(10) @@ -1255,12 +973,8 @@ func TestMetaProcessor_CreateShardInfoShouldWorkNoHdrAddedNotValid(t *testing.T) func TestMetaProcessor_CreateShardInfoShouldWorkNoHdrAddedNotFinal(t *testing.T) { t.Parallel() - hasher := mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} pool := mock.NewMetaPoolsHolderFake() - //we will have a 3 hdrs in pool - hdrHash1 := []byte("hdr hash 1") hdrHash2 := []byte("hdr hash 2") hdrHash3 := []byte("hdr hash 3") @@ -1286,29 +1000,21 @@ func TestMetaProcessor_CreateShardInfoShouldWorkNoHdrAddedNotFinal(t *testing.T) miniBlockHeaders3 = append(miniBlockHeaders3, miniBlockHeader1) noOfShards := uint32(5) - mp, _ := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - &mock.AccountsStub{ - RevertToSnapshotCalled: func(snapshot int) error { - assert.Fail(t, "revert should have not been called") - return nil - }, - JournalLenCalled: func() int { - return 0 - }, + arguments := CreateMockMetaArguments() + arguments.Accounts = &mock.AccountsStub{ + RevertToSnapshotCalled: func(snapshot int) error { + assert.Fail(t, "revert should have not been called") + return nil }, - pool, - &mock.ForkDetectorMock{}, - mock.NewMultiShardsCoordinatorMock(noOfShards), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - hasher, - marshalizer, - initStore(), - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(5)), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + JournalLenCalled: func() int { + return 0 + }, + } + arguments.DataPool = pool + arguments.ShardCoordinator = mock.NewMultiShardsCoordinatorMock(noOfShards) + arguments.StartHeaders = createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(noOfShards)) + arguments.Store = initStore() + mp, _ := blproc.NewMetaProcessor(arguments) haveTime := func() bool { return true } @@ -1370,12 +1076,8 @@ func TestMetaProcessor_CreateShardInfoShouldWorkNoHdrAddedNotFinal(t *testing.T) func TestMetaProcessor_CreateShardInfoShouldWorkHdrsAdded(t *testing.T) { t.Parallel() - hasher := mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} pool := mock.NewMetaPoolsHolderFake() - //we will have a 3 hdrs in pool - hdrHash1 := []byte("hdr hash 1") hdrHash2 := []byte("hdr hash 2") hdrHash3 := []byte("hdr hash 3") @@ -1405,29 +1107,21 @@ func TestMetaProcessor_CreateShardInfoShouldWorkHdrsAdded(t *testing.T) { miniBlockHeaders3 = append(miniBlockHeaders3, 
miniBlockHeader1) noOfShards := uint32(5) - mp, _ := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - &mock.AccountsStub{ - RevertToSnapshotCalled: func(snapshot int) error { - assert.Fail(t, "revert should have not been called") - return nil - }, - JournalLenCalled: func() int { - return 0 - }, + arguments := CreateMockMetaArguments() + arguments.Accounts = &mock.AccountsStub{ + RevertToSnapshotCalled: func(snapshot int) error { + assert.Fail(t, "revert should have not been called") + return nil }, - pool, - &mock.ForkDetectorMock{}, - mock.NewMultiShardsCoordinatorMock(noOfShards), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - hasher, - marshalizer, - initStore(), - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(noOfShards)), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + JournalLenCalled: func() int { + return 0 + }, + } + arguments.DataPool = pool + arguments.ShardCoordinator = mock.NewMultiShardsCoordinatorMock(noOfShards) + arguments.StartHeaders = createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(noOfShards)) + arguments.Store = initStore() + mp, _ := blproc.NewMetaProcessor(arguments) haveTime := func() bool { return true } @@ -1538,12 +1232,8 @@ func TestMetaProcessor_CreateShardInfoShouldWorkHdrsAdded(t *testing.T) { func TestMetaProcessor_CreateShardInfoEmptyBlockHDRRoundTooHigh(t *testing.T) { t.Parallel() - hasher := mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} pool := mock.NewMetaPoolsHolderFake() - //we will have a 3 hdrs in pool - hdrHash1 := []byte("hdr hash 1") hdrHash2 := []byte("hdr hash 2") hdrHash3 := []byte("hdr hash 3") @@ -1573,29 +1263,21 @@ func TestMetaProcessor_CreateShardInfoEmptyBlockHDRRoundTooHigh(t *testing.T) { miniBlockHeaders3 = append(miniBlockHeaders3, miniBlockHeader1) noOfShards := uint32(5) - mp, _ := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - &mock.AccountsStub{ - RevertToSnapshotCalled: func(snapshot int) error { - assert.Fail(t, "revert should have not been called") - return nil - }, - JournalLenCalled: func() int { - return 0 - }, + arguments := CreateMockMetaArguments() + arguments.Accounts = &mock.AccountsStub{ + RevertToSnapshotCalled: func(snapshot int) error { + assert.Fail(t, "revert should have not been called") + return nil }, - pool, - &mock.ForkDetectorMock{}, - mock.NewMultiShardsCoordinatorMock(noOfShards), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - hasher, - marshalizer, - initStore(), - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(noOfShards)), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + JournalLenCalled: func() int { + return 0 + }, + } + arguments.DataPool = pool + arguments.ShardCoordinator = mock.NewMultiShardsCoordinatorMock(noOfShards) + arguments.StartHeaders = createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(noOfShards)) + arguments.Store = initStore() + mp, _ := blproc.NewMetaProcessor(arguments) haveTime := func() bool { return true } @@ -1706,22 +1388,10 @@ func TestMetaProcessor_CreateShardInfoEmptyBlockHDRRoundTooHigh(t *testing.T) { func TestMetaProcessor_RestoreBlockIntoPoolsShouldErrNilMetaBlockHeader(t *testing.T) { t.Parallel() - mdp := initMetaDataPool() - mp, _ := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - &mock.AccountsStub{}, - mdp, - &mock.ForkDetectorMock{}, - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.HasherStub{}, - 
&mock.MarshalizerMock{}, - initStore(), - createGenesisBlocks(mock.NewOneShardCoordinatorMock()), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + arguments := CreateMockMetaArguments() + arguments.Store = initStore() + mp, _ := blproc.NewMetaProcessor(arguments) + err := mp.RestoreBlockIntoPools(nil, nil) assert.NotNil(t, err) assert.Equal(t, err, process.ErrNilMetaBlockHeader) @@ -1732,8 +1402,6 @@ func TestMetaProcessor_RestoreBlockIntoPoolsShouldWork(t *testing.T) { pool := mock.NewMetaPoolsHolderFake() marshalizerMock := &mock.MarshalizerMock{} - hasherMock := &mock.HasherStub{} - body := &block.MetaBlockBody{} hdr := block.Header{Nonce: 1} buffHdr, _ := marshalizerMock.Marshal(hdr) @@ -1752,21 +1420,10 @@ func TestMetaProcessor_RestoreBlockIntoPoolsShouldWork(t *testing.T) { }, } - mp, _ := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - &mock.AccountsStub{}, - pool, - &mock.ForkDetectorMock{}, - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - hasherMock, - marshalizerMock, - store, - createGenesisBlocks(mock.NewOneShardCoordinatorMock()), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + arguments := CreateMockMetaArguments() + arguments.DataPool = pool + arguments.Store = store + mp, _ := blproc.NewMetaProcessor(arguments) mhdr := createMetaBlockHeader() @@ -1780,34 +1437,24 @@ func TestMetaProcessor_RestoreBlockIntoPoolsShouldWork(t *testing.T) { func TestMetaProcessor_CreateLastNotarizedHdrs(t *testing.T) { t.Parallel() - hasher := mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} pool := mock.NewMetaPoolsHolderFake() - noOfShards := uint32(5) - mp, _ := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - &mock.AccountsStub{ - RevertToSnapshotCalled: func(snapshot int) error { - assert.Fail(t, "revert should have not been called") - return nil - }, - JournalLenCalled: func() int { - return 0 - }, + arguments := CreateMockMetaArguments() + arguments.Accounts = &mock.AccountsStub{ + RevertToSnapshotCalled: func(snapshot int) error { + assert.Fail(t, "revert should have not been called") + return nil }, - pool, - &mock.ForkDetectorMock{}, - mock.NewMultiShardsCoordinatorMock(noOfShards), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - hasher, - marshalizer, - initStore(), - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(noOfShards)), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + JournalLenCalled: func() int { + return 0 + }, + } + arguments.Hasher = &mock.HasherMock{} + arguments.DataPool = pool + arguments.Store = initStore() + arguments.ShardCoordinator = mock.NewMultiShardsCoordinatorMock(noOfShards) + arguments.StartHeaders = createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(noOfShards)) + mp, _ := blproc.NewMetaProcessor(arguments) prevRandSeed := []byte("prevrand") currRandSeed := []byte("currrand") @@ -1875,34 +1522,24 @@ func TestMetaProcessor_CreateLastNotarizedHdrs(t *testing.T) { func TestMetaProcessor_CheckShardHeadersValidity(t *testing.T) { t.Parallel() - hasher := mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} pool := mock.NewMetaPoolsHolderFake() - noOfShards := uint32(5) - mp, _ := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - &mock.AccountsStub{ - RevertToSnapshotCalled: func(snapshot int) error { - assert.Fail(t, "revert should have not been called") - return nil - }, - JournalLenCalled: func() int { - return 0 - }, + arguments := 
CreateMockMetaArguments() + arguments.Accounts = &mock.AccountsStub{ + RevertToSnapshotCalled: func(snapshot int) error { + assert.Fail(t, "revert should have not been called") + return nil }, - pool, - &mock.ForkDetectorMock{}, - mock.NewMultiShardsCoordinatorMock(noOfShards), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - hasher, - marshalizer, - initStore(), - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(noOfShards)), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + JournalLenCalled: func() int { + return 0 + }, + } + arguments.Hasher = &mock.HasherMock{} + arguments.DataPool = pool + arguments.Store = initStore() + arguments.ShardCoordinator = mock.NewMultiShardsCoordinatorMock(noOfShards) + arguments.StartHeaders = createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(noOfShards)) + mp, _ := blproc.NewMetaProcessor(arguments) prevRandSeed := []byte("prevrand") currRandSeed := []byte("currrand") @@ -1971,34 +1608,23 @@ func TestMetaProcessor_CheckShardHeadersValidity(t *testing.T) { func TestMetaProcessor_CheckShardHeadersValidityWrongNonceFromLastNoted(t *testing.T) { t.Parallel() - hasher := mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} pool := mock.NewMetaPoolsHolderFake() - noOfShards := uint32(5) - mp, _ := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - &mock.AccountsStub{ - RevertToSnapshotCalled: func(snapshot int) error { - assert.Fail(t, "revert should have not been called") - return nil - }, - JournalLenCalled: func() int { - return 0 - }, + arguments := CreateMockMetaArguments() + arguments.Accounts = &mock.AccountsStub{ + RevertToSnapshotCalled: func(snapshot int) error { + assert.Fail(t, "revert should have not been called") + return nil }, - pool, - &mock.ForkDetectorMock{}, - mock.NewMultiShardsCoordinatorMock(noOfShards), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - hasher, - marshalizer, - initStore(), - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(noOfShards)), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + JournalLenCalled: func() int { + return 0 + }, + } + arguments.DataPool = pool + arguments.Store = initStore() + arguments.ShardCoordinator = mock.NewMultiShardsCoordinatorMock(noOfShards) + arguments.StartHeaders = createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(noOfShards)) + mp, _ := blproc.NewMetaProcessor(arguments) prevRandSeed := []byte("prevrand") currRandSeed := []byte("currrand") @@ -2030,34 +1656,24 @@ func TestMetaProcessor_CheckShardHeadersValidityWrongNonceFromLastNoted(t *testi func TestMetaProcessor_CheckShardHeadersValidityRoundZeroLastNoted(t *testing.T) { t.Parallel() - hasher := mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} pool := mock.NewMetaPoolsHolderFake() noOfShards := uint32(5) - mp, _ := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - &mock.AccountsStub{ - RevertToSnapshotCalled: func(snapshot int) error { - assert.Fail(t, "revert should have not been called") - return nil - }, - JournalLenCalled: func() int { - return 0 - }, + arguments := CreateMockMetaArguments() + arguments.Accounts = &mock.AccountsStub{ + RevertToSnapshotCalled: func(snapshot int) error { + assert.Fail(t, "revert should have not been called") + return nil }, - pool, - &mock.ForkDetectorMock{}, - mock.NewMultiShardsCoordinatorMock(noOfShards), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - hasher, - marshalizer, - initStore(), - 
createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(noOfShards)), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + JournalLenCalled: func() int { + return 0 + }, + } + arguments.DataPool = pool + arguments.Store = initStore() + arguments.ShardCoordinator = mock.NewMultiShardsCoordinatorMock(noOfShards) + arguments.StartHeaders = createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(noOfShards)) + mp, _ := blproc.NewMetaProcessor(arguments) prevRandSeed := []byte("prevrand") currRandSeed := []byte("currrand") @@ -2095,34 +1711,23 @@ func TestMetaProcessor_CheckShardHeadersValidityRoundZeroLastNoted(t *testing.T) func TestMetaProcessor_CheckShardHeadersFinality(t *testing.T) { t.Parallel() - hasher := mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} pool := mock.NewMetaPoolsHolderFake() - noOfShards := uint32(5) - mp, _ := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - &mock.AccountsStub{ - RevertToSnapshotCalled: func(snapshot int) error { - assert.Fail(t, "revert should have not been called") - return nil - }, - JournalLenCalled: func() int { - return 0 - }, + arguments := CreateMockMetaArguments() + arguments.Accounts = &mock.AccountsStub{ + RevertToSnapshotCalled: func(snapshot int) error { + assert.Fail(t, "revert should have not been called") + return nil }, - pool, - &mock.ForkDetectorMock{}, - mock.NewMultiShardsCoordinatorMock(noOfShards), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - hasher, - marshalizer, - initStore(), - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(noOfShards)), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + JournalLenCalled: func() int { + return 0 + }, + } + arguments.DataPool = pool + arguments.Store = initStore() + arguments.ShardCoordinator = mock.NewMultiShardsCoordinatorMock(noOfShards) + arguments.StartHeaders = createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(noOfShards)) + mp, _ := blproc.NewMetaProcessor(arguments) prevRandSeed := []byte("prevrand") currRandSeed := []byte("currrand") @@ -2209,34 +1814,23 @@ func TestMetaProcessor_CheckShardHeadersFinality(t *testing.T) { func TestMetaProcessor_IsHdrConstructionValid(t *testing.T) { t.Parallel() - hasher := mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} pool := mock.NewMetaPoolsHolderFake() - noOfShards := uint32(5) - mp, _ := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - &mock.AccountsStub{ - RevertToSnapshotCalled: func(snapshot int) error { - assert.Fail(t, "revert should have not been called") - return nil - }, - JournalLenCalled: func() int { - return 0 - }, + arguments := CreateMockMetaArguments() + arguments.Accounts = &mock.AccountsStub{ + RevertToSnapshotCalled: func(snapshot int) error { + assert.Fail(t, "revert should have not been called") + return nil }, - pool, - &mock.ForkDetectorMock{}, - mock.NewMultiShardsCoordinatorMock(noOfShards), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - hasher, - marshalizer, - initStore(), - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(noOfShards)), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + JournalLenCalled: func() int { + return 0 + }, + } + arguments.DataPool = pool + arguments.Store = initStore() + arguments.ShardCoordinator = mock.NewMultiShardsCoordinatorMock(noOfShards) + arguments.StartHeaders = createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(noOfShards)) + mp, _ := blproc.NewMetaProcessor(arguments) prevRandSeed := 
[]byte("prevrand") currRandSeed := []byte("currrand") @@ -2318,34 +1912,24 @@ func TestMetaProcessor_IsHdrConstructionValid(t *testing.T) { func TestMetaProcessor_IsShardHeaderValidFinal(t *testing.T) { t.Parallel() - hasher := mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} pool := mock.NewMetaPoolsHolderFake() - noOfShards := uint32(5) - mp, _ := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - &mock.AccountsStub{ - RevertToSnapshotCalled: func(snapshot int) error { - assert.Fail(t, "revert should have not been called") - return nil - }, - JournalLenCalled: func() int { - return 0 - }, + + arguments := CreateMockMetaArguments() + arguments.Accounts = &mock.AccountsStub{ + RevertToSnapshotCalled: func(snapshot int) error { + assert.Fail(t, "revert should have not been called") + return nil }, - pool, - &mock.ForkDetectorMock{}, - mock.NewMultiShardsCoordinatorMock(noOfShards), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - hasher, - marshalizer, - initStore(), - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(noOfShards)), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + JournalLenCalled: func() int { + return 0 + }, + } + arguments.DataPool = pool + arguments.ShardCoordinator = mock.NewMultiShardsCoordinatorMock(noOfShards) + arguments.StartHeaders = createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(noOfShards)) + arguments.Store = initStore() + mp, _ := blproc.NewMetaProcessor(arguments) prevRandSeed := []byte("prevrand") currRandSeed := []byte("currrand") @@ -2440,23 +2024,10 @@ func TestMetaProcessor_IsShardHeaderValidFinal(t *testing.T) { func TestMetaProcessor_DecodeBlockBody(t *testing.T) { t.Parallel() - mdp := initMetaDataPool() + marshalizerMock := &mock.MarshalizerMock{} - mp, err := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - &mock.AccountsStub{}, - mdp, - &mock.ForkDetectorMock{}, - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.HasherStub{}, - marshalizerMock, - &mock.ChainStorerMock{}, - createGenesisBlocks(mock.NewOneShardCoordinatorMock()), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + arguments := CreateMockMetaArguments() + mp, _ := blproc.NewMetaProcessor(arguments) body := &block.MetaBlockBody{} message, err := marshalizerMock.Marshal(body) assert.Nil(t, err) @@ -2470,23 +2041,10 @@ func TestMetaProcessor_DecodeBlockBody(t *testing.T) { func TestMetaProcessor_DecodeBlockHeader(t *testing.T) { t.Parallel() - mdp := initMetaDataPool() + marshalizerMock := &mock.MarshalizerMock{} - mp, err := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - &mock.AccountsStub{}, - mdp, - &mock.ForkDetectorMock{}, - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.HasherStub{}, - marshalizerMock, - &mock.ChainStorerMock{}, - createGenesisBlocks(mock.NewOneShardCoordinatorMock()), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + arguments := CreateMockMetaArguments() + mp, _ := blproc.NewMetaProcessor(arguments) hdr := &block.MetaBlock{} hdr.Nonce = 1 hdr.TimeStamp = uint64(0) @@ -2508,26 +2066,8 @@ func TestMetaProcessor_DecodeBlockHeader(t *testing.T) { func TestMetaProcessor_UpdateShardsHeadersNonce_ShouldWork(t *testing.T) { t.Parallel() - mdp := initMetaDataPool() - marshalizerMock := &mock.MarshalizerMock{} - mp, err := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - 
&mock.AccountsStub{}, - mdp, - &mock.ForkDetectorMock{}, - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.HasherStub{}, - marshalizerMock, - &mock.ChainStorerMock{}, - createGenesisBlocks(mock.NewOneShardCoordinatorMock()), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - if err != nil { - assert.NotNil(t, err) - } + arguments := CreateMockMetaArguments() + mp, _ := blproc.NewMetaProcessor(arguments) numberOfShards := uint32(4) type DataForMap struct { From a7f5ce52afbbb1e1a93fd628ef89095f1cf2f591 Mon Sep 17 00:00:00 2001 From: Sebastian Marian <36901555+SebastianMarian@users.noreply.github.com> Date: Wed, 2 Oct 2019 17:11:59 +0300 Subject: [PATCH 180/234] En 4184 fix print info and throttle size (#494) * First step of refactor * Skipped some unit tests because of the min limit value of the items which could be put in one block which was set to 15000 (equal with the max limit) * Fixed a problem when the final nonce should be set in fork detector * Fixed some prints info * Improved mutex (R)Lock/(R)Unlock * Renamed some variables * Fixed unit tests * Added checks for mini blocks limit which could be included in one block * Added unit tests and extract some duplicate code in one method * Removed mini block throttle for intermediate results of SC * Increased cache size of mini blocks up to 300 (for 100 shards: 100 mini blocks of txs, 100 mini blocks of rewards, 100 mini blocks of intermediate results of SC) * Fixed return management in computeOrderedTxs * Removed unused code --- cmd/node/config/config.toml | 2 +- core/computers.go | 10 ++++- core/computers_test.go | 20 +++++++-- core/constants.go | 3 ++ .../intermediateTransactionHandlerMock.go | 2 +- process/block/displayBlock.go | 5 ++- process/block/metablock.go | 2 +- process/block/preprocess/transactions.go | 42 ++++++++++++++----- process/block/preprocess/transactions_test.go | 4 +- process/block/shardblock.go | 33 ++++++++++++--- process/constants.go | 3 +- process/errors.go | 3 ++ .../intermediateTransactionHandlerMock.go | 2 +- process/sync/baseForkDetector.go | 5 +-- process/sync/baseForkDetector_test.go | 33 +++++++++++++++ process/sync/export_test.go | 4 ++ process/sync/shardForkDetector.go | 4 +- process/throttle/block.go | 4 +- process/throttle/block_test.go | 11 ++--- 19 files changed, 149 insertions(+), 43 deletions(-) diff --git a/cmd/node/config/config.toml b/cmd/node/config/config.toml index f06c2570611..69f3b3a5d46 100644 --- a/cmd/node/config/config.toml +++ b/cmd/node/config/config.toml @@ -19,7 +19,7 @@ [MiniBlocksStorage] [MiniBlocksStorage.Cache] - Size = 100 + Size = 300 Type = "LRU" [MiniBlocksStorage.DB] FilePath = "MiniBlocks" diff --git a/core/computers.go b/core/computers.go index adb9b9aab6e..c6c7b47c08f 100644 --- a/core/computers.go +++ b/core/computers.go @@ -5,13 +5,21 @@ import ( ) // Max returns the maximum number between two given -func Max(a uint32, b uint32) uint32 { +func Max(a int32, b int32) int32 { if a > b { return a } return b } +// Min returns the minimum number between two given +func Min(a int32, b int32) int32 { + if a < b { + return a + } + return b +} + // IsHashInList signals if the given hash exists in the given list of hashes func IsHashInList(hash []byte, hashes [][]byte) bool { for i := 0; i < len(hashes); i++ { diff --git a/core/computers_test.go b/core/computers_test.go index 0d71dffe362..3a08d506bab 100644 --- a/core/computers_test.go +++ b/core/computers_test.go @@ -8,13 +8,25 @@ import ( 
) func TestMaxShouldReturnA(t *testing.T) { - a := uint32(11) - b := uint32(10) + a := int32(11) + b := int32(10) assert.Equal(t, a, core.Max(a, b)) } func TestMaxShouldReturnB(t *testing.T) { - a := uint32(10) - b := uint32(11) + a := int32(10) + b := int32(11) assert.Equal(t, b, core.Max(a, b)) } + +func TestMinShouldReturnB(t *testing.T) { + a := int32(11) + b := int32(10) + assert.Equal(t, b, core.Min(a, b)) +} + +func TestMinShouldReturnA(t *testing.T) { + a := int32(10) + b := int32(11) + assert.Equal(t, a, core.Min(a, b)) +} diff --git a/core/constants.go b/core/constants.go index 6763676ed1c..e88ebd0aab9 100644 --- a/core/constants.go +++ b/core/constants.go @@ -153,5 +153,8 @@ const MetricNumShardHeadersProcessed = "erd_num_shard_headers_processed" // MetricNumTimesInForkChoice is the metric that counts how many time a node was in fork choice const MetricNumTimesInForkChoice = "erd_fork_choice_count" +// MaxMiniBlocksInBlock specifies the max number of mini blocks which can be added in one block +const MaxMiniBlocksInBlock = 100 + //MetricHighestFinalBlockInShard is the metric that stores the highest nonce block notarized by metachain for current shard const MetricHighestFinalBlockInShard = "erd_highest_notarized_block_by_metachain_for_current_shard" diff --git a/integrationTests/mock/intermediateTransactionHandlerMock.go b/integrationTests/mock/intermediateTransactionHandlerMock.go index 2967c465e04..e553b2472dc 100644 --- a/integrationTests/mock/intermediateTransactionHandlerMock.go +++ b/integrationTests/mock/intermediateTransactionHandlerMock.go @@ -51,7 +51,7 @@ func (ith *IntermediateTransactionHandlerMock) SaveCurrentIntermediateTxToStorag func (ith *IntermediateTransactionHandlerMock) CreateBlockStarted() { if ith.CreateBlockStartedCalled != nil { - ith.CreateAllInterMiniBlocksCalled() + ith.CreateBlockStarted() } } diff --git a/process/block/displayBlock.go b/process/block/displayBlock.go index 085189bb40c..24e8db47eed 100644 --- a/process/block/displayBlock.go +++ b/process/block/displayBlock.go @@ -180,7 +180,10 @@ func (txc *transactionCounter) displayTxBlockBody(lines []*display.LineData, bod for i := 0; i < len(body); i++ { miniBlock := body[i] - part := fmt.Sprintf("MiniBlock_%d->%d", miniBlock.SenderShardID, miniBlock.ReceiverShardID) + part := fmt.Sprintf("%s_MiniBlock_%d->%d", + miniBlock.Type.String(), + miniBlock.SenderShardID, + miniBlock.ReceiverShardID) if miniBlock.TxHashes == nil || len(miniBlock.TxHashes) == 0 { lines = append(lines, display.NewLineData(false, []string{ diff --git a/process/block/metablock.go b/process/block/metablock.go index c5c49e6ce24..9e6d03e2526 100644 --- a/process/block/metablock.go +++ b/process/block/metablock.go @@ -1210,7 +1210,7 @@ func (mp *metaProcessor) CreateBlockHeader(bodyHandler data.BodyHandler, round u mp.blockSizeThrottler.Add( round, - core.Max(header.ItemsInBody(), header.ItemsInHeader())) + uint32(core.Max(int32(header.ItemsInBody()), int32(header.ItemsInHeader())))) return header, nil } diff --git a/process/block/preprocess/transactions.go b/process/block/preprocess/transactions.go index 141da4e5e42..7aece2597e0 100644 --- a/process/block/preprocess/transactions.go +++ b/process/block/preprocess/transactions.go @@ -507,7 +507,6 @@ func (txs *transactions) CreateAndProcessMiniBlock( miniBlock.ReceiverShardID = dstShardId miniBlock.TxHashes = make([][]byte, 0) miniBlock.Type = block.TxBlock - log.Info(fmt.Sprintf("creating mini blocks has been started: have %d txs in pool for shard id %d\n", len(orderedTxs), 
miniBlock.ReceiverShardID)) addedTxs := 0 addedGasLimitPerCrossShardMiniblock := uint64(0) @@ -525,7 +524,11 @@ func (txs *transactions) CreateAndProcessMiniBlock( currTxGasLimit = orderedTxs[index].GasLimit } - if addedGasLimitPerCrossShardMiniblock+currTxGasLimit > process.MaxGasLimitPerMiniBlock { + isGasLimitReached := addedGasLimitPerCrossShardMiniblock+currTxGasLimit > process.MaxGasLimitPerMiniBlock + if isGasLimitReached { + log.Info(fmt.Sprintf("max gas limit per mini block is reached: added %d txs from %d txs\n", + len(miniBlock.TxHashes), + len(orderedTxs))) continue } @@ -554,7 +557,9 @@ func (txs *transactions) CreateAndProcessMiniBlock( addedGasLimitPerCrossShardMiniblock += currTxGasLimit if addedTxs >= spaceRemained { // max transactions count in one block was reached - log.Info(fmt.Sprintf("max txs accepted in one block is reached: added %d txs from %d txs\n", len(miniBlock.TxHashes), len(orderedTxs))) + log.Info(fmt.Sprintf("max txs accepted in one block is reached: added %d txs from %d txs\n", + len(miniBlock.TxHashes), + len(orderedTxs))) return miniBlock, nil } } @@ -570,7 +575,14 @@ func (txs *transactions) computeOrderedTxs( var err error strCache := process.ShardCacherIdentifier(sndShardId, dstShardId) - txStore := txs.txPool.ShardDataStore(strCache) + txShardPool := txs.txPool.ShardDataStore(strCache) + + if txShardPool == nil { + return nil, nil, process.ErrNilTxDataPool + } + if txShardPool.Len() == 0 { + return nil, nil, process.ErrEmptyTxDataPool + } txs.mutOrderedTxs.RLock() orderedTxs := txs.orderedTxs[strCache] @@ -579,7 +591,15 @@ func (txs *transactions) computeOrderedTxs( alreadyOrdered := len(orderedTxs) > 0 if !alreadyOrdered { - orderedTxs, orderedTxHashes, err = SortTxByNonce(txStore) + orderedTxs, orderedTxHashes, err = SortTxByNonce(txShardPool) + if err != nil { + return nil, nil, err + } + + log.Info(fmt.Sprintf("creating mini blocks has been started: have %d txs in pool for shard %d from shard %d\n", + len(orderedTxs), + dstShardId, + sndShardId)) txs.mutOrderedTxs.Lock() txs.orderedTxs[strCache] = orderedTxs @@ -587,7 +607,7 @@ func (txs *transactions) computeOrderedTxs( txs.mutOrderedTxs.Unlock() } - return orderedTxs, orderedTxHashes, err + return orderedTxs, orderedTxHashes, nil } // ProcessMiniBlock processes all the transactions from a and saves the processed transactions in local cache complete miniblock @@ -625,9 +645,9 @@ func (txs *transactions) ProcessMiniBlock(miniBlock *block.MiniBlock, haveTime f } // SortTxByNonce sort transactions according to nonces -func SortTxByNonce(txShardStore storage.Cacher) ([]*transaction.Transaction, [][]byte, error) { - if txShardStore == nil { - return nil, nil, process.ErrNilCacher +func SortTxByNonce(txShardPool storage.Cacher) ([]*transaction.Transaction, [][]byte, error) { + if txShardPool == nil { + return nil, nil, process.ErrNilTxDataPool } transactions := make([]*transaction.Transaction, 0) @@ -638,8 +658,8 @@ func SortTxByNonce(txShardStore storage.Cacher) ([]*transaction.Transaction, [][ nonces := make([]uint64, 0) - for _, key := range txShardStore.Keys() { - val, _ := txShardStore.Peek(key) + for _, key := range txShardPool.Keys() { + val, _ := txShardPool.Peek(key) if val == nil { continue } diff --git a/process/block/preprocess/transactions_test.go b/process/block/preprocess/transactions_test.go index 0ea75f30432..09cf2d50564 100644 --- a/process/block/preprocess/transactions_test.go +++ b/process/block/preprocess/transactions_test.go @@ -730,12 +730,12 @@ func init() { r = 
rand.New(rand.NewSource(time.Now().UnixNano())) } -func TestSortTxByNonce_NilCacherShouldErr(t *testing.T) { +func TestSortTxByNonce_NilTxDataPoolShouldErr(t *testing.T) { t.Parallel() transactions, txHashes, err := SortTxByNonce(nil) assert.Nil(t, transactions) assert.Nil(t, txHashes) - assert.Equal(t, process.ErrNilCacher, err) + assert.Equal(t, process.ErrNilTxDataPool, err) } func TestSortTxByNonce_EmptyCacherShouldReturnEmpty(t *testing.T) { diff --git a/process/block/shardblock.go b/process/block/shardblock.go index ace9702aa6a..53affc3d7a0 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -829,7 +829,7 @@ func (sp *shardProcessor) CommitBlock( func (sp *shardProcessor) cleanTxsPools() { _, err := sp.txsPoolsCleaner.Clean(maxCleanTime) log.LogIfError(err) - log.Info(fmt.Sprintf("Total txs removed from pools cleaner %d", sp.txsPoolsCleaner.NumRemovedTxs())) + log.Info(fmt.Sprintf("%d txs have been removed from pools after cleaning\n", sp.txsPoolsCleaner.NumRemovedTxs())) } // getHighestHdrForOwnShardFromMetachain calculates the highest shard header notarized by metachain @@ -1446,6 +1446,11 @@ func (sp *shardProcessor) createAndProcessCrossMiniBlocksDstMe( break } + if len(miniBlocks) >= core.MaxMiniBlocksInBlock { + log.Info(fmt.Sprintf("%d max number of mini blocks allowed to be added in one shard block has been reached\n", len(miniBlocks))) + break + } + itemsAddedInHeader := uint32(len(usedMetaHdrsHashes) + len(miniBlocks)) if itemsAddedInHeader >= maxItemsInBlock { log.Info(fmt.Sprintf("%d max records allowed to be added in shard header has been reached\n", maxItemsInBlock)) @@ -1479,7 +1484,10 @@ func (sp *shardProcessor) createAndProcessCrossMiniBlocksDstMe( } maxTxSpaceRemained := int32(maxItemsInBlock) - int32(itemsAddedInBody) - maxMbSpaceRemained := int32(maxItemsInBlock) - int32(itemsAddedInHeader) - 1 + maxMbSpaceRemained := sp.getMaxMiniBlocksSpaceRemained( + maxItemsInBlock, + itemsAddedInHeader+1, + uint32(len(miniBlocks))) if maxTxSpaceRemained > 0 && maxMbSpaceRemained > 0 { processedMiniBlocksHashes := sp.getProcessedMiniBlocksHashes(orderedMetaBlocks[i].hash) @@ -1552,14 +1560,17 @@ func (sp *shardProcessor) createMiniBlocks( return nil, err } - log.Debug(fmt.Sprintf("processed %d miniblocks and %d txs with destination in self shard\n", len(destMeMiniBlocks), txs)) + log.Info(fmt.Sprintf("processed %d miniblocks and %d txs with destination in self shard\n", len(destMeMiniBlocks), txs)) if len(destMeMiniBlocks) > 0 { miniBlocks = append(miniBlocks, destMeMiniBlocks...) 
} maxTxSpaceRemained := int32(maxItemsInBlock) - int32(txs) - maxMbSpaceRemained := int32(maxItemsInBlock) - int32(len(destMeMiniBlocks)) - int32(len(usedMetaHdrsHashes)) + maxMbSpaceRemained := sp.getMaxMiniBlocksSpaceRemained( + maxItemsInBlock, + uint32(len(destMeMiniBlocks)+len(usedMetaHdrsHashes)), + uint32(len(miniBlocks))) if maxTxSpaceRemained > 0 && maxMbSpaceRemained > 0 { mbFromMe := sp.txCoordinator.CreateMbsAndProcessTransactionsFromMe( @@ -1639,7 +1650,7 @@ func (sp *shardProcessor) CreateBlockHeader(bodyHandler data.BodyHandler, round sp.blockSizeThrottler.Add( round, - core.Max(header.ItemsInBody(), header.ItemsInHeader())) + uint32(core.Max(int32(header.ItemsInBody()), int32(header.ItemsInHeader())))) return header, nil } @@ -1779,3 +1790,15 @@ func (sp *shardProcessor) isMiniBlockProcessed(metaBlockHash []byte, miniBlockHa return isProcessed } + +func (sp *shardProcessor) getMaxMiniBlocksSpaceRemained( + maxItemsInBlock uint32, + itemsAddedInBlock uint32, + miniBlocksAddedInBlock uint32, +) int32 { + mbSpaceRemainedInBlock := int32(maxItemsInBlock) - int32(itemsAddedInBlock) + mbSpaceRemainedInCache := int32(core.MaxMiniBlocksInBlock) - int32(miniBlocksAddedInBlock) + maxMbSpaceRemained := core.Min(mbSpaceRemainedInBlock, mbSpaceRemainedInCache) + + return maxMbSpaceRemained +} diff --git a/process/constants.go b/process/constants.go index 1b7e74f48c2..9d87a1ed5d6 100644 --- a/process/constants.go +++ b/process/constants.go @@ -34,9 +34,8 @@ const ShardBlockFinality = 1 const MetaBlockFinality = 1 const MaxHeaderRequestsAllowed = 10 const MaxItemsInBlock = 15000 -const MinItemsInBlock = 1000 +const MinItemsInBlock = 15000 const MaxNoncesDifference = 5 -const GenesisBlockNonce = 0 // TODO - calculate exactly in case of the VM, for every VM to have a similar constant, operations / seconds const MaxGasLimitPerMiniBlock = uint64(100000) diff --git a/process/errors.go b/process/errors.go index 72db2088f8a..48f91a46d65 100644 --- a/process/errors.go +++ b/process/errors.go @@ -145,6 +145,9 @@ var ErrNilMessenger = errors.New("nil Messenger") // ErrNilTxDataPool signals that a nil transaction pool has been provided var ErrNilTxDataPool = errors.New("nil transaction data pool") +// ErrEmptyTxDataPool signals that a empty transaction pool has been provided +var ErrEmptyTxDataPool = errors.New("empty transaction data pool") + // ErrNilHeadersDataPool signals that a nil headers pool has been provided var ErrNilHeadersDataPool = errors.New("nil headers data pool") diff --git a/process/mock/intermediateTransactionHandlerMock.go b/process/mock/intermediateTransactionHandlerMock.go index f78983821ae..75ea84b3276 100644 --- a/process/mock/intermediateTransactionHandlerMock.go +++ b/process/mock/intermediateTransactionHandlerMock.go @@ -52,7 +52,7 @@ func (ith *IntermediateTransactionHandlerMock) SaveCurrentIntermediateTxToStorag func (ith *IntermediateTransactionHandlerMock) CreateBlockStarted() { if ith.CreateBlockStartedCalled != nil { - ith.CreateAllInterMiniBlocksCalled() + ith.CreateBlockStartedCalled() } } diff --git a/process/sync/baseForkDetector.go b/process/sync/baseForkDetector.go index d267f1cb00b..5bccaa97a80 100644 --- a/process/sync/baseForkDetector.go +++ b/process/sync/baseForkDetector.go @@ -185,10 +185,8 @@ func (bfd *baseForkDetector) RemoveHeaders(nonce uint64, hash []byte) { var preservedHdrInfos []*headerInfo - bfd.mutHeaders.RLock() + bfd.mutHeaders.Lock() hdrInfos := bfd.headers[nonce] - bfd.mutHeaders.RUnlock() - for _, hdrInfoStored := range hdrInfos { if 
bytes.Equal(hdrInfoStored.hash, hash) { continue @@ -197,7 +195,6 @@ func (bfd *baseForkDetector) RemoveHeaders(nonce uint64, hash []byte) { preservedHdrInfos = append(preservedHdrInfos, hdrInfoStored) } - bfd.mutHeaders.Lock() if preservedHdrInfos == nil { delete(bfd.headers, nonce) } else { diff --git a/process/sync/baseForkDetector_test.go b/process/sync/baseForkDetector_test.go index 1c901893c81..6ed06afb577 100644 --- a/process/sync/baseForkDetector_test.go +++ b/process/sync/baseForkDetector_test.go @@ -837,3 +837,36 @@ func TestMetaForkDetector_ShouldAddBlockInForkDetectorShouldErrLowerRoundInBlock err = mfd.ShouldAddBlockInForkDetector(hdr, process.BHProposed, process.MetaBlockFinality) assert.Equal(t, sync.ErrLowerRoundInBlock, err) } + +func TestShardForkDetector_AddFinalHeadersShouldNotChangeTheFinalCheckpoint(t *testing.T) { + t.Parallel() + rounderMock := &mock.RounderMock{RoundIndex: 10} + sfd, _ := sync.NewShardForkDetector(rounderMock) + hdr1 := &block.Header{Nonce: 3, Round: 3} + hash1 := []byte("hash1") + hdr2 := &block.Header{Nonce: 1, Round: 1} + hash2 := []byte("hash2") + hdr3 := &block.Header{Nonce: 4, Round: 5} + hash3 := []byte("hash3") + + hdrs := make([]data.HeaderHandler, 0) + hashes := make([][]byte, 0) + hdrs = append(hdrs, hdr1) + hashes = append(hashes, hash1) + sfd.AddFinalHeaders(hdrs, hashes) + assert.Equal(t, hdr1.Nonce, sfd.FinalCheckpointNonce()) + + hdrs = make([]data.HeaderHandler, 0) + hashes = make([][]byte, 0) + hdrs = append(hdrs, hdr2) + hashes = append(hashes, hash2) + sfd.AddFinalHeaders(hdrs, hashes) + assert.Equal(t, hdr1.Nonce, sfd.FinalCheckpointNonce()) + + hdrs = make([]data.HeaderHandler, 0) + hashes = make([][]byte, 0) + hdrs = append(hdrs, hdr3) + hashes = append(hashes, hash3) + sfd.AddFinalHeaders(hdrs, hashes) + assert.Equal(t, hdr3.Nonce, sfd.FinalCheckpointNonce()) +} diff --git a/process/sync/export_test.go b/process/sync/export_test.go index d99cdcbc29e..44a6a37fb24 100644 --- a/process/sync/export_test.go +++ b/process/sync/export_test.go @@ -280,3 +280,7 @@ func (bfd *baseForkDetector) ShouldAddBlockInForkDetector(header data.HeaderHand func (bfd *baseForkDetector) SetProbableHighestNonce(nonce uint64) { bfd.setProbableHighestNonce(nonce) } + +func (sfd *shardForkDetector) AddFinalHeaders(finalHeaders []data.HeaderHandler, finalHeadersHashes [][]byte) { + sfd.addFinalHeaders(finalHeaders, finalHeadersHashes) +} diff --git a/process/sync/shardForkDetector.go b/process/sync/shardForkDetector.go index 7e3faa1cf7c..82cc215560d 100644 --- a/process/sync/shardForkDetector.go +++ b/process/sync/shardForkDetector.go @@ -82,8 +82,8 @@ func (sfd *shardForkDetector) AddHeader( func (sfd *shardForkDetector) addFinalHeaders(finalHeaders []data.HeaderHandler, finalHeadersHashes [][]byte) { finalCheckpointWasSet := false for i := 0; i < len(finalHeaders); i++ { - isFinalHeaderNonceHigherThanGenesis := finalHeaders[i].GetNonce() > process.GenesisBlockNonce - if isFinalHeaderNonceHigherThanGenesis { + isFinalHeaderNonceNotLowerThanCurrent := finalHeaders[i].GetNonce() >= sfd.finalCheckpoint().nonce + if isFinalHeaderNonceNotLowerThanCurrent { if !finalCheckpointWasSet { sfd.setFinalCheckpoint(&checkpointInfo{nonce: finalHeaders[i].GetNonce(), round: finalHeaders[i].GetRound()}) finalCheckpointWasSet = true diff --git a/process/throttle/block.go b/process/throttle/block.go index ed7b51d2e64..bf7c7fe553e 100644 --- a/process/throttle/block.go +++ b/process/throttle/block.go @@ -112,7 +112,7 @@ func (bst *blockSizeThrottle) 
getMaxItemsWhenSucceed(lastActionMaxItems uint32) return noOfMaxItemsUsedWithoutSucceed } - increasedNoOfItems := core.Max(1, uint32(float32(noOfMaxItemsUsedWithoutSucceed-lastActionMaxItems)*jumpAboveFactor)) + increasedNoOfItems := uint32(core.Max(1, int32(float32(noOfMaxItemsUsedWithoutSucceed-lastActionMaxItems)*jumpAboveFactor))) return lastActionMaxItems + increasedNoOfItems } @@ -136,7 +136,7 @@ func (bst *blockSizeThrottle) getMaxItemsWhenNotSucceed(lastActionMaxItems uint3 return noOfMaxItemsUsedWithSucceed } - decreasedNoOfItems := core.Max(1, uint32(float32(lastActionMaxItems-noOfMaxItemsUsedWithSucceed)*jumpBelowFactor)) + decreasedNoOfItems := uint32(core.Max(1, int32(float32(lastActionMaxItems-noOfMaxItemsUsedWithSucceed)*jumpBelowFactor))) return lastActionMaxItems - decreasedNoOfItems } diff --git a/process/throttle/block_test.go b/process/throttle/block_test.go index 4ebf7039e3d..732ca9c29e3 100644 --- a/process/throttle/block_test.go +++ b/process/throttle/block_test.go @@ -3,6 +3,7 @@ package throttle_test import ( "testing" + "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/throttle" "github.com/stretchr/testify/assert" @@ -133,7 +134,7 @@ func TestBlockSizeThrottle_ComputeMaxItemsShouldSetMaxItemsToMinItemsInBlockWhen func TestBlockSizeThrottle_ComputeMaxItemsShouldSetMaxItemsToADecreasedValueWhenLastActionNotSucceed(t *testing.T) { bst, _ := throttle.NewBlockSizeThrottle() - lastActionMaxItems1 := uint32(12000) + lastActionMaxItems1 := uint32(core.Max(12000, process.MinItemsInBlock)) bst.SetMaxItems(lastActionMaxItems1) bst.Add(2, 0) bst.SetSucceed(2, false) @@ -142,7 +143,7 @@ func TestBlockSizeThrottle_ComputeMaxItemsShouldSetMaxItemsToADecreasedValueWhen assert.Equal(t, decreasedValue, bst.MaxItemsToAdd()) bst.SetSucceed(2, true) - lastActionMaxItems2 := uint32(14000) + lastActionMaxItems2 := uint32(core.Max(14000, process.MinItemsInBlock)) bst.SetMaxItems(lastActionMaxItems2) bst.Add(3, 0) bst.SetSucceed(3, false) @@ -178,12 +179,12 @@ func TestBlockSizeThrottle_GetMaxItemsWhenSucceedShouldReturnNoOfMaxItemsUsedWit func TestBlockSizeThrottle_GetMaxItemsWhenSucceedShouldIncreaseMaxItemsWithAtLeastOneUnit(t *testing.T) { bst, _ := throttle.NewBlockSizeThrottle() - maxItemsUsedWithoutSucceed := uint32(process.MinItemsInBlock + 1) + maxItemsUsedWithoutSucceed := uint32(core.Min(process.MinItemsInBlock+1, process.MaxItemsInBlock)) bst.SetMaxItems(maxItemsUsedWithoutSucceed) bst.Add(2, 0) maxItemsWhenSucceed := bst.GetMaxItemsWhenSucceed(process.MinItemsInBlock) - assert.Equal(t, uint32(process.MinItemsInBlock+1), maxItemsWhenSucceed) + assert.Equal(t, uint32(core.Min(process.MinItemsInBlock+1, process.MaxItemsInBlock)), maxItemsWhenSucceed) } func TestBlockSizeThrottle_GetMaxItemsWhenSucceedShouldIncreaseMaxItems(t *testing.T) { @@ -257,7 +258,7 @@ func TestBlockSizeThrottle_GetMaxItemsWhenNotSucceedShouldDecreaseMaxItemsWithAt func TestBlockSizeThrottle_GetMaxItemsWhenNotSucceedShouldDecreaseMaxItems(t *testing.T) { bst, _ := throttle.NewBlockSizeThrottle() - maxItemsUsedWithSucceed := uint32(7000) + maxItemsUsedWithSucceed := uint32(core.Max(7000, process.MinItemsInBlock)) bst.SetMaxItems(maxItemsUsedWithSucceed) bst.Add(2, 0) bst.SetSucceed(2, true) From 245f03bf93e51174d0357a67cf2ce1b22b4e7b89 Mon Sep 17 00:00:00 2001 From: Sebastian Marian Date: Wed, 2 Oct 2019 17:23:55 +0300 Subject: [PATCH 181/234] * Changed the approach for requested/received missing meta headers --- 
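A minimal sketch of the bookkeeping the diff below moves to: the meta headers used for the current block are kept in a hash-indexed map guarded by a mutex, and the used hashes are sorted by nonce when the shard header is assembled. Type and field names mirror the diff; the headerHandler stand-in and the helper function are illustrative assumptions only, not part of the change.

package sketch

import (
	"sort"
	"sync"
)

// headerHandler is a stand-in for data.HeaderHandler, reduced to what the sketch needs.
type headerHandler interface{ GetNonce() uint64 }

type hdrInfo struct{ hdr headerHandler }

type nonceAndHashInfo struct {
	hash  []byte
	nonce uint64
}

// hdrForBlock tracks the headers referenced by the block under construction.
type hdrForBlock struct {
	mutHdrsForBlock sync.RWMutex
	hdrHashAndInfo  map[string]*hdrInfo
}

// usedMetaHdrHashesSortedByNonce returns the tracked meta block hashes ordered by nonce,
// the way CreateBlockHeader fills header.MetaBlockHashes in the diff below.
func (h *hdrForBlock) usedMetaHdrHashesSortedByNonce() [][]byte {
	h.mutHdrsForBlock.RLock()
	infos := make([]*nonceAndHashInfo, 0, len(h.hdrHashAndInfo))
	for hash, hi := range h.hdrHashAndInfo {
		infos = append(infos, &nonceAndHashInfo{hash: []byte(hash), nonce: hi.hdr.GetNonce()})
	}
	h.mutHdrsForBlock.RUnlock()

	sort.Slice(infos, func(i, j int) bool { return infos[i].nonce < infos[j].nonce })

	hashes := make([][]byte, len(infos))
	for i, info := range infos {
		hashes[i] = info.hash
	}

	return hashes
}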
process/block/shardblock.go | 172 ++++++++++++++++++------------------ 1 file changed, 85 insertions(+), 87 deletions(-) diff --git a/process/block/shardblock.go b/process/block/shardblock.go index 1b5e0be5c30..766ef1ad931 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -20,6 +20,11 @@ import ( const maxCleanTime = time.Second +type nonceAndHashInfo struct { + hash []byte + nonce uint64 +} + type hdrInfo struct { hdr data.HeaderHandler } @@ -38,9 +43,9 @@ type shardProcessor struct { blocksTracker process.BlocksTracker metaBlockFinality int - chRcvAllMetaHdrs chan bool - mutUsedMetaHdrsHashes sync.Mutex - usedMetaHdrsHashes map[uint64][][]byte + chRcvAllMetaHdrs chan bool + //mutUsedMetaHdrsHashes sync.Mutex + //usedMetaHdrsHashes map[uint64][][]byte hdrsForCurrBlock hdrForBlock currHighestMetaHdrNonce uint64 @@ -130,7 +135,6 @@ func NewShardProcessor(arguments ArgShardProcessor) (*shardProcessor, error) { } sp.hdrsForCurrBlock.hdrHashAndInfo = make(map[string]*hdrInfo) - sp.usedMetaHdrsHashes = make(map[uint64][][]byte) sp.processedMiniBlocks = make(map[string]map[string]struct{}) metaBlockPool := sp.dataPool.MetaBlocks() @@ -263,7 +267,7 @@ func (sp *shardProcessor) ProcessBlock( } }() - processedMetaHdrs, err := sp.getProcessedMetaBlocksFromMiniBlocks(body, header.MetaBlockHashes) + processedMetaHdrs, err := sp.getProcessedMetaBlocksFromMiniBlocks(body) if err != nil { return err } @@ -632,7 +636,7 @@ func (sp *shardProcessor) restoreMetaBlockIntoPool(miniBlockHashes map[string]ui // as long as the transactions limit for the block has not been reached and there is still time to add transactions func (sp *shardProcessor) CreateBlockBody(round uint64, haveTime func() bool) (data.BodyHandler, error) { log.Debug(fmt.Sprintf("started creating block body in round %d\n", round)) - sp.txCoordinator.CreateBlockStarted() + sp.CreateBlockStarted() sp.blockSizeThrottler.ComputeMaxItems() miniBlocks, err := sp.createMiniBlocks(sp.shardCoordinator.NumberOfShards(), sp.blockSizeThrottler.MaxItemsToAdd(), round, haveTime) @@ -907,29 +911,23 @@ func (sp *shardProcessor) getProcessedMetaBlocksFromHeader(header *block.Header) log.Debug(fmt.Sprintf("cross mini blocks in body: %d\n", len(miniBlockHashes))) - processedMetaHeaders, usedMbs, err := sp.getProcessedMetaBlocksFromMiniBlockHashes(miniBlockHashes, header.MetaBlockHashes) + processedMetaHeaders, processedCrossMiniBlocksHashes, err := sp.getProcessedMetaBlocksFromMiniBlockHashes(miniBlockHashes) if err != nil { return nil, err } sp.hdrsForCurrBlock.mutHdrsForBlock.RLock() - for _, metaBlockHash := range header.MetaBlockHashes { - value, ok := sp.hdrsForCurrBlock.hdrHashAndInfo[string(metaBlockHash)] - if !ok { - sp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() - return nil, process.ErrNilMetaBlockHeader - } - - metaBlock, ok := value.hdr.(*block.MetaBlock) + for metaBlockHash, hdrInfo := range sp.hdrsForCurrBlock.hdrHashAndInfo { + metaBlock, ok := hdrInfo.hdr.(*block.MetaBlock) if !ok { sp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() return nil, process.ErrWrongTypeAssertion } crossMiniBlockHashes := metaBlock.GetMiniBlockHeadersWithDst(sp.shardCoordinator.SelfId()) - for key := range crossMiniBlockHashes { - if usedMbs[key] { - sp.addProcessedMiniBlock(metaBlockHash, []byte(key)) + for hash := range crossMiniBlockHashes { + if processedCrossMiniBlocksHashes[hash] { + sp.addProcessedMiniBlock([]byte(metaBlockHash), []byte(hash)) } } } @@ -941,50 +939,52 @@ func (sp *shardProcessor) getProcessedMetaBlocksFromHeader(header 
*block.Header) // getProcessedMetaBlocks returns all the meta blocks fully processed func (sp *shardProcessor) getProcessedMetaBlocksFromMiniBlocks( usedMiniBlocks []*block.MiniBlock, - usedMetaBlockHashes [][]byte, ) ([]data.HeaderHandler, error) { - if usedMiniBlocks == nil || usedMetaBlockHashes == nil { + + nrMiniBlocksUsed := len(usedMiniBlocks) + + sp.hdrsForCurrBlock.mutHdrsForBlock.RLock() + nrMetaBlocksUsed := len(sp.hdrsForCurrBlock.hdrHashAndInfo) + sp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() + + if nrMiniBlocksUsed == 0 || nrMetaBlocksUsed == 0 { // not an error, it can happen that no metablock hdr or no miniblock is used. return make([]data.HeaderHandler, 0), nil } miniBlockHashes := make(map[int][]byte, 0) for i := 0; i < len(usedMiniBlocks); i++ { - miniBlock := usedMiniBlocks[i] - if miniBlock.SenderShardID == sp.shardCoordinator.SelfId() { + if usedMiniBlocks[i].SenderShardID == sp.shardCoordinator.SelfId() { continue } - mbHash, err := core.CalculateHash(sp.marshalizer, sp.hasher, miniBlock) + miniBlockHash, err := core.CalculateHash(sp.marshalizer, sp.hasher, usedMiniBlocks[i]) if err != nil { log.Debug(err.Error()) continue } - miniBlockHashes[i] = mbHash + + miniBlockHashes[i] = miniBlockHash } log.Debug(fmt.Sprintf("cross mini blocks in body: %d\n", len(miniBlockHashes))) - processedMetaBlocks, _, err := sp.getProcessedMetaBlocksFromMiniBlockHashes(miniBlockHashes, usedMetaBlockHashes) + processedMetaBlocks, _, err := sp.getProcessedMetaBlocksFromMiniBlockHashes(miniBlockHashes) return processedMetaBlocks, err } func (sp *shardProcessor) getProcessedMetaBlocksFromMiniBlockHashes( miniBlockHashes map[int][]byte, - usedMetaBlockHashes [][]byte, ) ([]data.HeaderHandler, map[string]bool, error) { processedMetaHdrs := make([]data.HeaderHandler, 0) - processedMBs := make(map[string]bool) - - for _, metaBlockKey := range usedMetaBlockHashes { - obj, _ := sp.dataPool.MetaBlocks().Peek(metaBlockKey) - if obj == nil { - return nil, nil, process.ErrNilMetaBlockHeader - } + processedCrossMiniBlocksHashes := make(map[string]bool) - metaBlock, ok := obj.(*block.MetaBlock) + sp.hdrsForCurrBlock.mutHdrsForBlock.RLock() + for metaBlockHash, hdrInfo := range sp.hdrsForCurrBlock.hdrHashAndInfo { + metaBlock, ok := hdrInfo.hdr.(*block.MetaBlock) if !ok { + sp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() return nil, nil, process.ErrWrongTypeAssertion } @@ -992,16 +992,16 @@ func (sp *shardProcessor) getProcessedMetaBlocksFromMiniBlockHashes( crossMiniBlockHashes := metaBlock.GetMiniBlockHeadersWithDst(sp.shardCoordinator.SelfId()) for hash := range crossMiniBlockHashes { - processedMBs[hash] = sp.isMiniBlockProcessed(metaBlockKey, []byte(hash)) + processedCrossMiniBlocksHashes[hash] = sp.isMiniBlockProcessed([]byte(metaBlockHash), []byte(hash)) } - for key := range miniBlockHashes { - _, ok = crossMiniBlockHashes[string(miniBlockHashes[key])] + for key, miniBlockHash := range miniBlockHashes { + _, ok = crossMiniBlockHashes[string(miniBlockHash)] if !ok { continue } - processedMBs[string(miniBlockHashes[key])] = true + processedCrossMiniBlocksHashes[string(miniBlockHash)] = true delete(miniBlockHashes, key) } @@ -1009,8 +1009,8 @@ func (sp *shardProcessor) getProcessedMetaBlocksFromMiniBlockHashes( log.Debug(fmt.Sprintf("cross mini blocks in meta hdr: %d\n", len(crossMiniBlockHashes))) processedAll := true - for key := range crossMiniBlockHashes { - if !processedMBs[key] { + for hash := range crossMiniBlockHashes { + if !processedCrossMiniBlocksHashes[hash] { processedAll = false break } 
@@ -1020,8 +1020,9 @@ func (sp *shardProcessor) getProcessedMetaBlocksFromMiniBlockHashes( processedMetaHdrs = append(processedMetaHdrs, metaBlock) } } + sp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() - return processedMetaHdrs, processedMBs, nil + return processedMetaHdrs, processedCrossMiniBlocksHashes, nil } func (sp *shardProcessor) removeProcessedMetaBlocksFromPool(processedMetaHdrs []data.HeaderHandler) error { @@ -1395,47 +1396,33 @@ func (sp *shardProcessor) createAndProcessCrossMiniBlocksDstMe( maxItemsInBlock uint32, round uint64, haveTime func() bool, -) (block.MiniBlockSlice, [][]byte, uint32, error) { - - metaBlockCache := sp.dataPool.MetaBlocks() - if metaBlockCache == nil || metaBlockCache.IsInterfaceNil() { - return nil, nil, 0, process.ErrNilMetaBlockPool - } - - miniBlockCache := sp.dataPool.MiniBlocks() - if miniBlockCache == nil || miniBlockCache.IsInterfaceNil() { - return nil, nil, 0, process.ErrNilMiniBlockPool - } - - txPool := sp.dataPool.Transactions() - if txPool == nil || txPool.IsInterfaceNil() { - return nil, nil, 0, process.ErrNilTransactionPool - } +) (block.MiniBlockSlice, uint32, uint32, error) { miniBlocks := make(block.MiniBlockSlice, 0) - nrTxAdded := uint32(0) + txsAdded := uint32(0) + hdrsAdded := uint32(0) orderedMetaBlocks, err := sp.getOrderedMetaBlocks(round) if err != nil { - return nil, nil, 0, err + return nil, 0, 0, err } log.Info(fmt.Sprintf("meta blocks ordered: %d\n", len(orderedMetaBlocks))) lastMetaHdr, err := sp.getLastNotarizedHdr(sharding.MetachainShardId) if err != nil { - return nil, nil, 0, err + return nil, 0, 0, err } // do processing in order - usedMetaHdrsHashes := make([][]byte, 0) + sp.hdrsForCurrBlock.mutHdrsForBlock.Lock() for i := 0; i < len(orderedMetaBlocks); i++ { if !haveTime() { - log.Info(fmt.Sprintf("time is up after putting %d cross txs with destination to current shard\n", nrTxAdded)) + log.Info(fmt.Sprintf("time is up after putting %d cross txs with destination to current shard\n", txsAdded)) break } - itemsAddedInHeader := uint32(len(usedMetaHdrsHashes) + len(miniBlocks)) + itemsAddedInHeader := uint32(len(sp.hdrsForCurrBlock.hdrHashAndInfo) + len(miniBlocks)) if itemsAddedInHeader >= maxItemsInBlock { log.Info(fmt.Sprintf("%d max records allowed to be added in shard hdr has been reached\n", maxItemsInBlock)) break @@ -1457,12 +1444,13 @@ func (sp *shardProcessor) createAndProcessCrossMiniBlocksDstMe( } if len(hdr.GetMiniBlockHeadersWithDst(sp.shardCoordinator.SelfId())) == 0 { - usedMetaHdrsHashes = append(usedMetaHdrsHashes, orderedMetaBlocks[i].hash) + sp.hdrsForCurrBlock.hdrHashAndInfo[string(orderedMetaBlocks[i].hash)] = &hdrInfo{hdr: hdr} + hdrsAdded++ lastMetaHdr = hdr continue } - itemsAddedInBody := nrTxAdded + itemsAddedInBody := txsAdded if itemsAddedInBody >= maxItemsInBlock { continue } @@ -1482,10 +1470,11 @@ func (sp *shardProcessor) createAndProcessCrossMiniBlocksDstMe( // all txs processed, add to processed miniblocks miniBlocks = append(miniBlocks, currMBProcessed...) 
- nrTxAdded = nrTxAdded + currTxsAdded + txsAdded = txsAdded + currTxsAdded if currTxsAdded > 0 { - usedMetaHdrsHashes = append(usedMetaHdrsHashes, orderedMetaBlocks[i].hash) + sp.hdrsForCurrBlock.hdrHashAndInfo[string(orderedMetaBlocks[i].hash)] = &hdrInfo{hdr: hdr} + hdrsAdded++ } if !hdrProcessFinished { @@ -1495,12 +1484,9 @@ func (sp *shardProcessor) createAndProcessCrossMiniBlocksDstMe( lastMetaHdr = hdr } } + sp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() - sp.mutUsedMetaHdrsHashes.Lock() - sp.usedMetaHdrsHashes[round] = usedMetaHdrsHashes - sp.mutUsedMetaHdrsHashes.Unlock() - - return miniBlocks, usedMetaHdrsHashes, nrTxAdded, nil + return miniBlocks, txsAdded, hdrsAdded, nil } func (sp *shardProcessor) createMiniBlocks( @@ -1526,12 +1512,12 @@ func (sp *shardProcessor) createMiniBlocks( return nil, process.ErrNilTransactionPool } - destMeMiniBlocks, usedMetaHdrsHashes, txs, err := sp.createAndProcessCrossMiniBlocksDstMe(noShards, maxItemsInBlock, round, haveTime) + destMeMiniBlocks, txs, hdrs, err := sp.createAndProcessCrossMiniBlocksDstMe(noShards, maxItemsInBlock, round, haveTime) if err != nil { log.Info(err.Error()) } - processedMetaHdrs, errNotCritical := sp.getProcessedMetaBlocksFromMiniBlocks(destMeMiniBlocks, usedMetaHdrsHashes) + processedMetaHdrs, errNotCritical := sp.getProcessedMetaBlocksFromMiniBlocks(destMeMiniBlocks) if errNotCritical != nil { log.Debug(errNotCritical.Error()) } @@ -1548,7 +1534,7 @@ func (sp *shardProcessor) createMiniBlocks( } maxTxSpaceRemained := int32(maxItemsInBlock) - int32(txs) - maxMbSpaceRemained := int32(maxItemsInBlock) - int32(len(destMeMiniBlocks)) - int32(len(usedMetaHdrsHashes)) + maxMbSpaceRemained := int32(maxItemsInBlock) - int32(len(destMeMiniBlocks)) - int32(hdrs) if maxTxSpaceRemained > 0 && maxMbSpaceRemained > 0 { mbFromMe := sp.txCoordinator.CreateMbsAndProcessTransactionsFromMe( @@ -1569,6 +1555,7 @@ func (sp *shardProcessor) createMiniBlocks( // CreateBlockHeader creates a miniblock hdr list given a block body func (sp *shardProcessor) CreateBlockHeader(bodyHandler data.BodyHandler, round uint64, haveTime func() bool) (data.HeaderHandler, error) { log.Debug(fmt.Sprintf("started creating block hdr in round %d\n", round)) + header := &block.Header{ MiniBlockHeaders: make([]block.MiniBlockHeader, 0), RootHash: sp.getRootHash(), @@ -1590,20 +1577,20 @@ func (sp *shardProcessor) CreateBlockHeader(bodyHandler data.BodyHandler, round return nil, process.ErrWrongTypeAssertion } - mbLen := len(body) totalTxCount := 0 - miniBlockHeaders := make([]block.MiniBlockHeader, mbLen) - for i := 0; i < mbLen; i++ { + miniBlockHeaders := make([]block.MiniBlockHeader, len(body)) + + for i := 0; i < len(body); i++ { txCount := len(body[i].TxHashes) totalTxCount += txCount - mbBytes, err := sp.marshalizer.Marshal(body[i]) + + miniBlockHash, err := core.CalculateHash(sp.marshalizer, sp.hasher, body[i]) if err != nil { return nil, err } - mbHash := sp.hasher.Compute(string(mbBytes)) miniBlockHeaders[i] = block.MiniBlockHeader{ - Hash: mbHash, + Hash: miniBlockHash, SenderShardID: body[i].SenderShardID, ReceiverShardID: body[i].ReceiverShardID, TxCount: uint32(txCount), @@ -1615,16 +1602,27 @@ func (sp *shardProcessor) CreateBlockHeader(bodyHandler data.BodyHandler, round header.TxCount = uint32(totalTxCount) sp.appStatusHandler.SetUInt64Value(core.MetricNumTxInBlock, uint64(totalTxCount)) - sp.appStatusHandler.SetUInt64Value(core.MetricNumMiniBlocks, uint64(mbLen)) + sp.appStatusHandler.SetUInt64Value(core.MetricNumMiniBlocks, uint64(len(body))) + + 
sp.hdrsForCurrBlock.mutHdrsForBlock.RLock() + usedMetaHdrsInfo := make([]*nonceAndHashInfo, 0) + for metaBlockHash, hdrInfo := range sp.hdrsForCurrBlock.hdrHashAndInfo { + usedMetaHdrsInfo = append(usedMetaHdrsInfo, &nonceAndHashInfo{nonce: hdrInfo.hdr.GetNonce(), hash: []byte(metaBlockHash)}) + } + sp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() - sp.mutUsedMetaHdrsHashes.Lock() + if len(usedMetaHdrsInfo) > 1 { + sort.Slice(usedMetaHdrsInfo, func(i, j int) bool { + return usedMetaHdrsInfo[i].nonce < usedMetaHdrsInfo[j].nonce + }) + } - if usedMetaHdrsHashes, ok := sp.usedMetaHdrsHashes[round]; ok { - header.MetaBlockHashes = usedMetaHdrsHashes - delete(sp.usedMetaHdrsHashes, round) + usedMetaHdrsHashes := make([][]byte, len(usedMetaHdrsInfo)) + for i := 0; i < len(usedMetaHdrsInfo); i++ { + usedMetaHdrsHashes[i] = usedMetaHdrsInfo[i].hash } - sp.mutUsedMetaHdrsHashes.Unlock() + header.MetaBlockHashes = usedMetaHdrsHashes sp.blockSizeThrottler.Add( round, From a1ae6b3bafc05b16f70347289c51ffdd2eaf4ad4 Mon Sep 17 00:00:00 2001 From: iulianpascalau Date: Wed, 2 Oct 2019 18:13:50 +0300 Subject: [PATCH 182/234] added parameter for txNonceDelta in txValidator and interceptorsContainerFactory for easier testing cleaned up structs.go of already implemented TODO's --- cmd/node/factory/structs.go | 13 ++++--- ...testInitilalizer.go => testInitializer.go} | 5 +++ integrationTests/testProcessorNode.go | 3 ++ .../interceptors/headerInterceptor_test.go | 8 ++--- process/dataValidators/export_test.go | 3 -- process/dataValidators/txValidator.go | 25 +++++++------ process/dataValidators/txValidator_test.go | 35 ++++++++++++------- .../shard/interceptorsContainerFactory.go | 9 +++-- .../interceptorsContainerFactory_test.go | 27 ++++++++++++++ 9 files changed, 89 insertions(+), 39 deletions(-) rename integrationTests/multiShard/smartContract/{testInitilalizer.go => testInitializer.go} (99%) delete mode 100644 process/dataValidators/export_test.go diff --git a/cmd/node/factory/structs.go b/cmd/node/factory/structs.go index 7badfc223af..25bd562536e 100644 --- a/cmd/node/factory/structs.go +++ b/cmd/node/factory/structs.go @@ -92,8 +92,10 @@ const ( var log = logger.DefaultLogger() -//TODO: Extract all others error messages from this file in some defined errors +const maxTxNonceDeltaAllowed = 100 + // ErrCreateForkDetector signals that a fork detector could not be created +//TODO: Extract all others error messages from this file in some defined errors var ErrCreateForkDetector = errors.New("could not create fork detector") // Network struct holds the network components of the Elrond protocol @@ -518,7 +520,6 @@ func ProcessComponentsFactory(args *processComponentsFactoryArgs) (*Process, err args.state, forkDetector, shardsGenesisBlocks, - args.nodesConfig, args.coreServiceContainer, ) @@ -1216,7 +1217,7 @@ func newShardInterceptorAndResolverContainerFactory( state *State, network *Network, ) (process.InterceptorsContainerFactory, dataRetriever.ResolversContainerFactory, error) { - //TODO add a real chronology validator and remove null chronology validator + interceptorContainerFactory, err := shard.NewInterceptorsContainerFactory( state.AccountsAdapter, shardCoordinator, @@ -1230,6 +1231,7 @@ func newShardInterceptorAndResolverContainerFactory( crypto.MultiSigner, data.Datapool, state.AddressConverter, + maxTxNonceDeltaAllowed, ) if err != nil { return nil, nil, err @@ -1264,7 +1266,7 @@ func newMetaInterceptorAndResolverContainerFactory( crypto *Crypto, network *Network, ) (process.InterceptorsContainerFactory, 
dataRetriever.ResolversContainerFactory, error) { - //TODO add a real chronology validator and remove null chronology validator + interceptorContainerFactory, err := metachain.NewInterceptorsContainerFactory( shardCoordinator, nodesCoordinator, @@ -1441,7 +1443,6 @@ func newBlockProcessorAndTracker( state *State, forkDetector process.ForkDetector, shardsGenesisBlocks map[uint32]data.HeaderHandler, - nodesConfig *sharding.NodesSetup, coreServiceContainer serviceContainer.Core, ) (process.BlockProcessor, process.BlocksTracker, error) { @@ -1482,7 +1483,6 @@ func newBlockProcessorAndTracker( state, forkDetector, shardsGenesisBlocks, - nodesConfig, coreServiceContainer, ) } @@ -1514,7 +1514,6 @@ func newShardBlockProcessorAndTracker( state *State, forkDetector process.ForkDetector, shardsGenesisBlocks map[uint32]data.HeaderHandler, - nodesConfig *sharding.NodesSetup, coreServiceContainer serviceContainer.Core, ) (process.BlockProcessor, process.BlocksTracker, error) { argsParser, err := smartContract.NewAtArgumentParser() diff --git a/integrationTests/multiShard/smartContract/testInitilalizer.go b/integrationTests/multiShard/smartContract/testInitializer.go similarity index 99% rename from integrationTests/multiShard/smartContract/testInitilalizer.go rename to integrationTests/multiShard/smartContract/testInitializer.go index 90024f575a8..d3390ce30e9 100644 --- a/integrationTests/multiShard/smartContract/testInitilalizer.go +++ b/integrationTests/multiShard/smartContract/testInitializer.go @@ -61,6 +61,8 @@ import ( libp2pCrypto "github.com/libp2p/go-libp2p-core/crypto" ) +//TODO refactor this package to use TestNodeProcessor infrastructure + var r *rand.Rand var testHasher = sha256.Sha256{} var testMarshalizer = &marshal.JsonMarshalizer{} @@ -71,6 +73,8 @@ var addrConv, _ = addressConverters.NewPlainAddressConverter(32, "0x") var opGas = int64(1) +const maxTxNonceDeltaAllowed = 8000 + func init() { r = rand.New(rand.NewSource(time.Now().UnixNano())) } @@ -308,6 +312,7 @@ func createNetNode( testMultiSig, dPool, testAddressConverter, + maxTxNonceDeltaAllowed, ) interceptorsContainer, err := interceptorContainerFactory.Create() if err != nil { diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index 860d15a6e0e..7ff2c6d4af7 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -59,6 +59,8 @@ var TestMultiSig = mock.NewMultiSigner(1) // TestUint64Converter represents an uint64 to byte slice converter var TestUint64Converter = uint64ByteSlice.NewBigEndianConverter() +const maxTxNonceDeltaAllowed = 8000 + // TestKeyPair holds a pair of private/public Keys type TestKeyPair struct { Sk crypto.PrivateKey @@ -272,6 +274,7 @@ func (tpn *TestProcessorNode) initInterceptors() { TestMultiSig, tpn.ShardDataPool, TestAddressConverter, + maxTxNonceDeltaAllowed, ) tpn.InterceptorsContainer, err = interceptorContainerFactory.Create() diff --git a/process/block/interceptors/headerInterceptor_test.go b/process/block/interceptors/headerInterceptor_test.go index 1740a69def5..87fd3898754 100644 --- a/process/block/interceptors/headerInterceptor_test.go +++ b/process/block/interceptors/headerInterceptor_test.go @@ -201,7 +201,7 @@ func TestNewHeaderInterceptor_NilShardCoordinatorShouldErr(t *testing.T) { assert.Nil(t, hi) } -func TestNewHeaderInterceptor_NilChronologyValidatorShouldErr(t *testing.T) { +func TestNewHeaderInterceptor_NilNodesCoordinatorShouldErr(t *testing.T) { t.Parallel() headers := &mock.CacherStub{} @@ -472,7 
+472,7 @@ func TestHeaderInterceptor_ProcessReceivedMessageValsOkShouldWork(t *testing.T) nodesCoordinator := mock.NewNodesCoordinatorMock() nodes := generateValidatorsMap(3, 3, 1) - nodesCoordinator.SetNodesPerShards(nodes) + _ = nodesCoordinator.SetNodesPerShards(nodes) hi, _ := interceptors.NewHeaderInterceptor( marshalizer, @@ -615,7 +615,7 @@ func TestHeaderInterceptor_ProcessReceivedMessageIsNotValidShouldNotAdd(t *testi nodesCoordinator := mock.NewNodesCoordinatorMock() nodes := generateValidatorsMap(3, 3, 1) - nodesCoordinator.SetNodesPerShards(nodes) + _ = nodesCoordinator.SetNodesPerShards(nodes) hi, _ := interceptors.NewHeaderInterceptor( marshalizer, @@ -696,7 +696,7 @@ func TestHeaderInterceptor_ProcessReceivedMessageNotForCurrentShardShouldNotAdd( } nodes := generateValidatorsMap(3, 3, 5) - nodesCoordinator.SetNodesPerShards(nodes) + _ = nodesCoordinator.SetNodesPerShards(nodes) hi, _ := interceptors.NewHeaderInterceptor( marshalizer, diff --git a/process/dataValidators/export_test.go b/process/dataValidators/export_test.go deleted file mode 100644 index 29ec2b4464f..00000000000 --- a/process/dataValidators/export_test.go +++ /dev/null @@ -1,3 +0,0 @@ -package dataValidators - -const MaxNonceDeltaAllowed = maxNonceDeltaAllowed diff --git a/process/dataValidators/txValidator.go b/process/dataValidators/txValidator.go index 3ad8a470bc8..0de9e5a59a6 100644 --- a/process/dataValidators/txValidator.go +++ b/process/dataValidators/txValidator.go @@ -12,17 +12,21 @@ import ( var log = logger.DefaultLogger() -const maxNonceDeltaAllowed = 100 - // TxValidator represents a tx handler validator that doesn't check the validity of provided txHandler type TxValidator struct { - accounts state.AccountsAdapter - shardCoordinator sharding.Coordinator - rejectedTxs uint64 + accounts state.AccountsAdapter + shardCoordinator sharding.Coordinator + rejectedTxs uint64 + maxNonceDeltaAllowed int } // NewTxValidator creates a new nil tx handler validator instance -func NewTxValidator(accounts state.AccountsAdapter, shardCoordinator sharding.Coordinator) (*TxValidator, error) { +func NewTxValidator( + accounts state.AccountsAdapter, + shardCoordinator sharding.Coordinator, + maxNonceDeltaAllowed int, +) (*TxValidator, error) { + if accounts == nil || accounts.IsInterfaceNil() { return nil, process.ErrNilAccountsAdapter } @@ -31,9 +35,10 @@ func NewTxValidator(accounts state.AccountsAdapter, shardCoordinator sharding.Co } return &TxValidator{ - accounts: accounts, - shardCoordinator: shardCoordinator, - rejectedTxs: uint64(0), + accounts: accounts, + shardCoordinator: shardCoordinator, + rejectedTxs: uint64(0), + maxNonceDeltaAllowed: maxNonceDeltaAllowed, }, nil } @@ -57,7 +62,7 @@ func (tv *TxValidator) IsTxValidForProcessing(interceptedTx process.TxValidatorH accountNonce := accountHandler.GetNonce() txNonce := interceptedTx.Nonce() lowerNonceInTx := txNonce < accountNonce - veryHighNonceInTx := txNonce > accountNonce+maxNonceDeltaAllowed + veryHighNonceInTx := txNonce > accountNonce+uint64(tv.maxNonceDeltaAllowed) isTxRejected := lowerNonceInTx || veryHighNonceInTx if isTxRejected { tv.rejectedTxs++ diff --git a/process/dataValidators/txValidator_test.go b/process/dataValidators/txValidator_test.go index 5d50dea7798..e6f64a75343 100644 --- a/process/dataValidators/txValidator_test.go +++ b/process/dataValidators/txValidator_test.go @@ -59,7 +59,8 @@ func TestTxValidator_NewValidatorNilAccountsShouldErr(t *testing.T) { t.Parallel() shardCoordinator := createMockCoordinator("_", 0) - txValidator, 
err := dataValidators.NewTxValidator(nil, shardCoordinator) + maxNonceDeltaAllowed := 100 + txValidator, err := dataValidators.NewTxValidator(nil, shardCoordinator, maxNonceDeltaAllowed) assert.Nil(t, txValidator) assert.Equal(t, process.ErrNilAccountsAdapter, err) @@ -69,7 +70,8 @@ func TestTxValidator_NewValidatorNilShardCoordinatorShouldErr(t *testing.T) { t.Parallel() accounts := getAccAdapter(0, big.NewInt(0)) - txValidator, err := dataValidators.NewTxValidator(accounts, nil) + maxNonceDeltaAllowed := 100 + txValidator, err := dataValidators.NewTxValidator(accounts, nil, maxNonceDeltaAllowed) assert.Nil(t, txValidator) assert.Equal(t, process.ErrNilShardCoordinator, err) @@ -80,7 +82,8 @@ func TestTxValidator_NewValidatorShouldWork(t *testing.T) { accounts := getAccAdapter(0, big.NewInt(0)) shardCoordinator := createMockCoordinator("_", 0) - txValidator, err := dataValidators.NewTxValidator(accounts, shardCoordinator) + maxNonceDeltaAllowed := 100 + txValidator, err := dataValidators.NewTxValidator(accounts, shardCoordinator, maxNonceDeltaAllowed) assert.Nil(t, err) assert.NotNil(t, txValidator) @@ -94,7 +97,8 @@ func TestTxValidator_IsTxValidForProcessingTxIsCrossShardShouldReturnTrue(t *tes accounts := getAccAdapter(1, big.NewInt(0)) shardCoordinator := createMockCoordinator("_", 0) - txValidator, err := dataValidators.NewTxValidator(accounts, shardCoordinator) + maxNonceDeltaAllowed := 100 + txValidator, err := dataValidators.NewTxValidator(accounts, shardCoordinator, maxNonceDeltaAllowed) assert.Nil(t, err) addressMock := mock.NewAddressMock([]byte("address")) @@ -112,7 +116,8 @@ func TestTxValidator_IsTxValidForProcessingAccountNonceIsGreaterThanTxNonceShoul accounts := getAccAdapter(accountNonce, big.NewInt(0)) shardCoordinator := createMockCoordinator("_", 0) - txValidator, err := dataValidators.NewTxValidator(accounts, shardCoordinator) + maxNonceDeltaAllowed := 100 + txValidator, err := dataValidators.NewTxValidator(accounts, shardCoordinator, maxNonceDeltaAllowed) assert.Nil(t, err) addressMock := mock.NewAddressMock([]byte("address")) @@ -126,11 +131,12 @@ func TestTxValidator_IsTxValidForProcessingTxNonceIsTooHigh(t *testing.T) { t.Parallel() accountNonce := uint64(100) - txNonce := accountNonce + dataValidators.MaxNonceDeltaAllowed + 1 + maxNonceDeltaAllowed := 100 + txNonce := accountNonce + uint64(maxNonceDeltaAllowed) + 1 accounts := getAccAdapter(accountNonce, big.NewInt(0)) shardCoordinator := createMockCoordinator("_", 0) - txValidator, err := dataValidators.NewTxValidator(accounts, shardCoordinator) + txValidator, err := dataValidators.NewTxValidator(accounts, shardCoordinator, maxNonceDeltaAllowed) assert.Nil(t, err) addressMock := mock.NewAddressMock([]byte("address")) @@ -150,7 +156,8 @@ func TestTxValidator_IsTxValidForProcessingAccountBalanceIsLessThanTxTotalValueS accounts := getAccAdapter(accountNonce, accountBalance) shardCoordinator := createMockCoordinator("_", 0) - txValidator, err := dataValidators.NewTxValidator(accounts, shardCoordinator) + maxNonceDeltaAllowed := 100 + txValidator, err := dataValidators.NewTxValidator(accounts, shardCoordinator, maxNonceDeltaAllowed) assert.Nil(t, err) addressMock := mock.NewAddressMock([]byte("address")) @@ -170,7 +177,8 @@ func TestTxValidator_IsTxValidForProcessingNumOfRejectedTxShouldIncreaseShouldRe accounts := getAccAdapter(accountNonce, accountBalance) shardCoordinator := createMockCoordinator("_", 0) - txValidator, err := dataValidators.NewTxValidator(accounts, shardCoordinator) + maxNonceDeltaAllowed := 100 
+ txValidator, err := dataValidators.NewTxValidator(accounts, shardCoordinator, maxNonceDeltaAllowed) assert.Nil(t, err) addressMock := mock.NewAddressMock([]byte("address")) @@ -191,7 +199,8 @@ func TestTxValidator_IsTxValidForProcessingAccountNotExitsShouldReturnFalse(t *t return nil, errors.New("cannot find account") } shardCoordinator := createMockCoordinator("_", 0) - txValidator, _ := dataValidators.NewTxValidator(accDB, shardCoordinator) + maxNonceDeltaAllowed := 100 + txValidator, _ := dataValidators.NewTxValidator(accDB, shardCoordinator, maxNonceDeltaAllowed) addressMock := mock.NewAddressMock([]byte("address")) txValidatorHandler := getTxValidatorHandler(0, 1, addressMock, big.NewInt(0)) @@ -208,7 +217,8 @@ func TestTxValidator_IsTxValidForProcessingWrongAccountTypeShouldReturnFalse(t * return &state.MetaAccount{}, nil } shardCoordinator := createMockCoordinator("_", 0) - txValidator, _ := dataValidators.NewTxValidator(accDB, shardCoordinator) + maxNonceDeltaAllowed := 100 + txValidator, _ := dataValidators.NewTxValidator(accDB, shardCoordinator, maxNonceDeltaAllowed) addressMock := mock.NewAddressMock([]byte("address")) txValidatorHandler := getTxValidatorHandler(0, 1, addressMock, big.NewInt(0)) @@ -224,7 +234,8 @@ func TestTxValidator_IsTxValidForProcessingTxIsOkShouldReturnTrue(t *testing.T) accountBalance := big.NewInt(10) accounts := getAccAdapter(accountNonce, accountBalance) shardCoordinator := createMockCoordinator("_", 0) - txValidator, _ := dataValidators.NewTxValidator(accounts, shardCoordinator) + maxNonceDeltaAllowed := 100 + txValidator, _ := dataValidators.NewTxValidator(accounts, shardCoordinator, maxNonceDeltaAllowed) addressMock := mock.NewAddressMock([]byte("address")) txValidatorHandler := getTxValidatorHandler(0, 1, addressMock, big.NewInt(0)) diff --git a/process/factory/shard/interceptorsContainerFactory.go b/process/factory/shard/interceptorsContainerFactory.go index c48556a7779..28f782a8204 100644 --- a/process/factory/shard/interceptorsContainerFactory.go +++ b/process/factory/shard/interceptorsContainerFactory.go @@ -18,7 +18,7 @@ import ( "github.com/ElrondNetwork/elrond-go/sharding" ) -const maxGoRoutinexTxInterceptor = 100 +const maxGoRoutineTxInterceptor = 100 type interceptorsContainerFactory struct { accounts state.AccountsAdapter @@ -34,6 +34,7 @@ type interceptorsContainerFactory struct { addrConverter state.AddressConverter nodesCoordinator sharding.NodesCoordinator txInterceptorThrottler process.InterceptorThrottler + maxTxNonceDeltaAllowed int } // NewInterceptorsContainerFactory is responsible for creating a new interceptors factory object @@ -50,6 +51,7 @@ func NewInterceptorsContainerFactory( multiSigner crypto.MultiSigner, dataPool dataRetriever.PoolsHolder, addrConverter state.AddressConverter, + maxTxNonceDeltaAllowed int, ) (*interceptorsContainerFactory, error) { if accounts == nil || accounts.IsInterfaceNil() { return nil, process.ErrNilAccountsAdapter @@ -88,7 +90,7 @@ func NewInterceptorsContainerFactory( return nil, process.ErrNilNodesCoordinator } - txInterceptorThrottler, err := throttler.NewNumGoRoutineThrottler(maxGoRoutinexTxInterceptor) + txInterceptorThrottler, err := throttler.NewNumGoRoutineThrottler(maxGoRoutineTxInterceptor) if err != nil { return nil, err } @@ -107,6 +109,7 @@ func NewInterceptorsContainerFactory( dataPool: dataPool, addrConverter: addrConverter, txInterceptorThrottler: txInterceptorThrottler, + maxTxNonceDeltaAllowed: maxTxNonceDeltaAllowed, }, nil } @@ -237,7 +240,7 @@ func (icf 
*interceptorsContainerFactory) generateTxInterceptors() ([]string, []p } func (icf *interceptorsContainerFactory) createOneTxInterceptor(identifier string) (process.Interceptor, error) { - txValidator, err := dataValidators.NewTxValidator(icf.accounts, icf.shardCoordinator) + txValidator, err := dataValidators.NewTxValidator(icf.accounts, icf.shardCoordinator, icf.maxTxNonceDeltaAllowed) if err != nil { return nil, err } diff --git a/process/factory/shard/interceptorsContainerFactory_test.go b/process/factory/shard/interceptorsContainerFactory_test.go index 173402887fc..d1eaf0e9821 100644 --- a/process/factory/shard/interceptorsContainerFactory_test.go +++ b/process/factory/shard/interceptorsContainerFactory_test.go @@ -17,6 +17,8 @@ import ( var errExpected = errors.New("expected error") +const maxTxNonceDeltaAllowed = 100 + func createStubTopicHandler(matchStrToErrOnCreate string, matchStrToErrOnRegister string) process.TopicHandler { return &mock.TopicHandlerStub{ CreateTopicCalled: func(name string, createChannelForTopic bool) error { @@ -96,6 +98,7 @@ func TestNewInterceptorsContainerFactory_NilAccountsAdapter(t *testing.T) { mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, + maxTxNonceDeltaAllowed, ) assert.Nil(t, icf) @@ -118,6 +121,7 @@ func TestNewInterceptorsContainerFactory_NilShardCoordinatorShouldErr(t *testing mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, + maxTxNonceDeltaAllowed, ) assert.Nil(t, icf) @@ -140,6 +144,7 @@ func TestNewInterceptorsContainerFactory_NilNodesCoordinatorShouldErr(t *testing mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, + maxTxNonceDeltaAllowed, ) assert.Nil(t, icf) @@ -162,6 +167,7 @@ func TestNewInterceptorsContainerFactory_NilTopicHandlerShouldErr(t *testing.T) mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, + maxTxNonceDeltaAllowed, ) assert.Nil(t, icf) @@ -184,6 +190,7 @@ func TestNewInterceptorsContainerFactory_NilBlockchainShouldErr(t *testing.T) { mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, + maxTxNonceDeltaAllowed, ) assert.Nil(t, icf) @@ -206,6 +213,7 @@ func TestNewInterceptorsContainerFactory_NilMarshalizerShouldErr(t *testing.T) { mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, + maxTxNonceDeltaAllowed, ) assert.Nil(t, icf) @@ -228,6 +236,7 @@ func TestNewInterceptorsContainerFactory_NilHasherShouldErr(t *testing.T) { mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, + maxTxNonceDeltaAllowed, ) assert.Nil(t, icf) @@ -250,6 +259,7 @@ func TestNewInterceptorsContainerFactory_NilKeyGenShouldErr(t *testing.T) { mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, + maxTxNonceDeltaAllowed, ) assert.Nil(t, icf) @@ -272,6 +282,7 @@ func TestNewInterceptorsContainerFactory_NilSingleSignerShouldErr(t *testing.T) mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, + maxTxNonceDeltaAllowed, ) assert.Nil(t, icf) @@ -294,6 +305,7 @@ func TestNewInterceptorsContainerFactory_NilMultiSignerShouldErr(t *testing.T) { nil, createDataPools(), &mock.AddressConverterMock{}, + maxTxNonceDeltaAllowed, ) assert.Nil(t, icf) @@ -316,6 +328,7 @@ func TestNewInterceptorsContainerFactory_NilDataPoolShouldErr(t *testing.T) { mock.NewMultiSigner(), nil, &mock.AddressConverterMock{}, + maxTxNonceDeltaAllowed, ) assert.Nil(t, icf) @@ -338,6 +351,7 @@ func TestNewInterceptorsContainerFactory_NilAddrConverterShouldErr(t *testing.T) mock.NewMultiSigner(), 
createDataPools(), nil, + maxTxNonceDeltaAllowed, ) assert.Nil(t, icf) @@ -360,6 +374,7 @@ func TestNewInterceptorsContainerFactory_ShouldWork(t *testing.T) { mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, + maxTxNonceDeltaAllowed, ) assert.NotNil(t, icf) @@ -384,6 +399,7 @@ func TestInterceptorsContainerFactory_CreateTopicCreationTxFailsShouldErr(t *tes mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, + maxTxNonceDeltaAllowed, ) container, err := icf.Create() @@ -408,6 +424,7 @@ func TestInterceptorsContainerFactory_CreateTopicCreationHdrFailsShouldErr(t *te mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, + maxTxNonceDeltaAllowed, ) container, err := icf.Create() @@ -432,6 +449,7 @@ func TestInterceptorsContainerFactory_CreateTopicCreationMiniBlocksFailsShouldEr mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, + maxTxNonceDeltaAllowed, ) container, err := icf.Create() @@ -456,6 +474,7 @@ func TestInterceptorsContainerFactory_CreateTopicCreationPeerChBlocksFailsShould mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, + maxTxNonceDeltaAllowed, ) container, err := icf.Create() @@ -480,6 +499,7 @@ func TestInterceptorsContainerFactory_CreateTopicCreationMetachainHeadersFailsSh mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, + maxTxNonceDeltaAllowed, ) container, err := icf.Create() @@ -504,6 +524,7 @@ func TestInterceptorsContainerFactory_CreateRegisterTxFailsShouldErr(t *testing. mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, + maxTxNonceDeltaAllowed, ) container, err := icf.Create() @@ -528,6 +549,7 @@ func TestInterceptorsContainerFactory_CreateRegisterHdrFailsShouldErr(t *testing mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, + maxTxNonceDeltaAllowed, ) container, err := icf.Create() @@ -552,6 +574,7 @@ func TestInterceptorsContainerFactory_CreateRegisterMiniBlocksFailsShouldErr(t * mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, + maxTxNonceDeltaAllowed, ) container, err := icf.Create() @@ -576,6 +599,7 @@ func TestInterceptorsContainerFactory_CreateRegisterPeerChBlocksFailsShouldErr(t mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, + maxTxNonceDeltaAllowed, ) container, err := icf.Create() @@ -600,6 +624,7 @@ func TestInterceptorsContainerFactory_CreateRegisterMetachainHeadersShouldErr(t mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, + maxTxNonceDeltaAllowed, ) container, err := icf.Create() @@ -631,6 +656,7 @@ func TestInterceptorsContainerFactory_CreateShouldWork(t *testing.T) { mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, + maxTxNonceDeltaAllowed, ) container, err := icf.Create() @@ -675,6 +701,7 @@ func TestInterceptorsContainerFactory_With4ShardsShouldWork(t *testing.T) { mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, + maxTxNonceDeltaAllowed, ) container, _ := icf.Create() From c89256d3ef9123da5c7beb544f3987c10d75cc9a Mon Sep 17 00:00:00 2001 From: iulianpascalau Date: Wed, 2 Oct 2019 18:45:49 +0300 Subject: [PATCH 183/234] deleted unnecessary ManualRollback func --- consensus/mock/bootstrapMock.go | 5 ----- 1 file changed, 5 deletions(-) diff --git a/consensus/mock/bootstrapMock.go b/consensus/mock/bootstrapMock.go index f297bb9ea66..342eead37f7 100644 --- a/consensus/mock/bootstrapMock.go +++ b/consensus/mock/bootstrapMock.go @@ -14,7 +14,6 @@ type BootstrapperMock 
struct { StartSyncCalled func() StopSyncCalled func() SetStatusHandlerCalled func(handler core.AppStatusHandler) error - ManualRollbackCalled func() error } func (boot *BootstrapperMock) CreateAndCommitEmptyBlock(shardForCurrentNode uint32) (data.BodyHandler, data.HeaderHandler, error) { @@ -55,10 +54,6 @@ func (boot *BootstrapperMock) SetStatusHandler(handler core.AppStatusHandler) er return boot.SetStatusHandlerCalled(handler) } -func (boot *BootstrapperMock) ManualRollback() error { - return boot.ManualRollbackCalled() -} - // IsInterfaceNil returns true if there is no value under the interface func (boot *BootstrapperMock) IsInterfaceNil() bool { if boot == nil { From 2793fb6862bb81e0900ff395ae38da6027ee9b3c Mon Sep 17 00:00:00 2001 From: iulianpascalau Date: Wed, 2 Oct 2019 19:41:23 +0300 Subject: [PATCH 184/234] added TODO --- process/transaction/interceptor.go | 1 + 1 file changed, 1 insertion(+) diff --git a/process/transaction/interceptor.go b/process/transaction/interceptor.go index 5fe01fcf37f..67e0102a6f2 100644 --- a/process/transaction/interceptor.go +++ b/process/transaction/interceptor.go @@ -136,6 +136,7 @@ func (txi *TxInterceptor) ProcessReceivedMessage(message p2p.MessageP2P) error { continue } + //TODO: check if throttler needs to be applied also on the following go routine. go txi.processTransaction(txIntercepted) } From cc004f4a13fff35eb1591aad491190e24a75747d Mon Sep 17 00:00:00 2001 From: Iuga Mihai <50499646+miiu96@users.noreply.github.com> Date: Wed, 2 Oct 2019 20:19:21 +0300 Subject: [PATCH 185/234] En 4170 economics parameters (#495) * EN-3703 : implement white list structure that will hold white list data * EN-4170 : fix failing tests * EN-4170 : fix problem after merge * EN-4170 : add unit tests * EN-4170 : fix after review --- cmd/node/config/config.toml | 8 +- cmd/node/config/economics.toml | 18 +++ cmd/node/factory/structs.go | 24 ++- cmd/node/main.go | 29 +++- config/config.go | 9 +- config/economicsConfig.go | 29 ++++ config/tomlConfig_test.go | 63 ++++++-- integrationTests/mock/feeHandlerMock.go | 27 ++++ .../smartContract/testInitilalizer.go | 31 ++++ integrationTests/testInitializer.go | 11 ++ integrationTests/testProcessorNode.go | 40 +++++ integrationTests/vm/testInitializer.go | 2 + process/block/preprocess/rewardsHandler.go | 27 ++-- .../block/preprocess/rewardsHandler_test.go | 37 +++++ process/block/preprocess/transactions.go | 8 +- process/block/preprocess/transactions_test.go | 32 ++++ process/block/shardblock_test.go | 11 ++ process/coordinator/process_test.go | 51 ++++++- process/economics/economicsData.go | 88 +++++++++++ process/economics/economicsData_test.go | 142 ++++++++++++++++++ process/errors.go | 23 +-- .../intermediateProcessorsContainerFactory.go | 5 + ...rmediateProcessorsContainerFactory_test.go | 8 + .../shard/preProcessorsContainerFactory.go | 7 + .../preProcessorsContainerFactory_test.go | 16 ++ process/interface.go | 24 +++ process/mock/feeHandlerMock.go | 27 ++++ process/mock/rewardsHandlerMock.go | 32 ++++ process/transaction/export_test.go | 12 -- process/transaction/process.go | 24 +-- process/transaction/process_test.go | 52 +++++-- 31 files changed, 817 insertions(+), 100 deletions(-) create mode 100644 cmd/node/config/economics.toml create mode 100644 config/economicsConfig.go create mode 100644 integrationTests/mock/feeHandlerMock.go create mode 100644 process/economics/economicsData.go create mode 100644 process/economics/economicsData_test.go create mode 100644 process/mock/feeHandlerMock.go create mode 100644 
process/mock/rewardsHandlerMock.go diff --git a/cmd/node/config/config.toml b/cmd/node/config/config.toml index 69f3b3a5d46..7b037ab49cb 100644 --- a/cmd/node/config/config.toml +++ b/cmd/node/config/config.toml @@ -14,8 +14,8 @@ NodeDisplayName = "" [Explorer] - Enabled = false - IndexerURL = "http://localhost:9200" + Enabled = false + IndexerURL = "http://localhost:9200" [MiniBlocksStorage] [MiniBlocksStorage.Cache] @@ -245,6 +245,4 @@ Timeout = 0 # Setting 0 means 'use default value' Version = 0 # Setting 0 means 'use default value' -[EconomicsConfig] - CommunityAddress = "1bedf9f1db526aa98eb61f251e6eb29df64c0a4d96261b6fe9d4df1bc2cf5420" - BurnAddress = "deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef" + diff --git a/cmd/node/config/economics.toml b/cmd/node/config/economics.toml new file mode 100644 index 00000000000..e90b4bce29f --- /dev/null +++ b/cmd/node/config/economics.toml @@ -0,0 +1,18 @@ +# Economics config of the node +[EconomicsAddresses] + CommunityAddress = "1bedf9f1db526aa98eb61f251e6eb29df64c0a4d96261b6fe9d4df1bc2cf5420" + BurnAddress = "deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef" + +[RewardsSettings] + RewardsValue = 1000 + CommunityPercentage = 0.10 + LeaderPercentage = 0.50 + BurnPercentage = 0.40 + +[FeeSettings] + MinGasPrice = 0 + MinGasLimitForTx = 5 + MinTxFee = 0 + + + diff --git a/cmd/node/factory/structs.go b/cmd/node/factory/structs.go index 7badfc223af..838ccb7cb29 100644 --- a/cmd/node/factory/structs.go +++ b/cmd/node/factory/structs.go @@ -56,6 +56,7 @@ import ( "github.com/ElrondNetwork/elrond-go/process/block" "github.com/ElrondNetwork/elrond-go/process/block/poolsCleaner" "github.com/ElrondNetwork/elrond-go/process/coordinator" + "github.com/ElrondNetwork/elrond-go/process/economics" "github.com/ElrondNetwork/elrond-go/process/factory" "github.com/ElrondNetwork/elrond-go/process/factory/metachain" "github.com/ElrondNetwork/elrond-go/process/factory/shard" @@ -410,7 +411,7 @@ func NetworkComponentsFactory(p2pConfig *config.P2PConfig, log *logger.Logger, c type processComponentsFactoryArgs struct { genesisConfig *sharding.Genesis - economicsConfig *config.EconomicsConfig + economicsData *economics.EconomicsData nodesConfig *sharding.NodesSetup syncer ntp.SyncTimer shardCoordinator sharding.Coordinator @@ -426,7 +427,7 @@ type processComponentsFactoryArgs struct { // NewProcessComponentsFactoryArgs initializes the arguments necessary for creating the process components func NewProcessComponentsFactoryArgs( genesisConfig *sharding.Genesis, - economicsConfig *config.EconomicsConfig, + economicsData *economics.EconomicsData, nodesConfig *sharding.NodesSetup, syncer ntp.SyncTimer, shardCoordinator sharding.Coordinator, @@ -440,7 +441,7 @@ func NewProcessComponentsFactoryArgs( ) *processComponentsFactoryArgs { return &processComponentsFactoryArgs{ genesisConfig: genesisConfig, - economicsConfig: economicsConfig, + economicsData: economicsData, nodesConfig: nodesConfig, syncer: syncer, shardCoordinator: shardCoordinator, @@ -512,7 +513,7 @@ func ProcessComponentsFactory(args *processComponentsFactoryArgs) (*Process, err resolversFinder, args.shardCoordinator, args.nodesCoordinator, - args.economicsConfig, + args.economicsData, args.data, args.core, args.state, @@ -1435,7 +1436,7 @@ func newBlockProcessorAndTracker( resolversFinder dataRetriever.ResolversFinder, shardCoordinator sharding.Coordinator, nodesCoordinator sharding.NodesCoordinator, - economicsConfig *config.EconomicsConfig, + economics *economics.EconomicsData,
data *Data, core *Core, state *State, @@ -1445,16 +1446,18 @@ func newBlockProcessorAndTracker( coreServiceContainer serviceContainer.Core, ) (process.BlockProcessor, process.BlocksTracker, error) { - if economicsConfig.CommunityAddress == "" || economicsConfig.BurnAddress == "" { + communityAddr := economics.CommunityAddress() + burnAddr := economics.BurnAddress() + if communityAddr == "" || burnAddr == "" { return nil, nil, errors.New("rewards configuration missing") } - communityAddress, err := hex.DecodeString(economicsConfig.CommunityAddress) + communityAddress, err := hex.DecodeString(communityAddr) if err != nil { return nil, nil, err } - burnAddress, err := hex.DecodeString(economicsConfig.BurnAddress) + burnAddress, err := hex.DecodeString(burnAddr) if err != nil { return nil, nil, err } @@ -1484,6 +1487,7 @@ func newBlockProcessorAndTracker( shardsGenesisBlocks, nodesConfig, coreServiceContainer, + economics, ) } if shardCoordinator.SelfId() == sharding.MetachainShardId { @@ -1516,6 +1520,7 @@ func newShardBlockProcessorAndTracker( shardsGenesisBlocks map[uint32]data.HeaderHandler, nodesConfig *sharding.NodesSetup, coreServiceContainer serviceContainer.Core, + economics *economics.EconomicsData, ) (process.BlockProcessor, process.BlocksTracker, error) { argsParser, err := smartContract.NewAtArgumentParser() if err != nil { @@ -1540,6 +1545,7 @@ func newShardBlockProcessorAndTracker( specialAddressHandler, data.Store, data.Datapool, + economics, ) if err != nil { return nil, nil, err @@ -1624,6 +1630,7 @@ func newShardBlockProcessorAndTracker( scProcessor, rewardsTxHandler, txTypeHandler, + economics, ) if err != nil { return nil, nil, errors.New("could not create transaction processor: " + err.Error()) @@ -1653,6 +1660,7 @@ func newShardBlockProcessorAndTracker( scProcessor, rewardsTxProcessor, internalTransactionProducer, + economics, ) if err != nil { return nil, nil, err diff --git a/cmd/node/main.go b/cmd/node/main.go index 8d03e2cb01c..e71f958043f 100644 --- a/cmd/node/main.go +++ b/cmd/node/main.go @@ -38,6 +38,7 @@ import ( "github.com/ElrondNetwork/elrond-go/node" "github.com/ElrondNetwork/elrond-go/node/external" "github.com/ElrondNetwork/elrond-go/ntp" + "github.com/ElrondNetwork/elrond-go/process/economics" factoryVM "github.com/ElrondNetwork/elrond-go/process/factory" "github.com/ElrondNetwork/elrond-go/process/smartContract" "github.com/ElrondNetwork/elrond-go/process/smartContract/hooks" @@ -107,6 +108,13 @@ VERSION: Usage: "The main configuration file to load", Value: "./config/config.toml", } + // configurationEconomicsFile defines a flag for the path to the economics toml configuration file + configurationEconomicsFile = cli.StringFlag{ + Name: "configEconomics", + Usage: "The economics configuration file to load", + Value: "./config/economics.toml", + } + // p2pConfigurationFile defines a flag for the path to the toml file containing P2P configuration p2pConfigurationFile = cli.StringFlag{ Name: "p2pconfig", @@ -289,6 +297,7 @@ func main() { nodesFile, port, configurationFile, + configurationEconomicsFile, p2pConfigurationFile, txSignSk, sk, @@ -365,6 +374,13 @@ func startNode(ctx *cli.Context, log *logger.Logger, version string) error { } log.Info(fmt.Sprintf("Initialized with config from: %s", configurationFileName)) + configurationEconomicsFileName := ctx.GlobalString(configurationEconomicsFile.Name) + economicsConfig, err := loadEconomicsConfig(configurationEconomicsFileName, log) + if err != nil { + return err + } + log.Info(fmt.Sprintf("Initialized with 
config economics from: %s", configurationEconomicsFileName)) + p2pConfigurationFileName := ctx.GlobalString(p2pConfigurationFile.Name) p2pConfig, err := core.LoadP2PConfig(p2pConfigurationFileName) if err != nil { @@ -631,11 +647,11 @@ func startNode(ctx *cli.Context, log *logger.Logger, version string) error { } } - economicsConfig := &generalConfig.EconomicsConfig + economicsData := economics.NewEconomicsData(economicsConfig) processArgs := factory.NewProcessComponentsFactoryArgs( genesisConfig, - economicsConfig, + economicsData, nodesConfig, syncer, shardCoordinator, @@ -985,6 +1001,15 @@ func loadMainConfig(filepath string, log *logger.Logger) (*config.Config, error) return cfg, nil } +func loadEconomicsConfig(filepath string, log *logger.Logger) (*config.ConfigEconomics, error) { + cfg := &config.ConfigEconomics{} + err := core.LoadTomlFile(cfg, filepath, log) + if err != nil { + return nil, err + } + return cfg, nil +} + func getShardIdFromNodePubKey(pubKey crypto.PublicKey, nodesConfig *sharding.NodesSetup) (uint32, error) { if pubKey == nil { return 0, errors.New("nil public key") diff --git a/config/config.go b/config/config.go index 4c3dccc0447..472c1156f8b 100644 --- a/config/config.go +++ b/config/config.go @@ -56,12 +56,6 @@ type NTPConfig struct { Version int } -// EconomicsConfig will hold the reward configuration -type EconomicsConfig struct { - CommunityAddress string - BurnAddress string -} - // Config will hold the entire application configuration parameters type Config struct { MiniBlocksStorage StorageConfig @@ -106,8 +100,7 @@ type Config struct { Consensus TypeConfig Explorer ExplorerConfig - NTPConfig NTPConfig - EconomicsConfig EconomicsConfig + NTPConfig NTPConfig } // NodeConfig will hold basic p2p settings diff --git a/config/economicsConfig.go b/config/economicsConfig.go new file mode 100644 index 00000000000..d1c1623e127 --- /dev/null +++ b/config/economicsConfig.go @@ -0,0 +1,29 @@ +package config + +// EconomicsAddresses will hold economics addresses +type EconomicsAddresses struct { + CommunityAddress string + BurnAddress string +} + +// RewardsSettings will hold economics rewards settings +type RewardsSettings struct { + RewardsValue uint64 + CommunityPercentage float64 + LeaderPercentage float64 + BurnPercentage float64 +} + +// FeeSettings will hold economics fee settings +type FeeSettings struct { + MinGasPrice uint64 + MinGasLimitForTx uint64 + MinTxFee uint64 +} + +// ConfigEconomics will hold economics config +type ConfigEconomics struct { + EconomicsAddresses EconomicsAddresses + RewardsSettings RewardsSettings + FeeSettings FeeSettings +} diff --git a/config/tomlConfig_test.go b/config/tomlConfig_test.go index 5773ddc42b5..d8bcaebcb0e 100644 --- a/config/tomlConfig_test.go +++ b/config/tomlConfig_test.go @@ -1,6 +1,7 @@ package config import ( + "fmt" "strconv" "testing" @@ -32,9 +33,6 @@ func TestTomlParser(t *testing.T) { consensusType := "bn" - communityAddress := "community" - burnAddress := "burnAddress" - cfgExpected := Config{ MiniBlocksStorage: StorageConfig{ Cache: CacheConfig{ @@ -74,10 +72,6 @@ func TestTomlParser(t *testing.T) { Consensus: TypeConfig{ Type: consensusType, }, - EconomicsConfig: EconomicsConfig{ - CommunityAddress: communityAddress, - BurnAddress: burnAddress, - }, } testString := ` @@ -115,9 +109,6 @@ func TestTomlParser(t *testing.T) { [Consensus] Type = "` + consensusType + `" -[EconomicsConfig] - CommunityAddress = "` + communityAddress + `" - BurnAddress = "` + burnAddress + `" ` cfg := Config{} @@ -126,3 +117,55 @@ 
func TestTomlParser(t *testing.T) { assert.Nil(t, err) assert.Equal(t, cfgExpected, cfg) } + +func TestTomlEconomicsParser(t *testing.T) { + communityAddress := "commAddr" + burnAddress := "burnAddr" + rewardsValue := uint64(500) + communityPercentage := 0.1 + leaderPercentage := 0.1 + burnPercentage := 0.8 + minGasPrice := uint64(1) + minGasLimitForTx := uint64(2) + minTxFee := uint64(3) + + cfgEconomicsExpected := ConfigEconomics{ + EconomicsAddresses: EconomicsAddresses{ + CommunityAddress: communityAddress, + BurnAddress: burnAddress, + }, + RewardsSettings: RewardsSettings{ + RewardsValue: rewardsValue, + CommunityPercentage: communityPercentage, + LeaderPercentage: leaderPercentage, + BurnPercentage: burnPercentage, + }, + FeeSettings: FeeSettings{ + MinGasPrice: minGasPrice, + MinGasLimitForTx: minGasLimitForTx, + MinTxFee: minTxFee, + }, + } + + testString := ` +[EconomicsAddresses] + CommunityAddress = "` + communityAddress + `" + BurnAddress = "` + burnAddress + `" +[RewardsSettings] + RewardsValue = ` + strconv.FormatUint(rewardsValue, 10) + ` + CommunityPercentage = ` + fmt.Sprintf("%.6f", communityPercentage) + ` + LeaderPercentage = ` + fmt.Sprintf("%.6f", leaderPercentage) + ` + BurnPercentage = ` + fmt.Sprintf("%.6f", burnPercentage) + ` +[FeeSettings] + MinGasPrice = ` + strconv.FormatUint(minGasPrice, 10) + ` + MinGasLimitForTx = ` + strconv.FormatUint(minGasLimitForTx, 10) + ` + MinTxFee = ` + strconv.FormatUint(minTxFee, 10) + ` +` + + cfg := ConfigEconomics{} + + err := toml.Unmarshal([]byte(testString), &cfg) + + assert.Nil(t, err) + assert.Equal(t, cfgEconomicsExpected, cfg) +} diff --git a/integrationTests/mock/feeHandlerMock.go b/integrationTests/mock/feeHandlerMock.go new file mode 100644 index 00000000000..881d128d98b --- /dev/null +++ b/integrationTests/mock/feeHandlerMock.go @@ -0,0 +1,27 @@ +package mock + +type FeeHandlerMock struct { + MinGasPriceCalled func() uint64 + MinGasLimitForTxCalled func() uint64 + MinTxFeeCalled func() uint64 +} + +func (fhm *FeeHandlerMock) MinGasPrice() uint64 { + return fhm.MinGasPriceCalled() +} + +func (fhm *FeeHandlerMock) MinGasLimitForTx() uint64 { + return fhm.MinGasLimitForTxCalled() +} + +func (fhm *FeeHandlerMock) MinTxFee() uint64 { + return fhm.MinTxFeeCalled() +} + +// IsInterfaceNil returns true if there is no value under the interface +func (fhm *FeeHandlerMock) IsInterfaceNil() bool { + if fhm == nil { + return true + } + return false +} diff --git a/integrationTests/multiShard/smartContract/testInitilalizer.go b/integrationTests/multiShard/smartContract/testInitilalizer.go index 90024f575a8..619c94b2236 100644 --- a/integrationTests/multiShard/smartContract/testInitilalizer.go +++ b/integrationTests/multiShard/smartContract/testInitilalizer.go @@ -13,6 +13,7 @@ import ( "sync/atomic" "time" + "github.com/ElrondNetwork/elrond-go/config" "github.com/ElrondNetwork/elrond-go/consensus" "github.com/ElrondNetwork/elrond-go/consensus/spos/sposFactory" "github.com/ElrondNetwork/elrond-go/core/partitioning" @@ -45,6 +46,7 @@ import ( "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/block" "github.com/ElrondNetwork/elrond-go/process/coordinator" + "github.com/ElrondNetwork/elrond-go/process/economics" "github.com/ElrondNetwork/elrond-go/process/factory" metaProcess "github.com/ElrondNetwork/elrond-go/process/factory/metachain" "github.com/ElrondNetwork/elrond-go/process/factory/shard" @@ -336,6 +338,12 @@ func createNetNode( 100, ) + economicsData := 
economics.NewEconomicsData(&config.ConfigEconomics{ + EconomicsAddresses: config.EconomicsAddresses{}, + RewardsSettings: config.RewardsSettings{}, + FeeSettings: config.FeeSettings{}, + }) + interimProcFactory, _ := shard.NewIntermediateProcessorsContainerFactory( shardCoordinator, testMarshalizer, @@ -348,6 +356,7 @@ func createNetNode( ), store, dPool, + economicsData, ) interimProcContainer, _ := interimProcFactory.Create() scForwarder, _ := interimProcContainer.Get(dataBlock.SmartContractResultBlock) @@ -390,6 +399,17 @@ func createNetNode( scProcessor, rewardsHandler, txTypeHandler, + &mock.FeeHandlerMock{ + MinGasLimitForTxCalled: func() uint64 { + return 5 + }, + MinTxFeeCalled: func() uint64 { + return 0 + }, + MinGasPriceCalled: func() uint64 { + return 0 + }, + }, ) fact, _ := shard.NewPreProcessorsContainerFactory( @@ -406,6 +426,17 @@ func createNetNode( scProcessor, rewardProcessor, internalTxProducer, + &mock.FeeHandlerMock{ + MinGasLimitForTxCalled: func() uint64 { + return 5 + }, + MinTxFeeCalled: func() uint64 { + return 0 + }, + MinGasPriceCalled: func() uint64 { + return 0 + }, + }, ) container, _ := fact.Create() diff --git a/integrationTests/testInitializer.go b/integrationTests/testInitializer.go index 185f77a2af2..efc5f35ee91 100644 --- a/integrationTests/testInitializer.go +++ b/integrationTests/testInitializer.go @@ -431,6 +431,17 @@ func CreateSimpleTxProcessor(accnts state.AccountsAdapter) process.TransactionPr &mock.SCProcessorMock{}, &mock.UnsignedTxHandlerMock{}, &mock.TxTypeHandlerMock{}, + &mock.FeeHandlerMock{ + MinGasPriceCalled: func() uint64 { + return 0 + }, + MinGasLimitForTxCalled: func() uint64 { + return 5 + }, + MinTxFeeCalled: func() uint64 { + return 0 + }, + }, ) return txProcessor diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index 860d15a6e0e..1e17d4da57f 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -7,6 +7,7 @@ import ( "sync/atomic" "time" + "github.com/ElrondNetwork/elrond-go/config" "github.com/ElrondNetwork/elrond-go/consensus" "github.com/ElrondNetwork/elrond-go/consensus/spos/sposFactory" "github.com/ElrondNetwork/elrond-go/core" @@ -32,6 +33,7 @@ import ( "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/block" "github.com/ElrondNetwork/elrond-go/process/coordinator" + "github.com/ElrondNetwork/elrond-go/process/economics" "github.com/ElrondNetwork/elrond-go/process/factory" metaProcess "github.com/ElrondNetwork/elrond-go/process/factory/metachain" "github.com/ElrondNetwork/elrond-go/process/factory/shard" @@ -332,6 +334,21 @@ func (tpn *TestProcessorNode) initInnerProcessors() { return } + economicsData := economics.NewEconomicsData( + &config.ConfigEconomics{ + EconomicsAddresses: config.EconomicsAddresses{ + "addr1", + "addr2", + }, + RewardsSettings: config.RewardsSettings{ + 1000, 0.10, 0.50, 0.40, + }, + FeeSettings: config.FeeSettings{ + 0, 5, 0, + }, + }, + ) + interimProcFactory, _ := shard.NewIntermediateProcessorsContainerFactory( tpn.ShardCoordinator, TestMarshalizer, @@ -340,6 +357,7 @@ func (tpn *TestProcessorNode) initInnerProcessors() { tpn.SpecialAddressHandler, tpn.Storage, tpn.ShardDataPool, + economicsData, ) tpn.InterimProcContainer, _ = interimProcFactory.Create() @@ -388,6 +406,17 @@ func (tpn *TestProcessorNode) initInnerProcessors() { tpn.ScProcessor, rewardsHandler, txTypeHandler, + &mock.FeeHandlerMock{ + MinGasPriceCalled: func() uint64 { + return 0 + }, + 
MinGasLimitForTxCalled: func() uint64 { + return 5 + }, + MinTxFeeCalled: func() uint64 { + return 0 + }, + }, ) fact, _ := shard.NewPreProcessorsContainerFactory( @@ -404,6 +433,17 @@ func (tpn *TestProcessorNode) initInnerProcessors() { tpn.ScProcessor.(process.SmartContractResultProcessor), tpn.RewardsProcessor, internalTxProducer, + &mock.FeeHandlerMock{ + MinGasPriceCalled: func() uint64 { + return 0 + }, + MinGasLimitForTxCalled: func() uint64 { + return 5 + }, + MinTxFeeCalled: func() uint64 { + return 0 + }, + }, ) tpn.PreProcessorsContainer, _ = fact.Create() diff --git a/integrationTests/vm/testInitializer.go b/integrationTests/vm/testInitializer.go index c940c7db504..46a987cea7b 100644 --- a/integrationTests/vm/testInitializer.go +++ b/integrationTests/vm/testInitializer.go @@ -118,6 +118,7 @@ func CreateTxProcessorWithOneSCExecutorMockVM(accnts state.AccountsAdapter, opGa scProcessor, &mock.UnsignedTxHandlerMock{}, txTypeHandler, + &mock.FeeHandlerMock{}, ) return txProcessor @@ -178,6 +179,7 @@ func CreateTxProcessorWithOneSCExecutorIeleVM( scProcessor, &mock.UnsignedTxHandlerMock{}, txTypeHandler, + &mock.FeeHandlerMock{}, ) return txProcessor, blockChainHook diff --git a/process/block/preprocess/rewardsHandler.go b/process/block/preprocess/rewardsHandler.go index 099a8008e46..c866a26b25e 100644 --- a/process/block/preprocess/rewardsHandler.go +++ b/process/block/preprocess/rewardsHandler.go @@ -16,13 +16,6 @@ import ( "github.com/ElrondNetwork/elrond-go/sharding" ) -const communityPercentage = 0.1 // 1 = 100%, 0 = 0% -const leaderPercentage = 0.5 // 1 = 100%, 0 = 0% -const burnPercentage = 0.4 // 1 = 100%, 0 = 0% - -// TODO: Replace with valid reward value -var rewardValue = big.NewInt(1000) - type rewardsHandler struct { address process.SpecialAddressHandler hasher hashing.Hasher @@ -40,6 +33,8 @@ type rewardsHandler struct { mut sync.Mutex accumulatedFees *big.Int rewardTxsForBlock map[string]*rewardTx.RewardTx + economicsRewards process.RewardsHandler + rewardValue *big.Int } // NewRewardTxHandler constructor for the reward transaction handler @@ -51,6 +46,7 @@ func NewRewardTxHandler( adrConv state.AddressConverter, store dataRetriever.StorageService, rewardTxPool dataRetriever.ShardedDataCacherNotifier, + economicsRewards process.RewardsHandler, ) (*rewardsHandler, error) { if address == nil || address.IsInterfaceNil() { return nil, process.ErrNilSpecialAddressHandler @@ -73,6 +69,11 @@ func NewRewardTxHandler( if rewardTxPool == nil || rewardTxPool.IsInterfaceNil() { return nil, process.ErrNilRewardTxDataPool } + if economicsRewards == nil || economicsRewards.IsInterfaceNil() { + return nil, process.ErrNilEconomicsRewardsHandler + } + + rewardValue := big.NewInt(int64(economicsRewards.RewardsValue())) rtxh := &rewardsHandler{ address: address, @@ -82,6 +83,8 @@ func NewRewardTxHandler( marshalizer: marshalizer, store: store, rewardTxPool: rewardTxPool, + economicsRewards: economicsRewards, + rewardValue: rewardValue, } rtxh.accumulatedFees = big.NewInt(0) @@ -288,7 +291,7 @@ func getPercentageOfValue(value *big.Int, percentage float64) *big.Int { func (rtxh *rewardsHandler) createLeaderTx() *rewardTx.RewardTx { currTx := &rewardTx.RewardTx{} - currTx.Value = getPercentageOfValue(rtxh.accumulatedFees, leaderPercentage) + currTx.Value = getPercentageOfValue(rtxh.accumulatedFees, rtxh.economicsRewards.LeaderPercentage()) currTx.RcvAddr = rtxh.address.LeaderAddress() currTx.ShardId = rtxh.shardCoordinator.SelfId() currTx.Epoch = rtxh.address.Epoch() @@ -300,7 +303,7 @@ 
func (rtxh *rewardsHandler) createLeaderTx() *rewardTx.RewardTx { func (rtxh *rewardsHandler) createBurnTx() *rewardTx.RewardTx { currTx := &rewardTx.RewardTx{} - currTx.Value = getPercentageOfValue(rtxh.accumulatedFees, burnPercentage) + currTx.Value = getPercentageOfValue(rtxh.accumulatedFees, rtxh.economicsRewards.BurnPercentage()) currTx.RcvAddr = rtxh.address.BurnAddress() currTx.ShardId = rtxh.shardCoordinator.SelfId() currTx.Epoch = rtxh.address.Epoch() @@ -312,7 +315,7 @@ func (rtxh *rewardsHandler) createBurnTx() *rewardTx.RewardTx { func (rtxh *rewardsHandler) createCommunityTx() *rewardTx.RewardTx { currTx := &rewardTx.RewardTx{} - currTx.Value = getPercentageOfValue(rtxh.accumulatedFees, communityPercentage) + currTx.Value = getPercentageOfValue(rtxh.accumulatedFees, rtxh.economicsRewards.CommunityPercentage()) currTx.RcvAddr = rtxh.address.ElrondCommunityAddress() currTx.ShardId = rtxh.shardCoordinator.SelfId() currTx.Epoch = rtxh.address.Epoch() @@ -350,7 +353,7 @@ func (rtxh *rewardsHandler) createProtocolRewards() []data.TransactionHandler { consensusRewardTxs := make([]data.TransactionHandler, 0) for _, address := range consensusRewardData.Addresses { rTx := &rewardTx.RewardTx{} - rTx.Value = rewardValue + rTx.Value = rtxh.rewardValue rTx.RcvAddr = []byte(address) rTx.ShardId = rtxh.shardCoordinator.SelfId() rTx.Epoch = consensusRewardData.Epoch @@ -380,7 +383,7 @@ func (rtxh *rewardsHandler) createProtocolRewardsForMeta() []data.TransactionHan } rTx := &rewardTx.RewardTx{} - rTx.Value = rewardValue + rTx.Value = rtxh.rewardValue rTx.RcvAddr = []byte(address) rTx.ShardId = rtxh.shardCoordinator.SelfId() rTx.Epoch = metaConsensusSet.Epoch diff --git a/process/block/preprocess/rewardsHandler_test.go b/process/block/preprocess/rewardsHandler_test.go index 9523a05abee..b343f56b5bd 100644 --- a/process/block/preprocess/rewardsHandler_test.go +++ b/process/block/preprocess/rewardsHandler_test.go @@ -15,6 +15,23 @@ import ( "github.com/stretchr/testify/assert" ) +func RewandsHandlerMock() *mock.RewardsHandlerMock { + return &mock.RewardsHandlerMock{ + RewardsValueCalled: func() uint64 { + return 1000 + }, + CommunityPercentageCalled: func() float64 { + return 0.10 + }, + LeaderPercentageCalled: func() float64 { + return 0.50 + }, + BurnPercentageCalled: func() float64 { + return 0.40 + }, + } +} + func TestNewRewardTxHandler_NilSpecialAddressShouldErr(t *testing.T) { t.Parallel() @@ -27,6 +44,7 @@ func TestNewRewardTxHandler_NilSpecialAddressShouldErr(t *testing.T) { &mock.AddressConverterMock{}, &mock.ChainStorerMock{}, tdp.RewardTransactions(), + RewandsHandlerMock(), ) assert.Nil(t, th) @@ -45,6 +63,7 @@ func TestNewRewardTxHandler_NilHasher(t *testing.T) { &mock.AddressConverterMock{}, &mock.ChainStorerMock{}, tdp.RewardTransactions(), + RewandsHandlerMock(), ) assert.Nil(t, th) @@ -63,6 +82,7 @@ func TestNewRewardTxHandler_NilMarshalizer(t *testing.T) { &mock.AddressConverterMock{}, &mock.ChainStorerMock{}, tdp.RewardTransactions(), + RewandsHandlerMock(), ) assert.Nil(t, th) @@ -81,6 +101,7 @@ func TestNewRewardTxHandler_NilShardCoordinator(t *testing.T) { &mock.AddressConverterMock{}, &mock.ChainStorerMock{}, tdp.RewardTransactions(), + RewandsHandlerMock(), ) assert.Nil(t, th) @@ -99,6 +120,7 @@ func TestNewRewardTxHandler_NilAddressConverter(t *testing.T) { nil, &mock.ChainStorerMock{}, tdp.RewardTransactions(), + RewandsHandlerMock(), ) assert.Nil(t, th) @@ -117,6 +139,7 @@ func TestNewRewardTxHandler_NilChainStorer(t *testing.T) { &mock.AddressConverterMock{}, nil, 
tdp.RewardTransactions(), + RewandsHandlerMock(), ) assert.Nil(t, th) @@ -134,6 +157,7 @@ func TestNewRewardTxHandler_NilRewardsPool(t *testing.T) { &mock.AddressConverterMock{}, &mock.ChainStorerMock{}, nil, + RewandsHandlerMock(), ) assert.Nil(t, th) @@ -152,6 +176,7 @@ func TestNewRewardTxHandler_ValsOk(t *testing.T) { &mock.AddressConverterMock{}, &mock.ChainStorerMock{}, tdp.RewardTransactions(), + RewandsHandlerMock(), ) assert.Nil(t, err) @@ -170,6 +195,7 @@ func TestRewardsHandler_AddIntermediateTransactions(t *testing.T) { &mock.AddressConverterMock{}, &mock.ChainStorerMock{}, tdp.RewardTransactions(), + RewandsHandlerMock(), ) assert.Nil(t, err) @@ -191,6 +217,7 @@ func TestRewardsHandler_ProcessTransactionFee(t *testing.T) { &mock.AddressConverterMock{}, &mock.ChainStorerMock{}, tdp.RewardTransactions(), + RewandsHandlerMock(), ) assert.Nil(t, err) @@ -218,6 +245,7 @@ func TestRewardsHandler_cleanCachedData(t *testing.T) { &mock.AddressConverterMock{}, &mock.ChainStorerMock{}, tdp.RewardTransactions(), + RewandsHandlerMock(), ) assert.Nil(t, err) @@ -245,6 +273,7 @@ func TestRewardsHandler_CreateRewardsFromFees(t *testing.T) { &mock.AddressConverterMock{}, &mock.ChainStorerMock{}, tdp.RewardTransactions(), + RewandsHandlerMock(), ) assert.Nil(t, err) @@ -282,6 +311,7 @@ func TestRewardsHandler_VerifyCreatedRewardsTxsRewardTxNotFound(t *testing.T) { adrConv, &mock.ChainStorerMock{}, tdp.RewardTransactions(), + RewandsHandlerMock(), ) assert.Nil(t, err) @@ -316,6 +346,7 @@ func TestRewardsHandler_VerifyCreatedRewardsTxsTotalTxsFeesDoNotMatch(t *testing adrConv, &mock.ChainStorerMock{}, tdp.RewardTransactions(), + RewandsHandlerMock(), ) assert.Nil(t, err) @@ -352,6 +383,7 @@ func TestRewardsHandler_VerifyCreatedRewardsTxsOK(t *testing.T) { adrConv, &mock.ChainStorerMock{}, tdp.RewardTransactions(), + RewandsHandlerMock(), ) assert.Nil(t, err) @@ -385,6 +417,7 @@ func TestRewardsHandler_CreateAllInterMiniBlocksOK(t *testing.T) { &mock.AddressConverterMock{}, &mock.ChainStorerMock{}, tdp.RewardTransactions(), + RewandsHandlerMock(), ) assert.Nil(t, err) @@ -423,6 +456,7 @@ func TestRewardsHandler_GetAllCurrentFinishedTxs(t *testing.T) { &mock.AddressConverterMock{}, &mock.ChainStorerMock{}, tdp.RewardTransactions(), + RewandsHandlerMock(), ) assert.Nil(t, err) @@ -471,6 +505,7 @@ func TestRewardsHandler_CreateMarshalizedDataShouldWork(t *testing.T) { &mock.AddressConverterMock{}, &mock.ChainStorerMock{}, tdp.RewardTransactions(), + RewandsHandlerMock(), ) txs := []data.TransactionHandler{ @@ -522,6 +557,7 @@ func TestRewardsHandler_CreateBlockStartedShouldCreateProtocolReward(t *testing. &mock.AddressConverterMock{}, &mock.ChainStorerMock{}, tdp.RewardTransactions(), + RewandsHandlerMock(), ) assert.Nil(t, th.protocolRewards) @@ -548,6 +584,7 @@ func TestRewardsHandler_SaveCurrentIntermediateTxToStorageShouldWork(t *testing. 
}, }, tdp.RewardTransactions(), + RewandsHandlerMock(), ) txs := []data.TransactionHandler{ diff --git a/process/block/preprocess/transactions.go b/process/block/preprocess/transactions.go index 7aece2597e0..ade91636526 100644 --- a/process/block/preprocess/transactions.go +++ b/process/block/preprocess/transactions.go @@ -38,6 +38,7 @@ type transactions struct { orderedTxs map[string][]*transaction.Transaction orderedTxHashes map[string][][]byte mutOrderedTxs sync.RWMutex + economicsFee process.FeeHandler } // NewTransactionPreprocessor creates a new transaction preprocessor object @@ -50,6 +51,7 @@ func NewTransactionPreprocessor( shardCoordinator sharding.Coordinator, accounts state.AccountsAdapter, onRequestTransaction func(shardID uint32, txHashes [][]byte), + economicsFee process.FeeHandler, ) (*transactions, error) { if hasher == nil || hasher.IsInterfaceNil() { @@ -90,6 +92,7 @@ func NewTransactionPreprocessor( onRequestTransaction: onRequestTransaction, txProcessor: txProcessor, accounts: accounts, + economicsFee: economicsFee, } txs.chRcvAllTxs = make(chan bool) @@ -409,9 +412,6 @@ func (txs *transactions) getAllTxsFromMiniBlock( return transactions, txHashes, nil } -//TODO move this constant to txFeeHandler -const minGasLimitForTx = uint64(5) - //TODO move this to smart contract address calculation component func isSmartContractAddress(rcvAddress []byte) bool { isEmptyAddress := bytes.Equal(rcvAddress, make([]byte, len(rcvAddress))) @@ -519,7 +519,7 @@ func (txs *transactions) CreateAndProcessMiniBlock( continue } - currTxGasLimit := minGasLimitForTx + currTxGasLimit := txs.economicsFee.MinGasLimitForTx() if isSmartContractAddress(orderedTxs[index].RcvAddr) { currTxGasLimit = orderedTxs[index].GasLimit } diff --git a/process/block/preprocess/transactions_test.go b/process/block/preprocess/transactions_test.go index 09cf2d50564..068d338b8ae 100644 --- a/process/block/preprocess/transactions_test.go +++ b/process/block/preprocess/transactions_test.go @@ -27,6 +27,20 @@ import ( "github.com/stretchr/testify/assert" ) +func FeeHandlerMock() *mock.FeeHandlerMock { + return &mock.FeeHandlerMock{ + MinGasPriceCalled: func() uint64 { + return 0 + }, + MinGasLimitForTxCalled: func() uint64 { + return 5 + }, + MinTxFeeCalled: func() uint64 { + return 0 + }, + } +} + func initDataPool() *mock.PoolsHolderStub { sdp := &mock.PoolsHolderStub{ TransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { @@ -187,6 +201,7 @@ func TestTxsPreprocessor_NewTransactionPreprocessorNilPool(t *testing.T) { mock.NewMultiShardsCoordinatorMock(3), &mock.AccountsStub{}, requestTransaction, + FeeHandlerMock(), ) assert.Nil(t, txs) @@ -207,6 +222,7 @@ func TestTxsPreprocessor_NewTransactionPreprocessorNilStore(t *testing.T) { mock.NewMultiShardsCoordinatorMock(3), &mock.AccountsStub{}, requestTransaction, + FeeHandlerMock(), ) assert.Nil(t, txs) @@ -227,6 +243,7 @@ func TestTxsPreprocessor_NewTransactionPreprocessorNilHasher(t *testing.T) { mock.NewMultiShardsCoordinatorMock(3), &mock.AccountsStub{}, requestTransaction, + FeeHandlerMock(), ) assert.Nil(t, txs) @@ -247,6 +264,7 @@ func TestTxsPreprocessor_NewTransactionPreprocessorNilMarsalizer(t *testing.T) { mock.NewMultiShardsCoordinatorMock(3), &mock.AccountsStub{}, requestTransaction, + FeeHandlerMock(), ) assert.Nil(t, txs) @@ -267,6 +285,7 @@ func TestTxsPreprocessor_NewTransactionPreprocessorNilTxProce(t *testing.T) { mock.NewMultiShardsCoordinatorMock(3), &mock.AccountsStub{}, requestTransaction, + FeeHandlerMock(), ) assert.Nil(t, txs) @@ 
-287,6 +306,7 @@ func TestTxsPreprocessor_NewTransactionPreprocessorNilShardCoord(t *testing.T) { nil, &mock.AccountsStub{}, requestTransaction, + FeeHandlerMock(), ) assert.Nil(t, txs) @@ -307,6 +327,7 @@ func TestTxsPreprocessor_NewTransactionPreprocessorNilAccounts(t *testing.T) { mock.NewMultiShardsCoordinatorMock(3), nil, requestTransaction, + FeeHandlerMock(), ) assert.Nil(t, txs) @@ -326,6 +347,7 @@ func TestTxsPreprocessor_NewTransactionPreprocessorNilRequestFunc(t *testing.T) mock.NewMultiShardsCoordinatorMock(3), &mock.AccountsStub{}, nil, + FeeHandlerMock(), ) assert.Nil(t, txs) @@ -345,6 +367,7 @@ func TestTxsPreProcessor_GetTransactionFromPool(t *testing.T) { mock.NewMultiShardsCoordinatorMock(3), &mock.AccountsStub{}, requestTransaction, + FeeHandlerMock(), ) txHash := []byte("tx1_hash") tx, _ := process.GetTransactionHandlerFromPool(1, 1, txHash, tdp.Transactions()) @@ -366,6 +389,7 @@ func TestTransactionPreprocessor_RequestTransactionFromNetwork(t *testing.T) { mock.NewMultiShardsCoordinatorMock(3), &mock.AccountsStub{}, requestTransaction, + FeeHandlerMock(), ) shardId := uint32(1) txHash1 := []byte("tx_hash1") @@ -393,6 +417,7 @@ func TestTransactionPreprocessor_RequestBlockTransactionFromMiniBlockFromNetwork mock.NewMultiShardsCoordinatorMock(3), &mock.AccountsStub{}, requestTransaction, + FeeHandlerMock(), ) shardId := uint32(1) @@ -435,6 +460,7 @@ func TestTransactionPreprocessor_ReceivedTransactionShouldEraseRequested(t *test mock.NewMultiShardsCoordinatorMock(3), &mock.AccountsStub{}, requestTransaction, + FeeHandlerMock(), ) //add 3 tx hashes on requested list @@ -508,6 +534,7 @@ func TestTransactionPreprocessor_GetAllTxsFromMiniBlockShouldWork(t *testing.T) mock.NewMultiShardsCoordinatorMock(3), &mock.AccountsStub{}, requestTransaction, + FeeHandlerMock(), ) mb := &block.MiniBlock{ @@ -542,6 +569,7 @@ func TestTransactionPreprocessor_RemoveBlockTxsFromPoolNilBlockShouldErr(t *test mock.NewMultiShardsCoordinatorMock(3), &mock.AccountsStub{}, requestTransaction, + FeeHandlerMock(), ) err := txs.RemoveTxBlockFromPools(nil, tdp.MiniBlocks()) assert.NotNil(t, err) @@ -561,6 +589,7 @@ func TestTransactionPreprocessor_RemoveBlockTxsFromPoolOK(t *testing.T) { mock.NewMultiShardsCoordinatorMock(3), &mock.AccountsStub{}, requestTransaction, + FeeHandlerMock(), ) body := make(block.Body, 0) txHash := []byte("txHash") @@ -595,6 +624,7 @@ func TestTransactions_CreateAndProcessMiniBlockCrossShardGasLimitAddAll(t *testi mock.NewMultiShardsCoordinatorMock(3), &mock.AccountsStub{}, requestTransaction, + FeeHandlerMock(), ) assert.NotNil(t, txs) @@ -637,6 +667,7 @@ func TestTransactions_CreateAndProcessMiniBlockCrossShardGasLimitAddAllAsNoSCCal mock.NewMultiShardsCoordinatorMock(3), &mock.AccountsStub{}, requestTransaction, + FeeHandlerMock(), ) assert.NotNil(t, txs) @@ -681,6 +712,7 @@ func TestTransactions_CreateAndProcessMiniBlockCrossShardGasLimitAddOnly5asSCCal mock.NewMultiShardsCoordinatorMock(3), &mock.AccountsStub{}, requestTransaction, + FeeHandlerMock(), ) assert.NotNil(t, txs) diff --git a/process/block/shardblock_test.go b/process/block/shardblock_test.go index 3cda2c559f5..f24661e06f0 100644 --- a/process/block/shardblock_test.go +++ b/process/block/shardblock_test.go @@ -437,6 +437,7 @@ func TestShardProcessor_ProcessBlockWithInvalidTransactionShouldErr(t *testing.T &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, &mock.IntermediateTransactionHandlerMock{}, + &mock.FeeHandlerMock{}, ) container, _ := factory.Create() @@ -625,6 +626,7 @@ func 
TestShardProcessor_ProcessBlockWithErrOnProcessBlockTransactionsCallShouldR &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, &mock.IntermediateTransactionHandlerMock{}, + &mock.FeeHandlerMock{}, ) container, _ := factory.Create() @@ -1803,6 +1805,7 @@ func TestShardProcessor_CommitBlockNoTxInPoolShouldErr(t *testing.T) { &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, &mock.IntermediateTransactionHandlerMock{}, + &mock.FeeHandlerMock{}, ) container, _ := factory.Create() @@ -2366,6 +2369,7 @@ func TestShardProcessor_MarshalizedDataToBroadcastShouldWork(t *testing.T) { &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, &mock.IntermediateTransactionHandlerMock{}, + &mock.FeeHandlerMock{}, ) container, _ := factory.Create() @@ -2468,6 +2472,7 @@ func TestShardProcessor_MarshalizedDataMarshalWithoutSuccess(t *testing.T) { &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, &mock.IntermediateTransactionHandlerMock{}, + &mock.FeeHandlerMock{}, ) container, _ := factory.Create() @@ -2884,6 +2889,11 @@ func TestShardProcessor_CreateMiniBlocksShouldWorkWithIntraShardTxs(t *testing.T &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, &mock.IntermediateTransactionHandlerMock{}, + &mock.FeeHandlerMock{ + MinGasLimitForTxCalled: func() uint64 { + return 0 + }, + }, ) container, _ := factory.Create() @@ -3074,6 +3084,7 @@ func TestShardProcessor_RestoreBlockIntoPoolsShouldWork(t *testing.T) { &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, &mock.IntermediateTransactionHandlerMock{}, + &mock.FeeHandlerMock{}, ) container, _ := factory.Create() diff --git a/process/coordinator/process_test.go b/process/coordinator/process_test.go index 9e2f2b5538b..b9fe2cac414 100644 --- a/process/coordinator/process_test.go +++ b/process/coordinator/process_test.go @@ -11,6 +11,7 @@ import ( "testing" "time" + "github.com/ElrondNetwork/elrond-go/config" "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/block" @@ -21,6 +22,7 @@ import ( "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/dataRetriever/shardedData" "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/economics" "github.com/ElrondNetwork/elrond-go/process/factory" "github.com/ElrondNetwork/elrond-go/process/factory/shard" "github.com/ElrondNetwork/elrond-go/process/mock" @@ -30,10 +32,24 @@ import ( "github.com/stretchr/testify/assert" ) +func FeeHandlerMock() *mock.FeeHandlerMock { + return &mock.FeeHandlerMock{ + MinGasPriceCalled: func() uint64 { + return 0 + }, + MinGasLimitForTxCalled: func() uint64 { + return 5 + }, + MinTxFeeCalled: func() uint64 { + return 0 + }, + } +} + func createShardedDataChacherNotifier( handler data.TransactionHandler, testHash []byte, -) (func() dataRetriever.ShardedDataCacherNotifier ) { +) func() dataRetriever.ShardedDataCacherNotifier { return func() dataRetriever.ShardedDataCacherNotifier { return &mock.ShardedDataStub{ RegisterHandlerCalled: func(i func(key []byte)) {}, @@ -369,6 +385,7 @@ func createPreProcessorContainer() process.PreProcessorsContainer { &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, &mock.IntermediateTransactionHandlerMock{}, + FeeHandlerMock(), ) container, _ := preFactory.Create() @@ -376,6 +393,13 @@ func createPreProcessorContainer() process.PreProcessorsContainer { } func 
createInterimProcessorContainer() process.IntermediateProcessorContainer { + economicsData := economics.NewEconomicsData( + &config.ConfigEconomics{ + EconomicsAddresses: config.EconomicsAddresses{}, + RewardsSettings: config.RewardsSettings{}, + FeeSettings: config.FeeSettings{}, + }, + ) preFactory, _ := shard.NewIntermediateProcessorsContainerFactory( mock.NewMultiShardsCoordinatorMock(5), &mock.MarshalizerMock{}, @@ -384,6 +408,7 @@ func createInterimProcessorContainer() process.IntermediateProcessorContainer { &mock.SpecialAddressHandlerMock{}, initStore(), initDataPool([]byte("test_hash1")), + economicsData, ) container, _ := preFactory.Create() @@ -409,6 +434,7 @@ func createPreProcessorContainerWithDataPool(dataPool dataRetriever.PoolsHolder) &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, &mock.IntermediateTransactionHandlerMock{}, + FeeHandlerMock(), ) container, _ := preFactory.Create() @@ -680,6 +706,7 @@ func TestTransactionCoordinator_CreateMbsAndProcessCrossShardTransactions(t *tes &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, &mock.IntermediateTransactionHandlerMock{}, + FeeHandlerMock(), ) container, _ := preFactory.Create() @@ -770,6 +797,7 @@ func TestTransactionCoordinator_CreateMbsAndProcessTransactionsFromMeNothingToPr &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, &mock.IntermediateTransactionHandlerMock{}, + FeeHandlerMock(), ) container, _ := preFactory.Create() @@ -1134,6 +1162,7 @@ func TestTransactionCoordinator_receivedMiniBlockRequestTxs(t *testing.T) { &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, &mock.IntermediateTransactionHandlerMock{}, + FeeHandlerMock(), ) container, _ := preFactory.Create() @@ -1288,6 +1317,7 @@ func TestTransactionCoordinator_ProcessBlockTransactionProcessTxError(t *testing &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, &mock.IntermediateTransactionHandlerMock{}, + FeeHandlerMock(), ) container, _ := preFactory.Create() @@ -1408,6 +1438,7 @@ func TestTransactionCoordinator_RequestMiniblocks(t *testing.T) { &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, &mock.IntermediateTransactionHandlerMock{}, + FeeHandlerMock(), ) container, _ := preFactory.Create() @@ -1523,6 +1554,7 @@ func TestShardProcessor_ProcessMiniBlockCompleteWithOkTxsShouldExecuteThemAndNot &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, &mock.IntermediateTransactionHandlerMock{}, + FeeHandlerMock(), ) container, _ := preFactory.Create() @@ -1629,6 +1661,7 @@ func TestShardProcessor_ProcessMiniBlockCompleteWithErrorWhileProcessShouldCallR &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, &mock.IntermediateTransactionHandlerMock{}, + FeeHandlerMock(), ) container, _ := preFactory.Create() @@ -1656,6 +1689,13 @@ func TestShardProcessor_ProcessMiniBlockCompleteWithErrorWhileProcessShouldCallR func TestTransactionCoordinator_VerifyCreatedBlockTransactionsNilOrMiss(t *testing.T) { t.Parallel() + economicsData := economics.NewEconomicsData( + &config.ConfigEconomics{ + EconomicsAddresses: config.EconomicsAddresses{}, + RewardsSettings: config.RewardsSettings{}, + FeeSettings: config.FeeSettings{}, + }, + ) txHash := []byte("txHash") tdp := initDataPool(txHash) shardCoordinator := mock.NewMultiShardsCoordinatorMock(5) @@ -1668,6 +1708,7 @@ func TestTransactionCoordinator_VerifyCreatedBlockTransactionsNilOrMiss(t *testi &mock.SpecialAddressHandlerMock{}, 
&mock.ChainStorerMock{}, tdp, + economicsData, ) container, _ := preFactory.Create() @@ -1701,6 +1742,13 @@ func TestTransactionCoordinator_VerifyCreatedBlockTransactionsNilOrMiss(t *testi func TestTransactionCoordinator_VerifyCreatedBlockTransactionsOk(t *testing.T) { t.Parallel() + economicsData := economics.NewEconomicsData( + &config.ConfigEconomics{ + EconomicsAddresses: config.EconomicsAddresses{}, + RewardsSettings: config.RewardsSettings{}, + FeeSettings: config.FeeSettings{}, + }, + ) txHash := []byte("txHash") tdp := initDataPool(txHash) shardCoordinator := mock.NewMultiShardsCoordinatorMock(5) @@ -1713,6 +1761,7 @@ func TestTransactionCoordinator_VerifyCreatedBlockTransactionsOk(t *testing.T) { &mock.SpecialAddressHandlerMock{}, &mock.ChainStorerMock{}, tdp, + economicsData, ) container, _ := preFactory.Create() diff --git a/process/economics/economicsData.go b/process/economics/economicsData.go new file mode 100644 index 00000000000..0d9b63b3d96 --- /dev/null +++ b/process/economics/economicsData.go @@ -0,0 +1,88 @@ +package economics + +import ( + "github.com/ElrondNetwork/elrond-go/config" +) + +// EconomicsData will store information about economics +type EconomicsData struct { + rewardsValue uint64 + communityPercentage float64 + leaderPercentage float64 + burnPercentage float64 + + minGasPrice uint64 + minGasLimitForTx uint64 + minTxFee uint64 + + communityAddress string + burnAddress string +} + +// NewEconomicsData will create an object with information about economics parameters +func NewEconomicsData(economics *config.ConfigEconomics) *EconomicsData { + return &EconomicsData{ + rewardsValue: economics.RewardsSettings.RewardsValue, + communityPercentage: economics.RewardsSettings.CommunityPercentage, + leaderPercentage: economics.RewardsSettings.LeaderPercentage, + burnPercentage: economics.RewardsSettings.BurnPercentage, + minGasPrice: economics.FeeSettings.MinGasPrice, + minGasLimitForTx: economics.FeeSettings.MinGasLimitForTx, + minTxFee: economics.FeeSettings.MinTxFee, + communityAddress: economics.EconomicsAddresses.CommunityAddress, + burnAddress: economics.EconomicsAddresses.BurnAddress, + } +} + +// RewardsValue will return rewards value +func (ed *EconomicsData) RewardsValue() uint64 { + return ed.rewardsValue +} + +// CommunityPercentage will return community reward percentage +func (ed *EconomicsData) CommunityPercentage() float64 { + return ed.communityPercentage +} + +// LeaderPercentage will return leader reward percentage +func (ed *EconomicsData) LeaderPercentage() float64 { + return ed.leaderPercentage +} + +// BurnPercentage will return burn percentage +func (ed *EconomicsData) BurnPercentage() float64 { + return ed.burnPercentage +} + +// MinGasPrice will return minimum gas price +func (ed *EconomicsData) MinGasPrice() uint64 { + return ed.minGasPrice +} + +// MinGasLimitForTx will return minimum gas limit +func (ed *EconomicsData) MinGasLimitForTx() uint64 { + return ed.minGasLimitForTx +} + +// MinTxFee will return minimum transaction fee +func (ed *EconomicsData) MinTxFee() uint64 { + return ed.minTxFee +} + +// CommunityAddress will return community address +func (ed *EconomicsData) CommunityAddress() string { + return ed.communityAddress +} + +// BurnAddress will return burn address +func (ed *EconomicsData) BurnAddress() string { + return ed.burnAddress +} + +// IsInterfaceNil returns true if there is no value under the interface +func (ed *EconomicsData) IsInterfaceNil() bool { + if ed == nil { + return true + } + return false +} diff --git
a/process/economics/economicsData_test.go b/process/economics/economicsData_test.go new file mode 100644 index 00000000000..2cb35ac7243 --- /dev/null +++ b/process/economics/economicsData_test.go @@ -0,0 +1,142 @@ +package economics_test + +import ( + "testing" + + "github.com/ElrondNetwork/elrond-go/config" + "github.com/ElrondNetwork/elrond-go/process/economics" + "github.com/stretchr/testify/assert" +) + +func TestEconomicsData_ShouldWork(t *testing.T) { + t.Parallel() + + economicsData := economics.NewEconomicsData(&config.ConfigEconomics{}) + assert.NotNil(t, economicsData) +} + +func TestEconomicsData_RewardsValue(t *testing.T) { + t.Parallel() + + rewardsValue := uint64(100) + economicsData := economics.NewEconomicsData(&config.ConfigEconomics{ + RewardsSettings: config.RewardsSettings{ + RewardsValue: rewardsValue, + }, + }) + + value := economicsData.RewardsValue() + assert.Equal(t, rewardsValue, value) +} + +func TestEconomicsData_CommunityPercentage(t *testing.T) { + t.Parallel() + + communityPercentage := 0.50 + economicsData := economics.NewEconomicsData(&config.ConfigEconomics{ + RewardsSettings: config.RewardsSettings{ + CommunityPercentage: communityPercentage, + }, + }) + + value := economicsData.CommunityPercentage() + assert.Equal(t, communityPercentage, value) +} + +func TestEconomicsData_LeaderPercentage(t *testing.T) { + t.Parallel() + + leaderPercentage := 0.40 + economicsData := economics.NewEconomicsData(&config.ConfigEconomics{ + RewardsSettings: config.RewardsSettings{ + LeaderPercentage: leaderPercentage, + }, + }) + + value := economicsData.LeaderPercentage() + assert.Equal(t, leaderPercentage, value) +} + +func TestEconomicsData_BurnPercentage(t *testing.T) { + t.Parallel() + + burnPercentage := 0.41 + economicsData := economics.NewEconomicsData(&config.ConfigEconomics{ + RewardsSettings: config.RewardsSettings{ + BurnPercentage: burnPercentage, + }, + }) + + value := economicsData.BurnPercentage() + assert.Equal(t, burnPercentage, value) +} + +func TestEconomicsData_MinGasPrice(t *testing.T) { + t.Parallel() + + minGasPrice := uint64(500) + economicsData := economics.NewEconomicsData(&config.ConfigEconomics{ + FeeSettings: config.FeeSettings{ + MinGasPrice: minGasPrice, + }, + }) + + value := economicsData.MinGasPrice() + assert.Equal(t, minGasPrice, value) +} + +func TestEconomicsData_MinGasLimitForTx(t *testing.T) { + t.Parallel() + + minGasLimitForTx := uint64(1500) + economicsData := economics.NewEconomicsData(&config.ConfigEconomics{ + FeeSettings: config.FeeSettings{ + MinGasLimitForTx: minGasLimitForTx, + }, + }) + + value := economicsData.MinGasLimitForTx() + assert.Equal(t, minGasLimitForTx, value) +} + +func TestEconomicsData_MinTxFee(t *testing.T) { + t.Parallel() + + minTxFee := uint64(502) + economicsData := economics.NewEconomicsData(&config.ConfigEconomics{ + FeeSettings: config.FeeSettings{ + MinTxFee: minTxFee, + }, + }) + + value := economicsData.MinTxFee() + assert.Equal(t, minTxFee, value) +} + +func TestEconomicsData_CommunityAddress(t *testing.T) { + t.Parallel() + + communityAddress := "addr1" + economicsData := economics.NewEconomicsData(&config.ConfigEconomics{ + EconomicsAddresses: config.EconomicsAddresses{ + CommunityAddress: communityAddress, + }, + }) + + value := economicsData.CommunityAddress() + assert.Equal(t, communityAddress, value) +} + +func TestEconomicsData_BurnAddress(t *testing.T) { + t.Parallel() + + burnAddress := "addr2" + economicsData := economics.NewEconomicsData(&config.ConfigEconomics{ + EconomicsAddresses: 
config.EconomicsAddresses{ + BurnAddress: burnAddress, + }, + }) + + value := economicsData.BurnAddress() + assert.Equal(t, burnAddress, value) +} diff --git a/process/errors.go b/process/errors.go index 48f91a46d65..5d939fcd4d6 100644 --- a/process/errors.go +++ b/process/errors.go @@ -91,15 +91,9 @@ var ErrNilRootHash = errors.New("root hash is nil") // ErrWrongNonceInBlock signals the nonce in block is different than expected nonce var ErrWrongNonceInBlock = errors.New("wrong nonce in block") -// ErrWrongNonceInOtherChainBlock signals the nonce in block is different than expected nonce -var ErrWrongNonceInOtherChainBlock = errors.New("wrong nonce in other chain block") - // ErrBlockHashDoesNotMatch signals that header hash does not match with the previous one var ErrBlockHashDoesNotMatch = errors.New("block hash does not match") -// ErrBlockHashDoesNotMatchInOtherChainBlock signals that header hash does not match with the previous one -var ErrBlockHashDoesNotMatchInOtherChainBlock = errors.New("block hash does not match in other chain block") - // ErrMissingTransaction signals that one transaction is missing var ErrMissingTransaction = errors.New("missing transaction") @@ -112,9 +106,6 @@ var ErrUnmarshalWithoutSuccess = errors.New("unmarshal without success") // ErrRootStateDoesNotMatch signals that root state does not match var ErrRootStateDoesNotMatch = errors.New("root state does not match") -// ErrRootStateDoesNotMatchInOtherChainBlock signals that root state does not match -var ErrRootStateDoesNotMatchInOtherChainBlock = errors.New("root state does not match in other chain block") - // ErrAccountStateDirty signals that the accounts were modified before starting the current modification var ErrAccountStateDirty = errors.New("accountState was dirty before starting to change") @@ -310,18 +301,12 @@ var ErrNilPrevRandSeed = errors.New("provided previous rand seed is nil") // ErrNilRequestHeaderHandlerByNonce signals that a nil header request handler by nonce func was provided var ErrNilRequestHeaderHandlerByNonce = errors.New("nil request header handler by nonce") -// ErrLowerRoundInOtherChainBlock signals that header round too low for processing it -var ErrLowerRoundInOtherChainBlock = errors.New("header round is lower than last committed in other chain block") - // ErrLowerRoundInBlock signals that a header round is too low for processing it var ErrLowerRoundInBlock = errors.New("header round is lower than last committed") // ErrRandSeedDoesNotMatch signals that random seed does not match with the previous one var ErrRandSeedDoesNotMatch = errors.New("random seed do not match") -// ErrRandSeedDoesNotMatchInOtherChainBlock signals that seed does not match with the previous one -var ErrRandSeedDoesNotMatchInOtherChainBlock = errors.New("random seed does not match in other chain block") - // ErrHeaderNotFinal signals that header is not final and it should be var ErrHeaderNotFinal = errors.New("header in metablock is not final") @@ -493,5 +478,11 @@ var ErrOverallBalanceChangeFromSC = errors.New("SC output balance updates are wr // ErrNilTxsPoolsCleaner signals that a nil transactions pools cleaner has been provided var ErrNilTxsPoolsCleaner = errors.New("nil transactions pools cleaner") -// ErrZeroMaxCleanTime signal that cleaning time for pools its less or equals with 0 +// ErrZeroMaxCleanTime signals that cleaning time for pools its less or equals with 0 var ErrZeroMaxCleanTime = errors.New("cleaning time is equal or less than zero") + +// ErrNilEconomicsRewardsHandler signals that 
rewards handler is nil +var ErrNilEconomicsRewardsHandler = errors.New("nil economics rewards handler") + +// ErrNilEconomicsFeeHandler signals that fee handler is nil +var ErrNilEconomicsFeeHandler = errors.New("nil economics fee handler") diff --git a/process/factory/shard/intermediateProcessorsContainerFactory.go b/process/factory/shard/intermediateProcessorsContainerFactory.go index fc70456c858..0e3df3ae758 100644 --- a/process/factory/shard/intermediateProcessorsContainerFactory.go +++ b/process/factory/shard/intermediateProcessorsContainerFactory.go @@ -8,6 +8,7 @@ import ( "github.com/ElrondNetwork/elrond-go/marshal" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/block/preprocess" + "github.com/ElrondNetwork/elrond-go/process/economics" "github.com/ElrondNetwork/elrond-go/process/factory/containers" "github.com/ElrondNetwork/elrond-go/sharding" ) @@ -20,6 +21,7 @@ type intermediateProcessorsContainerFactory struct { specialAddressHandler process.SpecialAddressHandler store dataRetriever.StorageService poolsHolder dataRetriever.PoolsHolder + economics *economics.EconomicsData } // NewIntermediateProcessorsContainerFactory is responsible for creating a new intermediate processors factory object @@ -31,6 +33,7 @@ func NewIntermediateProcessorsContainerFactory( specialAddressHandler process.SpecialAddressHandler, store dataRetriever.StorageService, poolsHolder dataRetriever.PoolsHolder, + economics *economics.EconomicsData, ) (*intermediateProcessorsContainerFactory, error) { if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { @@ -63,6 +66,7 @@ func NewIntermediateProcessorsContainerFactory( specialAddressHandler: specialAddressHandler, store: store, poolsHolder: poolsHolder, + economics: economics, }, nil } @@ -115,6 +119,7 @@ func (ppcm *intermediateProcessorsContainerFactory) createRewardsTxIntermediateP ppcm.addrConverter, ppcm.store, ppcm.poolsHolder.RewardTransactions(), + ppcm.economics, ) return irp, err diff --git a/process/factory/shard/intermediateProcessorsContainerFactory_test.go b/process/factory/shard/intermediateProcessorsContainerFactory_test.go index 3c596728885..e32d70e79c9 100644 --- a/process/factory/shard/intermediateProcessorsContainerFactory_test.go +++ b/process/factory/shard/intermediateProcessorsContainerFactory_test.go @@ -2,6 +2,7 @@ package shard_test import ( "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/economics" "github.com/ElrondNetwork/elrond-go/process/factory/shard" "github.com/ElrondNetwork/elrond-go/process/mock" "github.com/stretchr/testify/assert" @@ -20,6 +21,7 @@ func TestNewIntermediateProcessorsContainerFactory_NilShardCoord(t *testing.T) { &mock.SpecialAddressHandlerMock{}, &mock.ChainStorerMock{}, dPool, + &economics.EconomicsData{}, ) assert.Nil(t, ipcf) @@ -38,6 +40,7 @@ func TestNewIntermediateProcessorsContainerFactory_NilMarshalizer(t *testing.T) &mock.SpecialAddressHandlerMock{}, &mock.ChainStorerMock{}, dPool, + &economics.EconomicsData{}, ) assert.Nil(t, ipcf) @@ -56,6 +59,7 @@ func TestNewIntermediateProcessorsContainerFactory_NilHasher(t *testing.T) { &mock.SpecialAddressHandlerMock{}, &mock.ChainStorerMock{}, dPool, + &economics.EconomicsData{}, ) assert.Nil(t, ipcf) @@ -74,6 +78,7 @@ func TestNewIntermediateProcessorsContainerFactory_NilAdrConv(t *testing.T) { &mock.SpecialAddressHandlerMock{}, &mock.ChainStorerMock{}, dPool, + &economics.EconomicsData{}, ) assert.Nil(t, ipcf) @@ -92,6 +97,7 @@ func 
TestNewIntermediateProcessorsContainerFactory_NilStorer(t *testing.T) { &mock.SpecialAddressHandlerMock{}, nil, dPool, + &economics.EconomicsData{}, ) assert.Nil(t, ipcf) @@ -110,6 +116,7 @@ func TestNewIntermediateProcessorsContainerFactory(t *testing.T) { &mock.SpecialAddressHandlerMock{}, &mock.ChainStorerMock{}, dPool, + &economics.EconomicsData{}, ) assert.Nil(t, err) @@ -128,6 +135,7 @@ func TestIntermediateProcessorsContainerFactory_Create(t *testing.T) { &mock.SpecialAddressHandlerMock{}, &mock.ChainStorerMock{}, dPool, + &economics.EconomicsData{}, ) assert.Nil(t, err) diff --git a/process/factory/shard/preProcessorsContainerFactory.go b/process/factory/shard/preProcessorsContainerFactory.go index 280cf080667..c86f7752b3c 100644 --- a/process/factory/shard/preProcessorsContainerFactory.go +++ b/process/factory/shard/preProcessorsContainerFactory.go @@ -26,6 +26,7 @@ type preProcessorsContainerFactory struct { accounts state.AccountsAdapter requestHandler process.RequestHandler rewardsProducer process.InternalTransactionProducer + economicsFee process.FeeHandler } // NewPreProcessorsContainerFactory is responsible for creating a new preProcessors factory object @@ -43,6 +44,7 @@ func NewPreProcessorsContainerFactory( scResultProcessor process.SmartContractResultProcessor, rewardsTxProcessor process.RewardTransactionProcessor, rewardsProducer process.InternalTransactionProducer, + economicsFee process.FeeHandler, ) (*preProcessorsContainerFactory, error) { if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { @@ -84,6 +86,9 @@ func NewPreProcessorsContainerFactory( if rewardsProducer == nil || rewardsProducer.IsInterfaceNil() { return nil, process.ErrNilInternalTransactionProducer } + if economicsFee == nil || economicsFee.IsInterfaceNil() { + return nil, process.ErrNilEconomicsFeeHandler + } return &preProcessorsContainerFactory{ shardCoordinator: shardCoordinator, @@ -99,6 +104,7 @@ func NewPreProcessorsContainerFactory( rewardsTxProcessor: rewardsTxProcessor, requestHandler: requestHandler, rewardsProducer: rewardsProducer, + economicsFee: economicsFee, }, nil } @@ -149,6 +155,7 @@ func (ppcm *preProcessorsContainerFactory) createTxPreProcessor() (process.PrePr ppcm.shardCoordinator, ppcm.accounts, ppcm.requestHandler.RequestTransaction, + ppcm.economicsFee, ) return txPreprocessor, err diff --git a/process/factory/shard/preProcessorsContainerFactory_test.go b/process/factory/shard/preProcessorsContainerFactory_test.go index 7d25e5015a6..a330b6a09c3 100644 --- a/process/factory/shard/preProcessorsContainerFactory_test.go +++ b/process/factory/shard/preProcessorsContainerFactory_test.go @@ -25,6 +25,7 @@ func TestNewPreProcessorsContainerFactory_NilShardCoordinator(t *testing.T) { &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, &mock.IntermediateTransactionHandlerMock{}, + &mock.FeeHandlerMock{}, ) assert.Equal(t, process.ErrNilShardCoordinator, err) @@ -48,6 +49,7 @@ func TestNewPreProcessorsContainerFactory_NilStore(t *testing.T) { &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, &mock.IntermediateTransactionHandlerMock{}, + &mock.FeeHandlerMock{}, ) assert.Equal(t, process.ErrNilStore, err) @@ -71,6 +73,7 @@ func TestNewPreProcessorsContainerFactory_NilMarshalizer(t *testing.T) { &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, &mock.IntermediateTransactionHandlerMock{}, + &mock.FeeHandlerMock{}, ) assert.Equal(t, process.ErrNilMarshalizer, err) @@ -94,6 +97,7 @@ func 
TestNewPreProcessorsContainerFactory_NilHasher(t *testing.T) { &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, &mock.IntermediateTransactionHandlerMock{}, + &mock.FeeHandlerMock{}, ) assert.Equal(t, process.ErrNilHasher, err) @@ -117,6 +121,7 @@ func TestNewPreProcessorsContainerFactory_NilDataPool(t *testing.T) { &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, &mock.IntermediateTransactionHandlerMock{}, + &mock.FeeHandlerMock{}, ) assert.Equal(t, process.ErrNilDataPoolHolder, err) @@ -140,6 +145,7 @@ func TestNewPreProcessorsContainerFactory_NilAddrConv(t *testing.T) { &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, &mock.IntermediateTransactionHandlerMock{}, + &mock.FeeHandlerMock{}, ) assert.Equal(t, process.ErrNilAddressConverter, err) @@ -163,6 +169,7 @@ func TestNewPreProcessorsContainerFactory_NilAccounts(t *testing.T) { &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, &mock.IntermediateTransactionHandlerMock{}, + &mock.FeeHandlerMock{}, ) assert.Equal(t, process.ErrNilAccountsAdapter, err) @@ -186,6 +193,7 @@ func TestNewPreProcessorsContainerFactory_NilTxProcessor(t *testing.T) { &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, &mock.IntermediateTransactionHandlerMock{}, + &mock.FeeHandlerMock{}, ) assert.Equal(t, process.ErrNilTxProcessor, err) @@ -209,6 +217,7 @@ func TestNewPreProcessorsContainerFactory_NilSCProcessor(t *testing.T) { &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, &mock.IntermediateTransactionHandlerMock{}, + &mock.FeeHandlerMock{}, ) assert.Equal(t, process.ErrNilSmartContractProcessor, err) @@ -232,6 +241,7 @@ func TestNewPreProcessorsContainerFactory_NilSCR(t *testing.T) { nil, &mock.RewardTxProcessorMock{}, &mock.IntermediateTransactionHandlerMock{}, + &mock.FeeHandlerMock{}, ) assert.Equal(t, process.ErrNilSmartContractResultProcessor, err) @@ -255,6 +265,7 @@ func TestNewPreProcessorsContainerFactory_NilRewardTxProcessor(t *testing.T) { &mock.SmartContractResultsProcessorMock{}, nil, &mock.IntermediateTransactionHandlerMock{}, + &mock.FeeHandlerMock{}, ) assert.Equal(t, process.ErrNilRewardsTxProcessor, err) @@ -278,6 +289,7 @@ func TestNewPreProcessorsContainerFactory_NilRequestHandler(t *testing.T) { &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, &mock.IntermediateTransactionHandlerMock{}, + &mock.FeeHandlerMock{}, ) assert.Equal(t, process.ErrNilRequestHandler, err) @@ -301,6 +313,7 @@ func TestNewPreProcessorsContainerFactory(t *testing.T) { &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, &mock.IntermediateTransactionHandlerMock{}, + &mock.FeeHandlerMock{}, ) assert.Nil(t, err) @@ -328,6 +341,7 @@ func TestPreProcessorsContainerFactory_CreateErrTxPreproc(t *testing.T) { &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, &mock.IntermediateTransactionHandlerMock{}, + &mock.FeeHandlerMock{}, ) assert.Nil(t, err) @@ -365,6 +379,7 @@ func TestPreProcessorsContainerFactory_CreateErrScrPreproc(t *testing.T) { &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, &mock.IntermediateTransactionHandlerMock{}, + &mock.FeeHandlerMock{}, ) assert.Nil(t, err) @@ -411,6 +426,7 @@ func TestPreProcessorsContainerFactory_Create(t *testing.T) { &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, &mock.IntermediateTransactionHandlerMock{}, + &mock.FeeHandlerMock{}, ) assert.Nil(t, err) diff --git 
a/process/interface.go b/process/interface.go index d4c2aa377cb..afa4665795c 100644 --- a/process/interface.go +++ b/process/interface.go @@ -408,3 +408,27 @@ type PoolsCleaner interface { NumRemovedTxs() uint64 IsInterfaceNil() bool } + +// RewardsHandler will return information about rewards +type RewardsHandler interface { + RewardsValue() uint64 + CommunityPercentage() float64 + LeaderPercentage() float64 + BurnPercentage() float64 + IsInterfaceNil() bool +} + +// FeeHandler will return information about fees +type FeeHandler interface { + MinGasPrice() uint64 + MinGasLimitForTx() uint64 + MinTxFee() uint64 + IsInterfaceNil() bool +} + +// EconomicsAddressesHandler will return information about economics addresses +type EconomicsAddressesHandler interface { + CommunityAddress() string + BurnAddress() string + IsInterfaceNil() bool +} diff --git a/process/mock/feeHandlerMock.go b/process/mock/feeHandlerMock.go new file mode 100644 index 00000000000..881d128d98b --- /dev/null +++ b/process/mock/feeHandlerMock.go @@ -0,0 +1,27 @@ +package mock + +type FeeHandlerMock struct { + MinGasPriceCalled func() uint64 + MinGasLimitForTxCalled func() uint64 + MinTxFeeCalled func() uint64 +} + +func (fhm *FeeHandlerMock) MinGasPrice() uint64 { + return fhm.MinGasPriceCalled() +} + +func (fhm *FeeHandlerMock) MinGasLimitForTx() uint64 { + return fhm.MinGasLimitForTxCalled() +} + +func (fhm *FeeHandlerMock) MinTxFee() uint64 { + return fhm.MinTxFeeCalled() +} + +// IsInterfaceNil returns true if there is no value under the interface +func (fhm *FeeHandlerMock) IsInterfaceNil() bool { + if fhm == nil { + return true + } + return false +} diff --git a/process/mock/rewardsHandlerMock.go b/process/mock/rewardsHandlerMock.go new file mode 100644 index 00000000000..f6e29d90424 --- /dev/null +++ b/process/mock/rewardsHandlerMock.go @@ -0,0 +1,32 @@ +package mock + +type RewardsHandlerMock struct { + RewardsValueCalled func() uint64 + CommunityPercentageCalled func() float64 + LeaderPercentageCalled func() float64 + BurnPercentageCalled func() float64 +} + +func (rhm *RewardsHandlerMock) RewardsValue() uint64 { + return rhm.RewardsValueCalled() +} + +func (rhm *RewardsHandlerMock) CommunityPercentage() float64 { + return rhm.CommunityPercentageCalled() +} + +func (rhm *RewardsHandlerMock) LeaderPercentage() float64 { + return rhm.LeaderPercentageCalled() +} + +func (rhm *RewardsHandlerMock) BurnPercentage() float64 { + return rhm.BurnPercentageCalled() +} + +// IsInterfaceNil returns true if there is no value under the interface +func (rhm *RewardsHandlerMock) IsInterfaceNil() bool { + if rhm == nil { + return true + } + return false +} diff --git a/process/transaction/export_test.go b/process/transaction/export_test.go index 8593e051f1f..edd3309d0bc 100644 --- a/process/transaction/export_test.go +++ b/process/transaction/export_test.go @@ -29,15 +29,3 @@ func (txProc *txProcessor) MoveBalances(acntSrc, acntDst *state.Account, value * func (txProc *txProcessor) IncreaseNonce(acntSrc *state.Account) error { return txProc.increaseNonce(acntSrc) } - -func (txProc *txProcessor) SetMinTxFee(minTxFee uint64) { - mutTxFee.Lock() - minTxFee = minTxFee - mutTxFee.Unlock() -} - -func (txProc *txProcessor) SetMinGasPrice(minGasPrice uint64) { - mutTxFee.Lock() - minGasPrice = minGasPrice - mutTxFee.Unlock() -} diff --git a/process/transaction/process.go b/process/transaction/process.go index 9a6ca6971ec..31203ee5806 100644 --- a/process/transaction/process.go +++ b/process/transaction/process.go @@ -16,14 +16,6 @@ 
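For illustration only (not part of this patch): a minimal sketch of a process.FeeHandler consumer. The helper name checkMinFee is hypothetical; it mirrors the minimum-fee check added to txProcessor.processTxFee in the process/transaction/process.go hunk that follows, where the offered fee (gas price times gas limit) must cover len(data) * MinGasPrice() + MinTxFee(). It reuses the FeeHandlerMock added in this patch.

package main

import (
	"errors"
	"fmt"
	"math/big"

	"github.com/ElrondNetwork/elrond-go/process"
	"github.com/ElrondNetwork/elrond-go/process/mock"
)

// checkMinFee is a hypothetical helper: it returns an error when the fee offered by a
// transaction (gasPrice * gasLimit) does not cover the minimum fee derived from the
// economics FeeHandler, following the same formula used by txProcessor.processTxFee.
func checkMinFee(feeHandler process.FeeHandler, gasPrice uint64, gasLimit uint64, dataLen int) error {
	cost := big.NewInt(0).Mul(big.NewInt(0).SetUint64(gasPrice), big.NewInt(0).SetUint64(gasLimit))

	minFee := big.NewInt(0).Mul(big.NewInt(int64(dataLen)), big.NewInt(0).SetUint64(feeHandler.MinGasPrice()))
	minFee = minFee.Add(minFee, big.NewInt(0).SetUint64(feeHandler.MinTxFee()))

	if minFee.Cmp(cost) > 0 {
		return errors.New("not enough fee in transaction")
	}

	return nil
}

func main() {
	// the FeeHandlerMock added in this patch, configured with hypothetical values
	feeHandler := &mock.FeeHandlerMock{
		MinGasPriceCalled:      func() uint64 { return 10 },
		MinGasLimitForTxCalled: func() uint64 { return 5 },
		MinTxFeeCalled:         func() uint64 { return 100 },
	}

	// offered fee 10*50=500 covers the minimum 20*10+100=300, so this prints <nil>
	fmt.Println(checkMinFee(feeHandler, 10, 50, 20))
}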
import ( var log = logger.DefaultLogger() -// minGasPrice is the minimal gas price to be paid for any transaction -// TODO: Set minGasPrice and minTxFee to some positive value (TBD) -var minGasPrice = uint64(0) - -// minTxFee is the minimal fee to be paid for any transaction -var minTxFee = uint64(0) -var mutTxFee sync.RWMutex - // txProcessor implements TransactionProcessor interface and can modify account states according to a transaction type txProcessor struct { accounts state.AccountsAdapter @@ -34,6 +26,8 @@ type txProcessor struct { txFeeHandler process.TransactionFeeHandler shardCoordinator sharding.Coordinator txTypeHandler process.TxTypeHandler + economicsFee process.FeeHandler + mutTxFee sync.RWMutex } // NewTxProcessor creates a new txProcessor engine @@ -46,6 +40,7 @@ func NewTxProcessor( scProcessor process.SmartContractProcessor, txFeeHandler process.TransactionFeeHandler, txTypeHandler process.TxTypeHandler, + economicsFee process.FeeHandler, ) (*txProcessor, error) { if accounts == nil || accounts.IsInterfaceNil() { @@ -72,6 +67,9 @@ func NewTxProcessor( if txTypeHandler == nil || txTypeHandler.IsInterfaceNil() { return nil, process.ErrNilTxTypeHandler } + if economicsFee == nil || economicsFee.IsInterfaceNil() { + return nil, process.ErrNilEconomicsFeeHandler + } return &txProcessor{ accounts: accounts, @@ -82,6 +80,8 @@ func NewTxProcessor( scProcessor: scProcessor, txFeeHandler: txFeeHandler, txTypeHandler: txTypeHandler, + economicsFee: economicsFee, + mutTxFee: sync.RWMutex{}, }, nil } @@ -132,11 +132,11 @@ func (txProc *txProcessor) processTxFee(tx *transaction.Transaction, acntSnd *st cost = cost.Mul(big.NewInt(0).SetUint64(tx.GasPrice), big.NewInt(0).SetUint64(tx.GasLimit)) txDataLen := int64(len(tx.Data)) - mutTxFee.RLock() + txProc.mutTxFee.RLock() minFee := big.NewInt(0) - minFee = minFee.Mul(big.NewInt(txDataLen), big.NewInt(0).SetUint64(minGasPrice)) - minFee = minFee.Add(minFee, big.NewInt(0).SetUint64(minTxFee)) - mutTxFee.RUnlock() + minFee = minFee.Mul(big.NewInt(txDataLen), big.NewInt(0).SetUint64(txProc.economicsFee.MinGasPrice())) + minFee = minFee.Add(minFee, big.NewInt(0).SetUint64(txProc.economicsFee.MinTxFee())) + txProc.mutTxFee.RUnlock() if minFee.Cmp(cost) > 0 { return nil, process.ErrNotEnoughFeeInTransactions diff --git a/process/transaction/process_test.go b/process/transaction/process_test.go index 014c039646c..61b56bffd64 100644 --- a/process/transaction/process_test.go +++ b/process/transaction/process_test.go @@ -24,6 +24,20 @@ func generateRandomByteSlice(size int) []byte { return buff } +func FeeHandlerMock() *mock.FeeHandlerMock { + return &mock.FeeHandlerMock{ + MinGasPriceCalled: func() uint64 { + return 0 + }, + MinGasLimitForTxCalled: func() uint64 { + return 5 + }, + MinTxFeeCalled: func() uint64 { + return 0 + }, + } +} + func createAccountStub(sndAddr, rcvAddr []byte, acntSrc, acntDst *state.Account, ) *mock.AccountsStub { @@ -54,6 +68,7 @@ func createTxProcessor() txproc.TxProcessor { &mock.SCProcessorMock{}, &mock.UnsignedTxHandlerMock{}, &mock.TxTypeHandlerMock{}, + FeeHandlerMock(), ) return txProc @@ -73,6 +88,7 @@ func TestNewTxProcessor_NilAccountsShouldErr(t *testing.T) { &mock.SCProcessorMock{}, &mock.UnsignedTxHandlerMock{}, &mock.TxTypeHandlerMock{}, + FeeHandlerMock(), ) assert.Equal(t, process.ErrNilAccountsAdapter, err) @@ -91,6 +107,7 @@ func TestNewTxProcessor_NilHasherShouldErr(t *testing.T) { &mock.SCProcessorMock{}, &mock.UnsignedTxHandlerMock{}, &mock.TxTypeHandlerMock{}, + FeeHandlerMock(), ) assert.Equal(t, 
process.ErrNilHasher, err) @@ -109,6 +126,7 @@ func TestNewTxProcessor_NilAddressConverterMockShouldErr(t *testing.T) { &mock.SCProcessorMock{}, &mock.UnsignedTxHandlerMock{}, &mock.TxTypeHandlerMock{}, + FeeHandlerMock(), ) assert.Equal(t, process.ErrNilAddressConverter, err) @@ -127,6 +145,7 @@ func TestNewTxProcessor_NilMarshalizerMockShouldErr(t *testing.T) { &mock.SCProcessorMock{}, &mock.UnsignedTxHandlerMock{}, &mock.TxTypeHandlerMock{}, + FeeHandlerMock(), ) assert.Equal(t, process.ErrNilMarshalizer, err) @@ -145,6 +164,7 @@ func TestNewTxProcessor_NilShardCoordinatorMockShouldErr(t *testing.T) { &mock.SCProcessorMock{}, &mock.UnsignedTxHandlerMock{}, &mock.TxTypeHandlerMock{}, + FeeHandlerMock(), ) assert.Equal(t, process.ErrNilShardCoordinator, err) @@ -163,6 +183,7 @@ func TestNewTxProcessor_NilSCProcessorShouldErr(t *testing.T) { nil, &mock.UnsignedTxHandlerMock{}, &mock.TxTypeHandlerMock{}, + FeeHandlerMock(), ) assert.Equal(t, process.ErrNilSmartContractProcessor, err) @@ -181,6 +202,7 @@ func TestNewTxProcessor_NilTxFeeHandlerShouldErr(t *testing.T) { &mock.SCProcessorMock{}, nil, &mock.TxTypeHandlerMock{}, + FeeHandlerMock(), ) assert.Equal(t, process.ErrNilUnsignedTxHandler, err) @@ -199,6 +221,7 @@ func TestNewTxProcessor_OkValsShouldWork(t *testing.T) { &mock.SCProcessorMock{}, &mock.UnsignedTxHandlerMock{}, &mock.TxTypeHandlerMock{}, + FeeHandlerMock(), ) assert.Nil(t, err) @@ -221,6 +244,7 @@ func TestTxProcessor_GetAddressErrAddressConvShouldErr(t *testing.T) { &mock.SCProcessorMock{}, &mock.UnsignedTxHandlerMock{}, &mock.TxTypeHandlerMock{}, + FeeHandlerMock(), ) addressConv.Fail = true @@ -262,6 +286,7 @@ func TestTxProcessor_GetAccountsShouldErrNilAddressContainer(t *testing.T) { &mock.SCProcessorMock{}, &mock.UnsignedTxHandlerMock{}, &mock.TxTypeHandlerMock{}, + FeeHandlerMock(), ) adr1 := mock.NewAddressMock([]byte{65}) @@ -288,6 +313,7 @@ func TestTxProcessor_GetAccountsMalfunctionAccountsShouldErr(t *testing.T) { &mock.SCProcessorMock{}, &mock.UnsignedTxHandlerMock{}, &mock.TxTypeHandlerMock{}, + FeeHandlerMock(), ) adr1 := mock.NewAddressMock([]byte{65}) @@ -331,6 +357,7 @@ func TestTxProcessor_GetAccountsOkValsSrcShouldWork(t *testing.T) { &mock.SCProcessorMock{}, &mock.UnsignedTxHandlerMock{}, &mock.TxTypeHandlerMock{}, + FeeHandlerMock(), ) shardCoordinator.ComputeIdCalled = func(container state.AddressContainer) uint32 { @@ -383,6 +410,7 @@ func TestTxProcessor_GetAccountsOkValsDsthouldWork(t *testing.T) { &mock.SCProcessorMock{}, &mock.UnsignedTxHandlerMock{}, &mock.TxTypeHandlerMock{}, + FeeHandlerMock(), ) shardCoordinator.ComputeIdCalled = func(container state.AddressContainer) uint32 { @@ -420,6 +448,7 @@ func TestTxProcessor_GetAccountsOkValsShouldWork(t *testing.T) { &mock.SCProcessorMock{}, &mock.UnsignedTxHandlerMock{}, &mock.TxTypeHandlerMock{}, + FeeHandlerMock(), ) a1, a2, err := execTx.GetAccounts(adr1, adr2) @@ -448,6 +477,7 @@ func TestTxProcessor_GetSameAccountShouldWork(t *testing.T) { &mock.SCProcessorMock{}, &mock.UnsignedTxHandlerMock{}, &mock.TxTypeHandlerMock{}, + FeeHandlerMock(), ) a1, a2, err := execTx.GetAccounts(adr1, adr1) @@ -690,6 +720,7 @@ func TestTxProcessor_ProcessTransactionErrAddressConvShouldErr(t *testing.T) { &mock.SCProcessorMock{}, &mock.UnsignedTxHandlerMock{}, &mock.TxTypeHandlerMock{}, + FeeHandlerMock(), ) addressConv.Fail = true @@ -712,6 +743,7 @@ func TestTxProcessor_ProcessTransactionMalfunctionAccountsShouldErr(t *testing.T &mock.SCProcessorMock{}, &mock.UnsignedTxHandlerMock{}, &mock.TxTypeHandlerMock{}, + 
FeeHandlerMock(), ) tx := transaction.Transaction{} @@ -750,6 +782,7 @@ func TestTxProcessor_ProcessCheckNotPassShouldErr(t *testing.T) { &mock.SCProcessorMock{}, &mock.UnsignedTxHandlerMock{}, &mock.TxTypeHandlerMock{}, + FeeHandlerMock(), ) err = execTx.ProcessTransaction(&tx, 4) @@ -803,6 +836,7 @@ func TestTxProcessor_ProcessCheckShouldPassWhenAdrSrcIsNotInNodeShard(t *testing &mock.SCProcessorMock{}, &mock.UnsignedTxHandlerMock{}, &mock.TxTypeHandlerMock{}, + FeeHandlerMock(), ) err = execTx.ProcessTransaction(&tx, 4) @@ -848,10 +882,9 @@ func TestTxProcessor_ProcessMoveBalancesShouldWork(t *testing.T) { &mock.SCProcessorMock{}, &mock.UnsignedTxHandlerMock{}, &mock.TxTypeHandlerMock{}, + FeeHandlerMock(), ) - execTx.SetMinTxFee(0) - execTx.SetMinGasPrice(0) err = execTx.ProcessTransaction(&tx, 4) assert.Nil(t, err) assert.Equal(t, 4, journalizeCalled) @@ -905,10 +938,9 @@ func TestTxProcessor_ProcessMoveBalancesShouldPassWhenAdrSrcIsNotInNodeShard(t * &mock.SCProcessorMock{}, &mock.UnsignedTxHandlerMock{}, &mock.TxTypeHandlerMock{}, + FeeHandlerMock(), ) - execTx.SetMinTxFee(0) - execTx.SetMinGasPrice(0) err = execTx.ProcessTransaction(&tx, 4) assert.Nil(t, err) assert.Equal(t, 1, journalizeCalled) @@ -962,6 +994,7 @@ func TestTxProcessor_ProcessIncreaseNonceShouldPassWhenAdrSrcIsNotInNodeShard(t &mock.SCProcessorMock{}, &mock.UnsignedTxHandlerMock{}, &mock.TxTypeHandlerMock{}, + FeeHandlerMock(), ) err = execTx.ProcessTransaction(&tx, 4) @@ -1011,11 +1044,9 @@ func TestTxProcessor_ProcessOkValsShouldWork(t *testing.T) { &mock.SCProcessorMock{}, &mock.UnsignedTxHandlerMock{}, &mock.TxTypeHandlerMock{}, + FeeHandlerMock(), ) - execTx.SetMinTxFee(0) - execTx.SetMinGasPrice(0) - err = execTx.ProcessTransaction(&tx, 4) assert.Nil(t, err) assert.Equal(t, uint64(5), acntSrc.Nonce) @@ -1066,6 +1097,7 @@ func TestTxProcessor_MoveBalanceWithFeesShouldWork(t *testing.T) { &mock.SCProcessorMock{}, &mock.UnsignedTxHandlerMock{}, &mock.TxTypeHandlerMock{}, + FeeHandlerMock(), ) err = execTx.ProcessTransaction(&tx, 4) @@ -1133,6 +1165,7 @@ func TestTxProcessor_ProcessTransactionScTxShouldWork(t *testing.T) { return process.SCInvoking, nil }, }, + FeeHandlerMock(), ) err = execTx.ProcessTransaction(&tx, 4) @@ -1193,6 +1226,7 @@ func TestTxProcessor_ProcessTransactionScTxShouldReturnErrWhenExecutionFails(t * &mock.TxTypeHandlerMock{ComputeTransactionTypeCalled: func(tx data.TransactionHandler) (transactionType process.TransactionType, e error) { return process.SCInvoking, nil }}, + FeeHandlerMock(), ) err = execTx.ProcessTransaction(&tx, 4) @@ -1265,11 +1299,9 @@ func TestTxProcessor_ProcessTransactionScTxShouldNotBeCalledWhenAdrDstIsNotInNod scProcessorMock, &mock.UnsignedTxHandlerMock{}, computeType, + FeeHandlerMock(), ) - execTx.SetMinTxFee(0) - execTx.SetMinGasPrice(0) - err = execTx.ProcessTransaction(&tx, 4) assert.Nil(t, err) assert.False(t, wasCalled) From d12bd69052a1558bc7af3e432838a260a8f13154 Mon Sep 17 00:00:00 2001 From: Sebastian Marian Date: Wed, 2 Oct 2019 20:27:06 +0300 Subject: [PATCH 186/234] * Added usedInBlock flag to differentiate requested missing headers and requested missing finals headers --- process/block/export_test.go | 2 +- process/block/shardblock.go | 121 +++++++++++++++++-------------- process/block/shardblock_test.go | 4 +- 3 files changed, 69 insertions(+), 58 deletions(-) diff --git a/process/block/export_test.go b/process/block/export_test.go index 196de84e15b..ce15ee1afd5 100644 --- a/process/block/export_test.go +++ b/process/block/export_test.go @@ -263,7 
+263,7 @@ func (sp *shardProcessor) CreateAndProcessCrossMiniBlocksDstMe( maxItemsInBlock uint32, round uint64, haveTime func() bool, -) (block.MiniBlockSlice, [][]byte, uint32, error) { +) (block.MiniBlockSlice, uint32, uint32, error) { return sp.createAndProcessCrossMiniBlocksDstMe(noShards, maxItemsInBlock, round, haveTime) } diff --git a/process/block/shardblock.go b/process/block/shardblock.go index 766ef1ad931..1f604fa7abd 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -26,7 +26,8 @@ type nonceAndHashInfo struct { } type hdrInfo struct { - hdr data.HeaderHandler + usedInBlock bool + hdr data.HeaderHandler } type hdrForBlock struct { @@ -251,7 +252,7 @@ func (sp *shardProcessor) ProcessBlock( go sp.checkAndRequestIfMetaHeadersMissing(header.Round) }() - err = sp.checkMetaHeadersValidityAndFinality(header) + err = sp.checkMetaHeadersValidityAndFinality() if err != nil { return err } @@ -320,7 +321,7 @@ func (sp *shardProcessor) SetConsensusData(randomness []byte, round uint64, epoc } // checkMetaHeadersValidity - checks if listed metaheaders are valid as construction -func (sp *shardProcessor) checkMetaHeadersValidityAndFinality(header *block.Header) error { +func (sp *shardProcessor) checkMetaHeadersValidityAndFinality() error { tmpNotedHdr, err := sp.getLastNotarizedHdr(sharding.MetachainShardId) if err != nil { return err @@ -329,14 +330,12 @@ func (sp *shardProcessor) checkMetaHeadersValidityAndFinality(header *block.Head currAddedMetaHdrs := make([]*block.MetaBlock, 0) sp.hdrsForCurrBlock.mutHdrsForBlock.RLock() - for _, metaHash := range header.MetaBlockHashes { - value, ok := sp.hdrsForCurrBlock.hdrHashAndInfo[string(metaHash)] - if !ok { - sp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() - return process.ErrNilMetaBlockHeader + for _, hdrInfo := range sp.hdrsForCurrBlock.hdrHashAndInfo { + if !hdrInfo.usedInBlock { + continue } - metaHdr, ok := value.hdr.(*block.MetaBlock) + metaHdr, ok := hdrInfo.hdr.(*block.MetaBlock) if !ok { sp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() return process.ErrWrongTypeAssertion @@ -350,15 +349,18 @@ func (sp *shardProcessor) checkMetaHeadersValidityAndFinality(header *block.Head return nil } - sort.Slice(currAddedMetaHdrs, func(i, j int) bool { - return currAddedMetaHdrs[i].Nonce < currAddedMetaHdrs[j].Nonce - }) + if len(currAddedMetaHdrs) > 1 { + sort.Slice(currAddedMetaHdrs, func(i, j int) bool { + return currAddedMetaHdrs[i].Nonce < currAddedMetaHdrs[j].Nonce + }) + } for _, metaHdr := range currAddedMetaHdrs { err = sp.isHdrConstructionValid(metaHdr, tmpNotedHdr) if err != nil { return err } + tmpNotedHdr = metaHdr } @@ -918,6 +920,10 @@ func (sp *shardProcessor) getProcessedMetaBlocksFromHeader(header *block.Header) sp.hdrsForCurrBlock.mutHdrsForBlock.RLock() for metaBlockHash, hdrInfo := range sp.hdrsForCurrBlock.hdrHashAndInfo { + if !hdrInfo.usedInBlock { + continue + } + metaBlock, ok := hdrInfo.hdr.(*block.MetaBlock) if !ok { sp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() @@ -982,6 +988,10 @@ func (sp *shardProcessor) getProcessedMetaBlocksFromMiniBlockHashes( sp.hdrsForCurrBlock.mutHdrsForBlock.RLock() for metaBlockHash, hdrInfo := range sp.hdrsForCurrBlock.hdrHashAndInfo { + if !hdrInfo.usedInBlock { + continue + } + metaBlock, ok := hdrInfo.hdr.(*block.MetaBlock) if !ok { sp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() @@ -1094,34 +1104,34 @@ func (sp *shardProcessor) removeProcessedMetaBlocksFromPool(processedMetaHdrs [] // upon receiving, it parses the new metablock and requests miniblocks and 
transactions // which destination is the current shard func (sp *shardProcessor) receivedMetaBlock(metaBlockHash []byte) { - metaBlksCache := sp.dataPool.MetaBlocks() - if metaBlksCache == nil { - return - } - - metaHdrsNoncesCache := sp.dataPool.HeadersNonces() - if metaHdrsNoncesCache == nil && sp.metaBlockFinality > 0 { + metaBlockPool := sp.dataPool.MetaBlocks() + if metaBlockPool == nil { return } - miniBlksCache := sp.dataPool.MiniBlocks() - if miniBlksCache == nil || miniBlksCache.IsInterfaceNil() { - return - } + //metaHdrsNoncesCache := sp.dataPool.HeadersNonces() + //if metaHdrsNoncesCache == nil && sp.metaBlockFinality > 0 { + // return + //} + // + //miniBlksCache := sp.dataPool.MiniBlocks() + //if miniBlksCache == nil || miniBlksCache.IsInterfaceNil() { + // return + //} - obj, ok := metaBlksCache.Peek(metaBlockHash) + obj, ok := metaBlockPool.Peek(metaBlockHash) if !ok { return } - metaBlock, ok := obj.(data.HeaderHandler) + metaBlock, ok := obj.(*block.MetaBlock) if !ok { return } log.Debug(fmt.Sprintf("received metablock with hash %s and nonce %d from network\n", core.ToB64(metaBlockHash), - metaBlock.GetNonce())) + metaBlock.Nonce)) sp.hdrsForCurrBlock.mutHdrsForBlock.Lock() @@ -1131,8 +1141,8 @@ func (sp *shardProcessor) receivedMetaBlock(metaBlockHash []byte) { sp.hdrsForCurrBlock.hdrHashAndInfo[string(metaBlockHash)].hdr = metaBlock sp.hdrsForCurrBlock.missingHdrs-- - if metaBlock.GetNonce() > sp.currHighestMetaHdrNonce { - sp.currHighestMetaHdrNonce = metaBlock.GetNonce() + if metaBlock.Nonce > sp.currHighestMetaHdrNonce { + sp.currHighestMetaHdrNonce = metaBlock.Nonce } } @@ -1150,7 +1160,7 @@ func (sp *shardProcessor) receivedMetaBlock(metaBlockHash []byte) { missingFinalHdrs := sp.hdrsForCurrBlock.missingFinalHdrs sp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() - if missingHdrs == 0 || missingFinalHdrs == 0 { + if missingHdrs == 0 && missingFinalHdrs == 0 { sp.chRcvAllMetaHdrs <- true } } else { @@ -1192,7 +1202,7 @@ func (sp *shardProcessor) requestFinalMissingHeaders() uint32 { continue } - sp.hdrsForCurrBlock.hdrHashAndInfo[string(metaBlockHash)] = &hdrInfo{hdr: metaBlock} + sp.hdrsForCurrBlock.hdrHashAndInfo[string(metaBlockHash)] = &hdrInfo{hdr: metaBlock, usedInBlock: false} } return requestedBlockHeaders @@ -1205,11 +1215,11 @@ func (sp *shardProcessor) requestMetaHeaders(header *block.Header) (uint32, uint return 0, 0 } - missingHeaderHashes := sp.computeMissingAndExistingMetaHeaders(header) + missingHeadersHashes := sp.computeMissingAndExistingMetaHeaders(header) sp.hdrsForCurrBlock.mutHdrsForBlock.Lock() - for _, hash := range missingHeaderHashes { - sp.hdrsForCurrBlock.hdrHashAndInfo[string(hash)] = &hdrInfo{hdr: nil} + for _, hash := range missingHeadersHashes { + sp.hdrsForCurrBlock.hdrHashAndInfo[string(hash)] = &hdrInfo{hdr: nil, usedInBlock: true} go sp.onRequestHeaderHandler(sharding.MetachainShardId, hash) } @@ -1225,7 +1235,7 @@ func (sp *shardProcessor) requestMetaHeaders(header *block.Header) (uint32, uint } func (sp *shardProcessor) computeMissingAndExistingMetaHeaders(header *block.Header) [][]byte { - missingHeaders := make([][]byte, 0) + missingHeadersHashes := make([][]byte, 0) sp.currHighestMetaHdrNonce = uint64(0) sp.hdrsForCurrBlock.mutHdrsForBlock.Lock() @@ -1235,12 +1245,12 @@ func (sp *shardProcessor) computeMissingAndExistingMetaHeaders(header *block.Hea sp.dataPool.MetaBlocks()) if err != nil { - missingHeaders = append(missingHeaders, header.MetaBlockHashes[i]) + missingHeadersHashes = append(missingHeadersHashes, 
header.MetaBlockHashes[i]) sp.hdrsForCurrBlock.missingHdrs++ continue } - sp.hdrsForCurrBlock.hdrHashAndInfo[string(header.MetaBlockHashes[i])] = &hdrInfo{hdr: hdr} + sp.hdrsForCurrBlock.hdrHashAndInfo[string(header.MetaBlockHashes[i])] = &hdrInfo{hdr: hdr, usedInBlock: true} if hdr.Nonce > sp.currHighestMetaHdrNonce { sp.currHighestMetaHdrNonce = hdr.Nonce @@ -1248,18 +1258,18 @@ func (sp *shardProcessor) computeMissingAndExistingMetaHeaders(header *block.Hea } sp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() - return missingHeaders + return missingHeadersHashes } func (sp *shardProcessor) verifyCrossShardMiniBlockDstMe(hdr *block.Header) error { - mMiniBlockMeta, err := sp.getAllMiniBlockDstMeFromMeta(hdr.Round, hdr.MetaBlockHashes) + miniBlockMetaHashes, err := sp.getAllMiniBlockDstMeFromMeta(hdr.Round) if err != nil { return err } - miniBlockDstMe := hdr.GetMiniBlockHeadersWithDst(sp.shardCoordinator.SelfId()) - for mbHash := range miniBlockDstMe { - if _, ok := mMiniBlockMeta[mbHash]; !ok { + crossMiniBlockHashes := hdr.GetMiniBlockHeadersWithDst(sp.shardCoordinator.SelfId()) + for hash := range crossMiniBlockHashes { + if _, ok := miniBlockMetaHashes[hash]; !ok { return process.ErrCrossShardMBWithoutConfirmationFromMeta } } @@ -1267,26 +1277,23 @@ func (sp *shardProcessor) verifyCrossShardMiniBlockDstMe(hdr *block.Header) erro return nil } -func (sp *shardProcessor) getAllMiniBlockDstMeFromMeta(round uint64, metaHashes [][]byte) (map[string][]byte, error) { +func (sp *shardProcessor) getAllMiniBlockDstMeFromMeta(round uint64) (map[string][]byte, error) { lastHdr, err := sp.getLastNotarizedHdr(sharding.MetachainShardId) if err != nil { return nil, err } - mMiniBlockMeta := make(map[string][]byte) + miniBlockMetaHashes := make(map[string][]byte) sp.hdrsForCurrBlock.mutHdrsForBlock.RLock() - for _, metaHash := range metaHashes { - value, ok := sp.hdrsForCurrBlock.hdrHashAndInfo[string(metaHash)] - if !ok { + for metaBlockHash, hdrInfo := range sp.hdrsForCurrBlock.hdrHashAndInfo { + if !hdrInfo.usedInBlock { continue } - - hdr, ok := value.hdr.(*block.MetaBlock) + hdr, ok := hdrInfo.hdr.(*block.MetaBlock) if !ok { continue } - if hdr.GetRound() > round { continue } @@ -1297,14 +1304,14 @@ func (sp *shardProcessor) getAllMiniBlockDstMeFromMeta(round uint64, metaHashes continue } - miniBlockDstMe := hdr.GetMiniBlockHeadersWithDst(sp.shardCoordinator.SelfId()) - for mbHash := range miniBlockDstMe { - mMiniBlockMeta[mbHash] = metaHash + crossMiniBlockHashes := hdr.GetMiniBlockHeadersWithDst(sp.shardCoordinator.SelfId()) + for hash := range crossMiniBlockHashes { + miniBlockMetaHashes[hash] = []byte(metaBlockHash) } } sp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() - return mMiniBlockMeta, nil + return miniBlockMetaHashes, nil } func (sp *shardProcessor) getOrderedMetaBlocks(round uint64) ([]*hashAndHdr, error) { @@ -1444,7 +1451,7 @@ func (sp *shardProcessor) createAndProcessCrossMiniBlocksDstMe( } if len(hdr.GetMiniBlockHeadersWithDst(sp.shardCoordinator.SelfId())) == 0 { - sp.hdrsForCurrBlock.hdrHashAndInfo[string(orderedMetaBlocks[i].hash)] = &hdrInfo{hdr: hdr} + sp.hdrsForCurrBlock.hdrHashAndInfo[string(orderedMetaBlocks[i].hash)] = &hdrInfo{hdr: hdr, usedInBlock: true} hdrsAdded++ lastMetaHdr = hdr continue @@ -1473,7 +1480,7 @@ func (sp *shardProcessor) createAndProcessCrossMiniBlocksDstMe( txsAdded = txsAdded + currTxsAdded if currTxsAdded > 0 { - sp.hdrsForCurrBlock.hdrHashAndInfo[string(orderedMetaBlocks[i].hash)] = &hdrInfo{hdr: hdr} + 
sp.hdrsForCurrBlock.hdrHashAndInfo[string(orderedMetaBlocks[i].hash)] = &hdrInfo{hdr: hdr, usedInBlock: true} hdrsAdded++ } @@ -1607,6 +1614,10 @@ func (sp *shardProcessor) CreateBlockHeader(bodyHandler data.BodyHandler, round sp.hdrsForCurrBlock.mutHdrsForBlock.RLock() usedMetaHdrsInfo := make([]*nonceAndHashInfo, 0) for metaBlockHash, hdrInfo := range sp.hdrsForCurrBlock.hdrHashAndInfo { + if !hdrInfo.usedInBlock { + continue + } + usedMetaHdrsInfo = append(usedMetaHdrsInfo, &nonceAndHashInfo{nonce: hdrInfo.hdr.GetNonce(), hash: []byte(metaBlockHash)}) } sp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() diff --git a/process/block/shardblock_test.go b/process/block/shardblock_test.go index 7aa5d722264..da514523322 100644 --- a/process/block/shardblock_test.go +++ b/process/block/shardblock_test.go @@ -2690,7 +2690,7 @@ func TestShardProcessor_CreateAndProcessCrossMiniBlocksDstMe(t *testing.T) { miniBlockSlice, usedMetaHdrsHashes, noOfTxs, err := sp.CreateAndProcessCrossMiniBlocksDstMe(3, 2, 2, haveTimeTrue) assert.Equal(t, err == nil, true) assert.Equal(t, len(miniBlockSlice) == 0, true) - assert.Equal(t, len(usedMetaHdrsHashes) == 0, true) + assert.Equal(t, usedMetaHdrsHashes, uint32(0)) assert.Equal(t, noOfTxs, uint32(0)) } @@ -2797,7 +2797,7 @@ func TestShardProcessor_CreateAndProcessCrossMiniBlocksDstMeProcessPartOfMiniBlo miniBlocksReturned, usedMetaHdrsHashes, nrTxAdded, err := sp.CreateAndProcessCrossMiniBlocksDstMe(3, 2, 2, haveTimeTrue) assert.Equal(t, 0, len(miniBlocksReturned)) - assert.Equal(t, 0, len(usedMetaHdrsHashes)) + assert.Equal(t, uint32(0), usedMetaHdrsHashes) assert.Equal(t, uint32(0), nrTxAdded) assert.Nil(t, err) } From d2b1ce79f45ef50912d54c099ff04cfc93560b0e Mon Sep 17 00:00:00 2001 From: iulianpascalau Date: Thu, 3 Oct 2019 08:49:26 +0300 Subject: [PATCH 187/234] typo fixed --- process/errors.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/process/errors.go b/process/errors.go index b6cdd052428..eaa43ce47dc 100644 --- a/process/errors.go +++ b/process/errors.go @@ -478,7 +478,7 @@ var ErrOverallBalanceChangeFromSC = errors.New("SC output balance updates are wr // ErrNilTxsPoolsCleaner signals that a nil transactions pools cleaner has been provided var ErrNilTxsPoolsCleaner = errors.New("nil transactions pools cleaner") -// ErrZeroMaxCleanTime signals that cleaning time for pools its less or equal with 0 +// ErrZeroMaxCleanTime signals that cleaning time for pools is less or equal with 0 var ErrZeroMaxCleanTime = errors.New("cleaning time is equal or less than zero") // ErrNilEconomicsRewardsHandler signals that rewards handler is nil From 6d52344be95c25a4808c953ff194a50d9c0a80a2 Mon Sep 17 00:00:00 2001 From: Sebastian Marian <36901555+SebastianMarian@users.noreply.github.com> Date: Thu, 3 Oct 2019 11:54:23 +0300 Subject: [PATCH 188/234] EN-4203-Remove-old-block-tracker-from-elrond-go (#498) * Removed old block tracker from project * Fixed after review * Removed TODO --- cmd/node/factory/structs.go | 103 ++++----- cmd/node/main.go | 3 +- consensus/mock/blocksTrackerMock.go | 41 ---- consensus/mock/consensusDataContainerMock.go | 5 - consensus/mock/mockTestInitializer.go | 5 - consensus/mock/sposWorkerMock.go | 5 - consensus/spos/bls/blsSubroundsFactory.go | 1 - consensus/spos/bls/subroundEndRound.go | 2 - consensus/spos/bn/bnSubroundsFactory.go | 1 - consensus/spos/bn/subroundEndRound.go | 2 - consensus/spos/commonSubround/base_test.go | 3 - .../spos/commonSubround/subroundStartRound.go | 12 - .../commonSubround/subroundStartRound_test.go | 25 
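For illustration only (not part of any patch in this series): a minimal sketch of the bookkeeping pattern from PATCH 186, where every tracked meta block header carries a usedInBlock flag and consumers iterate the whole map but skip entries that were requested only for the finality check. The hdrInfo struct and helper below are local stand-ins for the unexported types in process/block/shardblock.go.

package main

import "fmt"

// hdrInfo mirrors the unexported struct from process/block/shardblock.go:
// usedInBlock marks headers referenced by the block being processed, as opposed to
// extra final headers requested only to prove finality.
type hdrInfo struct {
	usedInBlock bool
	nonce       uint64
}

// usedHeaderNonces returns the nonces of the headers actually used in the block,
// skipping the entries tracked only for the finality check.
func usedHeaderNonces(hdrHashAndInfo map[string]*hdrInfo) []uint64 {
	nonces := make([]uint64, 0)
	for _, info := range hdrHashAndInfo {
		if !info.usedInBlock {
			continue
		}
		nonces = append(nonces, info.nonce)
	}
	return nonces
}

func main() {
	tracked := map[string]*hdrInfo{
		"metaHashA": {usedInBlock: true, nonce: 10},
		"metaHashB": {usedInBlock: false, nonce: 11}, // requested only as a final header
	}
	fmt.Println(usedHeaderNonces(tracked)) // prints [10]
}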
--- consensus/spos/consensusCore.go | 8 - consensus/spos/consensusCoreValidator.go | 3 - consensus/spos/consensusCoreValidator_test.go | 2 - consensus/spos/consensusCore_test.go | 42 ---- consensus/spos/constants.go | 4 - consensus/spos/errors.go | 6 - consensus/spos/export_test.go | 8 - consensus/spos/interface.go | 4 - consensus/spos/worker.go | 31 --- consensus/spos/worker_test.go | 200 ----------------- integrationTests/consensus/testInitializer.go | 8 - integrationTests/mock/blocksTrackerMock.go | 41 ---- .../smartContract/testInitilalizer.go | 12 +- integrationTests/testProcessorNode.go | 15 +- integrationTests/testSyncNode.go | 13 -- node/defineOptions.go | 11 - node/errors.go | 3 - node/node.go | 3 - process/block/argProcessor.go | 1 - process/block/baseProcess_test.go | 1 - process/block/export_test.go | 1 - process/block/shardblock.go | 16 -- process/block/shardblock_test.go | 76 ------- process/interface.go | 12 +- process/mock/blocksTrackerMock.go | 41 ---- process/track/metaBlock.go | 46 ---- process/track/metaBlock_test.go | 41 ---- process/track/shardBlock.go | 170 --------------- process/track/shardBlock_test.go | 206 ------------------ 42 files changed, 44 insertions(+), 1189 deletions(-) delete mode 100644 consensus/mock/blocksTrackerMock.go delete mode 100644 integrationTests/mock/blocksTrackerMock.go delete mode 100644 process/mock/blocksTrackerMock.go delete mode 100644 process/track/metaBlock.go delete mode 100644 process/track/metaBlock_test.go delete mode 100644 process/track/shardBlock.go delete mode 100644 process/track/shardBlock_test.go diff --git a/cmd/node/factory/structs.go b/cmd/node/factory/structs.go index 838ccb7cb29..526d0634b46 100644 --- a/cmd/node/factory/structs.go +++ b/cmd/node/factory/structs.go @@ -63,7 +63,6 @@ import ( "github.com/ElrondNetwork/elrond-go/process/rewardTransaction" "github.com/ElrondNetwork/elrond-go/process/smartContract" processSync "github.com/ElrondNetwork/elrond-go/process/sync" - "github.com/ElrondNetwork/elrond-go/process/track" "github.com/ElrondNetwork/elrond-go/process/transaction" "github.com/ElrondNetwork/elrond-go/sharding" "github.com/ElrondNetwork/elrond-go/statusHandler" @@ -94,6 +93,7 @@ const ( var log = logger.DefaultLogger() //TODO: Extract all others error messages from this file in some defined errors + // ErrCreateForkDetector signals that a fork detector could not be created var ErrCreateForkDetector = errors.New("could not create fork detector") @@ -144,7 +144,6 @@ type Process struct { Rounder consensus.Rounder ForkDetector process.ForkDetector BlockProcessor process.BlockProcessor - BlockTracker process.BlocksTracker } type coreComponentsFactoryArgs struct { @@ -509,7 +508,7 @@ func ProcessComponentsFactory(args *processComponentsFactoryArgs) (*Process, err return nil, err } - blockProcessor, blockTracker, err := newBlockProcessorAndTracker( + blockProcessor, err := newBlockProcessor( resolversFinder, args.shardCoordinator, args.nodesCoordinator, @@ -519,7 +518,6 @@ func ProcessComponentsFactory(args *processComponentsFactoryArgs) (*Process, err args.state, forkDetector, shardsGenesisBlocks, - args.nodesConfig, args.coreServiceContainer, ) @@ -533,7 +531,6 @@ func ProcessComponentsFactory(args *processComponentsFactoryArgs) (*Process, err Rounder: rounder, ForkDetector: forkDetector, BlockProcessor: blockProcessor, - BlockTracker: blockTracker, }, nil } @@ -1432,7 +1429,7 @@ func newForkDetector( return nil, ErrCreateForkDetector } -func newBlockProcessorAndTracker( +func newBlockProcessor( 
resolversFinder dataRetriever.ResolversFinder, shardCoordinator sharding.Coordinator, nodesCoordinator sharding.NodesCoordinator, @@ -1442,24 +1439,23 @@ func newBlockProcessorAndTracker( state *State, forkDetector process.ForkDetector, shardsGenesisBlocks map[uint32]data.HeaderHandler, - nodesConfig *sharding.NodesSetup, coreServiceContainer serviceContainer.Core, -) (process.BlockProcessor, process.BlocksTracker, error) { +) (process.BlockProcessor, error) { communityAddr := economics.CommunityAddress() burnAddr := economics.BurnAddress() if communityAddr == "" || burnAddr == "" { - return nil, nil, errors.New("rewards configuration missing") + return nil, errors.New("rewards configuration missing") } communityAddress, err := hex.DecodeString(communityAddr) if err != nil { - return nil, nil, err + return nil, err } burnAddress, err := hex.DecodeString(burnAddr) if err != nil { - return nil, nil, err + return nil, err } specialAddressHolder, err := address.NewSpecialAddressHolder( @@ -1470,12 +1466,11 @@ func newBlockProcessorAndTracker( nodesCoordinator, ) if err != nil { - return nil, nil, err + return nil, err } - // TODO: remove nodesConfig as no longer needed with nodes coordinator available if shardCoordinator.SelfId() < shardCoordinator.NumberOfShards() { - return newShardBlockProcessorAndTracker( + return newShardBlockProcessor( resolversFinder, shardCoordinator, nodesCoordinator, @@ -1485,13 +1480,12 @@ func newBlockProcessorAndTracker( state, forkDetector, shardsGenesisBlocks, - nodesConfig, coreServiceContainer, economics, ) } if shardCoordinator.SelfId() == sharding.MetachainShardId { - return newMetaBlockProcessorAndTracker( + return newMetaBlockProcessor( resolversFinder, shardCoordinator, nodesCoordinator, @@ -1505,10 +1499,10 @@ func newBlockProcessorAndTracker( ) } - return nil, nil, errors.New("could not create block processor and tracker") + return nil, errors.New("could not create block processor and tracker") } -func newShardBlockProcessorAndTracker( +func newShardBlockProcessor( resolversFinder dataRetriever.ResolversFinder, shardCoordinator sharding.Coordinator, nodesCoordinator sharding.NodesCoordinator, @@ -1518,23 +1512,22 @@ func newShardBlockProcessorAndTracker( state *State, forkDetector process.ForkDetector, shardsGenesisBlocks map[uint32]data.HeaderHandler, - nodesConfig *sharding.NodesSetup, coreServiceContainer serviceContainer.Core, economics *economics.EconomicsData, -) (process.BlockProcessor, process.BlocksTracker, error) { +) (process.BlockProcessor, error) { argsParser, err := smartContract.NewAtArgumentParser() if err != nil { - return nil, nil, err + return nil, err } vmFactory, err := shard.NewVMContainerFactory(state.AccountsAdapter, state.AddressConverter) if err != nil { - return nil, nil, err + return nil, err } vmContainer, err := vmFactory.Create() if err != nil { - return nil, nil, err + return nil, err } interimProcFactory, err := shard.NewIntermediateProcessorsContainerFactory( @@ -1548,32 +1541,32 @@ func newShardBlockProcessorAndTracker( economics, ) if err != nil { - return nil, nil, err + return nil, err } interimProcContainer, err := interimProcFactory.Create() if err != nil { - return nil, nil, err + return nil, err } scForwarder, err := interimProcContainer.Get(dataBlock.SmartContractResultBlock) if err != nil { - return nil, nil, err + return nil, err } rewardsTxInterim, err := interimProcContainer.Get(dataBlock.RewardsBlock) if err != nil { - return nil, nil, err + return nil, err } rewardsTxHandler, ok := 
rewardsTxInterim.(process.TransactionFeeHandler) if !ok { - return nil, nil, process.ErrWrongTypeAssertion + return nil, process.ErrWrongTypeAssertion } internalTransactionProducer, ok := rewardsTxInterim.(process.InternalTransactionProducer) if !ok { - return nil, nil, process.ErrWrongTypeAssertion + return nil, process.ErrWrongTypeAssertion } scProcessor, err := smartContract.NewSmartContractProcessor( @@ -1589,7 +1582,7 @@ func newShardBlockProcessorAndTracker( rewardsTxHandler, ) if err != nil { - return nil, nil, err + return nil, err } requestHandler, err := requestHandlers.NewShardResolverRequestHandler( @@ -1603,7 +1596,7 @@ func newShardBlockProcessorAndTracker( MaxTxsToRequest, ) if err != nil { - return nil, nil, err + return nil, err } rewardsTxProcessor, err := rewardTransaction.NewRewardTxProcessor( @@ -1613,12 +1606,12 @@ func newShardBlockProcessorAndTracker( rewardsTxInterim, ) if err != nil { - return nil, nil, err + return nil, err } txTypeHandler, err := coordinator.NewTxTypeHandler(state.AddressConverter, shardCoordinator, state.AccountsAdapter) if err != nil { - return nil, nil, err + return nil, err } transactionProcessor, err := transaction.NewTxProcessor( @@ -1633,17 +1626,7 @@ func newShardBlockProcessorAndTracker( economics, ) if err != nil { - return nil, nil, errors.New("could not create transaction processor: " + err.Error()) - } - - blockTracker, err := track.NewShardBlockTracker( - data.Datapool, - core.Marshalizer, - shardCoordinator, - data.Store, - ) - if err != nil { - return nil, nil, err + return nil, errors.New("could not create transaction processor: " + err.Error()) } preProcFactory, err := shard.NewPreProcessorsContainerFactory( @@ -1663,12 +1646,12 @@ func newShardBlockProcessorAndTracker( economics, ) if err != nil { - return nil, nil, err + return nil, err } preProcContainer, err := preProcFactory.Create() if err != nil { - return nil, nil, err + return nil, err } txCoordinator, err := coordinator.NewTransactionCoordinator( @@ -1680,7 +1663,7 @@ func newShardBlockProcessorAndTracker( interimProcContainer, ) if err != nil { - return nil, nil, err + return nil, err } txPoolsCleaner, err := poolsCleaner.NewTxsPoolsCleaner( @@ -1690,7 +1673,7 @@ func newShardBlockProcessorAndTracker( state.AddressConverter, ) if err != nil { - return nil, nil, err + return nil, err } argumentsBaseProcessor := block.ArgBaseProcessor{ @@ -1710,25 +1693,24 @@ func newShardBlockProcessorAndTracker( arguments := block.ArgShardProcessor{ ArgBaseProcessor: &argumentsBaseProcessor, DataPool: data.Datapool, - BlocksTracker: blockTracker, TxCoordinator: txCoordinator, TxsPoolsCleaner: txPoolsCleaner, } blockProcessor, err := block.NewShardProcessor(arguments) if err != nil { - return nil, nil, errors.New("could not create block processor: " + err.Error()) + return nil, errors.New("could not create block processor: " + err.Error()) } err = blockProcessor.SetAppStatusHandler(core.StatusHandler) if err != nil { - return nil, nil, err + return nil, err } - return blockProcessor, blockTracker, nil + return blockProcessor, nil } -func newMetaBlockProcessorAndTracker( +func newMetaBlockProcessor( resolversFinder dataRetriever.ResolversFinder, shardCoordinator sharding.Coordinator, nodesCoordinator sharding.NodesCoordinator, @@ -1739,18 +1721,13 @@ func newMetaBlockProcessorAndTracker( forkDetector process.ForkDetector, shardsGenesisBlocks map[uint32]data.HeaderHandler, coreServiceContainer serviceContainer.Core, -) (process.BlockProcessor, process.BlocksTracker, error) { +) 
(process.BlockProcessor, error) { requestHandler, err := requestHandlers.NewMetaResolverRequestHandler( resolversFinder, factory.ShardHeadersForMetachainTopic, factory.MetachainBlocksTopic) if err != nil { - return nil, nil, err - } - - blockTracker, err := track.NewMetaBlockTracker() - if err != nil { - return nil, nil, err + return nil, err } metaProcessor, err := block.NewMetaProcessor( @@ -1769,15 +1746,15 @@ func newMetaBlockProcessorAndTracker( core.Uint64ByteSliceConverter, ) if err != nil { - return nil, nil, errors.New("could not create block processor: " + err.Error()) + return nil, errors.New("could not create block processor: " + err.Error()) } err = metaProcessor.SetAppStatusHandler(core.StatusHandler) if err != nil { - return nil, nil, err + return nil, err } - return metaProcessor, blockTracker, nil + return metaProcessor, nil } func getCacherFromConfig(cfg config.CacheConfig) storageUnit.CacheConfig { diff --git a/cmd/node/main.go b/cmd/node/main.go index e71f958043f..09905e6e0a6 100644 --- a/cmd/node/main.go +++ b/cmd/node/main.go @@ -45,7 +45,7 @@ import ( "github.com/ElrondNetwork/elrond-go/sharding" "github.com/ElrondNetwork/elrond-go/statusHandler" factoryViews "github.com/ElrondNetwork/elrond-go/statusHandler/factory" - vmcommon "github.com/ElrondNetwork/elrond-vm-common" + "github.com/ElrondNetwork/elrond-vm-common" "github.com/ElrondNetwork/elrond-vm/iele/elrond/node/endpoint" "github.com/google/gops/agent" "github.com/urfave/cli" @@ -1207,7 +1207,6 @@ func createNode( node.WithConsensusGroupSize(int(consensusGroupSize)), node.WithSyncer(syncer), node.WithBlockProcessor(process.BlockProcessor), - node.WithBlockTracker(process.BlockTracker), node.WithGenesisTime(time.Unix(nodesConfig.StartTime, 0)), node.WithRounder(process.Rounder), node.WithShardCoordinator(shardCoordinator), diff --git a/consensus/mock/blocksTrackerMock.go b/consensus/mock/blocksTrackerMock.go deleted file mode 100644 index 864fadad627..00000000000 --- a/consensus/mock/blocksTrackerMock.go +++ /dev/null @@ -1,41 +0,0 @@ -package mock - -import ( - "github.com/ElrondNetwork/elrond-go/data" -) - -type BlocksTrackerMock struct { - UnnotarisedBlocksCalled func() []data.HeaderHandler - RemoveNotarisedBlocksCalled func(headerHandler data.HeaderHandler) error - AddBlockCalled func(headerHandler data.HeaderHandler) - SetBlockBroadcastRoundCalled func(nonce uint64, round int64) - BlockBroadcastRoundCalled func(nonce uint64) int64 -} - -func (btm *BlocksTrackerMock) UnnotarisedBlocks() []data.HeaderHandler { - return btm.UnnotarisedBlocksCalled() -} - -func (btm *BlocksTrackerMock) RemoveNotarisedBlocks(headerHandler data.HeaderHandler) error { - return btm.RemoveNotarisedBlocksCalled(headerHandler) -} - -func (btm *BlocksTrackerMock) AddBlock(headerHandler data.HeaderHandler) { - btm.AddBlockCalled(headerHandler) -} - -func (btm *BlocksTrackerMock) SetBlockBroadcastRound(nonce uint64, round int64) { - btm.SetBlockBroadcastRoundCalled(nonce, round) -} - -func (btm *BlocksTrackerMock) BlockBroadcastRound(nonce uint64) int64 { - return btm.BlockBroadcastRoundCalled(nonce) -} - -// IsInterfaceNil returns true if there is no value under the interface -func (btm *BlocksTrackerMock) IsInterfaceNil() bool { - if btm == nil { - return true - } - return false -} diff --git a/consensus/mock/consensusDataContainerMock.go b/consensus/mock/consensusDataContainerMock.go index d360818ea99..107f20dd69a 100644 --- a/consensus/mock/consensusDataContainerMock.go +++ b/consensus/mock/consensusDataContainerMock.go @@ -14,7 
+14,6 @@ import ( type ConsensusCoreMock struct { blockChain data.ChainHandler blockProcessor process.BlockProcessor - blocksTracker process.BlocksTracker bootstrapper process.Bootstrapper broadcastMessenger consensus.BroadcastMessenger chronologyHandler consensus.ChronologyHandler @@ -37,10 +36,6 @@ func (cdc *ConsensusCoreMock) BlockProcessor() process.BlockProcessor { return cdc.blockProcessor } -func (cdc *ConsensusCoreMock) BlocksTracker() process.BlocksTracker { - return cdc.blocksTracker -} - func (cdc *ConsensusCoreMock) BootStrapper() process.Bootstrapper { return cdc.bootstrapper } diff --git a/consensus/mock/mockTestInitializer.go b/consensus/mock/mockTestInitializer.go index e45adea2759..a290fa5c40d 100644 --- a/consensus/mock/mockTestInitializer.go +++ b/consensus/mock/mockTestInitializer.go @@ -98,10 +98,6 @@ func InitConsensusCore() *ConsensusCoreMock { }, } blockProcessorMock := InitBlockProcessorMock() - blockTrackerMock := &BlocksTrackerMock{ - SetBlockBroadcastRoundCalled: func(nonce uint64, round int64) { - }, - } bootstrapperMock := &BootstrapperMock{} broadcastMessengerMock := &BroadcastMessengerMock{ BroadcastConsensusMessageCalled: func(message *consensus.Message) error { @@ -127,7 +123,6 @@ func InitConsensusCore() *ConsensusCoreMock { container := &ConsensusCoreMock{ blockChain, blockProcessorMock, - blockTrackerMock, bootstrapperMock, broadcastMessengerMock, chronologyHandlerMock, diff --git a/consensus/mock/sposWorkerMock.go b/consensus/mock/sposWorkerMock.go index 5faa06ebc25..80b060ee107 100644 --- a/consensus/mock/sposWorkerMock.go +++ b/consensus/mock/sposWorkerMock.go @@ -19,7 +19,6 @@ type SposWorkerMock struct { GetBroadcastBlockCalled func(data.BodyHandler, data.HeaderHandler) error GetBroadcastHeaderCalled func(data.HeaderHandler) error ExecuteStoredMessagesCalled func() - BroadcastUnnotarisedBlocksCalled func() } func (sposWorkerMock *SposWorkerMock) AddReceivedMessageCall(messageType consensus.MessageType, @@ -55,10 +54,6 @@ func (sposWorkerMock *SposWorkerMock) ExecuteStoredMessages() { sposWorkerMock.ExecuteStoredMessagesCalled() } -func (sposWorkerMock *SposWorkerMock) BroadcastUnnotarisedBlocks() { - sposWorkerMock.BroadcastUnnotarisedBlocksCalled() -} - // IsInterfaceNil returns true if there is no value under the interface func (sposWorkerMock *SposWorkerMock) IsInterfaceNil() bool { if sposWorkerMock == nil { diff --git a/consensus/spos/bls/blsSubroundsFactory.go b/consensus/spos/bls/blsSubroundsFactory.go index c6d455c0449..dafba06a679 100644 --- a/consensus/spos/bls/blsSubroundsFactory.go +++ b/consensus/spos/bls/blsSubroundsFactory.go @@ -129,7 +129,6 @@ func (fct *factory) generateStartRoundSubround() error { processingThresholdPercent, getSubroundName, fct.worker.ExecuteStoredMessages, - fct.worker.BroadcastUnnotarisedBlocks, ) if err != nil { return err diff --git a/consensus/spos/bls/subroundEndRound.go b/consensus/spos/bls/subroundEndRound.go index d376d5c84b4..9d0b041e395 100644 --- a/consensus/spos/bls/subroundEndRound.go +++ b/consensus/spos/bls/subroundEndRound.go @@ -111,8 +111,6 @@ func (sr *subroundEndRound) doEndRoundJob() bool { log.Error(err.Error()) } - sr.BlocksTracker().SetBlockBroadcastRound(sr.Header.GetNonce(), sr.RoundIndex) - log.Info(fmt.Sprintf("%sStep 3: BlockBody and Header has been committed and broadcast\n", sr.SyncTimer().FormattedCurrentTime())) err = sr.broadcastMiniBlocksAndTransactions() diff --git a/consensus/spos/bn/bnSubroundsFactory.go b/consensus/spos/bn/bnSubroundsFactory.go index 
18d2c4b3fdc..9884f9ed029 100644 --- a/consensus/spos/bn/bnSubroundsFactory.go +++ b/consensus/spos/bn/bnSubroundsFactory.go @@ -149,7 +149,6 @@ func (fct *factory) generateStartRoundSubround() error { processingThresholdPercent, getSubroundName, fct.worker.ExecuteStoredMessages, - fct.worker.BroadcastUnnotarisedBlocks, ) if err != nil { diff --git a/consensus/spos/bn/subroundEndRound.go b/consensus/spos/bn/subroundEndRound.go index e055260bc07..8607ffd4158 100644 --- a/consensus/spos/bn/subroundEndRound.go +++ b/consensus/spos/bn/subroundEndRound.go @@ -106,8 +106,6 @@ func (sr *subroundEndRound) doEndRoundJob() bool { log.Error(err.Error()) } - sr.BlocksTracker().SetBlockBroadcastRound(sr.Header.GetNonce(), sr.RoundIndex) - log.Info(fmt.Sprintf("%sStep 6: TxBlockBody and Header has been committed and broadcast\n", sr.SyncTimer().FormattedCurrentTime())) err = sr.broadcastMiniBlocksAndTransactions() diff --git a/consensus/spos/commonSubround/base_test.go b/consensus/spos/commonSubround/base_test.go index bd2f4ab07f3..00eaa5c98e6 100644 --- a/consensus/spos/commonSubround/base_test.go +++ b/consensus/spos/commonSubround/base_test.go @@ -115,6 +115,3 @@ func getSubroundName(subroundId int) string { // executeStoredMessages tries to execute all the messages received which are valid for execution func executeStoredMessages() { } - -func broadcastUnnotarisedBlocks() { -} diff --git a/consensus/spos/commonSubround/subroundStartRound.go b/consensus/spos/commonSubround/subroundStartRound.go index c4a164fcfbd..33863e98775 100644 --- a/consensus/spos/commonSubround/subroundStartRound.go +++ b/consensus/spos/commonSubround/subroundStartRound.go @@ -19,7 +19,6 @@ type SubroundStartRound struct { processingThresholdPercentage int getSubroundName func(subroundId int) string executeStoredMessages func() - broadcastUnnotarisedBlocks func() appStatusHandler core.AppStatusHandler } @@ -31,11 +30,9 @@ func NewSubroundStartRound( processingThresholdPercentage int, getSubroundName func(subroundId int) string, executeStoredMessages func(), - broadcastUnnotarisedBlocks func(), ) (*SubroundStartRound, error) { err := checkNewSubroundStartRoundParams( baseSubround, - broadcastUnnotarisedBlocks, ) if err != nil { return nil, err @@ -46,7 +43,6 @@ func NewSubroundStartRound( processingThresholdPercentage, getSubroundName, executeStoredMessages, - broadcastUnnotarisedBlocks, statusHandler.NewNilStatusHandler(), } srStartRound.Job = srStartRound.doStartRoundJob @@ -58,7 +54,6 @@ func NewSubroundStartRound( func checkNewSubroundStartRoundParams( baseSubround *spos.Subround, - broadcastUnnotarisedBlocks func(), ) error { if baseSubround == nil { return spos.ErrNilSubround @@ -66,9 +61,6 @@ func checkNewSubroundStartRoundParams( if baseSubround.ConsensusState == nil { return spos.ErrNilConsensusState } - if broadcastUnnotarisedBlocks == nil { - return spos.ErrNilBroadcastUnnotarisedBlocks - } err := spos.ValidateConsensusCore(baseSubround.ConsensusCoreHandler) @@ -185,10 +177,6 @@ func (sr *SubroundStartRound) initCurrentRound() bool { sr.SetStatus(sr.Current(), spos.SsFinished) - if leader == sr.SelfPubKey() { - //TODO: Should be analyzed if call of sr.broadcastUnnotarisedBlocks() is still necessary - } - // execute stored messages which were received in this new round but before this initialisation go sr.executeStoredMessages() diff --git a/consensus/spos/commonSubround/subroundStartRound_test.go b/consensus/spos/commonSubround/subroundStartRound_test.go index 77ecb1d5729..911ae2cc9fb 100644 --- 
a/consensus/spos/commonSubround/subroundStartRound_test.go +++ b/consensus/spos/commonSubround/subroundStartRound_test.go @@ -19,7 +19,6 @@ func defaultSubroundStartRoundFromSubround(sr *spos.Subround) (*commonSubround.S processingThresholdPercent, getSubroundName, executeStoredMessages, - broadcastUnnotarisedBlocks, ) return startRound, err @@ -55,7 +54,6 @@ func initSubroundStartRoundWithContainer(container spos.ConsensusCoreHandler) *c processingThresholdPercent, getSubroundName, executeStoredMessages, - broadcastUnnotarisedBlocks, ) return srStartRound @@ -75,7 +73,6 @@ func TestSubroundStartRound_NewSubroundStartRoundNilSubroundShouldFail(t *testin processingThresholdPercent, getSubroundName, executeStoredMessages, - broadcastUnnotarisedBlocks, ) assert.Nil(t, srStartRound) @@ -130,28 +127,6 @@ func TestSubroundStartRound_NewSubroundStartRoundNilConsensusStateShouldFail(t * assert.Equal(t, spos.ErrNilConsensusState, err) } -func TestSubroundStartRound_NewSubroundStartRoundNilBroadcastUnnotarisedBlocksFunctionShouldFail(t *testing.T) { - t.Parallel() - - container := mock.InitConsensusCore() - consensusState := initConsensusState() - ch := make(chan bool, 1) - - sr, _ := defaultSubround(consensusState, ch, container) - - srStartRound, err := commonSubround.NewSubroundStartRound( - sr, - extend, - processingThresholdPercent, - getSubroundName, - executeStoredMessages, - nil, - ) - - assert.Nil(t, srStartRound) - assert.Equal(t, spos.ErrNilBroadcastUnnotarisedBlocks, err) -} - func TestSubroundStartRound_NewSubroundStartRoundNilMultiSignerShouldFail(t *testing.T) { t.Parallel() diff --git a/consensus/spos/consensusCore.go b/consensus/spos/consensusCore.go index 88aef489bb8..42d6bc39d0b 100644 --- a/consensus/spos/consensusCore.go +++ b/consensus/spos/consensusCore.go @@ -16,7 +16,6 @@ import ( type ConsensusCore struct { blockChain data.ChainHandler blockProcessor process.BlockProcessor - blocksTracker process.BlocksTracker bootstrapper process.Bootstrapper broadcastMessenger consensus.BroadcastMessenger chronologyHandler consensus.ChronologyHandler @@ -35,7 +34,6 @@ type ConsensusCore struct { func NewConsensusCore( blockChain data.ChainHandler, blockProcessor process.BlockProcessor, - blocksTracker process.BlocksTracker, bootstrapper process.Bootstrapper, broadcastMessenger consensus.BroadcastMessenger, chronologyHandler consensus.ChronologyHandler, @@ -53,7 +51,6 @@ func NewConsensusCore( consensusCore := &ConsensusCore{ blockChain, blockProcessor, - blocksTracker, bootstrapper, broadcastMessenger, chronologyHandler, @@ -86,11 +83,6 @@ func (cc *ConsensusCore) BlockProcessor() process.BlockProcessor { return cc.blockProcessor } -// BlocksTracker gets the BlocksTracker stored in the ConsensusCore -func (cc *ConsensusCore) BlocksTracker() process.BlocksTracker { - return cc.blocksTracker -} - // BootStrapper gets the Bootstrapper stored in the ConsensusCore func (cc *ConsensusCore) BootStrapper() process.Bootstrapper { return cc.bootstrapper diff --git a/consensus/spos/consensusCoreValidator.go b/consensus/spos/consensusCoreValidator.go index 50ca27e1a36..b5a1026e6f6 100644 --- a/consensus/spos/consensusCoreValidator.go +++ b/consensus/spos/consensusCoreValidator.go @@ -11,9 +11,6 @@ func ValidateConsensusCore(container ConsensusCoreHandler) error { if container.BlockProcessor() == nil || container.BlockProcessor().IsInterfaceNil() { return ErrNilBlockProcessor } - if container.BlocksTracker() == nil || container.BlocksTracker().IsInterfaceNil() { - return ErrNilBlocksTracker - } if 
container.BootStrapper() == nil || container.BootStrapper().IsInterfaceNil() { return ErrNilBootstrapper } diff --git a/consensus/spos/consensusCoreValidator_test.go b/consensus/spos/consensusCoreValidator_test.go index d55fac01683..74c22310434 100644 --- a/consensus/spos/consensusCoreValidator_test.go +++ b/consensus/spos/consensusCoreValidator_test.go @@ -10,7 +10,6 @@ import ( func initConsensusDataContainer() *ConsensusCore { blockChain := &mock.BlockChainMock{} blockProcessorMock := mock.InitBlockProcessorMock() - blocksTrackerMock := &mock.BlocksTrackerMock{} bootstrapperMock := &mock.BootstrapperMock{} broadcastMessengerMock := &mock.BroadcastMessengerMock{} chronologyHandlerMock := mock.InitChronologyHandlerMock() @@ -27,7 +26,6 @@ func initConsensusDataContainer() *ConsensusCore { return &ConsensusCore{ blockChain: blockChain, blockProcessor: blockProcessorMock, - blocksTracker: blocksTrackerMock, bootstrapper: bootstrapperMock, broadcastMessenger: broadcastMessengerMock, chronologyHandler: chronologyHandlerMock, diff --git a/consensus/spos/consensusCore_test.go b/consensus/spos/consensusCore_test.go index 11cd925ffdd..ff62458bd89 100644 --- a/consensus/spos/consensusCore_test.go +++ b/consensus/spos/consensusCore_test.go @@ -16,7 +16,6 @@ func TestConsensusCore_WithNilBlockchainShouldFail(t *testing.T) { consensusCore, err := spos.NewConsensusCore( nil, consensusCoreMock.BlockProcessor(), - consensusCoreMock.BlocksTracker(), consensusCoreMock.BootStrapper(), consensusCoreMock.BroadcastMessenger(), consensusCoreMock.Chronology(), @@ -43,7 +42,6 @@ func TestConsensusCore_WithNilBlockProcessorShouldFail(t *testing.T) { consensusCore, err := spos.NewConsensusCore( consensusCoreMock.Blockchain(), nil, - consensusCoreMock.BlocksTracker(), consensusCoreMock.BootStrapper(), consensusCoreMock.BroadcastMessenger(), consensusCoreMock.Chronology(), @@ -62,33 +60,6 @@ func TestConsensusCore_WithNilBlockProcessorShouldFail(t *testing.T) { assert.Equal(t, spos.ErrNilBlockProcessor, err) } -func TestConsensusCore_WithNilBlocksTrackerShouldFail(t *testing.T) { - t.Parallel() - - consensusCoreMock := mock.InitConsensusCore() - - consensusCore, err := spos.NewConsensusCore( - consensusCoreMock.Blockchain(), - consensusCoreMock.BlockProcessor(), - nil, - consensusCoreMock.BootStrapper(), - consensusCoreMock.BroadcastMessenger(), - consensusCoreMock.Chronology(), - consensusCoreMock.Hasher(), - consensusCoreMock.Marshalizer(), - consensusCoreMock.RandomnessPrivateKey(), - consensusCoreMock.RandomnessSingleSigner(), - consensusCoreMock.MultiSigner(), - consensusCoreMock.Rounder(), - consensusCoreMock.ShardCoordinator(), - consensusCoreMock.NodesCoordinator(), - consensusCoreMock.SyncTimer(), - ) - - assert.Nil(t, consensusCore) - assert.Equal(t, spos.ErrNilBlocksTracker, err) -} - func TestConsensusCore_WithNilBootstrapperShouldFail(t *testing.T) { t.Parallel() @@ -97,7 +68,6 @@ func TestConsensusCore_WithNilBootstrapperShouldFail(t *testing.T) { consensusCore, err := spos.NewConsensusCore( consensusCoreMock.Blockchain(), consensusCoreMock.BlockProcessor(), - consensusCoreMock.BlocksTracker(), nil, consensusCoreMock.BroadcastMessenger(), consensusCoreMock.Chronology(), @@ -124,7 +94,6 @@ func TestConsensusCore_WithNilBroadcastMessengerShouldFail(t *testing.T) { consensusCore, err := spos.NewConsensusCore( consensusCoreMock.Blockchain(), consensusCoreMock.BlockProcessor(), - consensusCoreMock.BlocksTracker(), consensusCoreMock.BootStrapper(), nil, consensusCoreMock.Chronology(), @@ -151,7 +120,6 @@ 
func TestConsensusCore_WithNilChronologyShouldFail(t *testing.T) { consensusCore, err := spos.NewConsensusCore( consensusCoreMock.Blockchain(), consensusCoreMock.BlockProcessor(), - consensusCoreMock.BlocksTracker(), consensusCoreMock.BootStrapper(), consensusCoreMock.BroadcastMessenger(), nil, @@ -178,7 +146,6 @@ func TestConsensusCore_WithNilHasherShouldFail(t *testing.T) { consensusCore, err := spos.NewConsensusCore( consensusCoreMock.Blockchain(), consensusCoreMock.BlockProcessor(), - consensusCoreMock.BlocksTracker(), consensusCoreMock.BootStrapper(), consensusCoreMock.BroadcastMessenger(), consensusCoreMock.Chronology(), @@ -205,7 +172,6 @@ func TestConsensusCore_WithNilMarshalizerShouldFail(t *testing.T) { consensusCore, err := spos.NewConsensusCore( consensusCoreMock.Blockchain(), consensusCoreMock.BlockProcessor(), - consensusCoreMock.BlocksTracker(), consensusCoreMock.BootStrapper(), consensusCoreMock.BroadcastMessenger(), consensusCoreMock.Chronology(), @@ -232,7 +198,6 @@ func TestConsensusCore_WithNilBlsPrivateKeyShouldFail(t *testing.T) { consensusCore, err := spos.NewConsensusCore( consensusCoreMock.Blockchain(), consensusCoreMock.BlockProcessor(), - consensusCoreMock.BlocksTracker(), consensusCoreMock.BootStrapper(), consensusCoreMock.BroadcastMessenger(), consensusCoreMock.Chronology(), @@ -259,7 +224,6 @@ func TestConsensusCore_WithNilBlsSingleSignerShouldFail(t *testing.T) { consensusCore, err := spos.NewConsensusCore( consensusCoreMock.Blockchain(), consensusCoreMock.BlockProcessor(), - consensusCoreMock.BlocksTracker(), consensusCoreMock.BootStrapper(), consensusCoreMock.BroadcastMessenger(), consensusCoreMock.Chronology(), @@ -286,7 +250,6 @@ func TestConsensusCore_WithNilMultiSignerShouldFail(t *testing.T) { consensusCore, err := spos.NewConsensusCore( consensusCoreMock.Blockchain(), consensusCoreMock.BlockProcessor(), - consensusCoreMock.BlocksTracker(), consensusCoreMock.BootStrapper(), consensusCoreMock.BroadcastMessenger(), consensusCoreMock.Chronology(), @@ -313,7 +276,6 @@ func TestConsensusCore_WithNilRounderShouldFail(t *testing.T) { consensusCore, err := spos.NewConsensusCore( consensusCoreMock.Blockchain(), consensusCoreMock.BlockProcessor(), - consensusCoreMock.BlocksTracker(), consensusCoreMock.BootStrapper(), consensusCoreMock.BroadcastMessenger(), consensusCoreMock.Chronology(), @@ -340,7 +302,6 @@ func TestConsensusCore_WithNilShardCoordinatorShouldFail(t *testing.T) { consensusCore, err := spos.NewConsensusCore( consensusCoreMock.Blockchain(), consensusCoreMock.BlockProcessor(), - consensusCoreMock.BlocksTracker(), consensusCoreMock.BootStrapper(), consensusCoreMock.BroadcastMessenger(), consensusCoreMock.Chronology(), @@ -367,7 +328,6 @@ func TestConsensusCore_WithNilValidatorGroupSelectorShouldFail(t *testing.T) { consensusCore, err := spos.NewConsensusCore( consensusCoreMock.Blockchain(), consensusCoreMock.BlockProcessor(), - consensusCoreMock.BlocksTracker(), consensusCoreMock.BootStrapper(), consensusCoreMock.BroadcastMessenger(), consensusCoreMock.Chronology(), @@ -394,7 +354,6 @@ func TestConsensusCore_WithNilSyncTimerShouldFail(t *testing.T) { consensusCore, err := spos.NewConsensusCore( consensusCoreMock.Blockchain(), consensusCoreMock.BlockProcessor(), - consensusCoreMock.BlocksTracker(), consensusCoreMock.BootStrapper(), consensusCoreMock.BroadcastMessenger(), consensusCoreMock.Chronology(), @@ -421,7 +380,6 @@ func TestConsensusCore_CreateConsensusCoreShouldWork(t *testing.T) { consensusCore, err := spos.NewConsensusCore( 
consensusCoreMock.Blockchain(), consensusCoreMock.BlockProcessor(), - consensusCoreMock.BlocksTracker(), consensusCoreMock.BootStrapper(), consensusCoreMock.BroadcastMessenger(), consensusCoreMock.Chronology(), diff --git a/consensus/spos/constants.go b/consensus/spos/constants.go index f7e15fe1c52..aeb8bf850e1 100644 --- a/consensus/spos/constants.go +++ b/consensus/spos/constants.go @@ -2,7 +2,3 @@ package spos // maxThresholdPercent specifies the max allocated time percent for doing Job as a percentage of the total time of one round const maxThresholdPercent = 75 - -// MaxRoundsGap defines the maximum expected gap in terms of rounds, between metachain and shardchain, after which -// a block committed and broadcast from shardchain would be visible as notarized in metachain -const MaxRoundsGap = 3 diff --git a/consensus/spos/errors.go b/consensus/spos/errors.go index b03c091bbe6..566c09fb385 100644 --- a/consensus/spos/errors.go +++ b/consensus/spos/errors.go @@ -64,9 +64,6 @@ var ErrNilMessenger = errors.New("messenger is nil") // ErrNilBlockProcessor is raised when a valid block processor is expected but nil used var ErrNilBlockProcessor = errors.New("block processor is nil") -// ErrNilBlocksTracker is raised when a valid block tracker is expected but nil used -var ErrNilBlocksTracker = errors.New("blocks tracker is nil") - // ErrNilBootstrapper is raised when a valid block processor is expected but nil used var ErrNilBootstrapper = errors.New("bootstrapper is nil") @@ -142,9 +139,6 @@ var ErrNilBody = errors.New("body is nil") // ErrNilMetaHeader is raised when an expected meta header is nil var ErrNilMetaHeader = errors.New("meta header is nil") -// ErrNilBroadcastUnnotarisedBlocks is raised when a valid broadcastUnnotarisedBlocks function is expected but nil used -var ErrNilBroadcastUnnotarisedBlocks = errors.New("broadcastUnnotarisedBlocks is nil") - // ErrNilForkDetector is raised when a valid fork detector is expected but nil used var ErrNilForkDetector = errors.New("fork detector is nil") diff --git a/consensus/spos/export_test.go b/consensus/spos/export_test.go index 497c48516ed..343167fee59 100644 --- a/consensus/spos/export_test.go +++ b/consensus/spos/export_test.go @@ -19,14 +19,6 @@ func (wrk *Worker) SetBlockProcessor(blockProcessor process.BlockProcessor) { wrk.blockProcessor = blockProcessor } -func (wrk *Worker) BlockTracker() process.BlocksTracker { - return wrk.blockTracker -} - -func (wrk *Worker) SetBlockTracker(blockTracker process.BlocksTracker) { - wrk.blockTracker = blockTracker -} - func (wrk *Worker) Bootstrapper() process.Bootstrapper { return wrk.bootstrapper } diff --git a/consensus/spos/interface.go b/consensus/spos/interface.go index 02346c8657b..a48dbd2bdc5 100644 --- a/consensus/spos/interface.go +++ b/consensus/spos/interface.go @@ -18,8 +18,6 @@ type ConsensusCoreHandler interface { Blockchain() data.ChainHandler // BlockProcessor gets the BlockProcessor stored in the ConsensusCore BlockProcessor() process.BlockProcessor - // BlocksTracker gets the BlockTracker stored in the ConsensusCore - BlocksTracker() process.BlocksTracker // BootStrapper gets the Bootstrapper stored in the ConsensusCore BootStrapper() process.Bootstrapper // BroadcastMessenger gets the BroadcastMessenger stored in ConsensusCore @@ -88,8 +86,6 @@ type WorkerHandler interface { GetConsensusStateChangedChannel() chan bool //ExecuteStoredMessages tries to execute all the messages received which are valid for execution ExecuteStoredMessages() - //BroadcastUnnotarisedBlocks broadcasts 
all blocks which are not notarised yet - BroadcastUnnotarisedBlocks() // IsInterfaceNil returns true if there is no value under the interface IsInterfaceNil() bool } diff --git a/consensus/spos/worker.go b/consensus/spos/worker.go index 75bbbb5789d..ac1a04bb3d5 100644 --- a/consensus/spos/worker.go +++ b/consensus/spos/worker.go @@ -21,7 +21,6 @@ import ( type Worker struct { consensusService ConsensusService blockProcessor process.BlockProcessor - blockTracker process.BlocksTracker bootstrapper process.Bootstrapper broadcastMessenger consensus.BroadcastMessenger consensusState *ConsensusState @@ -47,7 +46,6 @@ type Worker struct { func NewWorker( consensusService ConsensusService, blockProcessor process.BlockProcessor, - blockTracker process.BlocksTracker, bootstrapper process.Bootstrapper, broadcastMessenger consensus.BroadcastMessenger, consensusState *ConsensusState, @@ -62,7 +60,6 @@ func NewWorker( err := checkNewWorkerParams( consensusService, blockProcessor, - blockTracker, bootstrapper, broadcastMessenger, consensusState, @@ -81,7 +78,6 @@ func NewWorker( wrk := Worker{ consensusService: consensusService, blockProcessor: blockProcessor, - blockTracker: blockTracker, bootstrapper: bootstrapper, broadcastMessenger: broadcastMessenger, consensusState: consensusState, @@ -108,7 +104,6 @@ func NewWorker( func checkNewWorkerParams( consensusService ConsensusService, blockProcessor process.BlockProcessor, - blockTracker process.BlocksTracker, bootstrapper process.Bootstrapper, broadcastMessenger consensus.BroadcastMessenger, consensusState *ConsensusState, @@ -126,9 +121,6 @@ func checkNewWorkerParams( if blockProcessor == nil || blockProcessor.IsInterfaceNil() { return ErrNilBlockProcessor } - if blockTracker == nil || blockTracker.IsInterfaceNil() { - return ErrNilBlocksTracker - } if bootstrapper == nil || bootstrapper.IsInterfaceNil() { return ErrNilBootstrapper } @@ -394,29 +386,6 @@ func (wrk *Worker) GetConsensusStateChangedChannel() chan bool { return wrk.consensusStateChangedChannel } -//BroadcastUnnotarisedBlocks broadcasts all blocks which are not notarised yet -func (wrk *Worker) BroadcastUnnotarisedBlocks() { - headers := wrk.blockTracker.UnnotarisedBlocks() - for _, header := range headers { - broadcastRound := wrk.blockTracker.BlockBroadcastRound(header.GetNonce()) - if broadcastRound >= wrk.consensusState.RoundIndex-MaxRoundsGap { - continue - } - - err := wrk.broadcastMessenger.BroadcastHeader(header) - if err != nil { - log.Info(err.Error()) - continue - } - - wrk.blockTracker.SetBlockBroadcastRound(header.GetNonce(), wrk.consensusState.RoundIndex) - - log.Info(fmt.Sprintf("%sStep 0: Unnotarised header with nonce %d has been broadcast to metachain\n", - wrk.syncTimer.FormattedCurrentTime(), - header.GetNonce())) - } -} - //ExecuteStoredMessages tries to execute all the messages received which are valid for execution func (wrk *Worker) ExecuteStoredMessages() { wrk.mutReceivedMessages.Lock() diff --git a/consensus/spos/worker_test.go b/consensus/spos/worker_test.go index 6acedc22cf2..84abc510394 100644 --- a/consensus/spos/worker_test.go +++ b/consensus/spos/worker_test.go @@ -27,7 +27,6 @@ func initWorker() *spos.Worker { RevertAccountStateCalled: func() { }, } - blockTrackerMock := &mock.BlocksTrackerMock{} bootstrapperMock := &mock.BootstrapperMock{} broadcastMessengerMock := &mock.BroadcastMessengerMock{} consensusState := initConsensusState() @@ -54,7 +53,6 @@ func initWorker() *spos.Worker { sposWorker, _ := spos.NewWorker( bnService, blockProcessor, - 
blockTrackerMock, bootstrapperMock, broadcastMessengerMock, consensusState, @@ -85,7 +83,6 @@ func TestWorker_NewWorkerConsensusServiceNilShouldFail(t *testing.T) { t.Parallel() blockProcessor := &mock.BlockProcessorMock{} - blockTrackerMock := &mock.BlocksTrackerMock{} bootstrapperMock := &mock.BootstrapperMock{} broadcastMessengerMock := &mock.BroadcastMessengerMock{} consensusState := initConsensusState() @@ -99,7 +96,6 @@ func TestWorker_NewWorkerConsensusServiceNilShouldFail(t *testing.T) { wrk, err := spos.NewWorker(nil, blockProcessor, - blockTrackerMock, bootstrapperMock, broadcastMessengerMock, consensusState, @@ -118,7 +114,6 @@ func TestWorker_NewWorkerConsensusServiceNilShouldFail(t *testing.T) { func TestWorker_NewWorkerBlockProcessorNilShouldFail(t *testing.T) { t.Parallel() - blockTrackerMock := &mock.BlocksTrackerMock{} bootstrapperMock := &mock.BootstrapperMock{} broadcastMessengerMock := &mock.BroadcastMessengerMock{} consensusState := initConsensusState() @@ -133,7 +128,6 @@ func TestWorker_NewWorkerBlockProcessorNilShouldFail(t *testing.T) { wrk, err := spos.NewWorker(bnService, nil, - blockTrackerMock, bootstrapperMock, broadcastMessengerMock, consensusState, @@ -149,45 +143,10 @@ func TestWorker_NewWorkerBlockProcessorNilShouldFail(t *testing.T) { assert.Equal(t, spos.ErrNilBlockProcessor, err) } -func TestWorker_NewWorkerBlockTrackerNilShouldFail(t *testing.T) { - t.Parallel() - - blockProcessor := &mock.BlockProcessorMock{} - bootstrapperMock := &mock.BootstrapperMock{} - broadcastMessengerMock := &mock.BroadcastMessengerMock{} - consensusState := initConsensusState() - forkDetectorMock := &mock.ForkDetectorMock{} - keyGeneratorMock := &mock.KeyGenMock{} - marshalizerMock := mock.MarshalizerMock{} - rounderMock := initRounderMock() - shardCoordinatorMock := mock.ShardCoordinatorMock{} - singleSignerMock := &mock.SingleSignerMock{} - syncTimerMock := &mock.SyncTimerMock{} - bnService, _ := bn.NewConsensusService() - - wrk, err := spos.NewWorker(bnService, - blockProcessor, - nil, - bootstrapperMock, - broadcastMessengerMock, - consensusState, - forkDetectorMock, - keyGeneratorMock, - marshalizerMock, - rounderMock, - shardCoordinatorMock, - singleSignerMock, - syncTimerMock) - - assert.Nil(t, wrk) - assert.Equal(t, spos.ErrNilBlocksTracker, err) -} - func TestWorker_NewWorkerBootstrapperNilShouldFail(t *testing.T) { t.Parallel() blockProcessor := &mock.BlockProcessorMock{} - blockTrackerMock := &mock.BlocksTrackerMock{} broadcastMessengerMock := &mock.BroadcastMessengerMock{} consensusState := initConsensusState() forkDetectorMock := &mock.ForkDetectorMock{} @@ -202,7 +161,6 @@ func TestWorker_NewWorkerBootstrapperNilShouldFail(t *testing.T) { wrk, err := spos.NewWorker( bnService, blockProcessor, - blockTrackerMock, nil, broadcastMessengerMock, consensusState, @@ -222,7 +180,6 @@ func TestWorker_NewWorkerBroadcastMessengerNilShouldFail(t *testing.T) { t.Parallel() blockProcessor := &mock.BlockProcessorMock{} - blockTrackerMock := &mock.BlocksTrackerMock{} bootstrapperMock := &mock.BootstrapperMock{} consensusState := initConsensusState() forkDetectorMock := &mock.ForkDetectorMock{} @@ -237,7 +194,6 @@ func TestWorker_NewWorkerBroadcastMessengerNilShouldFail(t *testing.T) { wrk, err := spos.NewWorker( bnService, blockProcessor, - blockTrackerMock, bootstrapperMock, nil, consensusState, @@ -256,7 +212,6 @@ func TestWorker_NewWorkerBroadcastMessengerNilShouldFail(t *testing.T) { func TestWorker_NewWorkerConsensusStateNilShouldFail(t *testing.T) { t.Parallel() 
blockProcessor := &mock.BlockProcessorMock{} - blockTrackerMock := &mock.BlocksTrackerMock{} bootstrapperMock := &mock.BootstrapperMock{} broadcastMessengerMock := &mock.BroadcastMessengerMock{} forkDetectorMock := &mock.ForkDetectorMock{} @@ -271,7 +226,6 @@ func TestWorker_NewWorkerConsensusStateNilShouldFail(t *testing.T) { wrk, err := spos.NewWorker( bnService, blockProcessor, - blockTrackerMock, bootstrapperMock, broadcastMessengerMock, nil, @@ -290,7 +244,6 @@ func TestWorker_NewWorkerConsensusStateNilShouldFail(t *testing.T) { func TestWorker_NewWorkerForkDetectorNilShouldFail(t *testing.T) { t.Parallel() blockProcessor := &mock.BlockProcessorMock{} - blockTrackerMock := &mock.BlocksTrackerMock{} bootstrapperMock := &mock.BootstrapperMock{} broadcastMessengerMock := &mock.BroadcastMessengerMock{} consensusState := initConsensusState() @@ -305,7 +258,6 @@ func TestWorker_NewWorkerForkDetectorNilShouldFail(t *testing.T) { wrk, err := spos.NewWorker( bnService, blockProcessor, - blockTrackerMock, bootstrapperMock, broadcastMessengerMock, consensusState, @@ -324,7 +276,6 @@ func TestWorker_NewWorkerForkDetectorNilShouldFail(t *testing.T) { func TestWorker_NewWorkerKeyGeneratorNilShouldFail(t *testing.T) { t.Parallel() blockProcessor := &mock.BlockProcessorMock{} - blockTrackerMock := &mock.BlocksTrackerMock{} bootstrapperMock := &mock.BootstrapperMock{} broadcastMessengerMock := &mock.BroadcastMessengerMock{} consensusState := initConsensusState() @@ -339,7 +290,6 @@ func TestWorker_NewWorkerKeyGeneratorNilShouldFail(t *testing.T) { wrk, err := spos.NewWorker( bnService, blockProcessor, - blockTrackerMock, bootstrapperMock, broadcastMessengerMock, consensusState, @@ -358,7 +308,6 @@ func TestWorker_NewWorkerKeyGeneratorNilShouldFail(t *testing.T) { func TestWorker_NewWorkerMarshalizerNilShouldFail(t *testing.T) { t.Parallel() blockProcessor := &mock.BlockProcessorMock{} - blockTrackerMock := &mock.BlocksTrackerMock{} bootstrapperMock := &mock.BootstrapperMock{} broadcastMessengerMock := &mock.BroadcastMessengerMock{} consensusState := initConsensusState() @@ -373,7 +322,6 @@ func TestWorker_NewWorkerMarshalizerNilShouldFail(t *testing.T) { wrk, err := spos.NewWorker( bnService, blockProcessor, - blockTrackerMock, bootstrapperMock, broadcastMessengerMock, consensusState, @@ -392,7 +340,6 @@ func TestWorker_NewWorkerMarshalizerNilShouldFail(t *testing.T) { func TestWorker_NewWorkerRounderNilShouldFail(t *testing.T) { t.Parallel() blockProcessor := &mock.BlockProcessorMock{} - blockTrackerMock := &mock.BlocksTrackerMock{} bootstrapperMock := &mock.BootstrapperMock{} broadcastMessengerMock := &mock.BroadcastMessengerMock{} consensusState := initConsensusState() @@ -407,7 +354,6 @@ func TestWorker_NewWorkerRounderNilShouldFail(t *testing.T) { wrk, err := spos.NewWorker( bnService, blockProcessor, - blockTrackerMock, bootstrapperMock, broadcastMessengerMock, consensusState, @@ -426,7 +372,6 @@ func TestWorker_NewWorkerRounderNilShouldFail(t *testing.T) { func TestWorker_NewWorkerShardCoordinatorNilShouldFail(t *testing.T) { t.Parallel() blockProcessor := &mock.BlockProcessorMock{} - blockTrackerMock := &mock.BlocksTrackerMock{} bootstrapperMock := &mock.BootstrapperMock{} broadcastMessengerMock := &mock.BroadcastMessengerMock{} consensusState := initConsensusState() @@ -441,7 +386,6 @@ func TestWorker_NewWorkerShardCoordinatorNilShouldFail(t *testing.T) { wrk, err := spos.NewWorker( bnService, blockProcessor, - blockTrackerMock, bootstrapperMock, broadcastMessengerMock, consensusState, @@ 
-460,7 +404,6 @@ func TestWorker_NewWorkerShardCoordinatorNilShouldFail(t *testing.T) { func TestWorker_NewWorkerSingleSignerNilShouldFail(t *testing.T) { t.Parallel() blockProcessor := &mock.BlockProcessorMock{} - blockTrackerMock := &mock.BlocksTrackerMock{} bootstrapperMock := &mock.BootstrapperMock{} broadcastMessengerMock := &mock.BroadcastMessengerMock{} consensusState := initConsensusState() @@ -475,7 +418,6 @@ func TestWorker_NewWorkerSingleSignerNilShouldFail(t *testing.T) { wrk, err := spos.NewWorker( bnService, blockProcessor, - blockTrackerMock, bootstrapperMock, broadcastMessengerMock, consensusState, @@ -494,7 +436,6 @@ func TestWorker_NewWorkerSingleSignerNilShouldFail(t *testing.T) { func TestWorker_NewWorkerSyncTimerNilShouldFail(t *testing.T) { t.Parallel() blockProcessor := &mock.BlockProcessorMock{} - blockTrackerMock := &mock.BlocksTrackerMock{} bootstrapperMock := &mock.BootstrapperMock{} broadcastMessengerMock := &mock.BroadcastMessengerMock{} consensusState := initConsensusState() @@ -509,7 +450,6 @@ func TestWorker_NewWorkerSyncTimerNilShouldFail(t *testing.T) { wrk, err := spos.NewWorker( bnService, blockProcessor, - blockTrackerMock, bootstrapperMock, broadcastMessengerMock, consensusState, @@ -528,7 +468,6 @@ func TestWorker_NewWorkerSyncTimerNilShouldFail(t *testing.T) { func TestWorker_NewWorkerShouldWork(t *testing.T) { t.Parallel() blockProcessor := &mock.BlockProcessorMock{} - blockTrackerMock := &mock.BlocksTrackerMock{} bootstrapperMock := &mock.BootstrapperMock{} broadcastMessengerMock := &mock.BroadcastMessengerMock{} consensusState := initConsensusState() @@ -544,7 +483,6 @@ func TestWorker_NewWorkerShouldWork(t *testing.T) { wrk, err := spos.NewWorker( bnService, blockProcessor, - blockTrackerMock, bootstrapperMock, broadcastMessengerMock, consensusState, @@ -1381,141 +1319,3 @@ func TestWorker_ExecuteStoredMessagesShouldWork(t *testing.T) { rcvMsg = wrk.ReceivedMessages() assert.Equal(t, 0, len(rcvMsg[msgType])) } - -func TestWorker_BroadcastUnnotarisedBlocksShouldNotBroadcastWhenMaxRoundGapIsNotAchieved(t *testing.T) { - t.Parallel() - - headerHasBeenBroadcast := false - broadcastInRound := int64(0) - - wrk := *initWorker() - header := &block.Header{Nonce: 3} - roundIndex := int64(10) - blockTracker := &mock.BlocksTrackerMock{ - UnnotarisedBlocksCalled: func() []data.HeaderHandler { - headers := make([]data.HeaderHandler, 0) - headers = append(headers, header) - return headers - }, - BlockBroadcastRoundCalled: func(nonce uint64) int64 { - return broadcastInRound - }, - SetBlockBroadcastRoundCalled: func(nonce uint64, round int64) { - broadcastInRound = round - }, - } - - forkDetector := &mock.ForkDetectorMock{ - GetHighestFinalBlockNonceCalled: func() uint64 { - return header.Nonce - }, - } - - wrk.ConsensusState().RoundIndex = int64(roundIndex) - wrk.SetBlockTracker(blockTracker) - wrk.SetForkDetector(forkDetector) - bmm := &mock.BroadcastMessengerMock{ - BroadcastHeaderCalled: func(handler data.HeaderHandler) error { - headerHasBeenBroadcast = true - return nil - }, - } - wrk.SetBroadcastMessenger(bmm) - wrk.BlockTracker().SetBlockBroadcastRound(header.Nonce, int64(roundIndex-spos.MaxRoundsGap)) - - wrk.BroadcastUnnotarisedBlocks() - assert.False(t, headerHasBeenBroadcast) - assert.Equal(t, int64(roundIndex-spos.MaxRoundsGap), wrk.BlockTracker().BlockBroadcastRound(header.Nonce)) -} - -func TestWorker_BroadcastUnnotarisedBlocksShouldErrWhenBroadcastHeaderFails(t *testing.T) { - t.Parallel() - - broadcastInRound := int64(0) - - var err error - wrk 
:= *initWorker() - header := &block.Header{Nonce: 3} - roundIndex := int64(10) - blockTracker := &mock.BlocksTrackerMock{ - UnnotarisedBlocksCalled: func() []data.HeaderHandler { - headers := make([]data.HeaderHandler, 0) - headers = append(headers, header) - return headers - }, - BlockBroadcastRoundCalled: func(nonce uint64) int64 { - return broadcastInRound - }, - SetBlockBroadcastRoundCalled: func(nonce uint64, round int64) { - broadcastInRound = round - }, - } - - forkDetector := &mock.ForkDetectorMock{ - GetHighestFinalBlockNonceCalled: func() uint64 { - return header.Nonce - }, - } - - wrk.ConsensusState().RoundIndex = int64(roundIndex) - wrk.SetBlockTracker(blockTracker) - wrk.SetForkDetector(forkDetector) - bmm := &mock.BroadcastMessengerMock{ - BroadcastHeaderCalled: func(handler data.HeaderHandler) error { - err = errors.New("broadcast header error") - return err - }, - } - wrk.SetBroadcastMessenger(bmm) - wrk.BlockTracker().SetBlockBroadcastRound(header.Nonce, int64(roundIndex-spos.MaxRoundsGap-1)) - - wrk.BroadcastUnnotarisedBlocks() - assert.NotNil(t, err) - assert.Equal(t, int64(roundIndex-spos.MaxRoundsGap-1), wrk.BlockTracker().BlockBroadcastRound(header.Nonce)) -} - -func TestWorker_BroadcastUnnotarisedBlocksShouldBroadcast(t *testing.T) { - t.Parallel() - - headerHasBeenBroadcast := false - broadcastInRound := int64(0) - - wrk := *initWorker() - header := &block.Header{Nonce: 3} - roundIndex := int64(10) - blockTracker := &mock.BlocksTrackerMock{ - UnnotarisedBlocksCalled: func() []data.HeaderHandler { - headers := make([]data.HeaderHandler, 0) - headers = append(headers, header) - return headers - }, - BlockBroadcastRoundCalled: func(nonce uint64) int64 { - return broadcastInRound - }, - SetBlockBroadcastRoundCalled: func(nonce uint64, round int64) { - broadcastInRound = round - }, - } - - forkDetector := &mock.ForkDetectorMock{ - GetHighestFinalBlockNonceCalled: func() uint64 { - return header.Nonce - }, - } - - wrk.ConsensusState().RoundIndex = int64(roundIndex) - wrk.SetBlockTracker(blockTracker) - wrk.SetForkDetector(forkDetector) - bmm := &mock.BroadcastMessengerMock{ - BroadcastHeaderCalled: func(handler data.HeaderHandler) error { - headerHasBeenBroadcast = true - return nil - }, - } - wrk.SetBroadcastMessenger(bmm) - wrk.BlockTracker().SetBlockBroadcastRound(header.Nonce, int64(roundIndex-spos.MaxRoundsGap-1)) - - wrk.BroadcastUnnotarisedBlocks() - assert.True(t, headerHasBeenBroadcast) - assert.Equal(t, roundIndex, wrk.BlockTracker().BlockBroadcastRound(header.Nonce)) -} diff --git a/integrationTests/consensus/testInitializer.go b/integrationTests/consensus/testInitializer.go index dc6a9f1e788..37720d09687 100644 --- a/integrationTests/consensus/testInitializer.go +++ b/integrationTests/consensus/testInitializer.go @@ -330,13 +330,6 @@ func createConsensusOnlyNode( return nil } blockProcessor.Marshalizer = testMarshalizer - blockTracker := &mock.BlocksTrackerMock{ - UnnotarisedBlocksCalled: func() []data.HeaderHandler { - return make([]data.HeaderHandler, 0) - }, - SetBlockBroadcastRoundCalled: func(nonce uint64, round int64) { - }, - } blockChain := createTestBlockChain() header := &dataBlock.Header{ @@ -422,7 +415,6 @@ func createConsensusOnlyNode( node.WithDataStore(createTestStore()), node.WithResolversFinder(resolverFinder), node.WithConsensusType(consensusType), - node.WithBlockTracker(blockTracker), ) if err != nil { diff --git a/integrationTests/mock/blocksTrackerMock.go b/integrationTests/mock/blocksTrackerMock.go deleted file mode 100644 index 
864fadad627..00000000000 --- a/integrationTests/mock/blocksTrackerMock.go +++ /dev/null @@ -1,41 +0,0 @@ -package mock - -import ( - "github.com/ElrondNetwork/elrond-go/data" -) - -type BlocksTrackerMock struct { - UnnotarisedBlocksCalled func() []data.HeaderHandler - RemoveNotarisedBlocksCalled func(headerHandler data.HeaderHandler) error - AddBlockCalled func(headerHandler data.HeaderHandler) - SetBlockBroadcastRoundCalled func(nonce uint64, round int64) - BlockBroadcastRoundCalled func(nonce uint64) int64 -} - -func (btm *BlocksTrackerMock) UnnotarisedBlocks() []data.HeaderHandler { - return btm.UnnotarisedBlocksCalled() -} - -func (btm *BlocksTrackerMock) RemoveNotarisedBlocks(headerHandler data.HeaderHandler) error { - return btm.RemoveNotarisedBlocksCalled(headerHandler) -} - -func (btm *BlocksTrackerMock) AddBlock(headerHandler data.HeaderHandler) { - btm.AddBlockCalled(headerHandler) -} - -func (btm *BlocksTrackerMock) SetBlockBroadcastRound(nonce uint64, round int64) { - btm.SetBlockBroadcastRoundCalled(nonce, round) -} - -func (btm *BlocksTrackerMock) BlockBroadcastRound(nonce uint64) int64 { - return btm.BlockBroadcastRoundCalled(nonce) -} - -// IsInterfaceNil returns true if there is no value under the interface -func (btm *BlocksTrackerMock) IsInterfaceNil() bool { - if btm == nil { - return true - } - return false -} diff --git a/integrationTests/multiShard/smartContract/testInitilalizer.go b/integrationTests/multiShard/smartContract/testInitilalizer.go index 619c94b2236..4209789250d 100644 --- a/integrationTests/multiShard/smartContract/testInitilalizer.go +++ b/integrationTests/multiShard/smartContract/testInitilalizer.go @@ -480,17 +480,7 @@ func createNetNode( RequestHandler: requestHandler, Core: &mock.ServiceContainerMock{}, }, - DataPool: dPool, - BlocksTracker: &mock.BlocksTrackerMock{ - AddBlockCalled: func(headerHandler data.HeaderHandler) { - }, - RemoveNotarisedBlocksCalled: func(headerHandler data.HeaderHandler) error { - return nil - }, - UnnotarisedBlocksCalled: func() []data.HeaderHandler { - return make([]data.HeaderHandler, 0) - }, - }, + DataPool: dPool, TxCoordinator: tc, TxsPoolsCleaner: &mock.TxPoolsCleanerMock{}, } diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index 1e17d4da57f..b5a46498d13 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -42,7 +42,7 @@ import ( "github.com/ElrondNetwork/elrond-go/process/smartContract/hooks" "github.com/ElrondNetwork/elrond-go/process/transaction" "github.com/ElrondNetwork/elrond-go/sharding" - vmcommon "github.com/ElrondNetwork/elrond-vm-common" + "github.com/ElrondNetwork/elrond-vm-common" "github.com/pkg/errors" ) @@ -110,7 +110,6 @@ type TestProcessorNode struct { PreProcessorsContainer process.PreProcessorsContainer ForkDetector process.ForkDetector - BlockTracker process.BlocksTracker BlockProcessor process.BlockProcessor BroadcastMessenger consensus.BroadcastMessenger Bootstrapper process.Bootstrapper @@ -472,17 +471,6 @@ func (tpn *TestProcessorNode) initBlockProcessor() { }, } - tpn.BlockTracker = &mock.BlocksTrackerMock{ - AddBlockCalled: func(headerHandler data.HeaderHandler) { - }, - RemoveNotarisedBlocksCalled: func(headerHandler data.HeaderHandler) error { - return nil - }, - UnnotarisedBlocksCalled: func() []data.HeaderHandler { - return make([]data.HeaderHandler, 0) - }, - } - if tpn.ShardCoordinator.SelfId() == sharding.MetachainShardId { tpn.BlockProcessor, err = block.NewMetaProcessor( 
&mock.ServiceContainerMock{}, @@ -516,7 +504,6 @@ func (tpn *TestProcessorNode) initBlockProcessor() { Core: nil, }, DataPool: tpn.ShardDataPool, - BlocksTracker: tpn.BlockTracker, TxCoordinator: tpn.TxCoordinator, TxsPoolsCleaner: &mock.TxPoolsCleanerMock{}, } diff --git a/integrationTests/testSyncNode.go b/integrationTests/testSyncNode.go index 3b7cd83e355..6a751622219 100644 --- a/integrationTests/testSyncNode.go +++ b/integrationTests/testSyncNode.go @@ -5,7 +5,6 @@ import ( "fmt" "github.com/ElrondNetwork/elrond-go/consensus/spos/sposFactory" - "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/integrationTests/mock" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/block" @@ -80,17 +79,6 @@ func (tpn *TestProcessorNode) initTestNodeWithSync() { func (tpn *TestProcessorNode) initBlockProcessorWithSync() { var err error - tpn.BlockTracker = &mock.BlocksTrackerMock{ - AddBlockCalled: func(headerHandler data.HeaderHandler) { - }, - RemoveNotarisedBlocksCalled: func(headerHandler data.HeaderHandler) error { - return nil - }, - UnnotarisedBlocksCalled: func() []data.HeaderHandler { - return make([]data.HeaderHandler, 0) - }, - } - if tpn.ShardCoordinator.SelfId() == sharding.MetachainShardId { tpn.ForkDetector, _ = sync.NewMetaForkDetector(tpn.Rounder) tpn.BlockProcessor, err = block.NewMetaProcessor( @@ -127,7 +115,6 @@ func (tpn *TestProcessorNode) initBlockProcessorWithSync() { Core: nil, }, DataPool: tpn.ShardDataPool, - BlocksTracker: tpn.BlockTracker, TxCoordinator: tpn.TxCoordinator, TxsPoolsCleaner: &mock.TxPoolsCleanerMock{}, } diff --git a/node/defineOptions.go b/node/defineOptions.go index ed1a5b41eb3..0181766fb21 100644 --- a/node/defineOptions.go +++ b/node/defineOptions.go @@ -214,17 +214,6 @@ func WithBlockProcessor(blockProcessor process.BlockProcessor) Option { } } -// WithBlockTracker sets up the block tracker option for the Node -func WithBlockTracker(blockTracker process.BlocksTracker) Option { - return func(n *Node) error { - if blockTracker == nil || blockTracker.IsInterfaceNil() { - return ErrNilBlockTracker - } - n.blockTracker = blockTracker - return nil - } -} - // WithGenesisTime sets up the genesis time option for the Node func WithGenesisTime(genesisTime time.Time) Option { return func(n *Node) error { diff --git a/node/errors.go b/node/errors.go index 99f0548f19e..b8dd7743455 100644 --- a/node/errors.go +++ b/node/errors.go @@ -49,9 +49,6 @@ var ErrNilRounder = errors.New("trying to set nil rounder") // ErrNilBlockProcessor signals that a nil block processor has been provided var ErrNilBlockProcessor = errors.New("trying to set nil block processor") -// ErrNilBlockTracker signals that a nil block tracker has been provided -var ErrNilBlockTracker = errors.New("trying to set nil block tracker") - // ErrNilDataPool signals that a nil data pool has been provided var ErrNilDataPool = errors.New("trying to set nil data pool") diff --git a/node/node.go b/node/node.go index 2b704e22056..ff8ee4b750c 100644 --- a/node/node.go +++ b/node/node.go @@ -62,7 +62,6 @@ type Node struct { syncTimer ntp.SyncTimer rounder consensus.Rounder blockProcessor process.BlockProcessor - blockTracker process.BlocksTracker genesisTime time.Time accounts state.AccountsAdapter addrConverter state.AddressConverter @@ -256,7 +255,6 @@ func (n *Node) StartConsensus() error { worker, err := spos.NewWorker( consensusService, n.blockProcessor, - n.blockTracker, bootstrapper, broadcastMessenger, consensusState, @@ -280,7 +278,6 @@ 
func (n *Node) StartConsensus() error { consensusDataContainer, err := spos.NewConsensusCore( n.blkc, n.blockProcessor, - n.blockTracker, bootstrapper, broadcastMessenger, chronologyHandler, diff --git a/process/block/argProcessor.go b/process/block/argProcessor.go index 604adc57c8d..50cf20851b8 100644 --- a/process/block/argProcessor.go +++ b/process/block/argProcessor.go @@ -34,7 +34,6 @@ type ArgBaseProcessor struct { type ArgShardProcessor struct { *ArgBaseProcessor DataPool dataRetriever.PoolsHolder - BlocksTracker process.BlocksTracker TxCoordinator process.TransactionCoordinator TxsPoolsCleaner process.PoolsCleaner } diff --git a/process/block/baseProcess_test.go b/process/block/baseProcess_test.go index 39b272400e6..a4fa9a0f6ee 100644 --- a/process/block/baseProcess_test.go +++ b/process/block/baseProcess_test.go @@ -331,7 +331,6 @@ func CreateMockArguments() blproc.ArgShardProcessor { Core: &mock.ServiceContainerMock{}, }, DataPool: initDataPool([]byte("")), - BlocksTracker: &mock.BlocksTrackerMock{}, TxCoordinator: &mock.TransactionCoordinatorMock{}, TxsPoolsCleaner: &mock.TxPoolsCleanerMock{}, } diff --git a/process/block/export_test.go b/process/block/export_test.go index 196de84e15b..17ae9410ae3 100644 --- a/process/block/export_test.go +++ b/process/block/export_test.go @@ -76,7 +76,6 @@ func NewShardProcessorEmptyWith3shards(tdp dataRetriever.PoolsHolder, genesisBlo Core: &mock.ServiceContainerMock{}, }, DataPool: tdp, - BlocksTracker: &mock.BlocksTrackerMock{}, TxCoordinator: &mock.TransactionCoordinatorMock{}, TxsPoolsCleaner: &mock.TxPoolsCleanerMock{}, } diff --git a/process/block/shardblock.go b/process/block/shardblock.go index 53affc3d7a0..43ee3610050 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -24,7 +24,6 @@ const maxCleanTime = time.Second type shardProcessor struct { *baseProcessor dataPool dataRetriever.PoolsHolder - blocksTracker process.BlocksTracker metaBlockFinality int chRcvAllMetaHdrs chan bool @@ -66,9 +65,6 @@ func NewShardProcessor(arguments ArgShardProcessor) (*shardProcessor, error) { if arguments.DataPool == nil || arguments.DataPool.IsInterfaceNil() { return nil, process.ErrNilDataPoolHolder } - if arguments.BlocksTracker == nil || arguments.BlocksTracker.IsInterfaceNil() { - return nil, process.ErrNilBlocksTracker - } if arguments.RequestHandler == nil || arguments.RequestHandler.IsInterfaceNil() { return nil, process.ErrNilRequestHandler } @@ -108,7 +104,6 @@ func NewShardProcessor(arguments ArgShardProcessor) (*shardProcessor, error) { core: arguments.Core, baseProcessor: base, dataPool: arguments.DataPool, - blocksTracker: arguments.BlocksTracker, txCoordinator: arguments.TxCoordinator, txCounter: NewTransactionCounter(), txsPoolsCleaner: arguments.TxsPoolsCleaner, @@ -768,8 +763,6 @@ func (sp *shardProcessor) CommitBlock( header.Nonce, core.ToB64(headerHash))) - sp.blocksTracker.AddBlock(header) - errNotCritical = sp.txCoordinator.RemoveBlockDataFromPool(body) if errNotCritical != nil { log.Debug(errNotCritical.Error()) @@ -1039,7 +1032,6 @@ func (sp *shardProcessor) removeProcessedMetaBlocksFromPool(processedMetaHdrs [] } processed := 0 - unnotarized := len(sp.blocksTracker.UnnotarisedBlocks()) // processedMetaHdrs is also sorted for i := 0; i < len(processedMetaHdrs); i++ { hdr := processedMetaHdrs[i] @@ -1049,9 +1041,6 @@ func (sp *shardProcessor) removeProcessedMetaBlocksFromPool(processedMetaHdrs [] continue } - errNotCritical := sp.blocksTracker.RemoveNotarisedBlocks(hdr) - log.LogIfError(errNotCritical) - 
// metablock was processed and finalized buff, err := sp.marshalizer.Marshal(hdr) if err != nil { @@ -1089,11 +1078,6 @@ func (sp *shardProcessor) removeProcessedMetaBlocksFromPool(processedMetaHdrs [] log.Debug(fmt.Sprintf("%d meta blocks have been processed completely and removed from pool\n", processed)) } - notarized := unnotarized - len(sp.blocksTracker.UnnotarisedBlocks()) - if notarized > 0 { - log.Debug(fmt.Sprintf("%d shard blocks have been notarised by metachain\n", notarized)) - } - return nil } diff --git a/process/block/shardblock_test.go b/process/block/shardblock_test.go index f24661e06f0..55403e4cfca 100644 --- a/process/block/shardblock_test.go +++ b/process/block/shardblock_test.go @@ -175,17 +175,6 @@ func TestNewShardProcessor_NilForkDetectorShouldErr(t *testing.T) { assert.Nil(t, sp) } -func TestNewShardProcessor_NilBlocksTrackerShouldErr(t *testing.T) { - t.Parallel() - - arguments := CreateMockArguments() - arguments.BlocksTracker = nil - sp, err := blproc.NewShardProcessor(arguments) - - assert.Equal(t, process.ErrNilBlocksTracker, err) - assert.Nil(t, sp) -} - func TestNewShardProcessor_NilRequestTransactionHandlerShouldErr(t *testing.T) { t.Parallel() @@ -1592,13 +1581,6 @@ func TestShardProcessor_CommitBlockStorageFailsForHeaderShouldErr(t *testing.T) return 0 }, } - arguments.BlocksTracker = &mock.BlocksTrackerMock{ - AddBlockCalled: func(headerHandler data.HeaderHandler) { - }, - UnnotarisedBlocksCalled: func() []data.HeaderHandler { - return make([]data.HeaderHandler, 0) - }, - } sp, _ := blproc.NewShardProcessor(arguments) blkc, _ := blockchain.NewBlockChain( @@ -1663,13 +1645,6 @@ func TestShardProcessor_CommitBlockStorageFailsForBodyShouldWork(t *testing.T) { return 0 }, } - arguments.BlocksTracker = &mock.BlocksTrackerMock{ - AddBlockCalled: func(headerHandler data.HeaderHandler) { - }, - UnnotarisedBlocksCalled: func() []data.HeaderHandler { - return make([]data.HeaderHandler, 0) - }, - } sp, err := blproc.NewShardProcessor(arguments) assert.Nil(t, err) @@ -1908,13 +1883,6 @@ func TestShardProcessor_CommitBlockOkValsShouldWork(t *testing.T) { arguments.Hasher = hasher arguments.Accounts = accounts arguments.ForkDetector = fd - arguments.BlocksTracker = &mock.BlocksTrackerMock{ - AddBlockCalled: func(headerHandler data.HeaderHandler) { - }, - UnnotarisedBlocksCalled: func() []data.HeaderHandler { - return make([]data.HeaderHandler, 0) - }, - } sp, _ := blproc.NewShardProcessor(arguments) blkc := createTestBlockchain() @@ -2017,13 +1985,6 @@ func TestShardProcessor_CommitBlockCallsIndexerMethods(t *testing.T) { arguments.Hasher = hasher arguments.Accounts = accounts arguments.ForkDetector = fd - arguments.BlocksTracker = &mock.BlocksTrackerMock{ - AddBlockCalled: func(headerHandler data.HeaderHandler) { - }, - UnnotarisedBlocksCalled: func() []data.HeaderHandler { - return make([]data.HeaderHandler, 0) - }, - } arguments.TxCoordinator = &mock.TransactionCoordinatorMock{ GetAllCurrentUsedTxsCalled: func(blockType block.Type) map[string]data.TransactionHandler { switch blockType { @@ -2983,11 +2944,6 @@ func TestShardProcessor_GetProcessedMetaBlockFromPoolShouldWork(t *testing.T) { return 0 }, } - arguments.BlocksTracker = &mock.BlocksTrackerMock{ - RemoveNotarisedBlocksCalled: func(headerHandler data.HeaderHandler) error { - return nil - }, - } arguments.StartHeaders = createGenesisBlocks(shardCoordinator) bp, _ := blproc.NewShardProcessor(arguments) @@ -3211,14 +3167,6 @@ func TestShardProcessor_IsHdrConstructionValid(t *testing.T) { arguments.Hasher = 
hasher arguments.Marshalizer = marshalizer arguments.ShardCoordinator = mock.NewMultiShardsCoordinatorMock(shardNr) - arguments.BlocksTracker = &mock.BlocksTrackerMock{ - RemoveNotarisedBlocksCalled: func(headerHandler data.HeaderHandler) error { - return nil - }, - UnnotarisedBlocksCalled: func() []data.HeaderHandler { - return make([]data.HeaderHandler, 0) - }, - } arguments.StartHeaders = createGenesisBlocks(arguments.ShardCoordinator) sp, _ := blproc.NewShardProcessor(arguments) @@ -3328,14 +3276,6 @@ func TestShardProcessor_RemoveAndSaveLastNotarizedMetaHdrNoDstMB(t *testing.T) { arguments.Marshalizer = marshalizer arguments.ShardCoordinator = mock.NewMultiShardsCoordinatorMock(shardNr) arguments.ForkDetector = forkDetector - arguments.BlocksTracker = &mock.BlocksTrackerMock{ - RemoveNotarisedBlocksCalled: func(headerHandler data.HeaderHandler) error { - return nil - }, - UnnotarisedBlocksCalled: func() []data.HeaderHandler { - return make([]data.HeaderHandler, 0) - }, - } arguments.StartHeaders = createGenesisBlocks(arguments.ShardCoordinator) sp, _ := blproc.NewShardProcessor(arguments) @@ -3484,14 +3424,6 @@ func TestShardProcessor_RemoveAndSaveLastNotarizedMetaHdrNotAllMBFinished(t *tes arguments.Marshalizer = marshalizer arguments.ShardCoordinator = mock.NewMultiShardsCoordinatorMock(shardNr) arguments.ForkDetector = forkDetector - arguments.BlocksTracker = &mock.BlocksTrackerMock{ - RemoveNotarisedBlocksCalled: func(headerHandler data.HeaderHandler) error { - return nil - }, - UnnotarisedBlocksCalled: func() []data.HeaderHandler { - return make([]data.HeaderHandler, 0) - }, - } arguments.StartHeaders = createGenesisBlocks(arguments.ShardCoordinator) sp, _ := blproc.NewShardProcessor(arguments) @@ -3619,14 +3551,6 @@ func TestShardProcessor_RemoveAndSaveLastNotarizedMetaHdrAllMBFinished(t *testin arguments.Marshalizer = marshalizer arguments.ShardCoordinator = mock.NewMultiShardsCoordinatorMock(shardNr) arguments.ForkDetector = forkDetector - arguments.BlocksTracker = &mock.BlocksTrackerMock{ - RemoveNotarisedBlocksCalled: func(headerHandler data.HeaderHandler) error { - return nil - }, - UnnotarisedBlocksCalled: func() []data.HeaderHandler { - return make([]data.HeaderHandler, 0) - }, - } arguments.StartHeaders = createGenesisBlocks(arguments.ShardCoordinator) sp, _ := blproc.NewShardProcessor(arguments) diff --git a/process/interface.go b/process/interface.go index afa4665795c..4448e726ff5 100644 --- a/process/interface.go +++ b/process/interface.go @@ -15,7 +15,7 @@ import ( "github.com/ElrondNetwork/elrond-go/process/smartContract/hooks" "github.com/ElrondNetwork/elrond-go/sharding" "github.com/ElrondNetwork/elrond-go/storage" - vmcommon "github.com/ElrondNetwork/elrond-vm-common" + "github.com/ElrondNetwork/elrond-vm-common" ) // TransactionProcessor is the main interface for transaction execution engine @@ -341,16 +341,6 @@ type DataPacker interface { IsInterfaceNil() bool } -// BlocksTracker defines the functionality to track all the notarised blocks -type BlocksTracker interface { - UnnotarisedBlocks() []data.HeaderHandler - RemoveNotarisedBlocks(headerHandler data.HeaderHandler) error - AddBlock(headerHandler data.HeaderHandler) - SetBlockBroadcastRound(nonce uint64, round int64) - BlockBroadcastRound(nonce uint64) int64 - IsInterfaceNil() bool -} - // RequestHandler defines the methods through which request to data can be made type RequestHandler interface { RequestHeaderByNonce(shardId uint32, nonce uint64) diff --git a/process/mock/blocksTrackerMock.go 
b/process/mock/blocksTrackerMock.go deleted file mode 100644 index 864fadad627..00000000000 --- a/process/mock/blocksTrackerMock.go +++ /dev/null @@ -1,41 +0,0 @@ -package mock - -import ( - "github.com/ElrondNetwork/elrond-go/data" -) - -type BlocksTrackerMock struct { - UnnotarisedBlocksCalled func() []data.HeaderHandler - RemoveNotarisedBlocksCalled func(headerHandler data.HeaderHandler) error - AddBlockCalled func(headerHandler data.HeaderHandler) - SetBlockBroadcastRoundCalled func(nonce uint64, round int64) - BlockBroadcastRoundCalled func(nonce uint64) int64 -} - -func (btm *BlocksTrackerMock) UnnotarisedBlocks() []data.HeaderHandler { - return btm.UnnotarisedBlocksCalled() -} - -func (btm *BlocksTrackerMock) RemoveNotarisedBlocks(headerHandler data.HeaderHandler) error { - return btm.RemoveNotarisedBlocksCalled(headerHandler) -} - -func (btm *BlocksTrackerMock) AddBlock(headerHandler data.HeaderHandler) { - btm.AddBlockCalled(headerHandler) -} - -func (btm *BlocksTrackerMock) SetBlockBroadcastRound(nonce uint64, round int64) { - btm.SetBlockBroadcastRoundCalled(nonce, round) -} - -func (btm *BlocksTrackerMock) BlockBroadcastRound(nonce uint64) int64 { - return btm.BlockBroadcastRoundCalled(nonce) -} - -// IsInterfaceNil returns true if there is no value under the interface -func (btm *BlocksTrackerMock) IsInterfaceNil() bool { - if btm == nil { - return true - } - return false -} diff --git a/process/track/metaBlock.go b/process/track/metaBlock.go deleted file mode 100644 index 2df33492a8c..00000000000 --- a/process/track/metaBlock.go +++ /dev/null @@ -1,46 +0,0 @@ -package track - -import ( - "github.com/ElrondNetwork/elrond-go/data" -) - -// metaBlockTracker implements NotarisedBlocksTracker interface which tracks notarised blocks -type metaBlockTracker struct { -} - -// NewMetaBlockTracker creates a new metaBlockTracker object -func NewMetaBlockTracker() (*metaBlockTracker, error) { - mbt := metaBlockTracker{} - return &mbt, nil -} - -// UnnotarisedBlocks gets all the blocks which are not notarised yet -func (mbt *metaBlockTracker) UnnotarisedBlocks() []data.HeaderHandler { - return make([]data.HeaderHandler, 0) -} - -// RemoveNotarisedBlocks removes all the blocks which already have been notarised -func (mbt *metaBlockTracker) RemoveNotarisedBlocks(headerHandler data.HeaderHandler) error { - return nil -} - -// AddBlock adds new block to be tracked -func (mbt *metaBlockTracker) AddBlock(headerHandler data.HeaderHandler) { -} - -// SetBlockBroadcastRound sets the round in which the block with the given nonce has been broadcast -func (mbt *metaBlockTracker) SetBlockBroadcastRound(nonce uint64, round int64) { -} - -// BlockBroadcastRound gets the round in which the block with given nonce has been broadcast -func (mbt *metaBlockTracker) BlockBroadcastRound(nonce uint64) int64 { - return 0 -} - -// IsInterfaceNil returns true if there is no value under the interface -func (mbt *metaBlockTracker) IsInterfaceNil() bool { - if mbt == nil { - return true - } - return false -} diff --git a/process/track/metaBlock_test.go b/process/track/metaBlock_test.go deleted file mode 100644 index ec9d21fabe2..00000000000 --- a/process/track/metaBlock_test.go +++ /dev/null @@ -1,41 +0,0 @@ -package track_test - -import ( - "testing" - - "github.com/ElrondNetwork/elrond-go/data" - "github.com/ElrondNetwork/elrond-go/data/block" - "github.com/ElrondNetwork/elrond-go/process/track" - "github.com/stretchr/testify/assert" -) - -func TestMetaBlockTracker_NewMetaBlockTrackerShouldWork(t *testing.T) { - 
t.Parallel() - - mbt, err := track.NewMetaBlockTracker() - assert.Nil(t, err) - assert.NotNil(t, mbt) -} - -func TestMetaBlockTracker_UnnotarisedBlocksShouldWork(t *testing.T) { - t.Parallel() - - mbt, _ := track.NewMetaBlockTracker() - headers := mbt.UnnotarisedBlocks() - assert.Equal(t, make([]data.HeaderHandler, 0), headers) -} - -func TestMetaBlockTracker_BlockBroadcastRoundShouldWork(t *testing.T) { - t.Parallel() - - mbt, _ := track.NewMetaBlockTracker() - assert.Equal(t, int64(0), mbt.BlockBroadcastRound(1)) -} - -func TestMetaBlockTracker_RemoveNotarisedBlocksShouldWork(t *testing.T) { - t.Parallel() - - mbt, _ := track.NewMetaBlockTracker() - err := mbt.RemoveNotarisedBlocks(&block.MetaBlock{}) - assert.Nil(t, err) -} diff --git a/process/track/shardBlock.go b/process/track/shardBlock.go deleted file mode 100644 index 8eb0d9ecdc1..00000000000 --- a/process/track/shardBlock.go +++ /dev/null @@ -1,170 +0,0 @@ -package track - -import ( - "fmt" - "sync" - - "github.com/ElrondNetwork/elrond-go/core" - "github.com/ElrondNetwork/elrond-go/core/logger" - "github.com/ElrondNetwork/elrond-go/data" - "github.com/ElrondNetwork/elrond-go/data/block" - "github.com/ElrondNetwork/elrond-go/dataRetriever" - "github.com/ElrondNetwork/elrond-go/marshal" - "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/sharding" -) - -var log = logger.DefaultLogger() - -type headerInfo struct { - header data.HeaderHandler - broadcastInRound int64 -} - -// shardBlockTracker implements NotarisedBlocksTracker interface which tracks notarised blocks -type shardBlockTracker struct { - dataPool dataRetriever.PoolsHolder - marshalizer marshal.Marshalizer - shardCoordinator sharding.Coordinator - store dataRetriever.StorageService - - mutUnnotarisedHeaders sync.RWMutex - unnotarisedHeaders map[uint64]*headerInfo -} - -// NewShardBlockTracker creates a new shardBlockTracker object -func NewShardBlockTracker( - dataPool dataRetriever.PoolsHolder, - marshalizer marshal.Marshalizer, - shardCoordinator sharding.Coordinator, - store dataRetriever.StorageService, -) (*shardBlockTracker, error) { - err := checkTrackerNilParameters( - dataPool, - marshalizer, - shardCoordinator, - store) - if err != nil { - return nil, err - } - - sbt := shardBlockTracker{ - dataPool: dataPool, - marshalizer: marshalizer, - shardCoordinator: shardCoordinator, - store: store, - } - - sbt.unnotarisedHeaders = make(map[uint64]*headerInfo) - - return &sbt, nil -} - -// checkTrackerNilParameters will check the imput parameters for nil values -func checkTrackerNilParameters( - dataPool dataRetriever.PoolsHolder, - marshalizer marshal.Marshalizer, - shardCoordinator sharding.Coordinator, - store dataRetriever.StorageService, -) error { - if dataPool == nil || dataPool.IsInterfaceNil() { - return process.ErrNilDataPoolHolder - } - if marshalizer == nil || marshalizer.IsInterfaceNil() { - return process.ErrNilMarshalizer - } - if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { - return process.ErrNilShardCoordinator - } - if store == nil || store.IsInterfaceNil() { - return process.ErrNilStorage - } - - return nil -} - -// AddBlock adds new block to be tracked -func (sbt *shardBlockTracker) AddBlock(headerHandler data.HeaderHandler) { - sbt.mutUnnotarisedHeaders.Lock() - sbt.unnotarisedHeaders[headerHandler.GetNonce()] = &headerInfo{header: headerHandler, broadcastInRound: 0} - sbt.mutUnnotarisedHeaders.Unlock() -} - -// RemoveNotarisedBlocks removes all the blocks which already have been notarised -func 
(sbt *shardBlockTracker) RemoveNotarisedBlocks(headerHandler data.HeaderHandler) error { - metaBlock, ok := headerHandler.(*block.MetaBlock) - if !ok { - return process.ErrWrongTypeAssertion - } - - for _, shardData := range metaBlock.ShardInfo { - if shardData.ShardId != sbt.shardCoordinator.SelfId() { - continue - } - - header, err := process.GetShardHeaderFromPool( - shardData.HeaderHash, - sbt.dataPool.Headers()) - if err != nil { - continue - } - - sbt.mutUnnotarisedHeaders.Lock() - delete(sbt.unnotarisedHeaders, header.Nonce) - sbt.mutUnnotarisedHeaders.Unlock() - - log.Debug(fmt.Sprintf("shardBlock with nonce %d and hash %s has been notarised by metachain\n", - header.GetNonce(), - core.ToB64(shardData.HeaderHash))) - } - - return nil -} - -// UnnotarisedBlocks gets all the blocks which are not notarised yet -func (sbt *shardBlockTracker) UnnotarisedBlocks() []data.HeaderHandler { - sbt.mutUnnotarisedHeaders.RLock() - - hdrs := make([]data.HeaderHandler, 0) - for _, hInfo := range sbt.unnotarisedHeaders { - hdrs = append(hdrs, hInfo.header) - } - - sbt.mutUnnotarisedHeaders.RUnlock() - - return hdrs -} - -// SetBlockBroadcastRound sets the round in which the block with the given nonce has been broadcast -func (sbt *shardBlockTracker) SetBlockBroadcastRound(nonce uint64, round int64) { - sbt.mutUnnotarisedHeaders.Lock() - - hInfo := sbt.unnotarisedHeaders[nonce] - if hInfo != nil { - hInfo.broadcastInRound = round - sbt.unnotarisedHeaders[nonce] = hInfo - } - - sbt.mutUnnotarisedHeaders.Unlock() -} - -// BlockBroadcastRound gets the round in which the block with given nonce has been broadcast -func (sbt *shardBlockTracker) BlockBroadcastRound(nonce uint64) int64 { - sbt.mutUnnotarisedHeaders.RLock() - hInfo := sbt.unnotarisedHeaders[nonce] - sbt.mutUnnotarisedHeaders.RUnlock() - - if hInfo == nil { - return 0 - } - - return hInfo.broadcastInRound -} - -// IsInterfaceNil returns true if there is no value under the interface -func (sbt *shardBlockTracker) IsInterfaceNil() bool { - if sbt == nil { - return true - } - return false -} diff --git a/process/track/shardBlock_test.go b/process/track/shardBlock_test.go deleted file mode 100644 index cb51c1c8802..00000000000 --- a/process/track/shardBlock_test.go +++ /dev/null @@ -1,206 +0,0 @@ -package track_test - -import ( - "testing" - - "github.com/ElrondNetwork/elrond-go/data/block" - "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/process/mock" - "github.com/ElrondNetwork/elrond-go/process/track" - "github.com/ElrondNetwork/elrond-go/storage" - "github.com/stretchr/testify/assert" -) - -func TestNewShardBlockTracker_NilDataPoolShouldErr(t *testing.T) { - t.Parallel() - - marshalizer := &mock.MarshalizerMock{} - shardCoordinator := mock.NewOneShardCoordinatorMock() - store := &mock.ChainStorerMock{} - - mbt, err := track.NewShardBlockTracker(nil, marshalizer, shardCoordinator, store) - assert.Nil(t, mbt) - assert.Equal(t, process.ErrNilDataPoolHolder, err) -} - -func TestNewShardBlockTracker_NilMarshalizerShouldErr(t *testing.T) { - t.Parallel() - - pools := &mock.PoolsHolderStub{} - shardCoordinator := mock.NewOneShardCoordinatorMock() - store := &mock.ChainStorerMock{} - - mbt, err := track.NewShardBlockTracker(pools, nil, shardCoordinator, store) - assert.Nil(t, mbt) - assert.Equal(t, process.ErrNilMarshalizer, err) -} - -func TestNewShardBlockTracker_NilShardCoordinatorShouldErr(t *testing.T) { - t.Parallel() - - pools := &mock.PoolsHolderStub{} - marshalizer := &mock.MarshalizerMock{} - store := 
&mock.ChainStorerMock{} - - mbt, err := track.NewShardBlockTracker(pools, marshalizer, nil, store) - assert.Nil(t, mbt) - assert.Equal(t, process.ErrNilShardCoordinator, err) -} - -func TestNewShardBlockTracker_NilStoreShouldErr(t *testing.T) { - t.Parallel() - - pools := &mock.PoolsHolderStub{} - marshalizer := &mock.MarshalizerMock{} - shardCoordinator := mock.NewOneShardCoordinatorMock() - - mbt, err := track.NewShardBlockTracker(pools, marshalizer, shardCoordinator, nil) - assert.Nil(t, mbt) - assert.Equal(t, process.ErrNilStorage, err) -} - -func TestNewShardBlockTracker_OkValsShouldWork(t *testing.T) { - t.Parallel() - - pools := &mock.PoolsHolderStub{} - marshalizer := &mock.MarshalizerMock{} - shardCoordinator := mock.NewOneShardCoordinatorMock() - store := &mock.ChainStorerMock{} - - mbt, err := track.NewShardBlockTracker(pools, marshalizer, shardCoordinator, store) - assert.Nil(t, err) - assert.NotNil(t, mbt) -} - -func TestShardBlockTracker_AddBlockShouldWork(t *testing.T) { - t.Parallel() - - pools := &mock.PoolsHolderStub{} - marshalizer := &mock.MarshalizerMock{} - shardCoordinator := mock.NewOneShardCoordinatorMock() - store := &mock.ChainStorerMock{} - - mbt, _ := track.NewShardBlockTracker(pools, marshalizer, shardCoordinator, store) - hdr1 := &block.Header{Nonce: 2} - mbt.AddBlock(hdr1) - hdr2 := &block.Header{Nonce: 3} - mbt.AddBlock(hdr2) - headers := mbt.UnnotarisedBlocks() - assert.Equal(t, 2, len(headers)) -} - -func TestShardBlockTracker_SetBlockBroadcastRoundShoudNotSetRoundWhenNonceDoesNotExist(t *testing.T) { - t.Parallel() - - pools := &mock.PoolsHolderStub{} - marshalizer := &mock.MarshalizerMock{} - shardCoordinator := mock.NewOneShardCoordinatorMock() - store := &mock.ChainStorerMock{} - - mbt, _ := track.NewShardBlockTracker(pools, marshalizer, shardCoordinator, store) - hdr := &block.Header{Nonce: 2} - mbt.AddBlock(hdr) - mbt.SetBlockBroadcastRound(1, 10) - assert.Equal(t, int64(0), mbt.BlockBroadcastRound(1)) -} - -func TestShardBlockTracker_SetBlockBroadcastRoundShoudSetRound(t *testing.T) { - t.Parallel() - - pools := &mock.PoolsHolderStub{} - marshalizer := &mock.MarshalizerMock{} - shardCoordinator := mock.NewOneShardCoordinatorMock() - store := &mock.ChainStorerMock{} - - mbt, _ := track.NewShardBlockTracker(pools, marshalizer, shardCoordinator, store) - hdr := &block.Header{Nonce: 2} - mbt.AddBlock(hdr) - mbt.SetBlockBroadcastRound(2, 10) - assert.Equal(t, int64(10), mbt.BlockBroadcastRound(2)) -} - -func TestShardBlockTracker_RemoveNotarisedBlocksShouldErrWrongTypeAssertion(t *testing.T) { - t.Parallel() - - pools := &mock.PoolsHolderStub{} - marshalizer := &mock.MarshalizerMock{} - shardCoordinator := mock.NewOneShardCoordinatorMock() - store := &mock.ChainStorerMock{} - - mbt, _ := track.NewShardBlockTracker(pools, marshalizer, shardCoordinator, store) - err := mbt.RemoveNotarisedBlocks(nil) - assert.Equal(t, process.ErrWrongTypeAssertion, err) -} - -func TestShardBlockTracker_RemoveNotarisedBlocksShouldNotRemoveIfShardIdIsNotSelf(t *testing.T) { - t.Parallel() - - pools := &mock.PoolsHolderStub{} - marshalizer := &mock.MarshalizerMock{} - shardCoordinator := mock.NewOneShardCoordinatorMock() - store := &mock.ChainStorerMock{} - - mbt, _ := track.NewShardBlockTracker(pools, marshalizer, shardCoordinator, store) - metaBlock := &block.MetaBlock{} - shardInfo := make([]block.ShardData, 0) - sd := block.ShardData{ShardId: 1, HeaderHash: []byte("1")} - shardInfo = append(shardInfo, sd) - metaBlock.ShardInfo = shardInfo - header := &block.Header{Nonce: 
1} - mbt.AddBlock(header) - _ = mbt.RemoveNotarisedBlocks(metaBlock) - assert.Equal(t, 1, len(mbt.UnnotarisedBlocks())) -} - -func TestShardBlockTracker_RemoveNotarisedBlocksShouldNotRemoveIfGetShardHeaderErr(t *testing.T) { - t.Parallel() - - pools := &mock.PoolsHolderStub{ - HeadersCalled: func() storage.Cacher { - return nil - }, - } - marshalizer := &mock.MarshalizerMock{} - shardCoordinator := mock.NewOneShardCoordinatorMock() - store := &mock.ChainStorerMock{} - - mbt, _ := track.NewShardBlockTracker(pools, marshalizer, shardCoordinator, store) - metaBlock := &block.MetaBlock{} - shardInfo := make([]block.ShardData, 0) - sd := block.ShardData{ShardId: 0, HeaderHash: []byte("1")} - shardInfo = append(shardInfo, sd) - metaBlock.ShardInfo = shardInfo - header := &block.Header{Nonce: 1} - mbt.AddBlock(header) - _ = mbt.RemoveNotarisedBlocks(metaBlock) - assert.Equal(t, 1, len(mbt.UnnotarisedBlocks())) -} - -func TestShardBlockTracker_RemoveNotarisedBlocksShouldWork(t *testing.T) { - t.Parallel() - - header := &block.Header{Nonce: 1} - - pools := &mock.PoolsHolderStub{ - HeadersCalled: func() storage.Cacher { - return &mock.CacherStub{ - PeekCalled: func(key []byte) (value interface{}, ok bool) { - return header, true - }, - } - }, - } - marshalizer := &mock.MarshalizerMock{} - shardCoordinator := mock.NewOneShardCoordinatorMock() - store := &mock.ChainStorerMock{} - - mbt, _ := track.NewShardBlockTracker(pools, marshalizer, shardCoordinator, store) - metaBlock := &block.MetaBlock{} - shardInfo := make([]block.ShardData, 0) - sd := block.ShardData{ShardId: 0, HeaderHash: []byte("1")} - shardInfo = append(shardInfo, sd) - metaBlock.ShardInfo = shardInfo - mbt.AddBlock(header) - _ = mbt.RemoveNotarisedBlocks(metaBlock) - assert.Equal(t, 0, len(mbt.UnnotarisedBlocks())) -} From 1ecb4d961be25f652f82a43d542cc2e153967476 Mon Sep 17 00:00:00 2001 From: iulianpascalau Date: Thu, 3 Oct 2019 12:01:35 +0300 Subject: [PATCH 189/234] adding tx fee handler on tx interceptor - work in progress --- process/errors.go | 6 +++ process/mock/feeHandlerMock.go | 27 ------------ process/mock/feeHandlerStub.go | 27 ++++++++++++ process/transaction/interceptedTransaction.go | 24 ++++++++-- .../interceptedTransaction_test.go | 44 ++++++++++++++++++- 5 files changed, 96 insertions(+), 32 deletions(-) delete mode 100644 process/mock/feeHandlerMock.go create mode 100644 process/mock/feeHandlerStub.go diff --git a/process/errors.go b/process/errors.go index eaa43ce47dc..782bac3ff2b 100644 --- a/process/errors.go +++ b/process/errors.go @@ -492,3 +492,9 @@ var ErrNilThrottler = errors.New("nil throttler") // ErrSystemBusy signals that the system is busy var ErrSystemBusy = errors.New("system busy") + +// ErrInsufficientGasPriceInTx signals that a lower gas price than required was provided +var ErrInsufficientGasPriceInTx = errors.New("insufficient gas price in tx") + +// ErrInsufficientGasLimitInTx signals that a lower gas limit than required was provided +var ErrInsufficientGasLimitInTx = errors.New("insufficient gas limit in tx") diff --git a/process/mock/feeHandlerMock.go b/process/mock/feeHandlerMock.go deleted file mode 100644 index 881d128d98b..00000000000 --- a/process/mock/feeHandlerMock.go +++ /dev/null @@ -1,27 +0,0 @@ -package mock - -type FeeHandlerMock struct { - MinGasPriceCalled func() uint64 - MinGasLimitForTxCalled func() uint64 - MinTxFeeCalled func() uint64 -} - -func (fhm *FeeHandlerMock) MinGasPrice() uint64 { - return fhm.MinGasPriceCalled() -} - -func (fhm *FeeHandlerMock) MinGasLimitForTx() 
uint64 { - return fhm.MinGasLimitForTxCalled() -} - -func (fhm *FeeHandlerMock) MinTxFee() uint64 { - return fhm.MinTxFeeCalled() -} - -// IsInterfaceNil returns true if there is no value under the interface -func (fhm *FeeHandlerMock) IsInterfaceNil() bool { - if fhm == nil { - return true - } - return false -} diff --git a/process/mock/feeHandlerStub.go b/process/mock/feeHandlerStub.go new file mode 100644 index 00000000000..a2a1fe31c87 --- /dev/null +++ b/process/mock/feeHandlerStub.go @@ -0,0 +1,27 @@ +package mock + +type FeeHandlerStub struct { + MinGasPriceCalled func() uint64 + MinGasLimitForTxCalled func() uint64 + MinTxFeeCalled func() uint64 +} + +func (fhs *FeeHandlerStub) MinGasPrice() uint64 { + return fhs.MinGasPriceCalled() +} + +func (fhs *FeeHandlerStub) MinGasLimitForTx() uint64 { + return fhs.MinGasLimitForTxCalled() +} + +func (fhs *FeeHandlerStub) MinTxFee() uint64 { + return fhs.MinTxFeeCalled() +} + +// IsInterfaceNil returns true if there is no value under the interface +func (fhs *FeeHandlerStub) IsInterfaceNil() bool { + if fhs == nil { + return true + } + return false +} diff --git a/process/transaction/interceptedTransaction.go b/process/transaction/interceptedTransaction.go index 46710308707..0a8f001299e 100644 --- a/process/transaction/interceptedTransaction.go +++ b/process/transaction/interceptedTransaction.go @@ -27,6 +27,7 @@ type InterceptedTransaction struct { sndShard uint32 isAddressedToOtherShards bool sndAddr state.AddressContainer + feeHandler process.FeeHandler } // NewInterceptedTransaction returns a new instance of InterceptedTransaction @@ -38,6 +39,7 @@ func NewInterceptedTransaction( signer crypto.SingleSigner, addrConv state.AddressConverter, coordinator sharding.Coordinator, + feeHandler process.FeeHandler, ) (*InterceptedTransaction, error) { if txBuff == nil { @@ -61,6 +63,9 @@ func NewInterceptedTransaction( if coordinator == nil || coordinator.IsInterfaceNil() { return nil, process.ErrNilShardCoordinator } + if feeHandler == nil || feeHandler.IsInterfaceNil() { + return nil, process.ErrNilEconomicsFeeHandler + } tx := &transaction.Transaction{} err := marshalizer.Unmarshal(tx, txBuff) @@ -76,6 +81,7 @@ func NewInterceptedTransaction( addrConv: addrConv, keyGen: keyGen, coordinator: coordinator, + feeHandler: feeHandler, } txBuffWithoutSig, err := inTx.processFields(txBuff) @@ -133,23 +139,33 @@ func (inTx *InterceptedTransaction) integrity() error { if inTx.tx.Signature == nil { return process.ErrNilSignature } - if inTx.tx.RcvAddr == nil { return process.ErrNilRcvAddr } - if inTx.tx.SndAddr == nil { return process.ErrNilSndAddr } - if inTx.tx.Value == nil { return process.ErrNilValue } - if inTx.tx.Value.Cmp(big.NewInt(0)) < 0 { return process.ErrNegativeValue } + return inTx.checkFeeValues() +} + +func (inTx *InterceptedTransaction) checkFeeValues() error { + isLowerGasLimitInTx := inTx.tx.GasLimit < inTx.feeHandler.MinGasLimitForTx() + if isLowerGasLimitInTx { + return process.ErrInsufficientGasPriceInTx + } + + isLowerGasPrice := inTx.tx.GasPrice < inTx.feeHandler.MinGasPrice() + if isLowerGasPrice { + return process.ErrInsufficientGasLimitInTx + } + return nil } diff --git a/process/transaction/interceptedTransaction_test.go b/process/transaction/interceptedTransaction_test.go index 6040c5f6f3f..cba26c75606 100644 --- a/process/transaction/interceptedTransaction_test.go +++ b/process/transaction/interceptedTransaction_test.go @@ -47,7 +47,20 @@ func createKeyGenMock() crypto.KeyGenerator { } } -func
createInterceptedTxFromPlainTx(tx *dataTransaction.Transaction) (*transaction.InterceptedTransaction, error) { +func createTxFeeHandler(gasPrice uint64, gasLimit uint64) process.FeeHandler { + feeHandler := &mock.FeeHandlerStub{ + MinGasPriceCalled: func() uint64 { + return gasPrice + }, + MinGasLimitForTxCalled: func() uint64 { + return gasLimit + }, + } + + return feeHandler +} + +func createInterceptedTxFromPlainTx(tx *dataTransaction.Transaction, txFeeHandler process.FeeHandler) (*transaction.InterceptedTransaction, error) { marshalizer := &mock.MarshalizerMock{} txBuff, _ := marshalizer.Marshal(tx) @@ -76,6 +89,7 @@ func createInterceptedTxFromPlainTx(tx *dataTransaction.Transaction) (*transacti }, }, shardCoordinator, + txFeeHandler, ) } @@ -90,6 +104,7 @@ func TestNewInterceptedTransaction_NilBufferShouldErr(t *testing.T) { &mock.SignerMock{}, &mock.AddressConverterMock{}, mock.NewOneShardCoordinatorMock(), + &mock.FeeHandlerStub{}, ) assert.Nil(t, txi) @@ -107,6 +122,7 @@ func TestNewInterceptedTransaction_NilMarshalizerShouldErr(t *testing.T) { &mock.SignerMock{}, &mock.AddressConverterMock{}, mock.NewOneShardCoordinatorMock(), + &mock.FeeHandlerStub{}, ) assert.Nil(t, txi) @@ -124,6 +140,7 @@ func TestNewInterceptedTransaction_NilHasherShouldErr(t *testing.T) { &mock.SignerMock{}, &mock.AddressConverterMock{}, mock.NewOneShardCoordinatorMock(), + &mock.FeeHandlerStub{}, ) assert.Nil(t, txi) @@ -141,6 +158,7 @@ func TestNewInterceptedTransaction_NilKeyGenShouldErr(t *testing.T) { &mock.SignerMock{}, &mock.AddressConverterMock{}, mock.NewOneShardCoordinatorMock(), + &mock.FeeHandlerStub{}, ) assert.Nil(t, txi) @@ -158,6 +176,7 @@ func TestNewInterceptedTransaction_NilSignerShouldErr(t *testing.T) { nil, &mock.AddressConverterMock{}, mock.NewOneShardCoordinatorMock(), + &mock.FeeHandlerStub{}, ) assert.Nil(t, txi) @@ -175,6 +194,7 @@ func TestNewInterceptedTransaction_NilAddressConverterShouldErr(t *testing.T) { &mock.SignerMock{}, nil, mock.NewOneShardCoordinatorMock(), + &mock.FeeHandlerStub{}, ) assert.Nil(t, txi) @@ -192,12 +212,31 @@ func TestNewInterceptedTransaction_NilCoordinatorShouldErr(t *testing.T) { &mock.SignerMock{}, &mock.AddressConverterMock{}, nil, + &mock.FeeHandlerStub{}, ) assert.Nil(t, txi) assert.Equal(t, process.ErrNilShardCoordinator, err) } +func TestNewInterceptedTransaction_NilFeeHandlerShouldErr(t *testing.T) { + t.Parallel() + + txi, err := transaction.NewInterceptedTransaction( + make([]byte, 0), + &mock.MarshalizerMock{}, + mock.HasherMock{}, + &mock.SingleSignKeyGenMock{}, + &mock.SignerMock{}, + &mock.AddressConverterMock{}, + mock.NewOneShardCoordinatorMock(), + nil, + ) + + assert.Nil(t, txi) + assert.Equal(t, process.ErrNilEconomicsFeeHandler, err) +} + func TestNewInterceptedTransaction_UnmarshalingTxFailsShouldErr(t *testing.T) { t.Parallel() @@ -215,6 +254,7 @@ func TestNewInterceptedTransaction_UnmarshalingTxFailsShouldErr(t *testing.T) { &mock.SignerMock{}, &mock.AddressConverterMock{}, mock.NewOneShardCoordinatorMock(), + &mock.FeeHandlerStub{}, ) assert.Nil(t, txi) @@ -241,6 +281,7 @@ func TestNewInterceptedTransaction_MarshalingCopiedTxFailsShouldErr(t *testing.T &mock.SignerMock{}, &mock.AddressConverterMock{}, mock.NewOneShardCoordinatorMock(), + &mock.FeeHandlerStub{}, ) assert.Nil(t, txi) @@ -262,6 +303,7 @@ func TestNewInterceptedTransaction_AddrConvFailsShouldErr(t *testing.T) { }, }, mock.NewOneShardCoordinatorMock(), + &mock.FeeHandlerStub{}, ) assert.Nil(t, txi) From 6ff2a6e9297e0195606af8d5c891ed8150c40fd0 Mon Sep 17 00:00:00 
2001 From: iulianpascalau Date: Thu, 3 Oct 2019 13:04:14 +0300 Subject: [PATCH 190/234] finished implementing - fixing work in progress --- cmd/node/factory/structs.go | 14 +++- .../shard/interceptorsContainerFactory.go | 7 ++ .../interceptorsContainerFactory_test.go | 49 ++++++++++++ .../preProcessorsContainerFactory_test.go | 37 ++++----- process/transaction/interceptedTransaction.go | 4 +- .../interceptedTransaction_test.go | 77 +++++++++++++++---- process/transaction/interceptor.go | 10 ++- process/transaction/interceptor_test.go | 49 ++++++++++++ process/transaction/process_test.go | 4 +- 9 files changed, 214 insertions(+), 37 deletions(-) diff --git a/cmd/node/factory/structs.go b/cmd/node/factory/structs.go index 28f4705571d..f451dc836e0 100644 --- a/cmd/node/factory/structs.go +++ b/cmd/node/factory/structs.go @@ -460,7 +460,14 @@ func NewProcessComponentsFactoryArgs( // ProcessComponentsFactory creates the process components func ProcessComponentsFactory(args *processComponentsFactoryArgs) (*Process, error) { interceptorContainerFactory, resolversContainerFactory, err := newInterceptorAndResolverContainerFactory( - args.shardCoordinator, args.nodesCoordinator, args.data, args.core, args.crypto, args.state, args.network) + args.shardCoordinator, + args.nodesCoordinator, + args.data, args.core, + args.crypto, + args.state, + args.network, + args.economicsData, + ) if err != nil { return nil, err } @@ -1183,7 +1190,9 @@ func newInterceptorAndResolverContainerFactory( crypto *Crypto, state *State, network *Network, + economics *economics.EconomicsData, ) (process.InterceptorsContainerFactory, dataRetriever.ResolversContainerFactory, error) { + if shardCoordinator.SelfId() < shardCoordinator.NumberOfShards() { return newShardInterceptorAndResolverContainerFactory( shardCoordinator, @@ -1193,6 +1202,7 @@ func newInterceptorAndResolverContainerFactory( crypto, state, network, + economics, ) } if shardCoordinator.SelfId() == sharding.MetachainShardId { @@ -1217,6 +1227,7 @@ func newShardInterceptorAndResolverContainerFactory( crypto *Crypto, state *State, network *Network, + economics *economics.EconomicsData, ) (process.InterceptorsContainerFactory, dataRetriever.ResolversContainerFactory, error) { interceptorContainerFactory, err := shard.NewInterceptorsContainerFactory( @@ -1233,6 +1244,7 @@ func newShardInterceptorAndResolverContainerFactory( data.Datapool, state.AddressConverter, maxTxNonceDeltaAllowed, + economics, ) if err != nil { return nil, nil, err diff --git a/process/factory/shard/interceptorsContainerFactory.go b/process/factory/shard/interceptorsContainerFactory.go index 28f782a8204..1a905cad128 100644 --- a/process/factory/shard/interceptorsContainerFactory.go +++ b/process/factory/shard/interceptorsContainerFactory.go @@ -35,6 +35,7 @@ type interceptorsContainerFactory struct { nodesCoordinator sharding.NodesCoordinator txInterceptorThrottler process.InterceptorThrottler maxTxNonceDeltaAllowed int + txFeeHandler process.FeeHandler } // NewInterceptorsContainerFactory is responsible for creating a new interceptors factory object @@ -52,6 +53,7 @@ func NewInterceptorsContainerFactory( dataPool dataRetriever.PoolsHolder, addrConverter state.AddressConverter, maxTxNonceDeltaAllowed int, + txFeeHandler process.FeeHandler, ) (*interceptorsContainerFactory, error) { if accounts == nil || accounts.IsInterfaceNil() { return nil, process.ErrNilAccountsAdapter @@ -89,6 +91,9 @@ func NewInterceptorsContainerFactory( if nodesCoordinator == nil || nodesCoordinator.IsInterfaceNil() { 
return nil, process.ErrNilNodesCoordinator } + if txFeeHandler == nil || txFeeHandler.IsInterfaceNil() { + return nil, process.ErrNilEconomicsFeeHandler + } txInterceptorThrottler, err := throttler.NewNumGoRoutineThrottler(maxGoRoutineTxInterceptor) if err != nil { @@ -110,6 +115,7 @@ func NewInterceptorsContainerFactory( addrConverter: addrConverter, txInterceptorThrottler: txInterceptorThrottler, maxTxNonceDeltaAllowed: maxTxNonceDeltaAllowed, + txFeeHandler: txFeeHandler, }, nil } @@ -255,6 +261,7 @@ func (icf *interceptorsContainerFactory) createOneTxInterceptor(identifier strin icf.keyGen, icf.shardCoordinator, icf.txInterceptorThrottler, + icf.txFeeHandler, ) if err != nil { diff --git a/process/factory/shard/interceptorsContainerFactory_test.go b/process/factory/shard/interceptorsContainerFactory_test.go index d1eaf0e9821..c76c31e3ce6 100644 --- a/process/factory/shard/interceptorsContainerFactory_test.go +++ b/process/factory/shard/interceptorsContainerFactory_test.go @@ -99,6 +99,7 @@ func TestNewInterceptorsContainerFactory_NilAccountsAdapter(t *testing.T) { createDataPools(), &mock.AddressConverterMock{}, maxTxNonceDeltaAllowed, + &mock.FeeHandlerStub{}, ) assert.Nil(t, icf) @@ -122,6 +123,7 @@ func TestNewInterceptorsContainerFactory_NilShardCoordinatorShouldErr(t *testing createDataPools(), &mock.AddressConverterMock{}, maxTxNonceDeltaAllowed, + &mock.FeeHandlerStub{}, ) assert.Nil(t, icf) @@ -145,6 +147,7 @@ func TestNewInterceptorsContainerFactory_NilNodesCoordinatorShouldErr(t *testing createDataPools(), &mock.AddressConverterMock{}, maxTxNonceDeltaAllowed, + &mock.FeeHandlerStub{}, ) assert.Nil(t, icf) @@ -168,6 +171,7 @@ func TestNewInterceptorsContainerFactory_NilTopicHandlerShouldErr(t *testing.T) createDataPools(), &mock.AddressConverterMock{}, maxTxNonceDeltaAllowed, + &mock.FeeHandlerStub{}, ) assert.Nil(t, icf) @@ -191,6 +195,7 @@ func TestNewInterceptorsContainerFactory_NilBlockchainShouldErr(t *testing.T) { createDataPools(), &mock.AddressConverterMock{}, maxTxNonceDeltaAllowed, + &mock.FeeHandlerStub{}, ) assert.Nil(t, icf) @@ -214,6 +219,7 @@ func TestNewInterceptorsContainerFactory_NilMarshalizerShouldErr(t *testing.T) { createDataPools(), &mock.AddressConverterMock{}, maxTxNonceDeltaAllowed, + &mock.FeeHandlerStub{}, ) assert.Nil(t, icf) @@ -237,6 +243,7 @@ func TestNewInterceptorsContainerFactory_NilHasherShouldErr(t *testing.T) { createDataPools(), &mock.AddressConverterMock{}, maxTxNonceDeltaAllowed, + &mock.FeeHandlerStub{}, ) assert.Nil(t, icf) @@ -260,6 +267,7 @@ func TestNewInterceptorsContainerFactory_NilKeyGenShouldErr(t *testing.T) { createDataPools(), &mock.AddressConverterMock{}, maxTxNonceDeltaAllowed, + &mock.FeeHandlerStub{}, ) assert.Nil(t, icf) @@ -283,6 +291,7 @@ func TestNewInterceptorsContainerFactory_NilSingleSignerShouldErr(t *testing.T) createDataPools(), &mock.AddressConverterMock{}, maxTxNonceDeltaAllowed, + &mock.FeeHandlerStub{}, ) assert.Nil(t, icf) @@ -306,6 +315,7 @@ func TestNewInterceptorsContainerFactory_NilMultiSignerShouldErr(t *testing.T) { createDataPools(), &mock.AddressConverterMock{}, maxTxNonceDeltaAllowed, + &mock.FeeHandlerStub{}, ) assert.Nil(t, icf) @@ -329,6 +339,7 @@ func TestNewInterceptorsContainerFactory_NilDataPoolShouldErr(t *testing.T) { nil, &mock.AddressConverterMock{}, maxTxNonceDeltaAllowed, + &mock.FeeHandlerStub{}, ) assert.Nil(t, icf) @@ -352,12 +363,37 @@ func TestNewInterceptorsContainerFactory_NilAddrConverterShouldErr(t *testing.T) createDataPools(), nil, maxTxNonceDeltaAllowed, + 
&mock.FeeHandlerStub{}, ) assert.Nil(t, icf) assert.Equal(t, process.ErrNilAddressConverter, err) } +func TestNewInterceptorsContainerFactory_NilTxFeeHandlerShouldErr(t *testing.T) { + t.Parallel() + + icf, err := shard.NewInterceptorsContainerFactory( + &mock.AccountsStub{}, + mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.TopicHandlerStub{}, + createStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + &mock.SingleSignKeyGenMock{}, + &mock.SignerMock{}, + mock.NewMultiSigner(), + createDataPools(), + &mock.AddressConverterMock{}, + maxTxNonceDeltaAllowed, + nil, + ) + + assert.Nil(t, icf) + assert.Equal(t, process.ErrNilEconomicsFeeHandler, err) +} + func TestNewInterceptorsContainerFactory_ShouldWork(t *testing.T) { t.Parallel() @@ -375,6 +411,7 @@ func TestNewInterceptorsContainerFactory_ShouldWork(t *testing.T) { createDataPools(), &mock.AddressConverterMock{}, maxTxNonceDeltaAllowed, + &mock.FeeHandlerStub{}, ) assert.NotNil(t, icf) @@ -400,6 +437,7 @@ func TestInterceptorsContainerFactory_CreateTopicCreationTxFailsShouldErr(t *tes createDataPools(), &mock.AddressConverterMock{}, maxTxNonceDeltaAllowed, + &mock.FeeHandlerStub{}, ) container, err := icf.Create() @@ -425,6 +463,7 @@ func TestInterceptorsContainerFactory_CreateTopicCreationHdrFailsShouldErr(t *te createDataPools(), &mock.AddressConverterMock{}, maxTxNonceDeltaAllowed, + &mock.FeeHandlerStub{}, ) container, err := icf.Create() @@ -450,6 +489,7 @@ func TestInterceptorsContainerFactory_CreateTopicCreationMiniBlocksFailsShouldEr createDataPools(), &mock.AddressConverterMock{}, maxTxNonceDeltaAllowed, + &mock.FeeHandlerStub{}, ) container, err := icf.Create() @@ -475,6 +515,7 @@ func TestInterceptorsContainerFactory_CreateTopicCreationPeerChBlocksFailsShould createDataPools(), &mock.AddressConverterMock{}, maxTxNonceDeltaAllowed, + &mock.FeeHandlerStub{}, ) container, err := icf.Create() @@ -500,6 +541,7 @@ func TestInterceptorsContainerFactory_CreateTopicCreationMetachainHeadersFailsSh createDataPools(), &mock.AddressConverterMock{}, maxTxNonceDeltaAllowed, + &mock.FeeHandlerStub{}, ) container, err := icf.Create() @@ -525,6 +567,7 @@ func TestInterceptorsContainerFactory_CreateRegisterTxFailsShouldErr(t *testing. 
createDataPools(), &mock.AddressConverterMock{}, maxTxNonceDeltaAllowed, + &mock.FeeHandlerStub{}, ) container, err := icf.Create() @@ -550,6 +593,7 @@ func TestInterceptorsContainerFactory_CreateRegisterHdrFailsShouldErr(t *testing createDataPools(), &mock.AddressConverterMock{}, maxTxNonceDeltaAllowed, + &mock.FeeHandlerStub{}, ) container, err := icf.Create() @@ -575,6 +619,7 @@ func TestInterceptorsContainerFactory_CreateRegisterMiniBlocksFailsShouldErr(t * createDataPools(), &mock.AddressConverterMock{}, maxTxNonceDeltaAllowed, + &mock.FeeHandlerStub{}, ) container, err := icf.Create() @@ -600,6 +645,7 @@ func TestInterceptorsContainerFactory_CreateRegisterPeerChBlocksFailsShouldErr(t createDataPools(), &mock.AddressConverterMock{}, maxTxNonceDeltaAllowed, + &mock.FeeHandlerStub{}, ) container, err := icf.Create() @@ -625,6 +671,7 @@ func TestInterceptorsContainerFactory_CreateRegisterMetachainHeadersShouldErr(t createDataPools(), &mock.AddressConverterMock{}, maxTxNonceDeltaAllowed, + &mock.FeeHandlerStub{}, ) container, err := icf.Create() @@ -657,6 +704,7 @@ func TestInterceptorsContainerFactory_CreateShouldWork(t *testing.T) { createDataPools(), &mock.AddressConverterMock{}, maxTxNonceDeltaAllowed, + &mock.FeeHandlerStub{}, ) container, err := icf.Create() @@ -702,6 +750,7 @@ func TestInterceptorsContainerFactory_With4ShardsShouldWork(t *testing.T) { createDataPools(), &mock.AddressConverterMock{}, maxTxNonceDeltaAllowed, + &mock.FeeHandlerStub{}, ) container, _ := icf.Create() diff --git a/process/factory/shard/preProcessorsContainerFactory_test.go b/process/factory/shard/preProcessorsContainerFactory_test.go index a330b6a09c3..518c8c5d9c0 100644 --- a/process/factory/shard/preProcessorsContainerFactory_test.go +++ b/process/factory/shard/preProcessorsContainerFactory_test.go @@ -1,11 +1,12 @@ package shard import ( + "testing" + "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/mock" "github.com/stretchr/testify/assert" - "testing" ) func TestNewPreProcessorsContainerFactory_NilShardCoordinator(t *testing.T) { @@ -25,7 +26,7 @@ func TestNewPreProcessorsContainerFactory_NilShardCoordinator(t *testing.T) { &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, &mock.IntermediateTransactionHandlerMock{}, - &mock.FeeHandlerMock{}, + &mock.FeeHandlerStub{}, ) assert.Equal(t, process.ErrNilShardCoordinator, err) @@ -49,7 +50,7 @@ func TestNewPreProcessorsContainerFactory_NilStore(t *testing.T) { &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, &mock.IntermediateTransactionHandlerMock{}, - &mock.FeeHandlerMock{}, + &mock.FeeHandlerStub{}, ) assert.Equal(t, process.ErrNilStore, err) @@ -73,7 +74,7 @@ func TestNewPreProcessorsContainerFactory_NilMarshalizer(t *testing.T) { &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, &mock.IntermediateTransactionHandlerMock{}, - &mock.FeeHandlerMock{}, + &mock.FeeHandlerStub{}, ) assert.Equal(t, process.ErrNilMarshalizer, err) @@ -97,7 +98,7 @@ func TestNewPreProcessorsContainerFactory_NilHasher(t *testing.T) { &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, &mock.IntermediateTransactionHandlerMock{}, - &mock.FeeHandlerMock{}, + &mock.FeeHandlerStub{}, ) assert.Equal(t, process.ErrNilHasher, err) @@ -121,7 +122,7 @@ func TestNewPreProcessorsContainerFactory_NilDataPool(t *testing.T) { &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, 
&mock.IntermediateTransactionHandlerMock{}, - &mock.FeeHandlerMock{}, + &mock.FeeHandlerStub{}, ) assert.Equal(t, process.ErrNilDataPoolHolder, err) @@ -145,7 +146,7 @@ func TestNewPreProcessorsContainerFactory_NilAddrConv(t *testing.T) { &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, &mock.IntermediateTransactionHandlerMock{}, - &mock.FeeHandlerMock{}, + &mock.FeeHandlerStub{}, ) assert.Equal(t, process.ErrNilAddressConverter, err) @@ -169,7 +170,7 @@ func TestNewPreProcessorsContainerFactory_NilAccounts(t *testing.T) { &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, &mock.IntermediateTransactionHandlerMock{}, - &mock.FeeHandlerMock{}, + &mock.FeeHandlerStub{}, ) assert.Equal(t, process.ErrNilAccountsAdapter, err) @@ -193,7 +194,7 @@ func TestNewPreProcessorsContainerFactory_NilTxProcessor(t *testing.T) { &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, &mock.IntermediateTransactionHandlerMock{}, - &mock.FeeHandlerMock{}, + &mock.FeeHandlerStub{}, ) assert.Equal(t, process.ErrNilTxProcessor, err) @@ -217,7 +218,7 @@ func TestNewPreProcessorsContainerFactory_NilSCProcessor(t *testing.T) { &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, &mock.IntermediateTransactionHandlerMock{}, - &mock.FeeHandlerMock{}, + &mock.FeeHandlerStub{}, ) assert.Equal(t, process.ErrNilSmartContractProcessor, err) @@ -241,7 +242,7 @@ func TestNewPreProcessorsContainerFactory_NilSCR(t *testing.T) { nil, &mock.RewardTxProcessorMock{}, &mock.IntermediateTransactionHandlerMock{}, - &mock.FeeHandlerMock{}, + &mock.FeeHandlerStub{}, ) assert.Equal(t, process.ErrNilSmartContractResultProcessor, err) @@ -265,7 +266,7 @@ func TestNewPreProcessorsContainerFactory_NilRewardTxProcessor(t *testing.T) { &mock.SmartContractResultsProcessorMock{}, nil, &mock.IntermediateTransactionHandlerMock{}, - &mock.FeeHandlerMock{}, + &mock.FeeHandlerStub{}, ) assert.Equal(t, process.ErrNilRewardsTxProcessor, err) @@ -289,7 +290,7 @@ func TestNewPreProcessorsContainerFactory_NilRequestHandler(t *testing.T) { &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, &mock.IntermediateTransactionHandlerMock{}, - &mock.FeeHandlerMock{}, + &mock.FeeHandlerStub{}, ) assert.Equal(t, process.ErrNilRequestHandler, err) @@ -313,7 +314,7 @@ func TestNewPreProcessorsContainerFactory(t *testing.T) { &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, &mock.IntermediateTransactionHandlerMock{}, - &mock.FeeHandlerMock{}, + &mock.FeeHandlerStub{}, ) assert.Nil(t, err) @@ -341,7 +342,7 @@ func TestPreProcessorsContainerFactory_CreateErrTxPreproc(t *testing.T) { &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, &mock.IntermediateTransactionHandlerMock{}, - &mock.FeeHandlerMock{}, + &mock.FeeHandlerStub{}, ) assert.Nil(t, err) @@ -379,7 +380,7 @@ func TestPreProcessorsContainerFactory_CreateErrScrPreproc(t *testing.T) { &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, &mock.IntermediateTransactionHandlerMock{}, - &mock.FeeHandlerMock{}, + &mock.FeeHandlerStub{}, ) assert.Nil(t, err) @@ -426,13 +427,13 @@ func TestPreProcessorsContainerFactory_Create(t *testing.T) { &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, &mock.IntermediateTransactionHandlerMock{}, - &mock.FeeHandlerMock{}, + &mock.FeeHandlerStub{}, ) assert.Nil(t, err) assert.NotNil(t, ppcm) container, err := ppcm.Create() - assert.Equal(t, 3, container.Len()) assert.Nil(t, err) + 
assert.Equal(t, 3, container.Len()) } diff --git a/process/transaction/interceptedTransaction.go b/process/transaction/interceptedTransaction.go index 0a8f001299e..398094e834a 100644 --- a/process/transaction/interceptedTransaction.go +++ b/process/transaction/interceptedTransaction.go @@ -158,12 +158,12 @@ func (inTx *InterceptedTransaction) integrity() error { func (inTx *InterceptedTransaction) checkFeeValues() error { isLowerGasLimitInTx := inTx.tx.GasLimit < inTx.feeHandler.MinGasLimitForTx() if isLowerGasLimitInTx { - return process.ErrInsufficientGasPriceInTx + return process.ErrInsufficientGasLimitInTx } isLowerGasPrice := inTx.tx.GasPrice < inTx.feeHandler.MinGasPrice() if isLowerGasPrice { - return process.ErrInsufficientGasLimitInTx + return process.ErrInsufficientGasPriceInTx } return nil diff --git a/process/transaction/interceptedTransaction_test.go b/process/transaction/interceptedTransaction_test.go index cba26c75606..dc0ff9cae88 100644 --- a/process/transaction/interceptedTransaction_test.go +++ b/process/transaction/interceptedTransaction_test.go @@ -60,6 +60,10 @@ func createTxFeeHandler(gasPrice uint64, gasLimit uint64) process.FeeHandler { return feeHandler } +func createFreeTxFeeHandler() process.FeeHandler { + return createTxFeeHandler(0, 0) +} + func createInterceptedTxFromPlainTx(tx *dataTransaction.Transaction, txFeeHandler process.FeeHandler) (*transaction.InterceptedTransaction, error) { marshalizer := &mock.MarshalizerMock{} txBuff, _ := marshalizer.Marshal(tx) @@ -324,7 +328,7 @@ func TestNewInterceptedTransaction_NilSignatureShouldErr(t *testing.T) { Signature: nil, } - txi, err := createInterceptedTxFromPlainTx(tx) + txi, err := createInterceptedTxFromPlainTx(tx, createFreeTxFeeHandler()) assert.Nil(t, txi) assert.Equal(t, process.ErrNilSignature, err) @@ -344,7 +348,7 @@ func TestNewInterceptedTransaction_NilSenderAddressShouldErr(t *testing.T) { Signature: sigOk, } - txi, err := createInterceptedTxFromPlainTx(tx) + txi, err := createInterceptedTxFromPlainTx(tx, createFreeTxFeeHandler()) assert.Nil(t, txi) assert.Equal(t, process.ErrNilSndAddr, err) @@ -364,7 +368,7 @@ func TestNewInterceptedTransaction_NilRecvAddressShouldErr(t *testing.T) { Signature: sigOk, } - txi, err := createInterceptedTxFromPlainTx(tx) + txi, err := createInterceptedTxFromPlainTx(tx, createFreeTxFeeHandler()) assert.Nil(t, txi) assert.Equal(t, process.ErrNilRcvAddr, err) @@ -384,7 +388,7 @@ func TestNewInterceptedTransaction_NilValueShouldErr(t *testing.T) { Signature: sigOk, } - txi, err := createInterceptedTxFromPlainTx(tx) + txi, err := createInterceptedTxFromPlainTx(tx, createFreeTxFeeHandler()) assert.Nil(t, txi) assert.Equal(t, process.ErrNilValue, err) @@ -404,7 +408,7 @@ func TestNewInterceptedTransaction_NilNegativeValueShouldErr(t *testing.T) { Signature: sigOk, } - txi, err := createInterceptedTxFromPlainTx(tx) + txi, err := createInterceptedTxFromPlainTx(tx, createFreeTxFeeHandler()) assert.Nil(t, txi) assert.Equal(t, process.ErrNegativeValue, err) @@ -424,12 +428,58 @@ func TestNewInterceptedTransaction_InvalidSenderShouldErr(t *testing.T) { Signature: sigOk, } - txi, err := createInterceptedTxFromPlainTx(tx) + txi, err := createInterceptedTxFromPlainTx(tx, createFreeTxFeeHandler()) assert.Nil(t, txi) assert.Equal(t, errSingleSignKeyGenMock, err) } +func TestNewInterceptedTransaction_InsufficientGasPriceShouldErr(t *testing.T) { + t.Parallel() + + gasLimit := uint64(3) + gasPrice := uint64(4) + tx := &dataTransaction.Transaction{ + Nonce: 1, + Value: big.NewInt(2), + 
Data: "data", + GasLimit: gasLimit, + GasPrice: gasPrice, + RcvAddr: recvAddress, + SndAddr: []byte(""), + Signature: sigOk, + } + feeHandler := createTxFeeHandler(gasPrice+1, gasLimit) + + txi, err := createInterceptedTxFromPlainTx(tx, feeHandler) + + assert.Nil(t, txi) + assert.Equal(t, process.ErrInsufficientGasPriceInTx, err) +} + +func TestNewInterceptedTransaction_InsufficientGasLimitShouldErr(t *testing.T) { + t.Parallel() + + gasLimit := uint64(3) + gasPrice := uint64(4) + tx := &dataTransaction.Transaction{ + Nonce: 1, + Value: big.NewInt(2), + Data: "data", + GasLimit: gasLimit, + GasPrice: gasPrice, + RcvAddr: recvAddress, + SndAddr: []byte(""), + Signature: sigOk, + } + feeHandler := createTxFeeHandler(gasPrice, gasLimit+1) + + txi, err := createInterceptedTxFromPlainTx(tx, feeHandler) + + assert.Nil(t, txi) + assert.Equal(t, process.ErrInsufficientGasLimitInTx, err) +} + func TestNewInterceptedTransaction_VerifyFailsShouldErr(t *testing.T) { t.Parallel() @@ -444,7 +494,7 @@ func TestNewInterceptedTransaction_VerifyFailsShouldErr(t *testing.T) { Signature: []byte("wrong sig"), } - txi, err := createInterceptedTxFromPlainTx(tx) + txi, err := createInterceptedTxFromPlainTx(tx, createFreeTxFeeHandler()) assert.Nil(t, txi) assert.Equal(t, errSignerMockVerifySigFails, err) @@ -464,7 +514,7 @@ func TestNewInterceptedTransaction_ShouldWork(t *testing.T) { Signature: sigOk, } - txi, err := createInterceptedTxFromPlainTx(tx) + txi, err := createInterceptedTxFromPlainTx(tx, createFreeTxFeeHandler()) assert.NotNil(t, txi) assert.Nil(t, err) @@ -485,7 +535,7 @@ func TestNewInterceptedTransaction_OkValsGettersShouldWork(t *testing.T) { Signature: sigOk, } - txi, _ := createInterceptedTxFromPlainTx(tx) + txi, _ := createInterceptedTxFromPlainTx(tx, createFreeTxFeeHandler()) assert.Equal(t, senderShard, txi.SndShard()) assert.Equal(t, recvShard, txi.RcvShard()) @@ -539,6 +589,7 @@ func TestNewInterceptedTransaction_ScTxDeployRecvShardIdShouldBeSendersShardId(t }, }, shardCoordinator, + createFreeTxFeeHandler(), ) assert.Nil(t, err) @@ -562,7 +613,7 @@ func TestNewInterceptedTransaction_GetNonce(t *testing.T) { Signature: sigOk, } - txi, _ := createInterceptedTxFromPlainTx(tx) + txi, _ := createInterceptedTxFromPlainTx(tx, createFreeTxFeeHandler()) result := txi.Nonce() assert.Equal(t, nonce, result) @@ -582,7 +633,7 @@ func TestNewInterceptedTransaction_SenderShardId(t *testing.T) { Signature: sigOk, } - txi, _ := createInterceptedTxFromPlainTx(tx) + txi, _ := createInterceptedTxFromPlainTx(tx, createFreeTxFeeHandler()) result := txi.SenderShardId() assert.Equal(t, senderShard, result) @@ -610,7 +661,7 @@ func TestNewInterceptedTransaction_GetTotalValue(t *testing.T) { Signature: sigOk, } - txi, _ := createInterceptedTxFromPlainTx(tx) + txi, _ := createInterceptedTxFromPlainTx(tx, createFreeTxFeeHandler()) result := txi.TotalValue() assert.Equal(t, expectedValue, result) @@ -630,7 +681,7 @@ func TestNewInterceptedTransaction_GetSenderAddress(t *testing.T) { Signature: sigOk, } - txi, _ := createInterceptedTxFromPlainTx(tx) + txi, _ := createInterceptedTxFromPlainTx(tx, createFreeTxFeeHandler()) result := txi.SenderAddress() assert.NotNil(t, result) diff --git a/process/transaction/interceptor.go b/process/transaction/interceptor.go index 67e0102a6f2..21f3818d912 100644 --- a/process/transaction/interceptor.go +++ b/process/transaction/interceptor.go @@ -26,6 +26,7 @@ type TxInterceptor struct { shardCoordinator sharding.Coordinator broadcastCallbackHandler func(buffToSend []byte) throttler 
process.InterceptorThrottler + feeHandler process.FeeHandler } // NewTxInterceptor hooks a new interceptor for transactions @@ -39,6 +40,7 @@ func NewTxInterceptor( keyGen crypto.KeyGenerator, shardCoordinator sharding.Coordinator, throttler process.InterceptorThrottler, + feeHandler process.FeeHandler, ) (*TxInterceptor, error) { if marshalizer == nil || marshalizer.IsInterfaceNil() { @@ -68,6 +70,9 @@ func NewTxInterceptor( if throttler == nil || throttler.IsInterfaceNil() { return nil, process.ErrNilThrottler } + if feeHandler == nil || feeHandler.IsInterfaceNil() { + return nil, process.ErrNilEconomicsFeeHandler + } txIntercept := &TxInterceptor{ marshalizer: marshalizer, @@ -79,6 +84,7 @@ func NewTxInterceptor( keyGen: keyGen, shardCoordinator: shardCoordinator, throttler: throttler, + feeHandler: feeHandler, } return txIntercept, nil @@ -121,7 +127,9 @@ func (txi *TxInterceptor) ProcessReceivedMessage(message p2p.MessageP2P) error { txi.keyGen, txi.singleSigner, txi.addrConverter, - txi.shardCoordinator) + txi.shardCoordinator, + txi.feeHandler, + ) if err != nil { lastErrEncountered = err diff --git a/process/transaction/interceptor_test.go b/process/transaction/interceptor_test.go index ac9352aca02..7d15f776f37 100644 --- a/process/transaction/interceptor_test.go +++ b/process/transaction/interceptor_test.go @@ -49,6 +49,7 @@ func TestNewTxInterceptor_NilMarshalizerShouldErr(t *testing.T) { keyGen, oneSharder, throttler, + &mock.FeeHandlerStub{}, ) assert.Equal(t, process.ErrNilMarshalizer, err) @@ -75,6 +76,7 @@ func TestNewTxInterceptor_NilTransactionPoolShouldErr(t *testing.T) { keyGen, oneSharder, throttler, + &mock.FeeHandlerStub{}, ) assert.Equal(t, process.ErrNilTxDataPool, err) @@ -101,6 +103,7 @@ func TestNewTxInterceptor_NilTxHandlerValidatorShouldErr(t *testing.T) { keyGen, oneSharder, throttler, + &mock.FeeHandlerStub{}, ) assert.Equal(t, process.ErrNilTxHandlerValidator, err) @@ -127,6 +130,7 @@ func TestNewTxInterceptor_NilAddressConverterShouldErr(t *testing.T) { keyGen, oneSharder, throttler, + &mock.FeeHandlerStub{}, ) assert.Equal(t, process.ErrNilAddressConverter, err) @@ -154,6 +158,7 @@ func TestNewTxInterceptor_NilHasherShouldErr(t *testing.T) { keyGen, oneSharder, throttler, + &mock.FeeHandlerStub{}, ) assert.Equal(t, process.ErrNilHasher, err) @@ -180,6 +185,7 @@ func TestNewTxInterceptor_NilSignerShouldErr(t *testing.T) { keyGen, oneSharder, throttler, + &mock.FeeHandlerStub{}, ) assert.Equal(t, process.ErrNilSingleSigner, err) @@ -206,6 +212,7 @@ func TestNewTxInterceptor_NilKeyGenShouldErr(t *testing.T) { nil, oneSharder, throttler, + &mock.FeeHandlerStub{}, ) assert.Equal(t, process.ErrNilKeyGen, err) @@ -232,6 +239,7 @@ func TestNewTxInterceptor_NilShardCoordinatorShouldErr(t *testing.T) { keyGen, nil, throttler, + &mock.FeeHandlerStub{}, ) assert.Equal(t, process.ErrNilShardCoordinator, err) @@ -258,12 +266,41 @@ func TestNewTxInterceptor_NilThrottlerShouldErr(t *testing.T) { keyGen, oneSharder, nil, + &mock.FeeHandlerStub{}, ) assert.Equal(t, process.ErrNilThrottler, err) assert.Nil(t, txi) } +func TestNewTxInterceptor_NilFeeHandlerShouldErr(t *testing.T) { + t.Parallel() + + txPool := &mock.ShardedDataStub{} + addrConv := &mock.AddressConverterMock{} + keyGen := &mock.SingleSignKeyGenMock{} + txValidator := createMockedTxValidator() + signer := &mock.SignerMock{} + oneSharder := mock.NewOneShardCoordinatorMock() + throttler := &mock.InterceptorThrottlerStub{} + + txi, err := transaction.NewTxInterceptor( + &mock.MarshalizerMock{}, + txPool, + 
txValidator, + addrConv, + mock.HasherMock{}, + signer, + keyGen, + oneSharder, + throttler, + nil, + ) + + assert.Equal(t, process.ErrNilEconomicsFeeHandler, err) + assert.Nil(t, txi) +} + func TestNewTxInterceptor_OkValsShouldWork(t *testing.T) { t.Parallel() @@ -285,6 +322,7 @@ func TestNewTxInterceptor_OkValsShouldWork(t *testing.T) { keyGen, oneSharder, throttler, + &mock.FeeHandlerStub{}, ) assert.Nil(t, err) @@ -318,6 +356,7 @@ func TestTransactionInterceptor_ProcessReceivedMessageSystemBusyShouldErr(t *tes keyGen, oneSharder, throttler, + &mock.FeeHandlerStub{}, ) err := txi.ProcessReceivedMessage(nil) @@ -352,6 +391,7 @@ func TestTransactionInterceptor_ProcessReceivedMessageNilMesssageShouldErr(t *te keyGen, oneSharder, throttler, + &mock.FeeHandlerStub{}, ) err := txi.ProcessReceivedMessage(nil) @@ -386,6 +426,7 @@ func TestTransactionInterceptor_ProcessReceivedMessageMilMessageDataShouldErr(t keyGen, oneSharder, throttler, + &mock.FeeHandlerStub{}, ) msg := &mock.P2PMessageMock{} @@ -428,6 +469,7 @@ func TestTransactionInterceptor_ProcessReceivedMessageMarshalizerFailsAtUnmarsha keyGen, oneSharder, throttler, + &mock.FeeHandlerStub{}, ) msg := &mock.P2PMessageMock{ @@ -473,6 +515,7 @@ func TestTransactionInterceptor_ProcessReceivedMessageNoTransactionInMessageShou keyGen, oneSharder, throttler, + &mock.FeeHandlerStub{}, ) msg := &mock.P2PMessageMock{ @@ -513,6 +556,7 @@ func TestTransactionInterceptor_ProcessReceivedMessageIntegrityFailedShouldErr(t keyGen, oneSharder, throttler, + &mock.FeeHandlerStub{}, ) txNewer := &dataTransaction.Transaction{ @@ -580,6 +624,7 @@ func TestTransactionInterceptor_ProcessReceivedMessageIntegrityFailedWithTwoTxsS keyGen, oneSharder, throttler, + createFreeTxFeeHandler(), ) tx1 := &dataTransaction.Transaction{ @@ -666,6 +711,7 @@ func TestTransactionInterceptor_ProcessReceivedMessageVerifySigFailsShouldErr(t keyGen, oneSharder, throttler, + createFreeTxFeeHandler(), ) txNewer := &dataTransaction.Transaction{ @@ -733,6 +779,7 @@ func TestTransactionInterceptor_ProcessReceivedMessageOkValsSameShardShouldWork( keyGen, oneSharder, throttler, + createFreeTxFeeHandler(), ) txNewer := &dataTransaction.Transaction{ @@ -811,6 +858,7 @@ func TestTransactionInterceptor_ProcessReceivedMessageOkValsOtherShardsShouldWor keyGen, multiSharder, throttler, + createFreeTxFeeHandler(), ) txNewer := &dataTransaction.Transaction{ @@ -900,6 +948,7 @@ func TestTransactionInterceptor_ProcessReceivedMessageTxNotValidShouldNotAdd(t * keyGen, multiSharder, throttler, + createFreeTxFeeHandler(), ) txNewer := &dataTransaction.Transaction{ diff --git a/process/transaction/process_test.go b/process/transaction/process_test.go index 61b56bffd64..8bcab0e79c1 100644 --- a/process/transaction/process_test.go +++ b/process/transaction/process_test.go @@ -24,8 +24,8 @@ func generateRandomByteSlice(size int) []byte { return buff } -func FeeHandlerMock() *mock.FeeHandlerMock { - return &mock.FeeHandlerMock{ +func FeeHandlerMock() *mock.FeeHandlerStub { + return &mock.FeeHandlerStub{ MinGasPriceCalled: func() uint64 { return 0 }, From 1517d942795e4feb8831e53e72c5f62b18b2499f Mon Sep 17 00:00:00 2001 From: Sebastian Marian Date: Thu, 3 Oct 2019 14:35:55 +0300 Subject: [PATCH 191/234] * Fixed unit tests --- process/block/baseProcess_test.go | 2 +- process/block/export_test.go | 25 ++- process/block/shardblock.go | 283 ++++++++++++++---------------- process/block/shardblock_test.go | 135 ++++++-------- 4 files changed, 207 insertions(+), 238 deletions(-) diff --git 
a/process/block/baseProcess_test.go b/process/block/baseProcess_test.go index 39b272400e6..8114f52efef 100644 --- a/process/block/baseProcess_test.go +++ b/process/block/baseProcess_test.go @@ -245,7 +245,7 @@ func initStore() *dataRetriever.ChainStorer { return store } -func createDummyMetaBlock(destShardId uint32, senderShardId uint32, miniBlockHashes ...[]byte) data.HeaderHandler { +func createDummyMetaBlock(destShardId uint32, senderShardId uint32, miniBlockHashes ...[]byte) *block.MetaBlock { metaBlock := &block.MetaBlock{ ShardInfo: []block.ShardData{ { diff --git a/process/block/export_test.go b/process/block/export_test.go index ce15ee1afd5..6fc28dafc6a 100644 --- a/process/block/export_test.go +++ b/process/block/export_test.go @@ -250,8 +250,8 @@ func (sp *shardProcessor) RequestFinalMissingHeaders() uint32 { return sp.requestFinalMissingHeaders() } -func (sp *shardProcessor) CheckMetaHeadersValidityAndFinality(hdr *block.Header) error { - return sp.checkMetaHeadersValidityAndFinality(hdr) +func (sp *shardProcessor) CheckMetaHeadersValidityAndFinality() error { + return sp.checkMetaHeadersValidityAndFinality() } func (sp *shardProcessor) GetOrderedMetaBlocks(round uint64) ([]*hashAndHdr, error) { @@ -299,9 +299,8 @@ func (sp *shardProcessor) RestoreMetaBlockIntoPool( func (sp *shardProcessor) GetAllMiniBlockDstMeFromMeta( round uint64, - metaHashes [][]byte, ) (map[string][]byte, error) { - return sp.getAllMiniBlockDstMeFromMeta(round, metaHashes) + return sp.getAllMiniBlockDstMeFromMeta(round) } func (sp *shardProcessor) IsMiniBlockProcessed(metaBlockHash []byte, miniBlockHash []byte) bool { @@ -311,3 +310,21 @@ func (sp *shardProcessor) IsMiniBlockProcessed(metaBlockHash []byte, miniBlockHa func (sp *shardProcessor) AddProcessedMiniBlock(metaBlockHash []byte, miniBlockHash []byte) { sp.addProcessedMiniBlock(metaBlockHash, miniBlockHash) } + +func (sp *shardProcessor) SetHdrForCurrentBlock(headerHash []byte, headerHandler data.HeaderHandler, usedInBlock bool) { + sp.hdrsForCurrBlock.mutHdrsForBlock.Lock() + sp.hdrsForCurrBlock.hdrHashAndInfo[string(headerHash)] = &hdrInfo{hdr: headerHandler, usedInBlock: usedInBlock} + sp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() +} + +func (sp *shardProcessor) SetMissingHdrsForCurrentBlock(missingHdrs uint32) { + sp.hdrsForCurrBlock.mutHdrsForBlock.Lock() + sp.hdrsForCurrBlock.missingHdrs = missingHdrs + sp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() +} + +func (sp *shardProcessor) SetMissingFinalHdrsForCurrentBlock(missingFinalHdrs uint32) { + sp.hdrsForCurrBlock.mutHdrsForBlock.Lock() + sp.hdrsForCurrBlock.missingFinalHdrs = missingFinalHdrs + sp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() +} diff --git a/process/block/shardblock.go b/process/block/shardblock.go index c5942747cab..3dbf5d3712f 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -44,7 +44,7 @@ type shardProcessor struct { blocksTracker process.BlocksTracker metaBlockFinality int - chRcvAllMetaHdrs chan bool + chRcvAllMetaHdrs chan bool hdrsForCurrBlock hdrForBlock currHighestMetaHdrNonce uint64 @@ -324,35 +324,22 @@ func (sp *shardProcessor) checkMetaHeadersValidityAndFinality() error { return err } - currAddedMetaHdrs := make([]*block.MetaBlock, 0) - - sp.hdrsForCurrBlock.mutHdrsForBlock.RLock() - for _, hdrInfo := range sp.hdrsForCurrBlock.hdrHashAndInfo { - if !hdrInfo.usedInBlock { - continue - } - - metaHdr, ok := hdrInfo.hdr.(*block.MetaBlock) - if !ok { - sp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() - return process.ErrWrongTypeAssertion - } - - 
currAddedMetaHdrs = append(currAddedMetaHdrs, metaHdr) + usedMetaHdrs, err := sp.sortHdrsForCurrentBlock(true) + if err != nil { + return err } - sp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() - if len(currAddedMetaHdrs) == 0 { + if len(usedMetaHdrs) == 0 { return nil } - if len(currAddedMetaHdrs) > 1 { - sort.Slice(currAddedMetaHdrs, func(i, j int) bool { - return currAddedMetaHdrs[i].Nonce < currAddedMetaHdrs[j].Nonce + if len(usedMetaHdrs) > 1 { + sort.Slice(usedMetaHdrs, func(i, j int) bool { + return usedMetaHdrs[i].Nonce < usedMetaHdrs[j].Nonce }) } - for _, metaHdr := range currAddedMetaHdrs { + for _, metaHdr := range usedMetaHdrs { err = sp.isHdrConstructionValid(metaHdr, tmpNotedHdr) if err != nil { return err @@ -375,7 +362,7 @@ func (sp *shardProcessor) checkMetaHdrFinality(header data.HeaderHandler) error return process.ErrNilBlockHeader } - sortedMetaHdrs, err := sp.getFinalityAttestingHeaders(header, process.MetaBlockFinality) + finalMetaHdrs, err := sp.sortHdrsForCurrentBlock(false) if err != nil { return err } @@ -383,20 +370,20 @@ func (sp *shardProcessor) checkMetaHdrFinality(header data.HeaderHandler) error lastVerifiedHdr := header // verify if there are "K" block after current to make this one final nextBlocksVerified := 0 - for _, tmpHdr := range sortedMetaHdrs { + for _, metaHdr := range finalMetaHdrs { if nextBlocksVerified >= sp.metaBlockFinality { break } // found a header with the next nonce - if tmpHdr.hdr.GetNonce() == lastVerifiedHdr.GetNonce()+1 { - err = sp.isHdrConstructionValid(tmpHdr.hdr, lastVerifiedHdr) + if metaHdr.Nonce == lastVerifiedHdr.GetNonce()+1 { + err := sp.isHdrConstructionValid(metaHdr, lastVerifiedHdr) if err != nil { log.Debug(err.Error()) continue } - lastVerifiedHdr = tmpHdr.hdr + lastVerifiedHdr = metaHdr nextBlocksVerified += 1 } } @@ -409,52 +396,52 @@ func (sp *shardProcessor) checkMetaHdrFinality(header data.HeaderHandler) error return nil } -func (sp *shardProcessor) getFinalityAttestingHeaders( - highestNonceHdr data.HeaderHandler, - finality uint64, -) ([]*hashAndHdr, error) { - - if highestNonceHdr == nil || highestNonceHdr.IsInterfaceNil() { - return nil, process.ErrNilBlockHeader - } - - metaBlockPool := sp.dataPool.MetaBlocks() - if metaBlockPool == nil { - return nil, process.ErrNilMetaBlockPool - } - - orderedMetaBlocks := make([]*hashAndHdr, 0) - // get keys and arrange them into shards - for _, key := range metaBlockPool.Keys() { - val, _ := metaBlockPool.Peek(key) - if val == nil { - continue - } - - hdr, ok := val.(*block.MetaBlock) - if !ok { - continue - } - - isHdrNonceLowerOrEqualThanHighestNonce := hdr.GetNonce() <= highestNonceHdr.GetNonce() - isHdrNonceHigherThanFinalNonce := hdr.GetNonce() > highestNonceHdr.GetNonce()+finality - - if isHdrNonceLowerOrEqualThanHighestNonce || - isHdrNonceHigherThanFinalNonce { - continue - } - - orderedMetaBlocks = append(orderedMetaBlocks, &hashAndHdr{hdr: hdr, hash: key}) - } - - if len(orderedMetaBlocks) > 1 { - sort.Slice(orderedMetaBlocks, func(i, j int) bool { - return orderedMetaBlocks[i].hdr.GetNonce() < orderedMetaBlocks[j].hdr.GetNonce() - }) - } - - return orderedMetaBlocks, nil -} +//func (sp *shardProcessor) getFinalityAttestingHeaders( +// highestNonceHdr data.HeaderHandler, +// finality uint64, +//) ([]*hashAndHdr, error) { +// +// if highestNonceHdr == nil || highestNonceHdr.IsInterfaceNil() { +// return nil, process.ErrNilBlockHeader +// } +// +// metaBlockPool := sp.dataPool.MetaBlocks() +// if metaBlockPool == nil { +// return nil, process.ErrNilMetaBlockPool 
+// } +// +// orderedMetaBlocks := make([]*hashAndHdr, 0) +// // get keys and arrange them into shards +// for _, key := range metaBlockPool.Keys() { +// val, _ := metaBlockPool.Peek(key) +// if val == nil { +// continue +// } +// +// hdr, ok := val.(*block.MetaBlock) +// if !ok { +// continue +// } +// +// isHdrNonceLowerOrEqualThanHighestNonce := hdr.GetNonce() <= highestNonceHdr.GetNonce() +// isHdrNonceHigherThanFinalNonce := hdr.GetNonce() > highestNonceHdr.GetNonce()+finality +// +// if isHdrNonceLowerOrEqualThanHighestNonce || +// isHdrNonceHigherThanFinalNonce { +// continue +// } +// +// orderedMetaBlocks = append(orderedMetaBlocks, &hashAndHdr{hdr: hdr, hash: key}) +// } +// +// if len(orderedMetaBlocks) > 1 { +// sort.Slice(orderedMetaBlocks, func(i, j int) bool { +// return orderedMetaBlocks[i].hdr.GetNonce() < orderedMetaBlocks[j].hdr.GetNonce() +// }) +// } +// +// return orderedMetaBlocks, nil +//} // check if header has the same miniblocks as presented in body func (sp *shardProcessor) checkHeaderBodyCorrelation(hdr *block.Header, body block.Body) error { @@ -624,37 +611,8 @@ func (sp *shardProcessor) restoreMetaBlockIntoPool(miniBlockHashes map[string]ui } } - //for miniBlockHash := range miniBlockHashes { - // sp.removeProcessedMiniBlock([]byte(miniBlockHash)) - //} - - //TODO: Replace this for with the commented one above - for _, metaBlockKey := range metaBlockPool.Keys() { - if len(miniBlockHashes) == 0 { - break - } - metaBlock, ok := metaBlockPool.Peek(metaBlockKey) - if !ok { - log.Error(process.ErrNilMetaBlockHeader.Error()) - continue - } - - hdr, ok := metaBlock.(data.HeaderHandler) - if !ok { - metaBlockPool.Remove(metaBlockKey) - log.Error(process.ErrWrongTypeAssertion.Error()) - continue - } - - crossMiniBlockHashes := hdr.GetMiniBlockHeadersWithDst(sp.shardCoordinator.SelfId()) - for key := range miniBlockHashes { - _, ok = crossMiniBlockHashes[key] - if !ok { - continue - } - - sp.removeProcessedMiniBlock(metaBlockKey, []byte(key)) - } + for miniBlockHash := range miniBlockHashes { + sp.removeProcessedMiniBlock([]byte(miniBlockHash)) } return nil @@ -939,7 +897,7 @@ func (sp *shardProcessor) getProcessedMetaBlocksFromHeader(header *block.Header) log.Debug(fmt.Sprintf("cross mini blocks in body: %d\n", len(miniBlockHashes))) - processedMetaHeaders, processedCrossMiniBlocksHashes, err := sp.getProcessedMetaBlocksFromMiniBlockHashes(miniBlockHashes) + processedMetaBlocks, processedCrossMiniBlocksHashes, err := sp.getProcessedMetaBlocksFromMiniBlockHashes(miniBlockHashes) if err != nil { return nil, err } @@ -965,7 +923,7 @@ func (sp *shardProcessor) getProcessedMetaBlocksFromHeader(header *block.Header) } sp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() - return processedMetaHeaders, nil + return processedMetaBlocks, nil } // getProcessedMetaBlocks returns all the meta blocks fully processed @@ -1058,6 +1016,12 @@ func (sp *shardProcessor) getProcessedMetaBlocksFromMiniBlockHashes( } sp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() + if len(processedMetaHdrs) > 1 { + sort.Slice(processedMetaHdrs, func(i, j int) bool { + return processedMetaHdrs[i].GetNonce() < processedMetaHdrs[j].GetNonce() + }) + } + return processedMetaHdrs, processedCrossMiniBlocksHashes, nil } @@ -1150,14 +1114,14 @@ func (sp *shardProcessor) receivedMetaBlock(metaBlockHash []byte) { return } - metaBlock, ok := obj.(*block.MetaBlock) + metaBlock, ok := obj.(data.HeaderHandler) if !ok { return } log.Debug(fmt.Sprintf("received metablock with hash %s and nonce %d from network\n", 
core.ToB64(metaBlockHash), - metaBlock.Nonce)) + metaBlock.GetNonce())) sp.hdrsForCurrBlock.mutHdrsForBlock.Lock() @@ -1167,8 +1131,8 @@ func (sp *shardProcessor) receivedMetaBlock(metaBlockHash []byte) { sp.hdrsForCurrBlock.hdrHashAndInfo[string(metaBlockHash)].hdr = metaBlock sp.hdrsForCurrBlock.missingHdrs-- - if metaBlock.Nonce > sp.currHighestMetaHdrNonce { - sp.currHighestMetaHdrNonce = metaBlock.Nonce + if metaBlock.GetNonce() > sp.currHighestMetaHdrNonce { + sp.currHighestMetaHdrNonce = metaBlock.GetNonce() } } @@ -1577,7 +1541,7 @@ func (sp *shardProcessor) createMiniBlocks( maxTxSpaceRemained := int32(maxItemsInBlock) - int32(txs) maxMbSpaceRemained := sp.getMaxMiniBlocksSpaceRemained( maxItemsInBlock, - uint32(len(destMeMiniBlocks)+hdrs), + uint32(len(destMeMiniBlocks))+hdrs, uint32(len(miniBlocks))) if maxTxSpaceRemained > 0 && maxMbSpaceRemained > 0 { @@ -1647,29 +1611,7 @@ func (sp *shardProcessor) CreateBlockHeader(bodyHandler data.BodyHandler, round sp.appStatusHandler.SetUInt64Value(core.MetricNumTxInBlock, uint64(totalTxCount)) sp.appStatusHandler.SetUInt64Value(core.MetricNumMiniBlocks, uint64(len(body))) - sp.hdrsForCurrBlock.mutHdrsForBlock.RLock() - usedMetaHdrsInfo := make([]*nonceAndHashInfo, 0) - for metaBlockHash, hdrInfo := range sp.hdrsForCurrBlock.hdrHashAndInfo { - if !hdrInfo.usedInBlock { - continue - } - - usedMetaHdrsInfo = append(usedMetaHdrsInfo, &nonceAndHashInfo{nonce: hdrInfo.hdr.GetNonce(), hash: []byte(metaBlockHash)}) - } - sp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() - - if len(usedMetaHdrsInfo) > 1 { - sort.Slice(usedMetaHdrsInfo, func(i, j int) bool { - return usedMetaHdrsInfo[i].nonce < usedMetaHdrsInfo[j].nonce - }) - } - - usedMetaHdrsHashes := make([][]byte, len(usedMetaHdrsInfo)) - for i := 0; i < len(usedMetaHdrsInfo); i++ { - usedMetaHdrsHashes[i] = usedMetaHdrsInfo[i].hash - } - - header.MetaBlockHashes = usedMetaHdrsHashes + header.MetaBlockHashes = sp.sortHdrsHashesForCurrentBlock(true) sp.blockSizeThrottler.Add( round, @@ -1774,27 +1716,14 @@ func (sp *shardProcessor) addProcessedMiniBlock(metaBlockHash []byte, miniBlockH sp.mutProcessedMiniBlocks.Unlock() } -//func (sp *shardProcessor) removeProcessedMiniBlock(miniBlockHash []byte) { -// sp.mutProcessedMiniBlocks.Lock() -// for _, miniBlocksProcessed := range sp.processedMiniBlocks { -// _, isProcessed := miniBlocksProcessed[string(miniBlockHash)] -// if isProcessed { -// delete(miniBlocksProcessed, string(miniBlockHash)) -// } -// } -// sp.mutProcessedMiniBlocks.Unlock() -//} - -//TODO: Replace this method with the commented one above -func (sp *shardProcessor) removeProcessedMiniBlock(metaBlockHash []byte, miniBlockHash []byte) { +func (sp *shardProcessor) removeProcessedMiniBlock(miniBlockHash []byte) { sp.mutProcessedMiniBlocks.Lock() - miniBlocksProcessed, ok := sp.processedMiniBlocks[string(metaBlockHash)] - if !ok { - sp.mutProcessedMiniBlocks.Unlock() - return + for _, miniBlocksProcessed := range sp.processedMiniBlocks { + _, isProcessed := miniBlocksProcessed[string(miniBlockHash)] + if isProcessed { + delete(miniBlocksProcessed, string(miniBlockHash)) + } } - - delete(miniBlocksProcessed, string(miniBlockHash)) sp.mutProcessedMiniBlocks.Unlock() } @@ -1847,3 +1776,51 @@ func (sp *shardProcessor) CreateBlockStarted() { sp.hdrsForCurrBlock.hdrHashAndInfo = make(map[string]*hdrInfo) sp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() } + +func (sp *shardProcessor) sortHdrsForCurrentBlock(usedInBlock bool) ([]*block.MetaBlock, error) { + hdrsForCurrentBlock := 
make([]*block.MetaBlock, 0) + + sp.hdrsForCurrBlock.mutHdrsForBlock.RLock() + for _, hdrInfo := range sp.hdrsForCurrBlock.hdrHashAndInfo { + if hdrInfo.usedInBlock != usedInBlock { + continue + } + + metaHdr, ok := hdrInfo.hdr.(*block.MetaBlock) + if !ok { + sp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() + return nil, process.ErrWrongTypeAssertion + } + + hdrsForCurrentBlock = append(hdrsForCurrentBlock, metaHdr) + } + sp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() + + return hdrsForCurrentBlock, nil +} + +func (sp *shardProcessor) sortHdrsHashesForCurrentBlock(usedInBlock bool) [][]byte { + sp.hdrsForCurrBlock.mutHdrsForBlock.RLock() + hdrsForCurrentBlockInfo := make([]*nonceAndHashInfo, 0) + for metaBlockHash, hdrInfo := range sp.hdrsForCurrBlock.hdrHashAndInfo { + if hdrInfo.usedInBlock != usedInBlock { + continue + } + + hdrsForCurrentBlockInfo = append(hdrsForCurrentBlockInfo, &nonceAndHashInfo{nonce: hdrInfo.hdr.GetNonce(), hash: []byte(metaBlockHash)}) + } + sp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() + + if len(hdrsForCurrentBlockInfo) > 1 { + sort.Slice(hdrsForCurrentBlockInfo, func(i, j int) bool { + return hdrsForCurrentBlockInfo[i].nonce < hdrsForCurrentBlockInfo[j].nonce + }) + } + + hdrsHashesForCurrentBlock := make([][]byte, len(hdrsForCurrentBlockInfo)) + for i := 0; i < len(hdrsForCurrentBlockInfo); i++ { + hdrsHashesForCurrentBlock[i] = hdrsForCurrentBlockInfo[i].hash + } + + return hdrsHashesForCurrentBlock +} diff --git a/process/block/shardblock_test.go b/process/block/shardblock_test.go index 673f0add7fe..48d720a0837 100644 --- a/process/block/shardblock_test.go +++ b/process/block/shardblock_test.go @@ -1422,30 +1422,30 @@ func TestShardProcessor_CheckMetaHeadersValidityAndFinalityShouldPass(t *testing prevMeta := genesisBlocks[sharding.MetachainShardId] prevHash, _ = core.CalculateHash(marshalizer, hasher, prevMeta) - meta := &block.MetaBlock{ + meta1 := &block.MetaBlock{ Nonce: 1, ShardInfo: shardHdrs, Round: 1, PrevHash: prevHash, PrevRandSeed: prevMeta.GetRandSeed(), } - metaBytes, _ := marshalizer.Marshal(meta) - metaHash := hasher.Compute(string(metaBytes)) - hdr.MetaBlockHashes = append(hdr.MetaBlockHashes, metaHash) + metaBytes, _ := marshalizer.Marshal(meta1) + metaHash1 := hasher.Compute(string(metaBytes)) + hdr.MetaBlockHashes = append(hdr.MetaBlockHashes, metaHash1) - tdp.MetaBlocks().Put(metaHash, meta) + tdp.MetaBlocks().Put(metaHash1, meta1) - prevHash, _ = core.CalculateHash(marshalizer, hasher, meta) - meta = &block.MetaBlock{ + prevHash, _ = core.CalculateHash(marshalizer, hasher, meta1) + meta2 := &block.MetaBlock{ Nonce: 2, ShardInfo: make([]block.ShardData, 0), Round: 2, PrevHash: prevHash, } - metaBytes, _ = marshalizer.Marshal(meta) - metaHash = hasher.Compute(string(metaBytes)) + metaBytes, _ = marshalizer.Marshal(meta2) + metaHash2 := hasher.Compute(string(metaBytes)) - tdp.MetaBlocks().Put(metaHash, meta) + tdp.MetaBlocks().Put(metaHash2, meta2) arguments := CreateMockArgumentsMultiShard() arguments.DataPool = tdp arguments.Hasher = hasher @@ -1454,33 +1454,22 @@ func TestShardProcessor_CheckMetaHeadersValidityAndFinalityShouldPass(t *testing sp, _ := blproc.NewShardProcessor(arguments) hdr.Round = 4 - err := sp.CheckMetaHeadersValidityAndFinality(&hdr) + sp.SetHdrForCurrentBlock(metaHash1, meta1, true) + sp.SetHdrForCurrentBlock(metaHash2, meta2, false) + + err := sp.CheckMetaHeadersValidityAndFinality() assert.Nil(t, err) } -func TestShardProcessor_CheckMetaHeadersValidityAndFinalityShouldErr(t *testing.T) { +func 
TestShardProcessor_CheckMetaHeadersValidityAndFinalityShouldReturnNilWhenNoMetaBlocksAreUsed(t *testing.T) { t.Parallel() - mbHdrs := make([]block.MiniBlockHeader, 0) - rootHash := []byte("rootHash") - txHash := []byte("txhash1") - txHashes := make([][]byte, 0) - txHashes = append(txHashes, txHash) - tdp := mock.NewPoolsHolderMock() genesisBlocks := createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)) sp, _ := blproc.NewShardProcessorEmptyWith3shards(tdp, genesisBlocks) - lastHdr := genesisBlocks[0] - prevHash, _ := core.CalculateHash(&mock.MarshalizerMock{}, &mock.HasherMock{}, lastHdr) - - randSeed := []byte("rand seed") - hdr := initBlockHeader(prevHash, randSeed, rootHash, mbHdrs) - - hdr.MetaBlockHashes = append(hdr.MetaBlockHashes, []byte("meta")) - hdr.Round = 0 - err := sp.CheckMetaHeadersValidityAndFinality(&hdr) - assert.Equal(t, err, process.ErrNilMetaBlockHeader) + err := sp.CheckMetaHeadersValidityAndFinality() + assert.Nil(t, err) } //------- CommitBlock @@ -2953,20 +2942,23 @@ func TestShardProcessor_GetProcessedMetaBlockFromPoolShouldWork(t *testing.T) { } //put 3 metablocks in pool - mb1Hash := []byte("meta block 1") + metaBlockHash1 := []byte("meta block 1") + metaBlock1 := createDummyMetaBlock(destShardId, destShards[0], miniblockHashes[0], miniblockHashes[1]) dataPool.MetaBlocks().Put( - mb1Hash, - createDummyMetaBlock(destShardId, destShards[0], miniblockHashes[0], miniblockHashes[1]), + metaBlockHash1, + metaBlock1, ) - mb2Hash := []byte("meta block 2") + metaBlockHash2 := []byte("meta block 2") + metaBlock2 := createDummyMetaBlock(destShardId, destShards[1], miniblockHashes[2], miniblockHashes[3]) dataPool.MetaBlocks().Put( - mb2Hash, - createDummyMetaBlock(destShardId, destShards[1], miniblockHashes[2], miniblockHashes[3]), + metaBlockHash2, + metaBlock2, ) - mb3Hash := []byte("meta block 3") + metaBlockHash3 := []byte("meta block 3") + metaBlock3 := createDummyMetaBlock(destShardId, destShards[2], miniblockHashes[4], miniblockHashes[5]) dataPool.MetaBlocks().Put( - mb3Hash, - createDummyMetaBlock(destShardId, destShards[2], miniblockHashes[4], miniblockHashes[5]), + metaBlockHash3, + metaBlock3, ) shardCoordinator := mock.NewMultipleShardsCoordinatorMock() @@ -2991,6 +2983,10 @@ func TestShardProcessor_GetProcessedMetaBlockFromPoolShouldWork(t *testing.T) { arguments.StartHeaders = createGenesisBlocks(shardCoordinator) bp, _ := blproc.NewShardProcessor(arguments) + bp.SetHdrForCurrentBlock(metaBlockHash1, metaBlock1, true) + bp.SetHdrForCurrentBlock(metaBlockHash2, metaBlock2, true) + bp.SetHdrForCurrentBlock(metaBlockHash3, metaBlock3, true) + //create mini block headers with first 3 miniblocks from miniblocks var mbHeaders := []block.MiniBlockHeader{ {Hash: miniblockHashes[0]}, @@ -2999,9 +2995,9 @@ func TestShardProcessor_GetProcessedMetaBlockFromPoolShouldWork(t *testing.T) { } hashes := [][]byte{ - mb1Hash, - mb2Hash, - mb3Hash, + metaBlockHash1, + metaBlockHash2, + metaBlockHash3, } blockHeader := &block.Header{MetaBlockHashes: hashes, MiniBlockHeaders: mbHeaders} @@ -3010,11 +3006,11 @@ func TestShardProcessor_GetProcessedMetaBlockFromPoolShouldWork(t *testing.T) { assert.Nil(t, err) //check WasMiniBlockProcessed for remaining metablocks - assert.True(t, bp.IsMiniBlockProcessed(mb2Hash, miniblockHashes[2])) - assert.False(t, bp.IsMiniBlockProcessed(mb2Hash, miniblockHashes[3])) + assert.True(t, bp.IsMiniBlockProcessed(metaBlockHash2, miniblockHashes[2])) + assert.False(t, bp.IsMiniBlockProcessed(metaBlockHash2, miniblockHashes[3])) - assert.False(t, 
bp.IsMiniBlockProcessed(mb3Hash, miniblockHashes[4])) - assert.False(t, bp.IsMiniBlockProcessed(mb3Hash, miniblockHashes[5])) + assert.False(t, bp.IsMiniBlockProcessed(metaBlockHash3, miniblockHashes[4])) + assert.False(t, bp.IsMiniBlockProcessed(metaBlockHash3, miniblockHashes[5])) } func TestBlockProcessor_RestoreBlockIntoPoolsShouldErrNilBlockHeader(t *testing.T) { @@ -3391,6 +3387,7 @@ func TestShardProcessor_RemoveAndSaveLastNotarizedMetaHdrNoDstMB(t *testing.T) { // wrong hdr type in pool and defer called dataPool.MetaBlocks().Put(currHash, shardHdr) + sp.SetHdrForCurrentBlock(currHash, shardHdr, true) hashes := make([][]byte, 0) hashes = append(hashes, currHash) @@ -3413,6 +3410,10 @@ func TestShardProcessor_RemoveAndSaveLastNotarizedMetaHdrNoDstMB(t *testing.T) { dataPool.MetaBlocks().Put(currHash, currHdr) dataPool.MetaBlocks().Put(prevHash, prevHdr) + sp.CreateBlockStarted() + sp.SetHdrForCurrentBlock(currHash, currHdr, true) + sp.SetHdrForCurrentBlock(prevHash, prevHdr, true) + hashes = make([][]byte, 0) hashes = append(hashes, currHash) hashes = append(hashes, prevHash) @@ -3573,6 +3574,9 @@ func TestShardProcessor_RemoveAndSaveLastNotarizedMetaHdrNotAllMBFinished(t *tes dataPool.MetaBlocks().Put(currHash, currHdr) dataPool.MetaBlocks().Put(prevHash, prevHdr) + sp.SetHdrForCurrentBlock(currHash, currHdr, true) + sp.SetHdrForCurrentBlock(prevHash, prevHdr, true) + hashes := make([][]byte, 0) hashes = append(hashes, currHash) hashes = append(hashes, prevHash) @@ -3718,6 +3722,9 @@ func TestShardProcessor_RemoveAndSaveLastNotarizedMetaHdrAllMBFinished(t *testin PrevHash: currHash, Nonce: 47}) + sp.SetHdrForCurrentBlock(currHash, currHdr, true) + sp.SetHdrForCurrentBlock(prevHash, prevHdr, true) + hashes := make([][]byte, 0) hashes = append(hashes, currHash) hashes = append(hashes, prevHash) @@ -3925,54 +3932,22 @@ func TestShardPreprocessor_getAllMiniBlockDstMeFromMetaShouldPass(t *testing.T) } shardHdrs := make([]block.ShardData, 0) shardHdrs = append(shardHdrs, shardHeader) + metaBlock := &block.MetaBlock{Nonce: 1, Round: 1, ShardInfo: shardHdrs} idp := initDataPool([]byte("tx_hash1")) - idp.MetaBlocksCalled = func() storage.Cacher { - return &mock.CacherStub{ - GetCalled: func(key []byte) (value interface{}, ok bool) { - if reflect.DeepEqual(key, []byte("tx1_hash")) { - return &transaction.Transaction{Nonce: 10}, true - } - return nil, false - }, - KeysCalled: func() [][]byte { - return nil - }, - LenCalled: func() int { - return 0 - }, - PeekCalled: func(key []byte) (value interface{}, ok bool) { - return &block.MetaBlock{ - Nonce: 1, - Round: 1, - ShardInfo: shardHdrs, - }, true - }, - PutCalled: func(key []byte, value interface{}) (evicted bool) { - return true - }, - RegisterHandlerCalled: func(i func(key []byte)) {}, - } - } arguments := CreateMockArgumentsMultiShard() arguments.DataPool = idp sp, _ := blproc.NewShardProcessor(arguments) - meta := block.MetaBlock{ - Nonce: 0, - ShardInfo: make([]block.ShardData, 0), - } - - metaBytes, _ := marshalizer.Marshal(meta) + metaBytes, _ := marshalizer.Marshal(metaBlock) hasher.ComputeCalled = func(s string) []byte { return []byte("cool") } metaHash := hasher.Compute(string(metaBytes)) - metablockHashes := make([][]byte, 0) - metablockHashes = append(metablockHashes, metaHash) + sp.SetHdrForCurrentBlock(metaHash, metaBlock, true) - orderedMetaBlocks, err := sp.GetAllMiniBlockDstMeFromMeta(1, metablockHashes) + orderedMetaBlocks, err := sp.GetAllMiniBlockDstMeFromMeta(1) assert.Equal(t, 1, len(orderedMetaBlocks)) assert.Equal(t, 
orderedMetaBlocks[""], metaHash) From 931e72fb431ea4187aaa51a86e0021dadbde816e Mon Sep 17 00:00:00 2001 From: miiu96 Date: Thu, 3 Oct 2019 14:39:42 +0300 Subject: [PATCH 192/234] EN-3981 : fix after review --- core/indexer/elasticsearch.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/core/indexer/elasticsearch.go b/core/indexer/elasticsearch.go index 6c468d949a2..0da6da2956b 100644 --- a/core/indexer/elasticsearch.go +++ b/core/indexer/elasticsearch.go @@ -250,14 +250,14 @@ func (ei *elasticIndexer) SaveRoundInfo(round int64, shardId uint32, signersInde ShardId: shardId, } - marshalizedSignersIndexes, err := ei.marshalizer.Marshal(roundInfo) + marshalizedRoundInfo, err := ei.marshalizer.Marshal(roundInfo) if err != nil { ei.logger.Warn("could not marshal signers indexes") return } - buff.Grow(len(marshalizedSignersIndexes)) - buff.Write(marshalizedSignersIndexes) + buff.Grow(len(marshalizedRoundInfo)) + buff.Write(marshalizedRoundInfo) req := esapi.IndexRequest{ Index: roundIndex, From 123688009cb8acf3104df66f42529b7f28a14faf Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Thu, 3 Oct 2019 19:16:01 +0300 Subject: [PATCH 193/234] logs for debug --- process/block/shardblock.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/process/block/shardblock.go b/process/block/shardblock.go index 53affc3d7a0..83320299cdc 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -256,6 +256,11 @@ func (sp *shardProcessor) ProcessBlock( return err } + log.Info(fmt.Sprintf("processed metaBlock %d out of %d", len(processedMetaHdrs), len(header.MetaBlockHashes))) + for i := 0; i < len(processedMetaHdrs); i++ { + log.Info(fmt.Sprintf("processed metablock with nonce: %d and round %d", processedMetaHdrs[i].GetNonce(), processedMetaHdrs[i].GetRound())) + } + err = sp.setMetaConsensusData(processedMetaHdrs) if err != nil { return err @@ -1555,6 +1560,11 @@ func (sp *shardProcessor) createMiniBlocks( log.Debug(errNotCritical.Error()) } + log.Info(fmt.Sprintf("processed metaBlock %d out of %d", len(processedMetaHdrs), len(usedMetaHdrsHashes))) + for i := 0; i < len(processedMetaHdrs); i++ { + log.Info(fmt.Sprintf("processed metablock with nonce: %d and round %d", processedMetaHdrs[i].GetNonce(), processedMetaHdrs[i].GetRound())) + } + err = sp.setMetaConsensusData(processedMetaHdrs) if err != nil { return nil, err From 946d6b2616fe7ccb24aed1c3fc4888a070fa1333 Mon Sep 17 00:00:00 2001 From: Sebastian Marian Date: Thu, 3 Oct 2019 20:26:19 +0300 Subject: [PATCH 194/234] * Fixed unit tests --- cmd/node/config/config.toml | 2 +- .../block/executingRewardMiniblocks_test.go | 18 ++-- integrationTests/testProcessorNode.go | 3 + process/block/shardblock.go | 85 ++++--------------- process/block/shardblock_test.go | 23 ++--- process/mock/headerHandlerStub.go | 50 +++++------ 6 files changed, 63 insertions(+), 118 deletions(-) diff --git a/cmd/node/config/config.toml b/cmd/node/config/config.toml index 7b037ab49cb..80160628fa6 100644 --- a/cmd/node/config/config.toml +++ b/cmd/node/config/config.toml @@ -154,7 +154,7 @@ Type = "LRU" [TxBlockBodyDataPool] - Size = 100 + Size = 300 Type = "LRU" [StateBlockBodyDataPool] diff --git a/integrationTests/multiShard/block/executingRewardMiniblocks_test.go b/integrationTests/multiShard/block/executingRewardMiniblocks_test.go index c0ab6298109..2d4cf4e7a91 100644 --- a/integrationTests/multiShard/block/executingRewardMiniblocks_test.go +++ b/integrationTests/multiShard/block/executingRewardMiniblocks_test.go @@ -19,9 +19,12 @@ 
import ( "github.com/stretchr/testify/assert" ) -func getRewardValue() uint32 { - //TODO: this should be read from protocol config - return uint32(1000) +func getRewardValue(node *integrationTests.TestProcessorNode) uint64 { + return node.EconomicsData.RewardsValue() +} + +func getLeaderPercentage(node *integrationTests.TestProcessorNode) float64 { + return node.EconomicsData.LeaderPercentage() } func TestExecuteBlocksWithTransactionsAndCheckRewards(t *testing.T) { @@ -324,14 +327,14 @@ func verifyRewardsForMetachain( mapRewardsForMeta map[string]uint32, nodes map[uint32][]*integrationTests.TestProcessorNode, ) { - rewardValue := getRewardValue() + rewardValue := getRewardValue(nodes[0][0]) for metaAddr, numOfTimesRewarded := range mapRewardsForMeta { addrContainer, _ := integrationTests.TestAddressConverter.CreateAddressFromPublicKeyBytes([]byte(metaAddr)) acc, err := nodes[0][0].AccntState.GetExistingAccount(addrContainer) assert.Nil(t, err) - expectedBalance := big.NewInt(int64(numOfTimesRewarded * rewardValue)) + expectedBalance := big.NewInt(int64(uint64(numOfTimesRewarded) * rewardValue)) assert.Equal(t, expectedBalance, acc.(*state.Account).Balance) } } @@ -344,9 +347,8 @@ func verifyRewardsForShards( gasPrice uint64, gasLimit uint64, ) { - rewardValue := getRewardValue() - // TODO: fee percentage should be read from protocol config - feePerTxForLeader := gasPrice * gasLimit / 2 + rewardValue := getRewardValue(nodesMap[0][0]) + feePerTxForLeader := float64(gasPrice) * float64(gasLimit) * getLeaderPercentage(nodesMap[0][0]) for address, nbRewards := range mapRewardsForAddress { addrContainer, _ := integrationTests.TestAddressConverter.CreateAddressFromPublicKeyBytes([]byte(address)) diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index b5a46498d13..48dfb7606f0 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -108,6 +108,7 @@ type TestProcessorNode struct { ScProcessor process.SmartContractProcessor RewardsProcessor process.RewardTransactionProcessor PreProcessorsContainer process.PreProcessorsContainer + EconomicsData *economics.EconomicsData ForkDetector process.ForkDetector BlockProcessor process.BlockProcessor @@ -348,6 +349,8 @@ func (tpn *TestProcessorNode) initInnerProcessors() { }, ) + tpn.EconomicsData = economicsData + interimProcFactory, _ := shard.NewIntermediateProcessorsContainerFactory( tpn.ShardCoordinator, TestMarshalizer, diff --git a/process/block/shardblock.go b/process/block/shardblock.go index 349a4e9a563..520f6c078fa 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -328,12 +328,6 @@ func (sp *shardProcessor) checkMetaHeadersValidityAndFinality() error { return nil } - if len(usedMetaHdrs) > 1 { - sort.Slice(usedMetaHdrs, func(i, j int) bool { - return usedMetaHdrs[i].Nonce < usedMetaHdrs[j].Nonce - }) - } - for _, metaHdr := range usedMetaHdrs { err = sp.isHdrConstructionValid(metaHdr, tmpNotedHdr) if err != nil { @@ -391,53 +385,6 @@ func (sp *shardProcessor) checkMetaHdrFinality(header data.HeaderHandler) error return nil } -//func (sp *shardProcessor) getFinalityAttestingHeaders( -// highestNonceHdr data.HeaderHandler, -// finality uint64, -//) ([]*hashAndHdr, error) { -// -// if highestNonceHdr == nil || highestNonceHdr.IsInterfaceNil() { -// return nil, process.ErrNilBlockHeader -// } -// -// metaBlockPool := sp.dataPool.MetaBlocks() -// if metaBlockPool == nil { -// return nil, process.ErrNilMetaBlockPool -// } -// -// orderedMetaBlocks 
:= make([]*hashAndHdr, 0) -// // get keys and arrange them into shards -// for _, key := range metaBlockPool.Keys() { -// val, _ := metaBlockPool.Peek(key) -// if val == nil { -// continue -// } -// -// hdr, ok := val.(*block.MetaBlock) -// if !ok { -// continue -// } -// -// isHdrNonceLowerOrEqualThanHighestNonce := hdr.GetNonce() <= highestNonceHdr.GetNonce() -// isHdrNonceHigherThanFinalNonce := hdr.GetNonce() > highestNonceHdr.GetNonce()+finality -// -// if isHdrNonceLowerOrEqualThanHighestNonce || -// isHdrNonceHigherThanFinalNonce { -// continue -// } -// -// orderedMetaBlocks = append(orderedMetaBlocks, &hashAndHdr{hdr: hdr, hash: key}) -// } -// -// if len(orderedMetaBlocks) > 1 { -// sort.Slice(orderedMetaBlocks, func(i, j int) bool { -// return orderedMetaBlocks[i].hdr.GetNonce() < orderedMetaBlocks[j].hdr.GetNonce() -// }) -// } -// -// return orderedMetaBlocks, nil -//} - // check if header has the same miniblocks as presented in body func (sp *shardProcessor) checkHeaderBodyCorrelation(hdr *block.Header, body block.Body) error { mbHashesFromHdr := make(map[string]*block.MiniBlockHeader) @@ -1083,40 +1030,30 @@ func (sp *shardProcessor) receivedMetaBlock(metaBlockHash []byte) { return } - //metaHdrsNoncesCache := sp.dataPool.HeadersNonces() - //if metaHdrsNoncesCache == nil && sp.metaBlockFinality > 0 { - // return - //} - // - //miniBlksCache := sp.dataPool.MiniBlocks() - //if miniBlksCache == nil || miniBlksCache.IsInterfaceNil() { - // return - //} - obj, ok := metaBlockPool.Peek(metaBlockHash) if !ok { return } - metaBlock, ok := obj.(data.HeaderHandler) + metaBlock, ok := obj.(*block.MetaBlock) if !ok { return } log.Debug(fmt.Sprintf("received metablock with hash %s and nonce %d from network\n", core.ToB64(metaBlockHash), - metaBlock.GetNonce())) + metaBlock.Nonce)) sp.hdrsForCurrBlock.mutHdrsForBlock.Lock() if sp.hdrsForCurrBlock.missingHdrs > 0 || sp.hdrsForCurrBlock.missingFinalHdrs > 0 { hdrInfoForHash := sp.hdrsForCurrBlock.hdrHashAndInfo[string(metaBlockHash)] if hdrInfoForHash != nil && (hdrInfoForHash.hdr == nil || hdrInfoForHash.hdr.IsInterfaceNil()) { - sp.hdrsForCurrBlock.hdrHashAndInfo[string(metaBlockHash)].hdr = metaBlock + hdrInfoForHash.hdr = metaBlock sp.hdrsForCurrBlock.missingHdrs-- - if metaBlock.GetNonce() > sp.currHighestMetaHdrNonce { - sp.currHighestMetaHdrNonce = metaBlock.GetNonce() + if metaBlock.Nonce > sp.currHighestMetaHdrNonce { + sp.currHighestMetaHdrNonce = metaBlock.Nonce } } @@ -1134,7 +1071,8 @@ func (sp *shardProcessor) receivedMetaBlock(metaBlockHash []byte) { missingFinalHdrs := sp.hdrsForCurrBlock.missingFinalHdrs sp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() - if missingHdrs == 0 && missingFinalHdrs == 0 { + allMissingNeededHdrsReceived := missingHdrs == 0 && missingFinalHdrs == 0 + if allMissingNeededHdrsReceived { sp.chRcvAllMetaHdrs <- true } } else { @@ -1780,12 +1718,19 @@ func (sp *shardProcessor) sortHdrsForCurrentBlock(usedInBlock bool) ([]*block.Me } sp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() + if len(hdrsForCurrentBlock) > 1 { + sort.Slice(hdrsForCurrentBlock, func(i, j int) bool { + return hdrsForCurrentBlock[i].Nonce < hdrsForCurrentBlock[j].Nonce + }) + } + return hdrsForCurrentBlock, nil } func (sp *shardProcessor) sortHdrsHashesForCurrentBlock(usedInBlock bool) [][]byte { - sp.hdrsForCurrBlock.mutHdrsForBlock.RLock() hdrsForCurrentBlockInfo := make([]*nonceAndHashInfo, 0) + + sp.hdrsForCurrBlock.mutHdrsForBlock.RLock() for metaBlockHash, hdrInfo := range sp.hdrsForCurrBlock.hdrHashAndInfo { if hdrInfo.usedInBlock != 
usedInBlock { continue diff --git a/process/block/shardblock_test.go b/process/block/shardblock_test.go index 87d21157841..0da42d6b202 100644 --- a/process/block/shardblock_test.go +++ b/process/block/shardblock_test.go @@ -2466,19 +2466,22 @@ func TestShardProcessor_ReceivedMetaBlockShouldRequestMissingMiniBlocks(t *testi miniBlockHash2 := []byte("miniblock hash 2") miniBlockHash3 := []byte("miniblock hash 3") - metaBlock := mock.HeaderHandlerStub{ - GetMiniBlockHeadersWithDstCalled: func(destId uint32) map[string]uint32 { - return map[string]uint32{ - string(miniBlockHash1): 0, - string(miniBlockHash2): 0, - string(miniBlockHash3): 0, - } - }, - } + metaBlock := &block.MetaBlock{ + Nonce: 1, + Round: 1, + ShardInfo: []block.ShardData{ + block.ShardData{ + ShardId: 1, + ShardMiniBlockHeaders: []block.ShardMiniBlockHeader{ + block.ShardMiniBlockHeader{Hash: miniBlockHash1, SenderShardId: 1, ReceiverShardId: 0}, + block.ShardMiniBlockHeader{Hash: miniBlockHash2, SenderShardId: 1, ReceiverShardId: 0}, + block.ShardMiniBlockHeader{Hash: miniBlockHash3, SenderShardId: 1, ReceiverShardId: 0}, + }}, + }} //put this metaBlock inside datapool metaBlockHash := []byte("metablock hash") - dataPool.MetaBlocks().Put(metaBlockHash, &metaBlock) + dataPool.MetaBlocks().Put(metaBlockHash, metaBlock) //put the existing miniblock inside datapool dataPool.MiniBlocks().Put(miniBlockHash1, &block.MiniBlock{}) diff --git a/process/mock/headerHandlerStub.go b/process/mock/headerHandlerStub.go index 41f496d4bd9..b35061cb422 100644 --- a/process/mock/headerHandlerStub.go +++ b/process/mock/headerHandlerStub.go @@ -4,26 +4,10 @@ type HeaderHandlerStub struct { GetMiniBlockHeadersWithDstCalled func(destId uint32) map[string]uint32 } -func (hhs *HeaderHandlerStub) GetShardID() uint32 { - return 1 -} - -func (hhs *HeaderHandlerStub) GetNonce() uint64 { - return 1 -} - func (hhs *HeaderHandlerStub) GetEpoch() uint32 { panic("implement me") } -func (hhs *HeaderHandlerStub) GetRound() uint64 { - return 1 -} - -func (hhs *HeaderHandlerStub) GetTimeStamp() uint64 { - panic("implement me") -} - func (hhs *HeaderHandlerStub) GetRootHash() []byte { panic("implement me") } @@ -48,6 +32,10 @@ func (hhs *HeaderHandlerStub) GetSignature() []byte { panic("implement me") } +func (hhs *HeaderHandlerStub) GetTimeStamp() uint64 { + panic("implement me") +} + func (hhs *HeaderHandlerStub) GetTxCount() uint32 { panic("implement me") } @@ -96,16 +84,28 @@ func (hhs *HeaderHandlerStub) SetTxCount(txCount uint32) { panic("implement me") } -func (hhs *HeaderHandlerStub) GetMiniBlockHeadersWithDst(destId uint32) map[string]uint32 { - return hhs.GetMiniBlockHeadersWithDstCalled(destId) +func (hhs *HeaderHandlerStub) ItemsInBody() uint32 { + panic("implement me") } -func (hhs *HeaderHandlerStub) GetMiniBlockProcessed(hash []byte) bool { +func (hhs *HeaderHandlerStub) ItemsInHeader() uint32 { panic("implement me") } -func (hhs *HeaderHandlerStub) SetMiniBlockProcessed(hash []byte, processed bool) { - panic("implement me") +func (hhs *HeaderHandlerStub) GetShardID() uint32 { + return 1 +} + +func (hhs *HeaderHandlerStub) GetNonce() uint64 { + return 1 +} + +func (hhs *HeaderHandlerStub) GetRound() uint64 { + return 1 +} + +func (hhs *HeaderHandlerStub) GetMiniBlockHeadersWithDst(destId uint32) map[string]uint32 { + return hhs.GetMiniBlockHeadersWithDstCalled(destId) } // IsInterfaceNil returns true if there is no value under the interface @@ -115,11 +115,3 @@ func (hhs *HeaderHandlerStub) IsInterfaceNil() bool { } return false } - -func (hhs 
*HeaderHandlerStub) ItemsInHeader() uint32 { - panic("implement me") -} - -func (hhs *HeaderHandlerStub) ItemsInBody() uint32 { - panic("implement me") -} From 569d4f94b1618cec57fa38596dbb00a577e047ea Mon Sep 17 00:00:00 2001 From: AdoAdoAdo Date: Thu, 3 Oct 2019 20:38:56 +0300 Subject: [PATCH 195/234] proess: fix: always add reward miniblocks --- cmd/node/config/config.toml | 2 +- process/block/shardblock.go | 16 +++++++--------- 2 files changed, 8 insertions(+), 10 deletions(-) diff --git a/cmd/node/config/config.toml b/cmd/node/config/config.toml index 7b037ab49cb..80160628fa6 100644 --- a/cmd/node/config/config.toml +++ b/cmd/node/config/config.toml @@ -154,7 +154,7 @@ Type = "LRU" [TxBlockBodyDataPool] - Size = 100 + Size = 300 Type = "LRU" [StateBlockBodyDataPool] diff --git a/process/block/shardblock.go b/process/block/shardblock.go index 83320299cdc..9ed1ccd3574 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -1582,16 +1582,14 @@ func (sp *shardProcessor) createMiniBlocks( uint32(len(destMeMiniBlocks)+len(usedMetaHdrsHashes)), uint32(len(miniBlocks))) - if maxTxSpaceRemained > 0 && maxMbSpaceRemained > 0 { - mbFromMe := sp.txCoordinator.CreateMbsAndProcessTransactionsFromMe( - uint32(maxTxSpaceRemained), - uint32(maxMbSpaceRemained), - round, - haveTime) + mbFromMe := sp.txCoordinator.CreateMbsAndProcessTransactionsFromMe( + uint32(maxTxSpaceRemained), + uint32(maxMbSpaceRemained), + round, + haveTime) - if len(mbFromMe) > 0 { - miniBlocks = append(miniBlocks, mbFromMe...) - } + if len(mbFromMe) > 0 { + miniBlocks = append(miniBlocks, mbFromMe...) } log.Info(fmt.Sprintf("creating mini blocks has been finished: created %d mini blocks\n", len(miniBlocks))) From 0043f843933fef5d57b14ee773eb718e3bca9f37 Mon Sep 17 00:00:00 2001 From: Sebastian Marian Date: Thu, 3 Oct 2019 20:44:20 +0300 Subject: [PATCH 196/234] * Fixed condition on remained space --- process/block/shardblock.go | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/process/block/shardblock.go b/process/block/shardblock.go index 520f6c078fa..c283161cfc0 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -1466,16 +1466,14 @@ func (sp *shardProcessor) createMiniBlocks( uint32(len(destMeMiniBlocks))+hdrs, uint32(len(miniBlocks))) - if maxTxSpaceRemained > 0 && maxMbSpaceRemained > 0 { - mbFromMe := sp.txCoordinator.CreateMbsAndProcessTransactionsFromMe( - uint32(maxTxSpaceRemained), - uint32(maxMbSpaceRemained), - round, - haveTime) + mbFromMe := sp.txCoordinator.CreateMbsAndProcessTransactionsFromMe( + uint32(maxTxSpaceRemained), + uint32(maxMbSpaceRemained), + round, + haveTime) - if len(mbFromMe) > 0 { - miniBlocks = append(miniBlocks, mbFromMe...) - } + if len(mbFromMe) > 0 { + miniBlocks = append(miniBlocks, mbFromMe...) 
} log.Info(fmt.Sprintf("creating mini blocks has been finished: created %d mini blocks\n", len(miniBlocks))) From 440051b29cd701d5284d9697bfe78f8d83399655 Mon Sep 17 00:00:00 2001 From: iulianpascalau Date: Thu, 3 Oct 2019 23:40:29 +0300 Subject: [PATCH 197/234] fixed tests --- .../frontend/wallet/txInterception_test.go | 28 ---------- integrationTests/mock/feeHandlerMock.go | 27 --------- integrationTests/mock/feeHandlerStub.go | 27 +++++++++ .../block/executingMiniblocks_test.go | 10 ++-- .../smartContract/testInitializer.go | 18 +++++- .../transaction/interceptedResolvedTx_test.go | 11 ++-- integrationTests/testGameHelperFunctions.go | 8 ++- integrationTests/testInitializer.go | 10 ++-- integrationTests/testProcessorNode.go | 56 +++++++++++++------ integrationTests/testSyncNode.go | 1 + integrationTests/vm/testInitializer.go | 4 +- process/block/preprocess/transactions_test.go | 4 +- process/block/shardblock_test.go | 14 ++--- process/coordinator/process_test.go | 4 +- process/sync/shardblock_test.go | 2 +- 15 files changed, 116 insertions(+), 108 deletions(-) delete mode 100644 integrationTests/mock/feeHandlerMock.go create mode 100644 integrationTests/mock/feeHandlerStub.go diff --git a/integrationTests/frontend/wallet/txInterception_test.go b/integrationTests/frontend/wallet/txInterception_test.go index 43cb543479e..18a49ddc2b8 100644 --- a/integrationTests/frontend/wallet/txInterception_test.go +++ b/integrationTests/frontend/wallet/txInterception_test.go @@ -11,34 +11,6 @@ import ( "github.com/stretchr/testify/assert" ) -func TestInterceptedTxFromFrontendGeneratedParamsWithoutData(t *testing.T) { - testInterceptedTxFromFrontendGeneratedParams( - t, - 0, - big.NewInt(10), - "53669be65aac358a6add8e8a8b1251bb994dc1e4a0cc885956f5ecd53396f0d8", - "fe73b8960894941bcf100f7378dba2a6fa2591343413710073c2515817b27dc5", - "f2ae2ad6585f3b44bbbe84f93c3c5ec04a53799d24c04a1dd519666f2cd3dc3d7fbe6c75550b0eb3567fdc0708a8534ae3e5393d0dd9e03c70972f2e716a7007", - 0, - 0, - "", - ) -} - -func TestInterceptedTxFromFrontendGeneratedParams(t *testing.T) { - testInterceptedTxFromFrontendGeneratedParams( - t, - 0, - big.NewInt(10), - "53669be65aac358a6add8e8a8b1251bb994dc1e4a0cc885956f5ecd53396f0d8", - "6c9f95220912dfe4d7be57c26f8f4d1594fee53fc6d958fb9009ed744a681a5a", - "e0e5d089dd7d47abfeabf17f4d4ab0022c32b844dfd8124e45c20370d1a1049202c50d8e9c4e8841ce65848b5f0503212e9879f0556706dc6a849d789dfdcb01", - 0, - 0, - "aa@bbbb@cccc", - ) -} - func TestInterceptedTxFromFrontendGeneratedParamsAllParams(t *testing.T) { testInterceptedTxFromFrontendGeneratedParams( t, diff --git a/integrationTests/mock/feeHandlerMock.go b/integrationTests/mock/feeHandlerMock.go deleted file mode 100644 index 881d128d98b..00000000000 --- a/integrationTests/mock/feeHandlerMock.go +++ /dev/null @@ -1,27 +0,0 @@ -package mock - -type FeeHandlerMock struct { - MinGasPriceCalled func() uint64 - MinGasLimitForTxCalled func() uint64 - MinTxFeeCalled func() uint64 -} - -func (fhm *FeeHandlerMock) MinGasPrice() uint64 { - return fhm.MinGasPriceCalled() -} - -func (fhm *FeeHandlerMock) MinGasLimitForTx() uint64 { - return fhm.MinGasLimitForTxCalled() -} - -func (fhm *FeeHandlerMock) MinTxFee() uint64 { - return fhm.MinTxFeeCalled() -} - -// IsInterfaceNil returns true if there is no value under the interface -func (fhm *FeeHandlerMock) IsInterfaceNil() bool { - if fhm == nil { - return true - } - return false -} diff --git a/integrationTests/mock/feeHandlerStub.go b/integrationTests/mock/feeHandlerStub.go new file mode 100644 index 
00000000000..a2a1fe31c87 --- /dev/null +++ b/integrationTests/mock/feeHandlerStub.go @@ -0,0 +1,27 @@ +package mock + +type FeeHandlerStub struct { + MinGasPriceCalled func() uint64 + MinGasLimitForTxCalled func() uint64 + MinTxFeeCalled func() uint64 +} + +func (fhs *FeeHandlerStub) MinGasPrice() uint64 { + return fhs.MinGasPriceCalled() +} + +func (fhs *FeeHandlerStub) MinGasLimitForTx() uint64 { + return fhs.MinGasLimitForTxCalled() +} + +func (fhs *FeeHandlerStub) MinTxFee() uint64 { + return fhs.MinTxFeeCalled() +} + +// IsInterfaceNil returns true if there is no value under the interface +func (fhs *FeeHandlerStub) IsInterfaceNil() bool { + if fhs == nil { + return true + } + return false +} diff --git a/integrationTests/multiShard/block/executingMiniblocks_test.go b/integrationTests/multiShard/block/executingMiniblocks_test.go index 61d23cef9aa..d3ca59d7ec3 100644 --- a/integrationTests/multiShard/block/executingMiniblocks_test.go +++ b/integrationTests/multiShard/block/executingMiniblocks_test.go @@ -30,8 +30,6 @@ func TestShouldProcessBlocksInMultiShardArchitecture(t *testing.T) { valMinting := big.NewInt(100) valToTransferPerTx := big.NewInt(2) - gasPricePerTx := uint64(2) - gasLimitPerTx := uint64(2) advertiser := integrationTests.CreateMessengerWithKadDht(context.Background(), "") _ = advertiser.Bootstrap() @@ -81,8 +79,8 @@ func TestShouldProcessBlocksInMultiShardArchitecture(t *testing.T) { sendersPrivateKeys, receiversPublicKeys, valToTransferPerTx, - gasPricePerTx, - gasLimitPerTx, + integrationTests.MinTxGasPrice, + integrationTests.MinTxGasLimit, ) fmt.Println("Delaying for disseminating transactions...") time.Sleep(time.Second * 5) @@ -94,8 +92,8 @@ func TestShouldProcessBlocksInMultiShardArchitecture(t *testing.T) { round, nonce = integrationTests.ProposeAndSyncOneBlock(t, nodes, idxProposers, round, nonce) } - gasPricePerTxBigInt := big.NewInt(int64(gasPricePerTx)) - gasLimitPerTxBigInt := big.NewInt(int64(gasLimitPerTx)) + gasPricePerTxBigInt := big.NewInt(0).SetUint64(integrationTests.MinTxGasPrice) + gasLimitPerTxBigInt := big.NewInt(0).SetUint64(integrationTests.MinTxGasLimit) gasValue := big.NewInt(0).Mul(gasPricePerTxBigInt, gasLimitPerTxBigInt) totalValuePerTx := big.NewInt(0).Add(gasValue, valToTransferPerTx) fmt.Println("Test nodes from proposer shard to have the correct balances...") diff --git a/integrationTests/multiShard/smartContract/testInitializer.go b/integrationTests/multiShard/smartContract/testInitializer.go index 86c2d1b4fc4..775402d92e8 100644 --- a/integrationTests/multiShard/smartContract/testInitializer.go +++ b/integrationTests/multiShard/smartContract/testInitializer.go @@ -36,6 +36,7 @@ import ( "github.com/ElrondNetwork/elrond-go/dataRetriever/requestHandlers" "github.com/ElrondNetwork/elrond-go/dataRetriever/shardedData" "github.com/ElrondNetwork/elrond-go/hashing/sha256" + "github.com/ElrondNetwork/elrond-go/integrationTests" "github.com/ElrondNetwork/elrond-go/integrationTests/mock" "github.com/ElrondNetwork/elrond-go/marshal" "github.com/ElrondNetwork/elrond-go/node" @@ -301,6 +302,18 @@ func createNetNode( uint64Converter := uint64ByteSlice.NewBigEndianConverter() dataPacker, _ := partitioning.NewSimpleDataPacker(testMarshalizer) + feeHandler := &mock.FeeHandlerStub{ + MinGasPriceCalled: func() uint64 { + return integrationTests.MinTxGasPrice + }, + MinGasLimitForTxCalled: func() uint64 { + return integrationTests.MinTxGasLimit + }, + MinTxFeeCalled: func() uint64 { + return integrationTests.MinTxGasLimit * 
integrationTests.MinTxGasPrice + }, + } + interceptorContainerFactory, _ := shard.NewInterceptorsContainerFactory( accntAdapter, shardCoordinator, @@ -315,6 +328,7 @@ func createNetNode( dPool, testAddressConverter, maxTxNonceDeltaAllowed, + feeHandler, ) interceptorsContainer, err := interceptorContainerFactory.Create() if err != nil { @@ -404,7 +418,7 @@ func createNetNode( scProcessor, rewardsHandler, txTypeHandler, - &mock.FeeHandlerMock{ + &mock.FeeHandlerStub{ MinGasLimitForTxCalled: func() uint64 { return 5 }, @@ -431,7 +445,7 @@ func createNetNode( scProcessor, rewardProcessor, internalTxProducer, - &mock.FeeHandlerMock{ + &mock.FeeHandlerStub{ MinGasLimitForTxCalled: func() uint64 { return 5 }, diff --git a/integrationTests/singleShard/transaction/interceptedResolvedTx_test.go b/integrationTests/singleShard/transaction/interceptedResolvedTx_test.go index e4cdcd8d476..077a19b2516 100644 --- a/integrationTests/singleShard/transaction/interceptedResolvedTx_test.go +++ b/integrationTests/singleShard/transaction/interceptedResolvedTx_test.go @@ -53,11 +53,12 @@ func TestNode_RequestInterceptTransactionWithMessenger(t *testing.T) { integrationTests.CreateMintingForSenders([]*integrationTests.TestProcessorNode{nRequester}, 0, []crypto.PrivateKey{nRequester.OwnAccount.SkTxSign}, valMinting) //Step 1. Generate a signed transaction tx := transaction.Transaction{ - Nonce: 0, - Value: big.NewInt(0), - RcvAddr: integrationTests.TestHasher.Compute("receiver"), - SndAddr: buffPk1, - Data: "tx notarized data", + Nonce: 0, + Value: big.NewInt(0), + RcvAddr: integrationTests.TestHasher.Compute("receiver"), + SndAddr: buffPk1, + Data: "tx notarized data", + GasLimit: integrationTests.MinTxGasLimit, } txBuff, _ := integrationTests.TestMarshalizer.Marshal(&tx) diff --git a/integrationTests/testGameHelperFunctions.go b/integrationTests/testGameHelperFunctions.go index dbfdbc72579..b574e7576cf 100644 --- a/integrationTests/testGameHelperFunctions.go +++ b/integrationTests/testGameHelperFunctions.go @@ -25,7 +25,7 @@ func DeployScTx(nodes []*TestProcessorNode, senderIdx int, scCode string) { sndAddr: nodes[senderIdx].OwnAccount.PkTxSignBytes, data: scCode + "@" + hex.EncodeToString(factory.IELEVirtualMachine), gasLimit: 100000, - gasPrice: 0, + gasPrice: MinTxGasPrice, }) nodes[senderIdx].OwnAccount.Nonce++ _, _ = nodes[senderIdx].SendTransaction(txDeploy) @@ -55,7 +55,7 @@ func PlayerJoinsGame( sndAddr: player.Address.Bytes(), data: fmt.Sprintf("joinGame@%s", round), gasLimit: 5000, - gasPrice: 0, + gasPrice: MinTxGasPrice, }) player.Nonce++ newBalance := big.NewInt(0) @@ -87,7 +87,7 @@ func NodeCallsRewardAndSend( sndAddr: nodes[idxNodeOwner].OwnAccount.PkTxSignBytes, data: fmt.Sprintf("rewardAndSendToWallet@%s@%s@%X", round, hex.EncodeToString(winnerAddress), prize), gasLimit: 30000, - gasPrice: 0, + gasPrice: MinTxGasPrice, }) nodes[idxNodeOwner].OwnAccount.Nonce++ @@ -123,6 +123,7 @@ func NodeDoesWithdraw( sndAddr: nodes[idxNode].OwnAccount.PkTxSignBytes, data: fmt.Sprintf("withdraw@%X", withdrawValue), gasLimit: 5000, + gasPrice: MinTxGasPrice, }) nodes[idxNode].OwnAccount.Nonce++ _, _ = nodes[idxNode].SendTransaction(txScCall) @@ -150,6 +151,7 @@ func NodeDoesTopUp( sndAddr: nodes[idxNode].OwnAccount.PkTxSignBytes, data: fmt.Sprintf("topUp"), gasLimit: 5000, + gasPrice: MinTxGasPrice, }) nodes[idxNode].OwnAccount.Nonce++ _, _ = nodes[idxNode].SendTransaction(txScCall) diff --git a/integrationTests/testInitializer.go b/integrationTests/testInitializer.go index efc5f35ee91..bb2597f870a 100644 --- 
a/integrationTests/testInitializer.go +++ b/integrationTests/testInitializer.go @@ -431,7 +431,7 @@ func CreateSimpleTxProcessor(accnts state.AccountsAdapter) process.TransactionPr &mock.SCProcessorMock{}, &mock.UnsignedTxHandlerMock{}, &mock.TxTypeHandlerMock{}, - &mock.FeeHandlerMock{ + &mock.FeeHandlerStub{ MinGasPriceCalled: func() uint64 { return 0 }, @@ -749,8 +749,8 @@ type txArgs struct { rcvAddr []byte sndAddr []byte data string - gasPrice int - gasLimit int + gasPrice uint64 + gasLimit uint64 } func generateTransferTx( @@ -789,8 +789,8 @@ func generateTx( Value: args.value, RcvAddr: args.rcvAddr, SndAddr: args.sndAddr, - GasPrice: uint64(args.gasPrice), - GasLimit: uint64(args.gasLimit), + GasPrice: args.gasPrice, + GasLimit: args.gasLimit, Data: args.data, } txBuff, _ := TestMarshalizer.Marshal(tx) diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index 01b193da87f..97c87b31ae0 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -61,6 +61,13 @@ var TestMultiSig = mock.NewMultiSigner(1) // TestUint64Converter represents an uint64 to byte slice converter var TestUint64Converter = uint64ByteSlice.NewBigEndianConverter() +// MinTxGasPrice minimum gas price required by a transaction +//TODO refactor all tests to pass with a non zero value +var MinTxGasPrice = uint64(0) + +// MinTxGasLimit minimum gas limit required by a transaction +var MinTxGasLimit = uint64(4) + const maxTxNonceDeltaAllowed = 8000 // TestKeyPair holds a pair of private/public Keys @@ -94,6 +101,8 @@ type TestProcessorNode struct { BlockChain data.ChainHandler GenesisBlocks map[uint32]data.HeaderHandler + EconomicsData *economics.EconomicsData + InterceptorsContainer process.InterceptorsContainer ResolversContainer dataRetriever.ResolversContainer ResolverFinder dataRetriever.ResolversFinder @@ -203,6 +212,7 @@ func (tpn *TestProcessorNode) initTestNode() { tpn.AccntState, _, _ = CreateAccountsDB(0) tpn.initChainHandler() tpn.GenesisBlocks = CreateGenesisBlocks(tpn.ShardCoordinator) + tpn.initEconomicsData() tpn.initInterceptors() tpn.initResolvers() tpn.initInnerProcessors() @@ -244,6 +254,30 @@ func (tpn *TestProcessorNode) initChainHandler() { } } +func (tpn *TestProcessorNode) initEconomicsData() { + economicsData := economics.NewEconomicsData( + &config.ConfigEconomics{ + EconomicsAddresses: config.EconomicsAddresses{ + CommunityAddress: "addr1", + BurnAddress: "addr2", + }, + RewardsSettings: config.RewardsSettings{ + RewardsValue: 1000, + CommunityPercentage: 0.10, + LeaderPercentage: 0.50, + BurnPercentage: 0.40, + }, + FeeSettings: config.FeeSettings{ + MinGasPrice: MinTxGasPrice, + MinGasLimitForTx: MinTxGasLimit, + MinTxFee: MinTxGasPrice * MinTxGasLimit, + }, + }, + ) + + tpn.EconomicsData = economicsData +} + func (tpn *TestProcessorNode) initInterceptors() { var err error if tpn.ShardCoordinator.SelfId() == sharding.MetachainShardId { @@ -277,6 +311,7 @@ func (tpn *TestProcessorNode) initInterceptors() { tpn.ShardDataPool, TestAddressConverter, maxTxNonceDeltaAllowed, + tpn.EconomicsData, ) tpn.InterceptorsContainer, err = interceptorContainerFactory.Create() @@ -337,21 +372,6 @@ func (tpn *TestProcessorNode) initInnerProcessors() { return } - economicsData := economics.NewEconomicsData( - &config.ConfigEconomics{ - EconomicsAddresses: config.EconomicsAddresses{ - "addr1", - "addr2", - }, - RewardsSettings: config.RewardsSettings{ - 1000, 0.10, 0.50, 0.40, - }, - FeeSettings: config.FeeSettings{ - 0, 5, 0, - }, - }, 
- ) - interimProcFactory, _ := shard.NewIntermediateProcessorsContainerFactory( tpn.ShardCoordinator, TestMarshalizer, @@ -360,7 +380,7 @@ func (tpn *TestProcessorNode) initInnerProcessors() { tpn.SpecialAddressHandler, tpn.Storage, tpn.ShardDataPool, - economicsData, + tpn.EconomicsData, ) tpn.InterimProcContainer, _ = interimProcFactory.Create() @@ -409,7 +429,7 @@ func (tpn *TestProcessorNode) initInnerProcessors() { tpn.ScProcessor, rewardsHandler, txTypeHandler, - &mock.FeeHandlerMock{ + &mock.FeeHandlerStub{ MinGasPriceCalled: func() uint64 { return 0 }, @@ -436,7 +456,7 @@ func (tpn *TestProcessorNode) initInnerProcessors() { tpn.ScProcessor.(process.SmartContractResultProcessor), tpn.RewardsProcessor, internalTxProducer, - &mock.FeeHandlerMock{ + &mock.FeeHandlerStub{ MinGasPriceCalled: func() uint64 { return 0 }, diff --git a/integrationTests/testSyncNode.go b/integrationTests/testSyncNode.go index 3b7cd83e355..cd797ffec11 100644 --- a/integrationTests/testSyncNode.go +++ b/integrationTests/testSyncNode.go @@ -59,6 +59,7 @@ func (tpn *TestProcessorNode) initTestNodeWithSync() { tpn.ShardCoordinator, tpn.NodesCoordinator, ) + tpn.initEconomicsData() tpn.initInterceptors() tpn.initResolvers() tpn.initInnerProcessors() diff --git a/integrationTests/vm/testInitializer.go b/integrationTests/vm/testInitializer.go index 46a987cea7b..4d11c52c111 100644 --- a/integrationTests/vm/testInitializer.go +++ b/integrationTests/vm/testInitializer.go @@ -118,7 +118,7 @@ func CreateTxProcessorWithOneSCExecutorMockVM(accnts state.AccountsAdapter, opGa scProcessor, &mock.UnsignedTxHandlerMock{}, txTypeHandler, - &mock.FeeHandlerMock{}, + &mock.FeeHandlerStub{}, ) return txProcessor @@ -179,7 +179,7 @@ func CreateTxProcessorWithOneSCExecutorIeleVM( scProcessor, &mock.UnsignedTxHandlerMock{}, txTypeHandler, - &mock.FeeHandlerMock{}, + &mock.FeeHandlerStub{}, ) return txProcessor, blockChainHook diff --git a/process/block/preprocess/transactions_test.go b/process/block/preprocess/transactions_test.go index 068d338b8ae..ea546b35421 100644 --- a/process/block/preprocess/transactions_test.go +++ b/process/block/preprocess/transactions_test.go @@ -27,8 +27,8 @@ import ( "github.com/stretchr/testify/assert" ) -func FeeHandlerMock() *mock.FeeHandlerMock { - return &mock.FeeHandlerMock{ +func FeeHandlerMock() *mock.FeeHandlerStub { + return &mock.FeeHandlerStub{ MinGasPriceCalled: func() uint64 { return 0 }, diff --git a/process/block/shardblock_test.go b/process/block/shardblock_test.go index f24661e06f0..7e1f04e1fb8 100644 --- a/process/block/shardblock_test.go +++ b/process/block/shardblock_test.go @@ -437,7 +437,7 @@ func TestShardProcessor_ProcessBlockWithInvalidTransactionShouldErr(t *testing.T &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, &mock.IntermediateTransactionHandlerMock{}, - &mock.FeeHandlerMock{}, + &mock.FeeHandlerStub{}, ) container, _ := factory.Create() @@ -626,7 +626,7 @@ func TestShardProcessor_ProcessBlockWithErrOnProcessBlockTransactionsCallShouldR &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, &mock.IntermediateTransactionHandlerMock{}, - &mock.FeeHandlerMock{}, + &mock.FeeHandlerStub{}, ) container, _ := factory.Create() @@ -1805,7 +1805,7 @@ func TestShardProcessor_CommitBlockNoTxInPoolShouldErr(t *testing.T) { &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, &mock.IntermediateTransactionHandlerMock{}, - &mock.FeeHandlerMock{}, + &mock.FeeHandlerStub{}, ) container, _ := factory.Create() @@ -2369,7 
+2369,7 @@ func TestShardProcessor_MarshalizedDataToBroadcastShouldWork(t *testing.T) { &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, &mock.IntermediateTransactionHandlerMock{}, - &mock.FeeHandlerMock{}, + &mock.FeeHandlerStub{}, ) container, _ := factory.Create() @@ -2472,7 +2472,7 @@ func TestShardProcessor_MarshalizedDataMarshalWithoutSuccess(t *testing.T) { &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, &mock.IntermediateTransactionHandlerMock{}, - &mock.FeeHandlerMock{}, + &mock.FeeHandlerStub{}, ) container, _ := factory.Create() @@ -2889,7 +2889,7 @@ func TestShardProcessor_CreateMiniBlocksShouldWorkWithIntraShardTxs(t *testing.T &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, &mock.IntermediateTransactionHandlerMock{}, - &mock.FeeHandlerMock{ + &mock.FeeHandlerStub{ MinGasLimitForTxCalled: func() uint64 { return 0 }, @@ -3084,7 +3084,7 @@ func TestShardProcessor_RestoreBlockIntoPoolsShouldWork(t *testing.T) { &mock.SmartContractResultsProcessorMock{}, &mock.RewardTxProcessorMock{}, &mock.IntermediateTransactionHandlerMock{}, - &mock.FeeHandlerMock{}, + &mock.FeeHandlerStub{}, ) container, _ := factory.Create() diff --git a/process/coordinator/process_test.go b/process/coordinator/process_test.go index b9fe2cac414..0665ebe6484 100644 --- a/process/coordinator/process_test.go +++ b/process/coordinator/process_test.go @@ -32,8 +32,8 @@ import ( "github.com/stretchr/testify/assert" ) -func FeeHandlerMock() *mock.FeeHandlerMock { - return &mock.FeeHandlerMock{ +func FeeHandlerMock() *mock.FeeHandlerStub { + return &mock.FeeHandlerStub{ MinGasPriceCalled: func() uint64 { return 0 }, diff --git a/process/sync/shardblock_test.go b/process/sync/shardblock_test.go index 8e37c19f78a..2742fe76148 100644 --- a/process/sync/shardblock_test.go +++ b/process/sync/shardblock_test.go @@ -4898,7 +4898,7 @@ func NewStorageBootstrapperMock() *sync.StorageBootstrapperMock { fmt.Printf("last notarized items: %d\n", len(lastNotarized)) }, AddHeaderToForkDetectorCalled: func(shardId uint32, nonce uint64, lastNotarizedMeta uint64) { - fmt.Printf("add header to fork detector called") + fmt.Printf("add header to fork detector called\n") }, } From a9c356332e9f6d756840c640bf97d53a5d2ae209 Mon Sep 17 00:00:00 2001 From: iulianpascalau Date: Fri, 4 Oct 2019 10:28:03 +0300 Subject: [PATCH 198/234] temporary skipped p2p tests to not run with -race -short flags --- p2p/libp2p/netMessenger_test.go | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/p2p/libp2p/netMessenger_test.go b/p2p/libp2p/netMessenger_test.go index 73c57d9caf6..1edf848cea6 100644 --- a/p2p/libp2p/netMessenger_test.go +++ b/p2p/libp2p/netMessenger_test.go @@ -261,6 +261,11 @@ func TestNewNetworkMessenger_NoConnMgrShouldWork(t *testing.T) { } func TestNewNetworkMessenger_WithConnMgrShouldWork(t *testing.T) { + //TODO remove skip when external library is concurrent safe + if testing.Short() { + t.Skip("this test fails with race detector on because of the github.com/koron/go-ssdp lib") + } + port := 4000 _, sk := createLibP2PCredentialsMessenger() @@ -294,6 +299,11 @@ func TestNewNetworkMessenger_WithConnMgrShouldWork(t *testing.T) { } func TestNewNetworkMessenger_WithNullPeerDiscoveryShouldWork(t *testing.T) { + //TODO remove skip when external library is concurrent safe + if testing.Short() { + t.Skip("this test fails with race detector on because of the github.com/koron/go-ssdp lib") + } + port := 4000 _, sk := createLibP2PCredentialsMessenger() @@ 
-344,6 +354,11 @@ func TestNewNetworkMessenger_NilPeerDiscoveryShouldErr(t *testing.T) { } func TestNewNetworkMessenger_PeerDiscovererFailsWhenApplyingContextShouldErr(t *testing.T) { + //TODO remove skip when external library is concurrent safe + if testing.Short() { + t.Skip("this test fails with race detector on because of the github.com/koron/go-ssdp lib") + } + port := 4000 _, sk := createLibP2PCredentialsMessenger() @@ -374,6 +389,11 @@ func TestNewNetworkMessenger_PeerDiscovererFailsWhenApplyingContextShouldErr(t * } func TestNewNetworkMessengerWithPortSweep_ShouldFindFreePort(t *testing.T) { + //TODO remove skip when external library is concurrent safe + if testing.Short() { + t.Skip("this test fails with race detector on because of the github.com/koron/go-ssdp lib") + } + _, sk := createLibP2PCredentialsMessenger() mes, err := libp2p.NewNetworkMessengerOnFreePort( From b6e35c5e45e007bcd5e32ca1bbf1d4c4a04eb6ff Mon Sep 17 00:00:00 2001 From: iulianpascalau Date: Fri, 4 Oct 2019 10:36:25 +0300 Subject: [PATCH 199/234] removed some debugging prints, changed the comment of InterceptorThrottler --- process/block/shardblock.go | 10 ---------- process/interface.go | 2 +- 2 files changed, 1 insertion(+), 11 deletions(-) diff --git a/process/block/shardblock.go b/process/block/shardblock.go index 69febbaef1f..07631e23735 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -251,11 +251,6 @@ func (sp *shardProcessor) ProcessBlock( return err } - log.Info(fmt.Sprintf("processed metaBlock %d out of %d", len(processedMetaHdrs), len(header.MetaBlockHashes))) - for i := 0; i < len(processedMetaHdrs); i++ { - log.Info(fmt.Sprintf("processed metablock with nonce: %d and round %d", processedMetaHdrs[i].GetNonce(), processedMetaHdrs[i].GetRound())) - } - err = sp.setMetaConsensusData(processedMetaHdrs) if err != nil { return err @@ -1574,11 +1569,6 @@ func (sp *shardProcessor) createMiniBlocks( log.Debug(errNotCritical.Error()) } - log.Info(fmt.Sprintf("processed metaBlock %d out of %d", len(processedMetaHdrs), len(usedMetaHdrsHashes))) - for i := 0; i < len(processedMetaHdrs); i++ { - log.Info(fmt.Sprintf("processed metablock with nonce: %d and round %d", processedMetaHdrs[i].GetNonce(), processedMetaHdrs[i].GetRound())) - } - err = sp.setMetaConsensusData(processedMetaHdrs) if err != nil { return nil, err diff --git a/process/interface.go b/process/interface.go index 4ae6dbff22b..3c124ceebbb 100644 --- a/process/interface.go +++ b/process/interface.go @@ -399,7 +399,7 @@ type PoolsCleaner interface { IsInterfaceNil() bool } -// InterceptorThrottler can determine if the a new joc can or cannot be started +// InterceptorThrottler can determine if the a new go routine can start type InterceptorThrottler interface { CanProcess() bool StartProcessing() From 6b76dbc91aa02ab2333ddfbd0d3ddf93fd181271 Mon Sep 17 00:00:00 2001 From: Sebastian Marian Date: Fri, 4 Oct 2019 11:23:25 +0300 Subject: [PATCH 200/234] * Fixed condition on getProcessedMetaBlocksFromMiniBlocks * Changed a print from Info to Debug --- process/block/preprocess/transactions.go | 2 +- process/block/shardblock.go | 11 ----------- 2 files changed, 1 insertion(+), 12 deletions(-) diff --git a/process/block/preprocess/transactions.go b/process/block/preprocess/transactions.go index ade91636526..02c71b686df 100644 --- a/process/block/preprocess/transactions.go +++ b/process/block/preprocess/transactions.go @@ -491,7 +491,7 @@ func (txs *transactions) CreateAndProcessMiniBlock( timeAfter := time.Now() if err != nil { - 
log.Info(err.Error()) + log.Debug(err.Error()) return nil, err } diff --git a/process/block/shardblock.go b/process/block/shardblock.go index c283161cfc0..dc7fbf996bd 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -871,17 +871,6 @@ func (sp *shardProcessor) getProcessedMetaBlocksFromMiniBlocks( usedMiniBlocks []*block.MiniBlock, ) ([]data.HeaderHandler, error) { - nrMiniBlocksUsed := len(usedMiniBlocks) - - sp.hdrsForCurrBlock.mutHdrsForBlock.RLock() - nrMetaBlocksUsed := len(sp.hdrsForCurrBlock.hdrHashAndInfo) - sp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() - - if nrMiniBlocksUsed == 0 || nrMetaBlocksUsed == 0 { - // not an error, it can happen that no metablock hdr or no miniblock is used. - return make([]data.HeaderHandler, 0), nil - } - miniBlockHashes := make(map[int][]byte, 0) for i := 0; i < len(usedMiniBlocks); i++ { if usedMiniBlocks[i].SenderShardID == sp.shardCoordinator.SelfId() { From 57746d584bee4d36ae4a0b2a79776401f564d797 Mon Sep 17 00:00:00 2001 From: miiu96 Date: Fri, 4 Oct 2019 11:27:20 +0300 Subject: [PATCH 201/234] EN-4247 : add timestamp in round info --- .../spos/commonSubround/subroundStartRound.go | 10 +++++++++- core/indexer/data.go | 7 ++++--- core/indexer/elasticsearch.go | 10 ++-------- core/indexer/interface.go | 2 +- core/indexer/nilIndexer.go | 2 +- core/mock/indexerMock.go | 3 ++- node/mock/indexerMock.go | 3 ++- process/block/shardblock.go | 18 ++++++++++++++++-- process/mock/indexerMock.go | 3 ++- 9 files changed, 39 insertions(+), 19 deletions(-) diff --git a/consensus/spos/commonSubround/subroundStartRound.go b/consensus/spos/commonSubround/subroundStartRound.go index 217ae51ad20..8b135704c9d 100644 --- a/consensus/spos/commonSubround/subroundStartRound.go +++ b/consensus/spos/commonSubround/subroundStartRound.go @@ -201,7 +201,15 @@ func (sr *SubroundStartRound) indexRoundIfNeeded(pubKeys []string) { shardId := sr.ShardCoordinator().SelfId() signersIndexes := sr.NodesCoordinator().GetValidatorsIndexes(pubKeys) round := sr.Rounder().Index() - go sr.indexer.SaveRoundInfo(round, shardId, signersIndexes, false) + + roundInfo := indexer.RoundInfo{ + SignersIndexes: signersIndexes, + BlockWasProposed: false, + ShardId: shardId, + Timestamp: time.Duration(sr.RoundTimeStamp.Unix()), + } + + go sr.indexer.SaveRoundInfo(round, roundInfo) } func (sr *SubroundStartRound) generateNextConsensusGroup(roundIndex int64) error { diff --git a/core/indexer/data.go b/core/indexer/data.go index ef7011046f4..78cecb5faf2 100644 --- a/core/indexer/data.go +++ b/core/indexer/data.go @@ -52,9 +52,10 @@ type ValidatorsPublicKeys struct { // RoundInfo is a structure containing block signers and shard id type RoundInfo struct { - SignersIndexes []uint64 `json:"signersIndexes"` - BlockWasProposed bool `json:"blockWasProposed"` - ShardId uint32 `json:"shardId"` + SignersIndexes []uint64 `json:"signersIndexes"` + BlockWasProposed bool `json:"blockWasProposed"` + ShardId uint32 `json:"shardId"` + Timestamp time.Duration `json:"timestamp"` } // TPS is a structure containing all the fields that need to diff --git a/core/indexer/elasticsearch.go b/core/indexer/elasticsearch.go index 0da6da2956b..14281de96df 100644 --- a/core/indexer/elasticsearch.go +++ b/core/indexer/elasticsearch.go @@ -120,7 +120,7 @@ func NewElasticIndexer( return nil, err } - err = indexer.checkAndCreateIndex(roundIndex, nil) + err = indexer.checkAndCreateIndex(roundIndex, timestampMapping()) if err != nil { return nil, err } @@ -241,15 +241,9 @@ func (ei *elasticIndexer) SaveBlock( } 
// SaveRoundInfo will save data about a round on elastic search -func (ei *elasticIndexer) SaveRoundInfo(round int64, shardId uint32, signersIndexes []uint64, blockWasProposed bool) { +func (ei *elasticIndexer) SaveRoundInfo(round int64, roundInfo RoundInfo) { var buff bytes.Buffer - roundInfo := RoundInfo{ - SignersIndexes: signersIndexes, - BlockWasProposed: blockWasProposed, - ShardId: shardId, - } - marshalizedRoundInfo, err := ei.marshalizer.Marshal(roundInfo) if err != nil { ei.logger.Warn("could not marshal signers indexes") diff --git a/core/indexer/interface.go b/core/indexer/interface.go index 9d9b327ec1d..27936991cf4 100644 --- a/core/indexer/interface.go +++ b/core/indexer/interface.go @@ -9,7 +9,7 @@ import ( // This could be an elasticsearch index, a MySql database or any other external services. type Indexer interface { SaveBlock(body data.BodyHandler, header data.HeaderHandler, txPool map[string]data.TransactionHandler, signersIndexes []uint64) - SaveRoundInfo(round int64, shardId uint32, signersIndexes []uint64, blockWasProposed bool) + SaveRoundInfo(round int64, roundInfo RoundInfo) UpdateTPS(tpsBenchmark statistics.TPSBenchmark) SaveValidatorsPubKeys(validatorsPubKeys map[uint32][][]byte) IsInterfaceNil() bool diff --git a/core/indexer/nilIndexer.go b/core/indexer/nilIndexer.go index ad252de9f1f..fceed26cce5 100644 --- a/core/indexer/nilIndexer.go +++ b/core/indexer/nilIndexer.go @@ -20,7 +20,7 @@ func (ni *NilIndexer) SaveBlock(body data.BodyHandler, header data.HeaderHandler } // SaveRoundInfo will do nothing -func (ni *NilIndexer) SaveRoundInfo(round int64, shardId uint32, signersIndexes []uint64, blockWasProposed bool) { +func (ni *NilIndexer) SaveRoundInfo(round int64, info RoundInfo) { return } diff --git a/core/mock/indexerMock.go b/core/mock/indexerMock.go index e1adb30ed97..4f1d5ae5059 100644 --- a/core/mock/indexerMock.go +++ b/core/mock/indexerMock.go @@ -1,6 +1,7 @@ package mock import ( + "github.com/ElrondNetwork/elrond-go/core/indexer" "github.com/ElrondNetwork/elrond-go/core/statistics" "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/block" @@ -19,7 +20,7 @@ func (im *IndexerMock) UpdateTPS(tpsBenchmark statistics.TPSBenchmark) { panic("implement me") } -func (im *IndexerMock) SaveRoundInfo(round int64, shardId uint32, signersIndexes []uint64, blockWasProposed bool) { +func (im *IndexerMock) SaveRoundInfo(round int64, roundInfo indexer.RoundInfo) { panic("implement me") } diff --git a/node/mock/indexerMock.go b/node/mock/indexerMock.go index e1adb30ed97..4f1d5ae5059 100644 --- a/node/mock/indexerMock.go +++ b/node/mock/indexerMock.go @@ -1,6 +1,7 @@ package mock import ( + "github.com/ElrondNetwork/elrond-go/core/indexer" "github.com/ElrondNetwork/elrond-go/core/statistics" "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/block" @@ -19,7 +20,7 @@ func (im *IndexerMock) UpdateTPS(tpsBenchmark statistics.TPSBenchmark) { panic("implement me") } -func (im *IndexerMock) SaveRoundInfo(round int64, shardId uint32, signersIndexes []uint64, blockWasProposed bool) { +func (im *IndexerMock) SaveRoundInfo(round int64, roundInfo indexer.RoundInfo) { panic("implement me") } diff --git a/process/block/shardblock.go b/process/block/shardblock.go index de14188f65e..764abc11ddd 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -7,6 +7,7 @@ import ( "time" "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/core/indexer" 
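
// Illustrative sketch (editor's note, not part of the patch): how a caller is expected to
// use the reworked indexing API after this change – the whole round description now travels
// as a single RoundInfo value instead of four separate parameters, and the elastic
// implementation marshals it and keys the document by the round number. RoundInfoSketch
// mirrors the core/indexer.RoundInfo struct from the hunks above; main and the sample
// values are assumptions for demonstration only.
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// RoundInfoSketch mirrors core/indexer.RoundInfo as defined in this patch.
type RoundInfoSketch struct {
	SignersIndexes   []uint64      `json:"signersIndexes"`
	BlockWasProposed bool          `json:"blockWasProposed"`
	ShardId          uint32        `json:"shardId"`
	Timestamp        time.Duration `json:"timestamp"`
}

func main() {
	info := RoundInfoSketch{
		SignersIndexes:   []uint64{0, 3, 7},
		BlockWasProposed: false,
		ShardId:          1,
		Timestamp:        time.Duration(time.Now().Unix()),
	}

	// the elastic indexer marshals the struct and uses the round index as document ID
	buff, _ := json.Marshal(info)
	fmt.Printf("round %d -> %s\n", int64(42), buff)
}
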
"github.com/ElrondNetwork/elrond-go/core/serviceContainer" "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/block" @@ -528,9 +529,15 @@ func (sp *shardProcessor) indexBlockIfNeeded( } signersIndexes := sp.nodesCoordinator.GetValidatorsIndexes(pubKeys) + roundInfo := indexer.RoundInfo{ + SignersIndexes: signersIndexes, + BlockWasProposed: true, + ShardId: shardId, + Timestamp: time.Duration(header.GetTimeStamp()), + } go sp.core.Indexer().SaveBlock(body, header, txPool, signersIndexes) - go sp.core.Indexer().SaveRoundInfo(int64(header.GetRound()), shardId, signersIndexes, true) + go sp.core.Indexer().SaveRoundInfo(int64(header.GetRound()), roundInfo) if lastBlockHeader == nil { return @@ -545,7 +552,14 @@ func (sp *shardProcessor) indexBlockIfNeeded( continue } signersIndexes = sp.nodesCoordinator.GetValidatorsIndexes(publicKeys) - go sp.core.Indexer().SaveRoundInfo(int64(i), shardId, signersIndexes, true) + roundInfo = indexer.RoundInfo{ + SignersIndexes: signersIndexes, + BlockWasProposed: true, + ShardId: shardId, + Timestamp: time.Duration(header.GetTimeStamp() - (currentBlockRound - i)), + } + + go sp.core.Indexer().SaveRoundInfo(int64(i), roundInfo) } } diff --git a/process/mock/indexerMock.go b/process/mock/indexerMock.go index 5bb645a3d81..2ae76a97b9b 100644 --- a/process/mock/indexerMock.go +++ b/process/mock/indexerMock.go @@ -1,6 +1,7 @@ package mock import ( + "github.com/ElrondNetwork/elrond-go/core/indexer" "github.com/ElrondNetwork/elrond-go/core/statistics" "github.com/ElrondNetwork/elrond-go/data" ) @@ -20,7 +21,7 @@ func (im *IndexerMock) UpdateTPS(tpsBenchmark statistics.TPSBenchmark) { panic("implement me") } -func (im *IndexerMock) SaveRoundInfo(round int64, shardId uint32, signersIndexes []uint64, blockWasProposed bool) { +func (im *IndexerMock) SaveRoundInfo(round int64, roundInfo indexer.RoundInfo) { return } From e242ed261f8d9eb5f43b306ad74aea8d1008a2f0 Mon Sep 17 00:00:00 2001 From: miiu96 Date: Fri, 4 Oct 2019 12:10:59 +0300 Subject: [PATCH 202/234] EN-4222 - fix after review --- cmd/node/factory/structs.go | 4 ++-- integrationTests/multiShard/smartContract/testInitializer.go | 4 ++-- integrationTests/testProcessorNode.go | 4 ++-- integrationTests/testSyncNode.go | 2 +- process/block/argProcessor.go | 4 ++-- process/block/baseProcess_test.go | 2 +- process/block/export_test.go | 2 +- process/block/metablock_test.go | 2 +- 8 files changed, 12 insertions(+), 12 deletions(-) diff --git a/cmd/node/factory/structs.go b/cmd/node/factory/structs.go index acf261a77ba..456612b3095 100644 --- a/cmd/node/factory/structs.go +++ b/cmd/node/factory/structs.go @@ -1705,7 +1705,7 @@ func newShardBlockProcessor( Core: coreServiceContainer, } arguments := block.ArgShardProcessor{ - ArgBaseProcessor: &argumentsBaseProcessor, + ArgBaseProcessor: argumentsBaseProcessor, DataPool: data.Datapool, TxCoordinator: txCoordinator, TxsPoolsCleaner: txPoolsCleaner, @@ -1759,7 +1759,7 @@ func newMetaBlockProcessor( Core: coreServiceContainer, } arguments := block.ArgMetaProcessor{ - ArgBaseProcessor: &argumentsBaseProcessor, + ArgBaseProcessor: argumentsBaseProcessor, DataPool: data.MetaDatapool, } diff --git a/integrationTests/multiShard/smartContract/testInitializer.go b/integrationTests/multiShard/smartContract/testInitializer.go index 206bd392abc..1dacd612351 100644 --- a/integrationTests/multiShard/smartContract/testInitializer.go +++ b/integrationTests/multiShard/smartContract/testInitializer.go @@ -471,7 +471,7 @@ func createNetNode( genesisBlocks := 
createGenesisBlocks(shardCoordinator) arguments := block.ArgShardProcessor{ - ArgBaseProcessor: &block.ArgBaseProcessor{ + ArgBaseProcessor: block.ArgBaseProcessor{ Accounts: accntAdapter, ForkDetector: &mock.ForkDetectorMock{ AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeaders []data.HeaderHandler, finalHeadersHashes [][]byte) error { @@ -802,7 +802,7 @@ func createMetaNetNode( genesisBlocks := createGenesisBlocks(shardCoordinator) arguments := block.ArgMetaProcessor{ - ArgBaseProcessor: &block.ArgBaseProcessor{ + ArgBaseProcessor: block.ArgBaseProcessor{ Accounts: accntAdapter, ForkDetector: &mock.ForkDetectorMock{ AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeaders []data.HeaderHandler, finalHeadersHashes [][]byte) error { diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index bc10f9381ac..eb54df015c8 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -494,7 +494,7 @@ func (tpn *TestProcessorNode) initBlockProcessor() { }, } - argumentsBase := &block.ArgBaseProcessor{ + argumentsBase := block.ArgBaseProcessor{ Accounts: tpn.AccntState, ForkDetector: tpn.ForkDetector, Hasher: TestHasher, @@ -510,7 +510,7 @@ func (tpn *TestProcessorNode) initBlockProcessor() { } if tpn.ShardCoordinator.SelfId() == sharding.MetachainShardId { - arguments.Core = &mock.ServiceContainerMock{} + argumentsBase.Core = &mock.ServiceContainerMock{} arguments := block.ArgMetaProcessor{ ArgBaseProcessor: argumentsBase, DataPool: tpn.MetaDataPool, diff --git a/integrationTests/testSyncNode.go b/integrationTests/testSyncNode.go index 34c253110e8..f1acd8c9f1f 100644 --- a/integrationTests/testSyncNode.go +++ b/integrationTests/testSyncNode.go @@ -80,7 +80,7 @@ func (tpn *TestProcessorNode) initTestNodeWithSync() { func (tpn *TestProcessorNode) initBlockProcessorWithSync() { var err error - argumentsBase := &block.ArgBaseProcessor{ + argumentsBase := block.ArgBaseProcessor{ Accounts: tpn.AccntState, ForkDetector: nil, Hasher: TestHasher, diff --git a/process/block/argProcessor.go b/process/block/argProcessor.go index d6aeda90451..ab26abd7b5d 100644 --- a/process/block/argProcessor.go +++ b/process/block/argProcessor.go @@ -32,7 +32,7 @@ type ArgBaseProcessor struct { // ArgShardProcessor holds all dependencies required by the process data factory in order to create // new instances of shard processor type ArgShardProcessor struct { - *ArgBaseProcessor + ArgBaseProcessor DataPool dataRetriever.PoolsHolder TxCoordinator process.TransactionCoordinator TxsPoolsCleaner process.PoolsCleaner @@ -41,6 +41,6 @@ type ArgShardProcessor struct { // ArgMetaProcessor holds all dependencies required by the process data factory in order to create // new instances of meta processor type ArgMetaProcessor struct { - *ArgBaseProcessor + ArgBaseProcessor DataPool dataRetriever.MetaPoolsHolder } diff --git a/process/block/baseProcess_test.go b/process/block/baseProcess_test.go index a4fa9a0f6ee..47c1d84b689 100644 --- a/process/block/baseProcess_test.go +++ b/process/block/baseProcess_test.go @@ -316,7 +316,7 @@ func CreateMockArguments() blproc.ArgShardProcessor { nodesCoordinator, ) arguments := blproc.ArgShardProcessor{ - ArgBaseProcessor: &blproc.ArgBaseProcessor{ + ArgBaseProcessor: blproc.ArgBaseProcessor{ Accounts: &mock.AccountsStub{}, ForkDetector: &mock.ForkDetectorMock{}, Hasher: &mock.HasherStub{}, diff --git 
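
// Illustrative sketch (editor's note, not part of the patch): the processor argument
// structs are now embedded by value, so construction sites build ArgBaseProcessor inline
// instead of passing a pointer, as the hunks above show. The field names mirror
// ArgBaseProcessor/ArgShardProcessor from process/block/argProcessor.go; the field types
// here are trimmed string stand-ins for demonstration only.
package main

import "fmt"

type argBaseProcessorSketch struct {
	Accounts string
}

type argShardProcessorSketch struct {
	argBaseProcessorSketch // embedded by value – a zero base is still usable, never a nil pointer
	DataPool string
}

func main() {
	args := argShardProcessorSketch{
		argBaseProcessorSketch: argBaseProcessorSketch{Accounts: "accountsAdapter"},
		DataPool:               "shardDataPool",
	}
	// promoted fields read the same way as they did with the previous pointer embedding
	fmt.Println(args.Accounts, args.DataPool)
}
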
a/process/block/export_test.go b/process/block/export_test.go index 4aef3943150..30d5717560b 100644 --- a/process/block/export_test.go +++ b/process/block/export_test.go @@ -61,7 +61,7 @@ func NewShardProcessorEmptyWith3shards(tdp dataRetriever.PoolsHolder, genesisBlo nodesCoordinator, ) arguments := ArgShardProcessor{ - ArgBaseProcessor: &ArgBaseProcessor{ + ArgBaseProcessor: ArgBaseProcessor{ Accounts: &mock.AccountsStub{}, ForkDetector: &mock.ForkDetectorMock{}, Hasher: &mock.HasherMock{}, diff --git a/process/block/metablock_test.go b/process/block/metablock_test.go index 650ffecfb0b..d9d93a7d322 100644 --- a/process/block/metablock_test.go +++ b/process/block/metablock_test.go @@ -24,7 +24,7 @@ func CreateMockMetaArguments() blproc.ArgMetaProcessor { mdp := initMetaDataPool() shardCoordinator := mock.NewOneShardCoordinatorMock() arguments := blproc.ArgMetaProcessor{ - ArgBaseProcessor: &blproc.ArgBaseProcessor{ + ArgBaseProcessor: blproc.ArgBaseProcessor{ Accounts: &mock.AccountsStub{}, ForkDetector: &mock.ForkDetectorMock{}, Hasher: &mock.HasherStub{}, From 3ccc0d5f71c600ebab62143baba3e8a6a3cb5c6c Mon Sep 17 00:00:00 2001 From: Sebastian Marian Date: Fri, 4 Oct 2019 11:37:21 +0300 Subject: [PATCH 203/234] * Fixed bad renaming after refactor has been used --- core/computers.go | 31 +++--- core/computers_test.go | 56 +++++++--- process/block/baseProcess.go | 8 +- process/block/displayBlock.go | 2 +- process/block/export_test.go | 12 -- process/block/interceptedBlockHeader.go | 10 +- process/block/interceptedMetaBlockHeader.go | 8 +- process/block/metablock.go | 24 ++-- process/block/metablock_test.go | 28 ++--- process/block/shardblock.go | 4 +- process/block/shardblock_test.go | 37 ++++--- process/mock/headerHandlerStub.go | 117 -------------------- process/throttle/block.go | 4 +- process/throttle/block_test.go | 10 +- 14 files changed, 125 insertions(+), 226 deletions(-) delete mode 100644 process/mock/headerHandlerStub.go diff --git a/core/computers.go b/core/computers.go index c6c7b47c08f..e06d1742b98 100644 --- a/core/computers.go +++ b/core/computers.go @@ -1,32 +1,33 @@ package core -import ( - "bytes" -) - -// Max returns the maximum number between two given -func Max(a int32, b int32) int32 { +// MaxInt32 returns the maximum number between two given +func MaxInt32(a int32, b int32) int32 { if a > b { return a } return b } -// Min returns the minimum number between two given -func Min(a int32, b int32) int32 { +// MinInt32 returns the minimum number between two given +func MinInt32(a int32, b int32) int32 { if a < b { return a } return b } -// IsHashInList signals if the given hash exists in the given list of hashes -func IsHashInList(hash []byte, hashes [][]byte) bool { - for i := 0; i < len(hashes); i++ { - if bytes.Equal(hash, hashes[i]) { - return true - } +// MaxUint32 returns the maximum number between two given +func MaxUint32(a uint32, b uint32) uint32 { + if a > b { + return a } + return b +} - return false +// MinUint32 returns the minimum number between two given +func MinUint32(a uint32, b uint32) uint32 { + if a < b { + return a + } + return b } diff --git a/core/computers_test.go b/core/computers_test.go index 3a08d506bab..8b26be785c9 100644 --- a/core/computers_test.go +++ b/core/computers_test.go @@ -7,26 +7,50 @@ import ( "github.com/stretchr/testify/assert" ) -func TestMaxShouldReturnA(t *testing.T) { - a := int32(11) - b := int32(10) - assert.Equal(t, a, core.Max(a, b)) +func TestMaxInt32ShouldReturnA(t *testing.T) { + a := int32(-1) + b := int32(-2) + 
assert.Equal(t, a, core.MaxInt32(a, b)) } -func TestMaxShouldReturnB(t *testing.T) { - a := int32(10) - b := int32(11) - assert.Equal(t, b, core.Max(a, b)) +func TestMaxInt32ShouldReturnB(t *testing.T) { + a := int32(-2) + b := int32(-1) + assert.Equal(t, b, core.MaxInt32(a, b)) } -func TestMinShouldReturnB(t *testing.T) { - a := int32(11) - b := int32(10) - assert.Equal(t, b, core.Min(a, b)) +func TestMinInt32ShouldReturnB(t *testing.T) { + a := int32(-1) + b := int32(-2) + assert.Equal(t, b, core.MinInt32(a, b)) } -func TestMinShouldReturnA(t *testing.T) { - a := int32(10) - b := int32(11) - assert.Equal(t, a, core.Min(a, b)) +func TestMinInt32ShouldReturnA(t *testing.T) { + a := int32(-2) + b := int32(-1) + assert.Equal(t, a, core.MinInt32(a, b)) +} + +func TestMaxUint32ShouldReturnA(t *testing.T) { + a := uint32(11) + b := uint32(10) + assert.Equal(t, a, core.MaxUint32(a, b)) +} + +func TestMaxUint32ShouldReturnB(t *testing.T) { + a := uint32(10) + b := uint32(11) + assert.Equal(t, b, core.MaxUint32(a, b)) +} + +func TestMinUint32ShouldReturnB(t *testing.T) { + a := uint32(11) + b := uint32(10) + assert.Equal(t, b, core.MinUint32(a, b)) +} + +func TestMinUint32ShouldReturnA(t *testing.T) { + a := uint32(10) + b := uint32(11) + assert.Equal(t, a, core.MinUint32(a, b)) } diff --git a/process/block/baseProcess.go b/process/block/baseProcess.go index 17cd7ee2b4f..8413b239092 100644 --- a/process/block/baseProcess.go +++ b/process/block/baseProcess.go @@ -86,7 +86,7 @@ func (bp *baseProcessor) RevertAccountState() { } } -// AddLastNotarizedHdr adds the last notarized hdr +// AddLastNotarizedHdr adds the last notarized header func (bp *baseProcessor) AddLastNotarizedHdr(shardId uint32, processedHdr data.HeaderHandler) { bp.mutNotarizedHdrs.Lock() bp.notarizedHdrs[shardId] = append(bp.notarizedHdrs[shardId], processedHdr) @@ -210,7 +210,7 @@ func (bp *baseProcessor) isHdrConstructionValid(currHdr, prevHdr data.HeaderHand } //TODO: add verification if rand seed was correctly computed add other verification - //TODO: check here if the 2 hdr blocks were correctly signed and the consensus group was correctly elected + //TODO: check here if the 2 header blocks were correctly signed and the consensus group was correctly elected if prevHdr.GetRound() >= currHdr.GetRound() { log.Debug(fmt.Sprintf("round does not match in shard %d: local block round is %d and node received block with round %d\n", currHdr.GetShardID(), prevHdr.GetRound(), currHdr.GetRound())) @@ -355,8 +355,8 @@ func (bp *baseProcessor) getLastNotarizedHdr(shardId uint32) (data.HeaderHandler } // SetLastNotarizedHeadersSlice sets the headers blocks in notarizedHdrs for every shard -// This is done when starting a new epoch so metachain can use it when validating next shard hdr blocks -// and shard can validate the next meta hdr +// This is done when starting a new epoch so metachain can use it when validating next shard header blocks +// and shard can validate the next meta header func (bp *baseProcessor) setLastNotarizedHeadersSlice(startHeaders map[uint32]data.HeaderHandler) error { //TODO: protect this to be called only once at genesis time //TODO: do this on constructor as it is a must to for blockprocessor to work diff --git a/process/block/displayBlock.go b/process/block/displayBlock.go index 285556f6635..24e8db47eed 100644 --- a/process/block/displayBlock.go +++ b/process/block/displayBlock.go @@ -229,7 +229,7 @@ func DisplayLastNotarized( shardId uint32) { if lastNotarizedHdrForShard == nil || 
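
// Illustrative sketch (editor's note, not part of the patch): the typed helpers below are
// copied from the core/computers.go hunk above; the demo values are assumptions. Having
// per-type variants removes the int32 round-trips the old call sites needed (e.g.
// uint32(core.Max(int32(a), int32(b)))) and lets the block size throttler guarantee a step
// of at least one item with MaxUint32(1, computedStep).
package main

import "fmt"

func MaxUint32(a uint32, b uint32) uint32 {
	if a > b {
		return a
	}
	return b
}

func MinUint32(a uint32, b uint32) uint32 {
	if a < b {
		return a
	}
	return b
}

func main() {
	itemsInBody, itemsInHeader := uint32(120), uint32(95)
	fmt.Println(MaxUint32(itemsInBody, itemsInHeader)) // 120 – value fed to the block size throttler

	computedStep := uint32(0)               // a rounded-down float step can be zero
	fmt.Println(MaxUint32(1, computedStep)) // 1 – the throttler always moves by at least one item
	fmt.Println(MinUint32(7000, 15000))     // 7000
}
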
lastNotarizedHdrForShard.IsInterfaceNil() { - log.Error("last notarized hdr for shard is nil") + log.Error("last notarized header for shard is nil") return } diff --git a/process/block/export_test.go b/process/block/export_test.go index c0496fe2139..65a18b778e1 100644 --- a/process/block/export_test.go +++ b/process/block/export_test.go @@ -315,15 +315,3 @@ func (sp *shardProcessor) SetHdrForCurrentBlock(headerHash []byte, headerHandler sp.hdrsForCurrBlock.hdrHashAndInfo[string(headerHash)] = &hdrInfo{hdr: headerHandler, usedInBlock: usedInBlock} sp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() } - -func (sp *shardProcessor) SetMissingHdrsForCurrentBlock(missingHdrs uint32) { - sp.hdrsForCurrBlock.mutHdrsForBlock.Lock() - sp.hdrsForCurrBlock.missingHdrs = missingHdrs - sp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() -} - -func (sp *shardProcessor) SetMissingFinalHdrsForCurrentBlock(missingFinalHdrs uint32) { - sp.hdrsForCurrBlock.mutHdrsForBlock.Lock() - sp.hdrsForCurrBlock.missingFinalHdrs = missingFinalHdrs - sp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() -} diff --git a/process/block/interceptedBlockHeader.go b/process/block/interceptedBlockHeader.go index baadd92362d..b1493158f5b 100644 --- a/process/block/interceptedBlockHeader.go +++ b/process/block/interceptedBlockHeader.go @@ -38,17 +38,17 @@ func NewInterceptedHeader( } } -// SetHash sets the hash of this hdr. The hash will also be the ID of this object +// SetHash sets the hash of this header. The hash will also be the ID of this object func (inHdr *InterceptedHeader) SetHash(hash []byte) { inHdr.hash = hash } -// Hash gets the hash of this hdr +// Hash gets the hash of this header func (inHdr *InterceptedHeader) Hash() []byte { return inHdr.hash } -// Shard returns the shard ID for which this hdr is addressed +// Shard returns the shard ID for which this header is addressed func (inHdr *InterceptedHeader) Shard() uint32 { return inHdr.ShardId } @@ -63,7 +63,7 @@ func (inHdr *InterceptedHeader) GetUnderlyingObject() interface{} { return inHdr.Header } -// IntegrityAndValidity checks the integrity and validity of a block hdr wrapper +// IntegrityAndValidity checks the integrity and validity of a block header wrapper func (inHdr *InterceptedHeader) IntegrityAndValidity(coordinator sharding.Coordinator) error { err := inHdr.Integrity(coordinator) if err != nil { @@ -144,7 +144,7 @@ func (inHdr *InterceptedHeader) VerifySig() error { return err } - // get marshalled block hdr without signature and bitmap + // get marshalled block header without signature and bitmap // as this is the message that was signed headerCopy := *inHdr.Header headerCopy.Signature = nil diff --git a/process/block/interceptedMetaBlockHeader.go b/process/block/interceptedMetaBlockHeader.go index 010606a2858..b0fbcdf0227 100644 --- a/process/block/interceptedMetaBlockHeader.go +++ b/process/block/interceptedMetaBlockHeader.go @@ -38,12 +38,12 @@ func NewInterceptedMetaHeader( } } -// SetHash sets the hash of this hdr. The hash will also be the ID of this object +// SetHash sets the hash of this header. 
The hash will also be the ID of this object func (imh *InterceptedMetaHeader) SetHash(hash []byte) { imh.hash = hash } -// Hash gets the hash of this hdr +// Hash gets the hash of this header func (imh *InterceptedMetaHeader) Hash() []byte { return imh.hash } @@ -53,7 +53,7 @@ func (imh *InterceptedMetaHeader) GetMetaHeader() *block.MetaBlock { return imh.MetaBlock } -// IntegrityAndValidity checks the integrity and validity of a block hdr wrapper +// IntegrityAndValidity checks the integrity and validity of a block header wrapper func (imh *InterceptedMetaHeader) IntegrityAndValidity(coordinator sharding.Coordinator) error { err := imh.Integrity(coordinator) if err != nil { @@ -136,7 +136,7 @@ func (imh *InterceptedMetaHeader) VerifySig() error { return err } - // get marshalled block hdr without signature and bitmap + // get marshalled block header without signature and bitmap // as this is the message that was signed headerCopy := *imh.MetaBlock headerCopy.Signature = nil diff --git a/process/block/metablock.go b/process/block/metablock.go index 676a5d88958..8a10f9a8d34 100644 --- a/process/block/metablock.go +++ b/process/block/metablock.go @@ -147,7 +147,7 @@ func (mp *metaProcessor) ProcessBlock( err := mp.checkBlockValidity(chainHandler, headerHandler, bodyHandler) if err != nil { if err == process.ErrBlockHashDoesNotMatch { - log.Info(fmt.Sprintf("requested missing meta hdr with hash %s for shard %d\n", + log.Info(fmt.Sprintf("requested missing meta header with hash %s for shard %d\n", core.ToB64(headerHandler.GetPrevHash()), headerHandler.GetShardID())) @@ -743,7 +743,7 @@ func (mp *metaProcessor) checkShardHeadersFinality(header *block.MetaBlock, high break } - // found a hdr with the next nonce + // found a header with the next nonce tmpHdr := sortedHdrPerShard[shId][i] if tmpHdr.GetNonce() == lastVerifiedHdr.GetNonce()+1 { err := mp.isHdrConstructionValid(tmpHdr, lastVerifiedHdr) @@ -841,7 +841,7 @@ func (mp *metaProcessor) isShardHeaderValidFinal(currHdr *block.Header, lastHdr return true, hdrIds } - // found a hdr with the next nonce + // found a header with the next nonce tmpHdr := sortedShardHdrs[i] if tmpHdr.GetNonce() == lastVerifiedHdr.GetNonce()+1 { err := mp.isHdrConstructionValid(tmpHdr, lastVerifiedHdr) @@ -862,7 +862,7 @@ func (mp *metaProcessor) isShardHeaderValidFinal(currHdr *block.Header, lastHdr return false, nil } -// receivedHeader is a call back function which is called when a new hdr +// receivedHeader is a call back function which is called when a new header // is added in the headers pool func (mp *metaProcessor) receivedHeader(headerHash []byte) { shardHdrsCache := mp.dataPool.ShardHeaders() @@ -885,7 +885,7 @@ func (mp *metaProcessor) receivedHeader(headerHash []byte) { return } - log.Debug(fmt.Sprintf("received hdr with hash %s and nonce %d from network\n", + log.Debug(fmt.Sprintf("received header with hash %s and nonce %d from network\n", core.ToB64(headerHash), header.GetNonce())) @@ -925,7 +925,7 @@ func (mp *metaProcessor) receivedHeader(headerHash []byte) { } // requestFinalMissingHeaders requests the headers needed to accept the current selected headers for processing the -// current block. It requests the nextKValidity headers greater than the highest shard hdr, for each shard, related +// current block. 
It requests the nextKValidity headers greater than the highest shard header, for each shard, related // to the block which should be processed func (mp *metaProcessor) requestFinalMissingHeaders() uint32 { requestedBlockHeaders := uint32(0) @@ -1071,7 +1071,7 @@ func (mp *metaProcessor) createShardInfo( log.Info(fmt.Sprintf("creating shard info has been started: have %d hdrs in pool\n", len(orderedHdrs))) - // save last committed hdr for verification + // save last committed header for verification mp.mutNotarizedHdrs.RLock() if mp.notarizedHdrs == nil { mp.mutNotarizedHdrs.RUnlock() @@ -1178,9 +1178,9 @@ func (mp *metaProcessor) createPeerInfo() ([]block.PeerData, error) { return peerInfo, nil } -// CreateBlockHeader creates a miniblock hdr list given a block body +// CreateBlockHeader creates a miniblock header list given a block body func (mp *metaProcessor) CreateBlockHeader(bodyHandler data.BodyHandler, round uint64, haveTime func() bool) (data.HeaderHandler, error) { - log.Debug(fmt.Sprintf("started creating block hdr in round %d\n", round)) + log.Debug(fmt.Sprintf("started creating block header in round %d\n", round)) // TODO: add PrevRandSeed and RandSeed when BLS signing is completed header := &block.MetaBlock{ ShardInfo: make([]block.ShardData, 0), @@ -1210,7 +1210,7 @@ func (mp *metaProcessor) CreateBlockHeader(bodyHandler data.BodyHandler, round u mp.blockSizeThrottler.Add( round, - uint32(core.Max(int32(header.ItemsInBody()), int32(header.ItemsInHeader())))) + core.MaxUint32(header.ItemsInBody(), header.ItemsInHeader())) return header, nil } @@ -1233,7 +1233,7 @@ func (mp *metaProcessor) MarshalizedDataToBroadcast( mrsData := make(map[uint32][]byte) mrsTxs := make(map[string][][]byte) - // send headers which can validate the current hdr + // send headers which can validate the current header return mrsData, mrsTxs, nil } @@ -1357,7 +1357,7 @@ func (mp *metaProcessor) DecodeBlockBody(dta []byte) data.BodyHandler { return &body } -// DecodeBlockHeader method decodes block hdr from a given byte array +// DecodeBlockHeader method decodes block header from a given byte array func (mp *metaProcessor) DecodeBlockHeader(dta []byte) data.HeaderHandler { if dta == nil { return nil diff --git a/process/block/metablock_test.go b/process/block/metablock_test.go index e614803bf4d..271413ff620 100644 --- a/process/block/metablock_test.go +++ b/process/block/metablock_test.go @@ -901,7 +901,7 @@ func TestMetaProcessor_CommitBlockOkValsShouldWork(t *testing.T) { assert.Nil(t, err) assert.True(t, removeHdrWasCalled) assert.True(t, forkDetectorAddCalled) - //this should sleep as there is an async call to display current hdr and block in CommitBlock + //this should sleep as there is an async call to display current header and block in CommitBlock time.Sleep(time.Second) } @@ -1440,7 +1440,7 @@ func TestMetaProcessor_CreateShardInfoShouldWorkHdrsAdded(t *testing.T) { //put the existing headers inside datapool - //hdr shard 0 + //header shard 0 prevHash, _ := mp.ComputeHeaderHash(mp.LastNotarizedHdrForShard(0).(*block.Header)) headers = append(headers, &block.Header{ Round: 10, @@ -1464,7 +1464,7 @@ func TestMetaProcessor_CreateShardInfoShouldWorkHdrsAdded(t *testing.T) { pool.ShardHeaders().Put(hdrHash1, headers[0]) pool.ShardHeaders().Put(hdrHash11, headers[1]) - // hdr shard 1 + // header shard 1 prevHash, _ = mp.ComputeHeaderHash(mp.LastNotarizedHdrForShard(1).(*block.Header)) headers = append(headers, &block.Header{ Round: 10, @@ -1488,7 +1488,7 @@ func 
TestMetaProcessor_CreateShardInfoShouldWorkHdrsAdded(t *testing.T) { pool.ShardHeaders().Put(hdrHash2, headers[2]) pool.ShardHeaders().Put(hdrHash22, headers[3]) - // hdr shard 2 + // header shard 2 prevHash, _ = mp.ComputeHeaderHash(mp.LastNotarizedHdrForShard(2).(*block.Header)) headers = append(headers, &block.Header{ Round: 10, @@ -1608,7 +1608,7 @@ func TestMetaProcessor_CreateShardInfoEmptyBlockHDRRoundTooHigh(t *testing.T) { //put the existing headers inside datapool - //hdr shard 0 + //header shard 0 prevHash, _ := mp.ComputeHeaderHash(mp.LastNotarizedHdrForShard(0).(*block.Header)) headers = append(headers, &block.Header{ Round: 10, @@ -1632,7 +1632,7 @@ func TestMetaProcessor_CreateShardInfoEmptyBlockHDRRoundTooHigh(t *testing.T) { pool.ShardHeaders().Put(hdrHash1, headers[0]) pool.ShardHeaders().Put(hdrHash11, headers[1]) - // hdr shard 1 + // header shard 1 prevHash, _ = mp.ComputeHeaderHash(mp.LastNotarizedHdrForShard(1).(*block.Header)) headers = append(headers, &block.Header{ Round: 10, @@ -1656,7 +1656,7 @@ func TestMetaProcessor_CreateShardInfoEmptyBlockHDRRoundTooHigh(t *testing.T) { pool.ShardHeaders().Put(hdrHash2, headers[2]) pool.ShardHeaders().Put(hdrHash22, headers[3]) - // hdr shard 2 + // header shard 2 prevHash, _ = mp.ComputeHeaderHash(mp.LastNotarizedHdrForShard(2).(*block.Header)) headers = append(headers, &block.Header{ Round: 10, @@ -1817,7 +1817,7 @@ func TestMetaProcessor_CreateLastNotarizedHdrs(t *testing.T) { //put the existing headers inside datapool - //hdr shard 0 + //header shard 0 prevHash, _ := mp.ComputeHeaderHash(mp.LastNotarizedHdrForShard(0).(*block.Header)) prevHdr := &block.Header{ Round: 10, @@ -1847,13 +1847,13 @@ func TestMetaProcessor_CreateLastNotarizedHdrs(t *testing.T) { shDataPrev := block.ShardData{ShardId: 0, HeaderHash: prevHash} metaHdr.ShardInfo = append(metaHdr.ShardInfo, shDataPrev) - // test hdr not in pool and defer called + // test header not in pool and defer called err := mp.SaveLastNotarizedHeader(metaHdr) assert.Equal(t, process.ErrMissingHeader, err) notarizedHdrs = mp.NotarizedHdrs() assert.Equal(t, firstNonce, mp.LastNotarizedHdrForShard(currHdr.ShardId).GetNonce()) - // wrong hdr type in pool and defer called + // wrong header type in pool and defer called pool.ShardHeaders().Put(currHash, metaHdr) pool.ShardHeaders().Put(prevHash, prevHdr) @@ -1911,7 +1911,7 @@ func TestMetaProcessor_CheckShardHeadersValidity(t *testing.T) { //put the existing headers inside datapool - //hdr shard 0 + //header shard 0 prevHash, _ := mp.ComputeHeaderHash(mp.LastNotarizedHdrForShard(0).(*block.Header)) prevHdr := &block.Header{ Round: 10, @@ -2131,7 +2131,7 @@ func TestMetaProcessor_CheckShardHeadersFinality(t *testing.T) { //put the existing headers inside datapool - //hdr shard 0 + //header shard 0 prevHash, _ := mp.ComputeHeaderHash(mp.LastNotarizedHdrForShard(0).(*block.Header)) prevHdr := &block.Header{ Round: 10, @@ -2245,7 +2245,7 @@ func TestMetaProcessor_IsHdrConstructionValid(t *testing.T) { //put the existing headers inside datapool - //hdr shard 0 + //header shard 0 prevHash, _ := mp.ComputeHeaderHash(mp.LastNotarizedHdrForShard(0).(*block.Header)) prevHdr := &block.Header{ Round: 10, @@ -2354,7 +2354,7 @@ func TestMetaProcessor_IsShardHeaderValidFinal(t *testing.T) { //put the existing headers inside datapool - //hdr shard 0 + //header shard 0 prevHash, _ := mp.ComputeHeaderHash(mp.LastNotarizedHdrForShard(0).(*block.Header)) prevHdr := &block.Header{ Round: 10, diff --git a/process/block/shardblock.go 
b/process/block/shardblock.go index 462da745f06..7a18d9e0757 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -1554,7 +1554,7 @@ func (sp *shardProcessor) CreateBlockHeader(bodyHandler data.BodyHandler, round sp.blockSizeThrottler.Add( round, - uint32(core.Max(int32(header.ItemsInBody()), int32(header.ItemsInHeader())))) + core.MaxUint32(header.ItemsInBody(), header.ItemsInHeader())) return header, nil } @@ -1701,7 +1701,7 @@ func (sp *shardProcessor) getMaxMiniBlocksSpaceRemained( ) int32 { mbSpaceRemainedInBlock := int32(maxItemsInBlock) - int32(itemsAddedInBlock) mbSpaceRemainedInCache := int32(core.MaxMiniBlocksInBlock) - int32(miniBlocksAddedInBlock) - maxMbSpaceRemained := core.Min(mbSpaceRemainedInBlock, mbSpaceRemainedInCache) + maxMbSpaceRemained := core.MinInt32(mbSpaceRemainedInBlock, mbSpaceRemainedInCache) return maxMbSpaceRemained } diff --git a/process/block/shardblock_test.go b/process/block/shardblock_test.go index 7a5758a5229..28c273114e7 100644 --- a/process/block/shardblock_test.go +++ b/process/block/shardblock_test.go @@ -1721,7 +1721,7 @@ func TestShardProcessor_CommitBlockNoTxInPoolShouldErr(t *testing.T) { txHash := []byte("txHash") rootHash := []byte("root hash") - hdrHash := []byte("hdr hash") + hdrHash := []byte("header hash") hdr := &block.Header{ Nonce: 1, Round: 1, @@ -1803,7 +1803,7 @@ func TestShardProcessor_CommitBlockOkValsShouldWork(t *testing.T) { txHash := []byte("tx_hash1") rootHash := []byte("root hash") - hdrHash := []byte("hdr hash") + hdrHash := []byte("header hash") randSeed := []byte("rand seed") prevHdr := &block.Header{ @@ -1897,7 +1897,7 @@ func TestShardProcessor_CommitBlockCallsIndexerMethods(t *testing.T) { txHash := []byte("tx_hash1") rootHash := []byte("root hash") - hdrHash := []byte("hdr hash") + hdrHash := []byte("header hash") randSeed := []byte("rand seed") prevHdr := &block.Header{ @@ -2115,7 +2115,7 @@ func TestNode_ComputeNewNoncePrevHashShouldWork(t *testing.T) { } hasher.ComputeCalled = func(s string) []byte { if s == "hdrHeaderMarshalized" { - return []byte("hdr hash") + return []byte("header hash") } if s == "txBlockBodyMarshalized" { return []byte("tx block body hash") @@ -2542,13 +2542,16 @@ func TestShardProcessor_ReceivedMetaBlockNoMissingMiniBlocksShouldPass(t *testin miniBlockHash1 := []byte("miniblock hash 1 found in cache") - metaBlock := mock.HeaderHandlerStub{ - GetMiniBlockHeadersWithDstCalled: func(destId uint32) map[string]uint32 { - return map[string]uint32{ - string(miniBlockHash1): 0, - } - }, - } + metaBlock := &block.MetaBlock{ + Nonce: 1, + Round: 1, + ShardInfo: []block.ShardData{ + block.ShardData{ + ShardId: 1, + ShardMiniBlockHeaders: []block.ShardMiniBlockHeader{ + block.ShardMiniBlockHeader{Hash: miniBlockHash1, SenderShardId: 1, ReceiverShardId: 0}, + }}, + }} //put this metaBlock inside datapool metaBlockHash := []byte("metablock hash") @@ -3179,7 +3182,7 @@ func TestShardProcessor_IsHdrConstructionValid(t *testing.T) { //put the existing headers inside datapool - //hdr shard 0 + //header shard 0 prevHash, _ := sp.ComputeHeaderHash(sp.LastNotarizedHdrForShard(sharding.MetachainShardId).(*block.MetaBlock)) prevHdr := &block.MetaBlock{ Round: 10, @@ -3288,7 +3291,7 @@ func TestShardProcessor_RemoveAndSaveLastNotarizedMetaHdrNoDstMB(t *testing.T) { RandSeed: prevRandSeed} notarizedHdrs[sharding.MetachainShardId] = append(notarizedHdrs[sharding.MetachainShardId], lastHdr) - //hdr shard 0 + //header shard 0 prevHash, _ := 
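
// Illustrative sketch (editor's note, not part of the patch): the remaining-space rule used
// by getMaxMiniBlocksSpaceRemained in the shardblock.go hunk above, run with sample numbers
// (assumed). Both terms are computed as int32 so an overshoot shows up as a negative value,
// and MinInt32 keeps the tighter of the two limits (items left in the block vs. the
// miniblock cap).
package main

import "fmt"

func MinInt32(a int32, b int32) int32 {
	if a < b {
		return a
	}
	return b
}

func main() {
	maxItemsInBlock, itemsAddedInBlock := uint32(15000), uint32(14200)
	maxMiniBlocksInBlock, miniBlocksAddedInBlock := uint32(100), uint32(97)

	spaceInBlock := int32(maxItemsInBlock) - int32(itemsAddedInBlock)           // 800
	spaceInCache := int32(maxMiniBlocksInBlock) - int32(miniBlocksAddedInBlock) // 3

	fmt.Println(MinInt32(spaceInBlock, spaceInCache)) // 3 – the miniblock cap is the binding limit here
}
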
sp.ComputeHeaderHash(sp.LastNotarizedHdrForShard(sharding.MetachainShardId).(*block.MetaBlock)) prevHdr := &block.MetaBlock{ Round: 10, @@ -3313,7 +3316,7 @@ func TestShardProcessor_RemoveAndSaveLastNotarizedMetaHdrNoDstMB(t *testing.T) { mbHeaders := make([]block.MiniBlockHeader, 0) blockHeader := &block.Header{} - // test hdr not in pool and defer called + // test header not in pool and defer called processedMetaHdrs, err := sp.GetProcessedMetaBlocksFromHeader(blockHeader) assert.Nil(t, err) @@ -3328,7 +3331,7 @@ func TestShardProcessor_RemoveAndSaveLastNotarizedMetaHdrNoDstMB(t *testing.T) { assert.Equal(t, firstNonce, sp.LastNotarizedHdrForShard(sharding.MetachainShardId).GetNonce()) assert.Equal(t, 0, len(processedMetaHdrs)) - // wrong hdr type in pool and defer called + // wrong header type in pool and defer called dataPool.MetaBlocks().Put(currHash, shardHdr) sp.SetHdrForCurrentBlock(currHash, shardHdr, true) @@ -3480,7 +3483,7 @@ func TestShardProcessor_RemoveAndSaveLastNotarizedMetaHdrNotAllMBFinished(t *tes miniBlocks := make([]block.MiniBlock, 0) miniBlocks = append(miniBlocks, miniblock1, miniblock2) - //hdr shard 0 + //header shard 0 prevHash, _ := sp.ComputeHeaderHash(sp.LastNotarizedHdrForShard(sharding.MetachainShardId).(*block.MetaBlock)) prevHdr := &block.MetaBlock{ Round: 10, @@ -3615,7 +3618,7 @@ func TestShardProcessor_RemoveAndSaveLastNotarizedMetaHdrAllMBFinished(t *testin miniBlocks := make([]block.MiniBlock, 0) miniBlocks = append(miniBlocks, miniblock1, miniblock2) - //hdr shard 0 + //header shard 0 prevHash, _ := sp.ComputeHeaderHash(sp.LastNotarizedHdrForShard(sharding.MetachainShardId).(*block.MetaBlock)) prevHdr := &block.MetaBlock{ Round: 10, diff --git a/process/mock/headerHandlerStub.go b/process/mock/headerHandlerStub.go deleted file mode 100644 index b35061cb422..00000000000 --- a/process/mock/headerHandlerStub.go +++ /dev/null @@ -1,117 +0,0 @@ -package mock - -type HeaderHandlerStub struct { - GetMiniBlockHeadersWithDstCalled func(destId uint32) map[string]uint32 -} - -func (hhs *HeaderHandlerStub) GetEpoch() uint32 { - panic("implement me") -} - -func (hhs *HeaderHandlerStub) GetRootHash() []byte { - panic("implement me") -} - -func (hhs *HeaderHandlerStub) GetPrevHash() []byte { - panic("implement me") -} - -func (hhs *HeaderHandlerStub) GetPrevRandSeed() []byte { - panic("implement me") -} - -func (hhs *HeaderHandlerStub) GetRandSeed() []byte { - panic("implement me") -} - -func (hhs *HeaderHandlerStub) GetPubKeysBitmap() []byte { - panic("implement me") -} - -func (hhs *HeaderHandlerStub) GetSignature() []byte { - panic("implement me") -} - -func (hhs *HeaderHandlerStub) GetTimeStamp() uint64 { - panic("implement me") -} - -func (hhs *HeaderHandlerStub) GetTxCount() uint32 { - panic("implement me") -} - -func (hhs *HeaderHandlerStub) SetNonce(n uint64) { - panic("implement me") -} - -func (hhs *HeaderHandlerStub) SetEpoch(e uint32) { - panic("implement me") -} - -func (hhs *HeaderHandlerStub) SetRound(r uint64) { - panic("implement me") -} - -func (hhs *HeaderHandlerStub) SetTimeStamp(ts uint64) { - panic("implement me") -} - -func (hhs *HeaderHandlerStub) SetRootHash(rHash []byte) { - panic("implement me") -} - -func (hhs *HeaderHandlerStub) SetPrevHash(pvHash []byte) { - panic("implement me") -} - -func (hhs *HeaderHandlerStub) SetPrevRandSeed(pvRandSeed []byte) { - panic("implement me") -} - -func (hhs *HeaderHandlerStub) SetRandSeed(randSeed []byte) { - panic("implement me") -} - -func (hhs *HeaderHandlerStub) SetPubKeysBitmap(pkbm []byte) { - 
panic("implement me") -} - -func (hhs *HeaderHandlerStub) SetSignature(sg []byte) { - panic("implement me") -} - -func (hhs *HeaderHandlerStub) SetTxCount(txCount uint32) { - panic("implement me") -} - -func (hhs *HeaderHandlerStub) ItemsInBody() uint32 { - panic("implement me") -} - -func (hhs *HeaderHandlerStub) ItemsInHeader() uint32 { - panic("implement me") -} - -func (hhs *HeaderHandlerStub) GetShardID() uint32 { - return 1 -} - -func (hhs *HeaderHandlerStub) GetNonce() uint64 { - return 1 -} - -func (hhs *HeaderHandlerStub) GetRound() uint64 { - return 1 -} - -func (hhs *HeaderHandlerStub) GetMiniBlockHeadersWithDst(destId uint32) map[string]uint32 { - return hhs.GetMiniBlockHeadersWithDstCalled(destId) -} - -// IsInterfaceNil returns true if there is no value under the interface -func (hhs *HeaderHandlerStub) IsInterfaceNil() bool { - if hhs == nil { - return true - } - return false -} diff --git a/process/throttle/block.go b/process/throttle/block.go index bf7c7fe553e..f5f37147c98 100644 --- a/process/throttle/block.go +++ b/process/throttle/block.go @@ -112,7 +112,7 @@ func (bst *blockSizeThrottle) getMaxItemsWhenSucceed(lastActionMaxItems uint32) return noOfMaxItemsUsedWithoutSucceed } - increasedNoOfItems := uint32(core.Max(1, int32(float32(noOfMaxItemsUsedWithoutSucceed-lastActionMaxItems)*jumpAboveFactor))) + increasedNoOfItems := core.MaxUint32(1, uint32(float32(noOfMaxItemsUsedWithoutSucceed-lastActionMaxItems)*jumpAboveFactor)) return lastActionMaxItems + increasedNoOfItems } @@ -136,7 +136,7 @@ func (bst *blockSizeThrottle) getMaxItemsWhenNotSucceed(lastActionMaxItems uint3 return noOfMaxItemsUsedWithSucceed } - decreasedNoOfItems := uint32(core.Max(1, int32(float32(lastActionMaxItems-noOfMaxItemsUsedWithSucceed)*jumpBelowFactor))) + decreasedNoOfItems := core.MaxUint32(1, uint32(float32(lastActionMaxItems-noOfMaxItemsUsedWithSucceed)*jumpBelowFactor)) return lastActionMaxItems - decreasedNoOfItems } diff --git a/process/throttle/block_test.go b/process/throttle/block_test.go index 732ca9c29e3..de7527c3680 100644 --- a/process/throttle/block_test.go +++ b/process/throttle/block_test.go @@ -134,7 +134,7 @@ func TestBlockSizeThrottle_ComputeMaxItemsShouldSetMaxItemsToMinItemsInBlockWhen func TestBlockSizeThrottle_ComputeMaxItemsShouldSetMaxItemsToADecreasedValueWhenLastActionNotSucceed(t *testing.T) { bst, _ := throttle.NewBlockSizeThrottle() - lastActionMaxItems1 := uint32(core.Max(12000, process.MinItemsInBlock)) + lastActionMaxItems1 := core.MaxUint32(12000, process.MinItemsInBlock) bst.SetMaxItems(lastActionMaxItems1) bst.Add(2, 0) bst.SetSucceed(2, false) @@ -143,7 +143,7 @@ func TestBlockSizeThrottle_ComputeMaxItemsShouldSetMaxItemsToADecreasedValueWhen assert.Equal(t, decreasedValue, bst.MaxItemsToAdd()) bst.SetSucceed(2, true) - lastActionMaxItems2 := uint32(core.Max(14000, process.MinItemsInBlock)) + lastActionMaxItems2 := core.MaxUint32(14000, process.MinItemsInBlock) bst.SetMaxItems(lastActionMaxItems2) bst.Add(3, 0) bst.SetSucceed(3, false) @@ -179,12 +179,12 @@ func TestBlockSizeThrottle_GetMaxItemsWhenSucceedShouldReturnNoOfMaxItemsUsedWit func TestBlockSizeThrottle_GetMaxItemsWhenSucceedShouldIncreaseMaxItemsWithAtLeastOneUnit(t *testing.T) { bst, _ := throttle.NewBlockSizeThrottle() - maxItemsUsedWithoutSucceed := uint32(core.Min(process.MinItemsInBlock+1, process.MaxItemsInBlock)) + maxItemsUsedWithoutSucceed := core.MinUint32(process.MinItemsInBlock+1, process.MaxItemsInBlock) bst.SetMaxItems(maxItemsUsedWithoutSucceed) bst.Add(2, 0) maxItemsWhenSucceed := 
bst.GetMaxItemsWhenSucceed(process.MinItemsInBlock) - assert.Equal(t, uint32(core.Min(process.MinItemsInBlock+1, process.MaxItemsInBlock)), maxItemsWhenSucceed) + assert.Equal(t, core.MinUint32(process.MinItemsInBlock+1, process.MaxItemsInBlock), maxItemsWhenSucceed) } func TestBlockSizeThrottle_GetMaxItemsWhenSucceedShouldIncreaseMaxItems(t *testing.T) { @@ -258,7 +258,7 @@ func TestBlockSizeThrottle_GetMaxItemsWhenNotSucceedShouldDecreaseMaxItemsWithAt func TestBlockSizeThrottle_GetMaxItemsWhenNotSucceedShouldDecreaseMaxItems(t *testing.T) { bst, _ := throttle.NewBlockSizeThrottle() - maxItemsUsedWithSucceed := uint32(core.Max(7000, process.MinItemsInBlock)) + maxItemsUsedWithSucceed := core.MaxUint32(7000, process.MinItemsInBlock) bst.SetMaxItems(maxItemsUsedWithSucceed) bst.Add(2, 0) bst.SetSucceed(2, true) From 7d719de7f478dca6ca1de4756d1308698a01d953 Mon Sep 17 00:00:00 2001 From: miiu96 Date: Fri, 4 Oct 2019 13:57:15 +0300 Subject: [PATCH 204/234] EN-4247 - fix after review --- .../spos/commonSubround/subroundStartRound.go | 3 +- core/indexer/data.go | 1 + core/indexer/elasticsearch.go | 4 +-- core/indexer/interface.go | 2 +- core/indexer/nilIndexer.go | 2 +- core/mock/indexerMock.go | 2 +- node/mock/indexerMock.go | 2 +- process/block/export_test.go | 9 ++++++ process/block/shardblock.go | 29 +++++++++++++++++-- process/block/shardblock_test.go | 15 ++++++++++ process/mock/indexerMock.go | 2 +- 11 files changed, 60 insertions(+), 11 deletions(-) diff --git a/consensus/spos/commonSubround/subroundStartRound.go b/consensus/spos/commonSubround/subroundStartRound.go index 8b135704c9d..886e3d48359 100644 --- a/consensus/spos/commonSubround/subroundStartRound.go +++ b/consensus/spos/commonSubround/subroundStartRound.go @@ -203,13 +203,14 @@ func (sr *SubroundStartRound) indexRoundIfNeeded(pubKeys []string) { round := sr.Rounder().Index() roundInfo := indexer.RoundInfo{ + Index: uint64(round), SignersIndexes: signersIndexes, BlockWasProposed: false, ShardId: shardId, Timestamp: time.Duration(sr.RoundTimeStamp.Unix()), } - go sr.indexer.SaveRoundInfo(round, roundInfo) + go sr.indexer.SaveRoundInfo(roundInfo) } func (sr *SubroundStartRound) generateNextConsensusGroup(roundIndex int64) error { diff --git a/core/indexer/data.go b/core/indexer/data.go index 78cecb5faf2..2bce4546333 100644 --- a/core/indexer/data.go +++ b/core/indexer/data.go @@ -52,6 +52,7 @@ type ValidatorsPublicKeys struct { // RoundInfo is a structure containing block signers and shard id type RoundInfo struct { + Index uint64 `json:"-"` SignersIndexes []uint64 `json:"signersIndexes"` BlockWasProposed bool `json:"blockWasProposed"` ShardId uint32 `json:"shardId"` diff --git a/core/indexer/elasticsearch.go b/core/indexer/elasticsearch.go index 14281de96df..f7248756913 100644 --- a/core/indexer/elasticsearch.go +++ b/core/indexer/elasticsearch.go @@ -241,7 +241,7 @@ func (ei *elasticIndexer) SaveBlock( } // SaveRoundInfo will save data about a round on elastic search -func (ei *elasticIndexer) SaveRoundInfo(round int64, roundInfo RoundInfo) { +func (ei *elasticIndexer) SaveRoundInfo(roundInfo RoundInfo) { var buff bytes.Buffer marshalizedRoundInfo, err := ei.marshalizer.Marshal(roundInfo) @@ -255,7 +255,7 @@ func (ei *elasticIndexer) SaveRoundInfo(round int64, roundInfo RoundInfo) { req := esapi.IndexRequest{ Index: roundIndex, - DocumentID: strconv.Itoa(int(round)), + DocumentID: strconv.Itoa(int(roundInfo.Index)), Body: bytes.NewReader(buff.Bytes()), Refresh: "true", } diff --git a/core/indexer/interface.go 
b/core/indexer/interface.go index 27936991cf4..ad29e9aa896 100644 --- a/core/indexer/interface.go +++ b/core/indexer/interface.go @@ -9,7 +9,7 @@ import ( // This could be an elasticsearch index, a MySql database or any other external services. type Indexer interface { SaveBlock(body data.BodyHandler, header data.HeaderHandler, txPool map[string]data.TransactionHandler, signersIndexes []uint64) - SaveRoundInfo(round int64, roundInfo RoundInfo) + SaveRoundInfo(roundInfo RoundInfo) UpdateTPS(tpsBenchmark statistics.TPSBenchmark) SaveValidatorsPubKeys(validatorsPubKeys map[uint32][][]byte) IsInterfaceNil() bool diff --git a/core/indexer/nilIndexer.go b/core/indexer/nilIndexer.go index fceed26cce5..a63e08462c9 100644 --- a/core/indexer/nilIndexer.go +++ b/core/indexer/nilIndexer.go @@ -20,7 +20,7 @@ func (ni *NilIndexer) SaveBlock(body data.BodyHandler, header data.HeaderHandler } // SaveRoundInfo will do nothing -func (ni *NilIndexer) SaveRoundInfo(round int64, info RoundInfo) { +func (ni *NilIndexer) SaveRoundInfo(info RoundInfo) { return } diff --git a/core/mock/indexerMock.go b/core/mock/indexerMock.go index 4f1d5ae5059..7f43e764569 100644 --- a/core/mock/indexerMock.go +++ b/core/mock/indexerMock.go @@ -20,7 +20,7 @@ func (im *IndexerMock) UpdateTPS(tpsBenchmark statistics.TPSBenchmark) { panic("implement me") } -func (im *IndexerMock) SaveRoundInfo(round int64, roundInfo indexer.RoundInfo) { +func (im *IndexerMock) SaveRoundInfo(roundInfo indexer.RoundInfo) { panic("implement me") } diff --git a/node/mock/indexerMock.go b/node/mock/indexerMock.go index 4f1d5ae5059..7f43e764569 100644 --- a/node/mock/indexerMock.go +++ b/node/mock/indexerMock.go @@ -20,7 +20,7 @@ func (im *IndexerMock) UpdateTPS(tpsBenchmark statistics.TPSBenchmark) { panic("implement me") } -func (im *IndexerMock) SaveRoundInfo(round int64, roundInfo indexer.RoundInfo) { +func (im *IndexerMock) SaveRoundInfo(roundInfo indexer.RoundInfo) { panic("implement me") } diff --git a/process/block/export_test.go b/process/block/export_test.go index 17ae9410ae3..651a70c4834 100644 --- a/process/block/export_test.go +++ b/process/block/export_test.go @@ -310,3 +310,12 @@ func (sp *shardProcessor) IsMiniBlockProcessed(metaBlockHash []byte, miniBlockHa func (sp *shardProcessor) AddProcessedMiniBlock(metaBlockHash []byte, miniBlockHash []byte) { sp.addProcessedMiniBlock(metaBlockHash, miniBlockHash) } + +func (sp *shardProcessor) CalculateRoundDuration( + lastBlockTimestamp uint64, + currentBlockTimestamp uint64, + lastBlockRound uint64, + currentBlockRound uint64, +) uint64 { + return sp.calculateRoundDuration(lastBlockTimestamp, currentBlockTimestamp, lastBlockRound, currentBlockRound) +} diff --git a/process/block/shardblock.go b/process/block/shardblock.go index 6acdf763ffe..d542ba699b8 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -530,6 +530,7 @@ func (sp *shardProcessor) indexBlockIfNeeded( signersIndexes := sp.nodesCoordinator.GetValidatorsIndexes(pubKeys) roundInfo := indexer.RoundInfo{ + Index: header.GetRound(), SignersIndexes: signersIndexes, BlockWasProposed: true, ShardId: shardId, @@ -537,7 +538,7 @@ func (sp *shardProcessor) indexBlockIfNeeded( } go sp.core.Indexer().SaveBlock(body, header, txPool, signersIndexes) - go sp.core.Indexer().SaveRoundInfo(int64(header.GetRound()), roundInfo) + go sp.core.Indexer().SaveRoundInfo(roundInfo) if lastBlockHeader == nil { return @@ -546,6 +547,7 @@ func (sp *shardProcessor) indexBlockIfNeeded( lastBlockRound := lastBlockHeader.GetRound() 
currentBlockRound := header.GetRound() + roundDuration := sp.calculateRoundDuration(lastBlockHeader.GetTimeStamp(), header.GetTimeStamp(), lastBlockRound, currentBlockRound) for i := lastBlockRound + 1; i < currentBlockRound; i++ { publicKeys, err := sp.nodesCoordinator.GetValidatorsPublicKeys(lastBlockHeader.GetRandSeed(), i, shardId) if err != nil { @@ -553,15 +555,36 @@ func (sp *shardProcessor) indexBlockIfNeeded( } signersIndexes = sp.nodesCoordinator.GetValidatorsIndexes(publicKeys) roundInfo = indexer.RoundInfo{ + Index: i, SignersIndexes: signersIndexes, BlockWasProposed: true, ShardId: shardId, - Timestamp: time.Duration(header.GetTimeStamp() - (currentBlockRound - i)), + Timestamp: time.Duration(header.GetTimeStamp() - ((currentBlockRound - i) * roundDuration)), } - go sp.core.Indexer().SaveRoundInfo(int64(i), roundInfo) + go sp.core.Indexer().SaveRoundInfo(roundInfo) } +} + +func (sp *shardProcessor) calculateRoundDuration( + lastBlockTimestamp uint64, + currentBlockTimestamp uint64, + lastBlockRound uint64, + currentBlockRound uint64, +) uint64 { + if lastBlockTimestamp >= currentBlockTimestamp { + log.Error("last block timestamp is greater or equals than current block timestamp") + return 0 + } + if lastBlockRound >= currentBlockRound { + log.Error("last block round is greater or equals than current block round") + return 0 + } + + diffTimeStamp := currentBlockTimestamp - lastBlockTimestamp + diffRounds := currentBlockRound - lastBlockRound + return diffTimeStamp / diffRounds } // RestoreBlockIntoPools restores the TxBlock and MetaBlock into associated pools diff --git a/process/block/shardblock_test.go b/process/block/shardblock_test.go index d2750dbc509..4154123adbb 100644 --- a/process/block/shardblock_test.go +++ b/process/block/shardblock_test.go @@ -4191,3 +4191,18 @@ func TestShardProcessor_RestoreMetaBlockIntoPoolVerifyMiniblocks(t *testing.T) { assert.Nil(t, err) assert.True(t, sp.IsMiniBlockProcessed(metaHash, testMBHash)) } + +func TestNewShardProcessor_CalculateRoundDuration(t *testing.T) { + t.Parallel() + + arguments := CreateMockArguments() + sp, _ := blproc.NewShardProcessor(arguments) + lastBlockTimestamp := uint64(80) + currentBlockTimestamp := uint64(100) + lastBlockRound := uint64(5) + currentBlockRound := uint64(10) + expectedRoundDuration := uint64(4) + + roundDuration := sp.CalculateRoundDuration(lastBlockTimestamp, currentBlockTimestamp, lastBlockRound, currentBlockRound) + assert.Equal(t, expectedRoundDuration, roundDuration) +} diff --git a/process/mock/indexerMock.go b/process/mock/indexerMock.go index 2ae76a97b9b..2d2552a6da5 100644 --- a/process/mock/indexerMock.go +++ b/process/mock/indexerMock.go @@ -21,7 +21,7 @@ func (im *IndexerMock) UpdateTPS(tpsBenchmark statistics.TPSBenchmark) { panic("implement me") } -func (im *IndexerMock) SaveRoundInfo(round int64, roundInfo indexer.RoundInfo) { +func (im *IndexerMock) SaveRoundInfo(roundInfo indexer.RoundInfo) { return } From 2f8ec0272c002e94844c39b90033cac048c466db Mon Sep 17 00:00:00 2001 From: miiu96 Date: Fri, 4 Oct 2019 14:24:47 +0300 Subject: [PATCH 205/234] EN-4247 - fix after review --- core/indexer/elasticsearch.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/indexer/elasticsearch.go b/core/indexer/elasticsearch.go index f7248756913..ae8de78fd35 100644 --- a/core/indexer/elasticsearch.go +++ b/core/indexer/elasticsearch.go @@ -255,7 +255,7 @@ func (ei *elasticIndexer) SaveRoundInfo(roundInfo RoundInfo) { req := esapi.IndexRequest{ Index: roundIndex, - DocumentID: 
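
// Illustrative sketch (editor's note, not part of the patch): the round-duration arithmetic
// added above, run with the same numbers as TestNewShardProcessor_CalculateRoundDuration.
// calculateRoundDurationSketch mirrors the private method; main and the loop are assumptions
// showing how the gap rounds between two blocks get back-filled timestamps.
package main

import "fmt"

func calculateRoundDurationSketch(lastTS, currTS, lastRound, currRound uint64) uint64 {
	if lastTS >= currTS || lastRound >= currRound {
		return 0 // the real method logs an error in these cases
	}
	return (currTS - lastTS) / (currRound - lastRound)
}

func main() {
	lastTS, currTS := uint64(80), uint64(100)
	lastRound, currRound := uint64(5), uint64(10)

	duration := calculateRoundDurationSketch(lastTS, currTS, lastRound, currRound)
	fmt.Println(duration) // 4, as asserted by the unit test

	// timestamps back-filled for the rounds missed between the two blocks
	for i := lastRound + 1; i < currRound; i++ {
		fmt.Printf("round %d -> timestamp %d\n", i, currTS-(currRound-i)*duration)
	}
	// round 6 -> 84, round 7 -> 88, round 8 -> 92, round 9 -> 96
}
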
strconv.Itoa(int(roundInfo.Index)), + DocumentID: strconv.FormatInt(int64(roundInfo.Index), 10), Body: bytes.NewReader(buff.Bytes()), Refresh: "true", } From 140578d836590e70e613b7769833a55465c485cf Mon Sep 17 00:00:00 2001 From: Sebastian Marian Date: Fri, 4 Oct 2019 20:06:29 +0300 Subject: [PATCH 206/234] * Fixed after review --- core/computers.go | 8 +- process/block/export_test.go | 20 ++-- process/block/shardblock.go | 179 +++++++++++++++++-------------- process/block/shardblock_test.go | 20 ++-- process/common.go | 11 ++ process/common_test.go | 19 ++++ process/interface.go | 2 +- 7 files changed, 160 insertions(+), 99 deletions(-) diff --git a/core/computers.go b/core/computers.go index e06d1742b98..d04b880699f 100644 --- a/core/computers.go +++ b/core/computers.go @@ -1,6 +1,6 @@ package core -// MaxInt32 returns the maximum number between two given +// MaxInt32 returns the maximum of two given numbers func MaxInt32(a int32, b int32) int32 { if a > b { return a @@ -8,7 +8,7 @@ func MaxInt32(a int32, b int32) int32 { return b } -// MinInt32 returns the minimum number between two given +// MinInt32 returns the minimum of two given numbers func MinInt32(a int32, b int32) int32 { if a < b { return a @@ -16,7 +16,7 @@ func MinInt32(a int32, b int32) int32 { return b } -// MaxUint32 returns the maximum number between two given +// MaxUint32 returns the maximum of two given numbers func MaxUint32(a uint32, b uint32) uint32 { if a > b { return a @@ -24,7 +24,7 @@ func MaxUint32(a uint32, b uint32) uint32 { return b } -// MinUint32 returns the minimum number between two given +// MinUint32 returns the minimum of two given numbers func MinUint32(a uint32, b uint32) uint32 { if a < b { return a diff --git a/process/block/export_test.go b/process/block/export_test.go index 08856ae8b10..1e4b5b3db2e 100644 --- a/process/block/export_test.go +++ b/process/block/export_test.go @@ -44,8 +44,8 @@ func (sp *shardProcessor) CreateMiniBlocks(noShards uint32, maxItemsInBlock uint return sp.createMiniBlocks(noShards, maxItemsInBlock, round, haveTime) } -func (sp *shardProcessor) GetProcessedMetaBlocksFromHeader(header *block.Header) ([]data.HeaderHandler, error) { - return sp.getProcessedMetaBlocksFromHeader(header) +func (sp *shardProcessor) GetOrderedProcessedMetaBlocksFromHeader(header *block.Header) ([]data.HeaderHandler, error) { + return sp.getOrderedProcessedMetaBlocksFromHeader(header) } func (sp *shardProcessor) RemoveProcessedMetaBlocksFromPool(processedMetaHdrs []data.HeaderHandler) error { @@ -245,8 +245,8 @@ func (sp *shardProcessor) GetHashAndHdrStruct(header data.HeaderHandler, hash [] return &hashAndHdr{header, hash} } -func (sp *shardProcessor) RequestFinalMissingHeaders() uint32 { - return sp.requestFinalMissingHeaders() +func (sp *shardProcessor) RequestMissingFinalityAttestingHeaders() uint32 { + return sp.requestMissingFinalityAttestingHeaders() } func (sp *shardProcessor) CheckMetaHeadersValidityAndFinality() error { @@ -270,8 +270,8 @@ func (bp *baseProcessor) SetBlockSizeThrottler(blockSizeThrottler process.BlockS bp.blockSizeThrottler = blockSizeThrottler } -func (sp *shardProcessor) SetCurrHighestMetaHdrNonce(value uint64) { - sp.currHighestMetaHdrNonce = value +func (sp *shardProcessor) SetHighestHdrNonceForCurrentBlock(value uint64) { + sp.hdrsForCurrBlock.highestHdrNonce = value } func (sp *shardProcessor) DisplayLogInfo( @@ -324,3 +324,11 @@ func (sp *shardProcessor) CalculateRoundDuration( ) uint64 { return sp.calculateRoundDuration(lastBlockTimestamp, 
currentBlockTimestamp, lastBlockRound, currentBlockRound) } + +func (sp *shardProcessor) CreateBlockStarted() { + sp.createBlockStarted() +} + +func (sp *shardProcessor) AddProcessedCrossMiniBlocksFromHeader(header *block.Header) error { + return sp.addProcessedCrossMiniBlocksFromHeader(header) +} diff --git a/process/block/shardblock.go b/process/block/shardblock.go index 92457724ea6..b5e3c76b3f8 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -32,10 +32,11 @@ type hdrInfo struct { } type hdrForBlock struct { - missingHdrs uint32 - missingFinalHdrs uint32 - mutHdrsForBlock sync.RWMutex - hdrHashAndInfo map[string]*hdrInfo + missingHdrs uint32 + missingFinalityAttestingHdrs uint32 + highestHdrNonce uint64 + mutHdrsForBlock sync.RWMutex + hdrHashAndInfo map[string]*hdrInfo } // shardProcessor implements shardProcessor interface and actually it tries to execute block @@ -44,9 +45,8 @@ type shardProcessor struct { dataPool dataRetriever.PoolsHolder metaBlockFinality int - chRcvAllMetaHdrs chan bool - hdrsForCurrBlock hdrForBlock - currHighestMetaHdrNonce uint64 + chRcvAllMetaHdrs chan bool + hdrsForCurrBlock hdrForBlock processedMiniBlocks map[string]map[string]struct{} mutProcessedMiniBlocks sync.RWMutex @@ -205,9 +205,9 @@ func (sp *shardProcessor) ProcessBlock( return err } - sp.CreateBlockStarted() + sp.createBlockStarted() sp.txCoordinator.RequestBlockTransactions(body) - requestedMetaHdrs, requestedFinalMetaHdrs := sp.requestMetaHeaders(header) + requestedMetaHdrs, requestedFinalityAttestingMetaHdrs := sp.requestMetaHeaders(header) if haveTime() < 0 { return process.ErrTimeIsOut @@ -218,14 +218,17 @@ func (sp *shardProcessor) ProcessBlock( return err } - if requestedMetaHdrs > 0 || requestedFinalMetaHdrs > 0 { - log.Info(fmt.Sprintf("requested %d missing meta headers and %d final meta headers\n", requestedMetaHdrs, requestedFinalMetaHdrs)) + if requestedMetaHdrs > 0 || requestedFinalityAttestingMetaHdrs > 0 { + log.Info(fmt.Sprintf("requested %d missing meta headers and %d finality attesting meta headers\n", + requestedMetaHdrs, + requestedFinalityAttestingMetaHdrs)) + err = sp.waitForMetaHdrHashes(haveTime()) sp.hdrsForCurrBlock.mutHdrsForBlock.Lock() missingHdrs := sp.hdrsForCurrBlock.missingHdrs sp.hdrsForCurrBlock.missingHdrs = 0 - sp.hdrsForCurrBlock.missingFinalHdrs = 0 + sp.hdrsForCurrBlock.missingFinalityAttestingHdrs = 0 sp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() if requestedMetaHdrs > 0 { @@ -261,7 +264,7 @@ func (sp *shardProcessor) ProcessBlock( } }() - processedMetaHdrs, err := sp.getProcessedMetaBlocksFromMiniBlocks(body) + processedMetaHdrs, err := sp.getOrderedProcessedMetaBlocksFromMiniBlocks(body) if err != nil { return err } @@ -320,7 +323,7 @@ func (sp *shardProcessor) checkMetaHeadersValidityAndFinality() error { return err } - usedMetaHdrs, err := sp.sortHdrsForCurrentBlock(true) + usedMetaHdrs, err := sp.sortMetaHeadersForCurrentBlockByNonce(true) if err != nil { return err } @@ -352,7 +355,7 @@ func (sp *shardProcessor) checkMetaHdrFinality(header data.HeaderHandler) error return process.ErrNilBlockHeader } - finalMetaHdrs, err := sp.sortHdrsForCurrentBlock(false) + finalityAttestingMetaHdrs, err := sp.sortMetaHeadersForCurrentBlockByNonce(false) if err != nil { return err } @@ -360,14 +363,14 @@ func (sp *shardProcessor) checkMetaHdrFinality(header data.HeaderHandler) error lastVerifiedHdr := header // verify if there are "K" block after current to make this one final nextBlocksVerified := 0 - for _, metaHdr := range finalMetaHdrs 
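
// Illustrative sketch (editor's note, not part of the patch): the finality rule applied by
// checkMetaHdrFinality in the hunk above – a meta header is accepted as final only when
// metaBlockFinality ("K") attesting headers with strictly consecutive nonces can be chained
// on top of it. headerSketch, the validity callback and the sample data are simplified
// stand-ins for the real header types and isHdrConstructionValid.
package main

import "fmt"

type headerSketch struct{ nonce uint64 }

func isFinal(candidate headerSketch, attesting []headerSketch, finality int, constructionOK func(curr, prev headerSketch) bool) bool {
	lastVerified := candidate
	verified := 0
	for _, hdr := range attesting { // attesting headers are assumed sorted by nonce
		if verified >= finality {
			break
		}
		if hdr.nonce == lastVerified.nonce+1 && constructionOK(hdr, lastVerified) {
			lastVerified = hdr
			verified++
		}
	}
	return verified >= finality
}

func main() {
	candidate := headerSketch{nonce: 10}
	attesting := []headerSketch{{nonce: 11}}
	alwaysValid := func(curr, prev headerSketch) bool { return true }

	fmt.Println(isFinal(candidate, attesting, 1, alwaysValid)) // true: one attesting successor suffices when K = 1
	fmt.Println(isFinal(candidate, attesting, 2, alwaysValid)) // false: a second consecutive header is still missing
}
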
{ + for _, metaHdr := range finalityAttestingMetaHdrs { if nextBlocksVerified >= sp.metaBlockFinality { break } // found a header with the next nonce if metaHdr.Nonce == lastVerifiedHdr.GetNonce()+1 { - err := sp.isHdrConstructionValid(metaHdr, lastVerifiedHdr) + err = sp.isHdrConstructionValid(metaHdr, lastVerifiedHdr) if err != nil { log.Debug(err.Error()) continue @@ -629,7 +632,7 @@ func (sp *shardProcessor) restoreMetaBlockIntoPool(miniBlockHashes map[string]ui // as long as the transactions limit for the block has not been reached and there is still time to add transactions func (sp *shardProcessor) CreateBlockBody(round uint64, haveTime func() bool) (data.BodyHandler, error) { log.Debug(fmt.Sprintf("started creating block body in round %d\n", round)) - sp.CreateBlockStarted() + sp.createBlockStarted() sp.blockSizeThrottler.ComputeMaxItems() miniBlocks, err := sp.createMiniBlocks(sp.shardCoordinator.NumberOfShards(), sp.blockSizeThrottler.MaxItemsToAdd(), round, haveTime) @@ -722,7 +725,12 @@ func (sp *shardProcessor) CommitBlock( log.LogIfError(errNotCritical) } - processedMetaHdrs, err := sp.getProcessedMetaBlocksFromHeader(header) + processedMetaHdrs, err := sp.getOrderedProcessedMetaBlocksFromHeader(header) + if err != nil { + return err + } + + err = sp.addProcessedCrossMiniBlocksFromHeader(header) if err != nil { return err } @@ -824,9 +832,7 @@ func (sp *shardProcessor) getHighestHdrForOwnShardFromMetachain( ownShIdHdrs := make([]data.HeaderHandler, 0) - sort.Slice(processedHdrs, func(i, j int) bool { - return processedHdrs[i].GetNonce() < processedHdrs[j].GetNonce() - }) + process.SortHeadersByNonce(processedHdrs) for i := 0; i < len(processedHdrs); i++ { hdr, ok := processedHdrs[i].(*block.MetaBlock) @@ -846,9 +852,7 @@ func (sp *shardProcessor) getHighestHdrForOwnShardFromMetachain( ownShIdHdrs = append(ownShIdHdrs, &block.Header{}) } - sort.Slice(ownShIdHdrs, func(i, j int) bool { - return ownShIdHdrs[i].GetNonce() < ownShIdHdrs[j].GetNonce() - }) + process.SortHeadersByNonce(ownShIdHdrs) ownShIdHdrsHashes := make([][]byte, 0) for i := 0; i < len(ownShIdHdrs); i++ { @@ -891,8 +895,8 @@ func (sp *shardProcessor) getHighestHdrForShardFromMetachain(shardId uint32, hdr return ownShIdHdr, nil } -// getProcessedMetaBlocksFromHeader returns all the meta blocks fully processed -func (sp *shardProcessor) getProcessedMetaBlocksFromHeader(header *block.Header) ([]data.HeaderHandler, error) { +// getOrderedProcessedMetaBlocksFromHeader returns all the meta blocks fully processed +func (sp *shardProcessor) getOrderedProcessedMetaBlocksFromHeader(header *block.Header) ([]data.HeaderHandler, error) { if header == nil { return nil, process.ErrNilBlockHeader } @@ -904,11 +908,24 @@ func (sp *shardProcessor) getProcessedMetaBlocksFromHeader(header *block.Header) log.Debug(fmt.Sprintf("cross mini blocks in body: %d\n", len(miniBlockHashes))) - processedMetaBlocks, processedCrossMiniBlocksHashes, err := sp.getProcessedMetaBlocksFromMiniBlockHashes(miniBlockHashes) + processedMetaBlocks, err := sp.getOrderedProcessedMetaBlocksFromMiniBlockHashes(miniBlockHashes) if err != nil { return nil, err } + return processedMetaBlocks, nil +} + +func (sp *shardProcessor) addProcessedCrossMiniBlocksFromHeader(header *block.Header) error { + if header == nil { + return process.ErrNilBlockHeader + } + + miniBlockHashes := make(map[int][]byte, 0) + for i := 0; i < len(header.MiniBlockHeaders); i++ { + miniBlockHashes[i] = header.MiniBlockHeaders[i].Hash + } + sp.hdrsForCurrBlock.mutHdrsForBlock.RLock() for 
metaBlockHash, hdrInfo := range sp.hdrsForCurrBlock.hdrHashAndInfo { if !hdrInfo.usedInBlock { @@ -918,23 +935,28 @@ func (sp *shardProcessor) getProcessedMetaBlocksFromHeader(header *block.Header) metaBlock, ok := hdrInfo.hdr.(*block.MetaBlock) if !ok { sp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() - return nil, process.ErrWrongTypeAssertion + return process.ErrWrongTypeAssertion } crossMiniBlockHashes := metaBlock.GetMiniBlockHeadersWithDst(sp.shardCoordinator.SelfId()) - for hash := range crossMiniBlockHashes { - if processedCrossMiniBlocksHashes[hash] { - sp.addProcessedMiniBlock([]byte(metaBlockHash), []byte(hash)) + for key, miniBlockHash := range miniBlockHashes { + _, ok = crossMiniBlockHashes[string(miniBlockHash)] + if !ok { + continue } + + sp.addProcessedMiniBlock([]byte(metaBlockHash), miniBlockHash) + + delete(miniBlockHashes, key) } } sp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() - return processedMetaBlocks, nil + return nil } -// getProcessedMetaBlocks returns all the meta blocks fully processed -func (sp *shardProcessor) getProcessedMetaBlocksFromMiniBlocks( +// getOrderedProcessedMetaBlocksFromMiniBlocks returns all the meta blocks fully processed ordered +func (sp *shardProcessor) getOrderedProcessedMetaBlocksFromMiniBlocks( usedMiniBlocks []*block.MiniBlock, ) ([]data.HeaderHandler, error) { @@ -954,14 +976,14 @@ func (sp *shardProcessor) getProcessedMetaBlocksFromMiniBlocks( } log.Debug(fmt.Sprintf("cross mini blocks in body: %d\n", len(miniBlockHashes))) - processedMetaBlocks, _, err := sp.getProcessedMetaBlocksFromMiniBlockHashes(miniBlockHashes) + processedMetaBlocks, err := sp.getOrderedProcessedMetaBlocksFromMiniBlockHashes(miniBlockHashes) return processedMetaBlocks, err } -func (sp *shardProcessor) getProcessedMetaBlocksFromMiniBlockHashes( +func (sp *shardProcessor) getOrderedProcessedMetaBlocksFromMiniBlockHashes( miniBlockHashes map[int][]byte, -) ([]data.HeaderHandler, map[string]bool, error) { +) ([]data.HeaderHandler, error) { processedMetaHdrs := make([]data.HeaderHandler, 0) processedCrossMiniBlocksHashes := make(map[string]bool) @@ -975,7 +997,7 @@ func (sp *shardProcessor) getProcessedMetaBlocksFromMiniBlockHashes( metaBlock, ok := hdrInfo.hdr.(*block.MetaBlock) if !ok { sp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() - return nil, nil, process.ErrWrongTypeAssertion + return nil, process.ErrWrongTypeAssertion } log.Debug(fmt.Sprintf("meta header nonce: %d\n", metaBlock.Nonce)) @@ -1012,13 +1034,9 @@ func (sp *shardProcessor) getProcessedMetaBlocksFromMiniBlockHashes( } sp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() - if len(processedMetaHdrs) > 1 { - sort.Slice(processedMetaHdrs, func(i, j int) bool { - return processedMetaHdrs[i].GetNonce() < processedMetaHdrs[j].GetNonce() - }) - } + process.SortHeadersByNonce(processedMetaHdrs) - return processedMetaHdrs, processedCrossMiniBlocksHashes, nil + return processedMetaHdrs, nil } func (sp *shardProcessor) removeProcessedMetaBlocksFromPool(processedMetaHdrs []data.HeaderHandler) error { @@ -1102,33 +1120,36 @@ func (sp *shardProcessor) receivedMetaBlock(metaBlockHash []byte) { sp.hdrsForCurrBlock.mutHdrsForBlock.Lock() - if sp.hdrsForCurrBlock.missingHdrs > 0 || sp.hdrsForCurrBlock.missingFinalHdrs > 0 { + haveMissingMetaHeaders := sp.hdrsForCurrBlock.missingHdrs > 0 || sp.hdrsForCurrBlock.missingFinalityAttestingHdrs > 0 + if haveMissingMetaHeaders { hdrInfoForHash := sp.hdrsForCurrBlock.hdrHashAndInfo[string(metaBlockHash)] - if hdrInfoForHash != nil && (hdrInfoForHash.hdr == nil || 
hdrInfoForHash.hdr.IsInterfaceNil()) { + receivedMissingMetaHeader := hdrInfoForHash != nil && (hdrInfoForHash.hdr == nil || hdrInfoForHash.hdr.IsInterfaceNil()) + if receivedMissingMetaHeader { hdrInfoForHash.hdr = metaBlock sp.hdrsForCurrBlock.missingHdrs-- - if metaBlock.Nonce > sp.currHighestMetaHdrNonce { - sp.currHighestMetaHdrNonce = metaBlock.Nonce + if metaBlock.Nonce > sp.hdrsForCurrBlock.highestHdrNonce { + sp.hdrsForCurrBlock.highestHdrNonce = metaBlock.Nonce } } + // attesting something if sp.hdrsForCurrBlock.missingHdrs == 0 { - missingFinalHdrs := sp.hdrsForCurrBlock.missingFinalHdrs - sp.hdrsForCurrBlock.missingFinalHdrs = sp.requestFinalMissingHeaders() - if sp.hdrsForCurrBlock.missingFinalHdrs == 0 { - log.Info(fmt.Sprintf("received %d missing final meta headers\n", missingFinalHdrs)) + missingFinalityAttestingMetaHdrs := sp.hdrsForCurrBlock.missingFinalityAttestingHdrs + sp.hdrsForCurrBlock.missingFinalityAttestingHdrs = sp.requestMissingFinalityAttestingHeaders() + if sp.hdrsForCurrBlock.missingFinalityAttestingHdrs == 0 { + log.Info(fmt.Sprintf("received %d missing finality attesting meta headers\n", missingFinalityAttestingMetaHdrs)) } else { - log.Info(fmt.Sprintf("requested %d missing final meta headers\n", sp.hdrsForCurrBlock.missingFinalHdrs)) + log.Info(fmt.Sprintf("requested %d missing finality attesting meta headers\n", sp.hdrsForCurrBlock.missingFinalityAttestingHdrs)) } } - missingHdrs := sp.hdrsForCurrBlock.missingHdrs - missingFinalHdrs := sp.hdrsForCurrBlock.missingFinalHdrs + missingMetaHdrs := sp.hdrsForCurrBlock.missingHdrs + missingFinalityAttestingMetaHdrs := sp.hdrsForCurrBlock.missingFinalityAttestingHdrs sp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() - allMissingNeededHdrsReceived := missingHdrs == 0 && missingFinalHdrs == 0 - if allMissingNeededHdrsReceived { + allMissingMetaHeadersReceived := missingMetaHdrs == 0 && missingFinalityAttestingMetaHdrs == 0 + if allMissingMetaHeadersReceived { sp.chRcvAllMetaHdrs <- true } } else { @@ -1149,13 +1170,13 @@ func (sp *shardProcessor) receivedMetaBlock(metaBlockHash []byte) { sp.txCoordinator.RequestMiniBlocks(metaBlock) } -// requestFinalMissingHeaders requests the headers needed to accept the current selected headers for processing the +// requestMissingFinalityAttestingHeaders requests the headers needed to accept the current selected headers for processing the // current block. 
It requests the metaBlockFinality headers greater than the highest meta header related to the block // which should be processed -func (sp *shardProcessor) requestFinalMissingHeaders() uint32 { +func (sp *shardProcessor) requestMissingFinalityAttestingHeaders() uint32 { requestedBlockHeaders := uint32(0) - for i := sp.currHighestMetaHdrNonce + 1; i <= sp.currHighestMetaHdrNonce+uint64(sp.metaBlockFinality); i++ { - if sp.currHighestMetaHdrNonce == uint64(0) { + for i := sp.hdrsForCurrBlock.highestHdrNonce + 1; i <= sp.hdrsForCurrBlock.highestHdrNonce+uint64(sp.metaBlockFinality); i++ { + if sp.hdrsForCurrBlock.highestHdrNonce == uint64(0) { continue } @@ -1192,19 +1213,18 @@ func (sp *shardProcessor) requestMetaHeaders(header *block.Header) (uint32, uint } if sp.hdrsForCurrBlock.missingHdrs == 0 { - sp.hdrsForCurrBlock.missingFinalHdrs = sp.requestFinalMissingHeaders() + sp.hdrsForCurrBlock.missingFinalityAttestingHdrs = sp.requestMissingFinalityAttestingHeaders() } requestedHdrs := sp.hdrsForCurrBlock.missingHdrs - requestedFinalHdrs := sp.hdrsForCurrBlock.missingFinalHdrs + requestedFinalityAttestingHdrs := sp.hdrsForCurrBlock.missingFinalityAttestingHdrs sp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() - return requestedHdrs, requestedFinalHdrs + return requestedHdrs, requestedFinalityAttestingHdrs } func (sp *shardProcessor) computeMissingAndExistingMetaHeaders(header *block.Header) [][]byte { missingHeadersHashes := make([][]byte, 0) - sp.currHighestMetaHdrNonce = uint64(0) sp.hdrsForCurrBlock.mutHdrsForBlock.Lock() for i := 0; i < len(header.MetaBlockHashes); i++ { @@ -1220,8 +1240,8 @@ func (sp *shardProcessor) computeMissingAndExistingMetaHeaders(header *block.Hea sp.hdrsForCurrBlock.hdrHashAndInfo[string(header.MetaBlockHashes[i])] = &hdrInfo{hdr: hdr, usedInBlock: true} - if hdr.Nonce > sp.currHighestMetaHdrNonce { - sp.currHighestMetaHdrNonce = hdr.Nonce + if hdr.Nonce > sp.hdrsForCurrBlock.highestHdrNonce { + sp.hdrsForCurrBlock.highestHdrNonce = hdr.Nonce } } sp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() @@ -1495,12 +1515,12 @@ func (sp *shardProcessor) createMiniBlocks( return nil, process.ErrNilTransactionPool } - destMeMiniBlocks, txs, hdrs, err := sp.createAndProcessCrossMiniBlocksDstMe(noShards, maxItemsInBlock, round, haveTime) + destMeMiniBlocks, nbTxs, nbHdrs, err := sp.createAndProcessCrossMiniBlocksDstMe(noShards, maxItemsInBlock, round, haveTime) if err != nil { log.Info(err.Error()) } - processedMetaHdrs, errNotCritical := sp.getProcessedMetaBlocksFromMiniBlocks(destMeMiniBlocks) + processedMetaHdrs, errNotCritical := sp.getOrderedProcessedMetaBlocksFromMiniBlocks(destMeMiniBlocks) if errNotCritical != nil { log.Debug(errNotCritical.Error()) } @@ -1510,16 +1530,16 @@ func (sp *shardProcessor) createMiniBlocks( return nil, err } - log.Info(fmt.Sprintf("processed %d miniblocks and %d txs with destination in self shard\n", len(destMeMiniBlocks), txs)) + log.Info(fmt.Sprintf("processed %d miniblocks and %d txs with destination in self shard\n", len(destMeMiniBlocks), nbTxs)) if len(destMeMiniBlocks) > 0 { miniBlocks = append(miniBlocks, destMeMiniBlocks...) 
} - maxTxSpaceRemained := int32(maxItemsInBlock) - int32(txs) + maxTxSpaceRemained := int32(maxItemsInBlock) - int32(nbTxs) maxMbSpaceRemained := sp.getMaxMiniBlocksSpaceRemained( maxItemsInBlock, - uint32(len(destMeMiniBlocks))+hdrs, + uint32(len(destMeMiniBlocks))+nbHdrs, uint32(len(miniBlocks))) mbFromMe := sp.txCoordinator.CreateMbsAndProcessTransactionsFromMe( @@ -1587,7 +1607,7 @@ func (sp *shardProcessor) CreateBlockHeader(bodyHandler data.BodyHandler, round sp.appStatusHandler.SetUInt64Value(core.MetricNumTxInBlock, uint64(totalTxCount)) sp.appStatusHandler.SetUInt64Value(core.MetricNumMiniBlocks, uint64(len(body))) - header.MetaBlockHashes = sp.sortHdrsHashesForCurrentBlock(true) + header.MetaBlockHashes = sp.sortMetaHeaderHashesForCurrentBlockByNonce(true) sp.blockSizeThrottler.Add( round, @@ -1743,17 +1763,19 @@ func (sp *shardProcessor) getMaxMiniBlocksSpaceRemained( return maxMbSpaceRemained } -func (sp *shardProcessor) CreateBlockStarted() { +func (sp *shardProcessor) createBlockStarted() { sp.txCoordinator.CreateBlockStarted() sp.hdrsForCurrBlock.mutHdrsForBlock.Lock() sp.hdrsForCurrBlock.missingHdrs = 0 - sp.hdrsForCurrBlock.missingFinalHdrs = 0 + sp.hdrsForCurrBlock.missingFinalityAttestingHdrs = 0 + sp.hdrsForCurrBlock.highestHdrNonce = 0 sp.hdrsForCurrBlock.hdrHashAndInfo = make(map[string]*hdrInfo) sp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() } -func (sp *shardProcessor) sortHdrsForCurrentBlock(usedInBlock bool) ([]*block.MetaBlock, error) { +//TODO: remove bool parameter and give instead the set to sort +func (sp *shardProcessor) sortMetaHeadersForCurrentBlockByNonce(usedInBlock bool) ([]*block.MetaBlock, error) { hdrsForCurrentBlock := make([]*block.MetaBlock, 0) sp.hdrsForCurrBlock.mutHdrsForBlock.RLock() @@ -1781,7 +1803,8 @@ func (sp *shardProcessor) sortHdrsForCurrentBlock(usedInBlock bool) ([]*block.Me return hdrsForCurrentBlock, nil } -func (sp *shardProcessor) sortHdrsHashesForCurrentBlock(usedInBlock bool) [][]byte { +//TODO: remove bool parameter and give instead the set to sort +func (sp *shardProcessor) sortMetaHeaderHashesForCurrentBlockByNonce(usedInBlock bool) [][]byte { hdrsForCurrentBlockInfo := make([]*nonceAndHashInfo, 0) sp.hdrsForCurrBlock.mutHdrsForBlock.RLock() diff --git a/process/block/shardblock_test.go b/process/block/shardblock_test.go index f6c0a1d0db1..d1e8da10797 100644 --- a/process/block/shardblock_test.go +++ b/process/block/shardblock_test.go @@ -1344,8 +1344,8 @@ func TestShardProcessor_IsMetaHeaderFinalShouldPass(t *testing.T) { assert.True(t, res) } -//-------- requestFinalMissingHeaders -func TestShardProcessor_RequestFinalMissingHeaders(t *testing.T) { +//-------- requestMissingFinalityAttestingHeaders +func TestShardProcessor_RequestMissingFinalityAttestingHeaders(t *testing.T) { t.Parallel() tdp := mock.NewPoolsHolderMock() @@ -1353,8 +1353,8 @@ func TestShardProcessor_RequestFinalMissingHeaders(t *testing.T) { arguments.DataPool = tdp sp, _ := blproc.NewShardProcessor(arguments) - sp.SetCurrHighestMetaHdrNonce(1) - res := sp.RequestFinalMissingHeaders() + sp.SetHighestHdrNonceForCurrentBlock(1) + res := sp.RequestMissingFinalityAttestingHeaders() assert.Equal(t, res > 0, true) } @@ -2964,7 +2964,7 @@ func TestShardProcessor_GetProcessedMetaBlockFromPoolShouldWork(t *testing.T) { blockHeader := &block.Header{MetaBlockHashes: hashes, MiniBlockHeaders: mbHeaders} - _, err := bp.GetProcessedMetaBlocksFromHeader(blockHeader) + err := bp.AddProcessedCrossMiniBlocksFromHeader(blockHeader) assert.Nil(t, err) //check 
WasMiniBlockProcessed for remaining metablocks @@ -3317,7 +3317,7 @@ func TestShardProcessor_RemoveAndSaveLastNotarizedMetaHdrNoDstMB(t *testing.T) { blockHeader := &block.Header{} // test header not in pool and defer called - processedMetaHdrs, err := sp.GetProcessedMetaBlocksFromHeader(blockHeader) + processedMetaHdrs, err := sp.GetOrderedProcessedMetaBlocksFromHeader(blockHeader) assert.Nil(t, err) err = sp.SaveLastNotarizedHeader(sharding.MetachainShardId, processedMetaHdrs) @@ -3339,7 +3339,7 @@ func TestShardProcessor_RemoveAndSaveLastNotarizedMetaHdrNoDstMB(t *testing.T) { hashes = append(hashes, currHash) blockHeader = &block.Header{MetaBlockHashes: hashes, MiniBlockHeaders: mbHeaders} - processedMetaHdrs, err = sp.GetProcessedMetaBlocksFromHeader(blockHeader) + processedMetaHdrs, err = sp.GetOrderedProcessedMetaBlocksFromHeader(blockHeader) assert.Equal(t, process.ErrWrongTypeAssertion, err) err = sp.SaveLastNotarizedHeader(sharding.MetachainShardId, processedMetaHdrs) @@ -3365,7 +3365,7 @@ func TestShardProcessor_RemoveAndSaveLastNotarizedMetaHdrNoDstMB(t *testing.T) { hashes = append(hashes, prevHash) blockHeader = &block.Header{MetaBlockHashes: hashes, MiniBlockHeaders: mbHeaders} - processedMetaHdrs, err = sp.GetProcessedMetaBlocksFromHeader(blockHeader) + processedMetaHdrs, err = sp.GetOrderedProcessedMetaBlocksFromHeader(blockHeader) assert.Nil(t, err) err = sp.SaveLastNotarizedHeader(sharding.MetachainShardId, processedMetaHdrs) @@ -3520,7 +3520,7 @@ func TestShardProcessor_RemoveAndSaveLastNotarizedMetaHdrNotAllMBFinished(t *tes hashes = append(hashes, prevHash) blockHeader := &block.Header{MetaBlockHashes: hashes, MiniBlockHeaders: mbHeaders} - processedMetaHdrs, err := sp.GetProcessedMetaBlocksFromHeader(blockHeader) + processedMetaHdrs, err := sp.GetOrderedProcessedMetaBlocksFromHeader(blockHeader) assert.Nil(t, err) err = sp.SaveLastNotarizedHeader(sharding.MetachainShardId, processedMetaHdrs) @@ -3660,7 +3660,7 @@ func TestShardProcessor_RemoveAndSaveLastNotarizedMetaHdrAllMBFinished(t *testin hashes = append(hashes, prevHash) blockHeader := &block.Header{MetaBlockHashes: hashes, MiniBlockHeaders: mbHeaders} - processedMetaHdrs, err := sp.GetProcessedMetaBlocksFromHeader(blockHeader) + processedMetaHdrs, err := sp.GetOrderedProcessedMetaBlocksFromHeader(blockHeader) assert.Nil(t, err) assert.Equal(t, 2, len(processedMetaHdrs)) diff --git a/process/common.go b/process/common.go index 5cca6b46cd1..840667a59df 100644 --- a/process/common.go +++ b/process/common.go @@ -1,6 +1,8 @@ package process import ( + "sort" + "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/data/transaction" @@ -552,3 +554,12 @@ func getHeaderHashFromStorageWithNonce( return hash, nil } + +// SortHeadersByNonce will sort a given list of headers by nonce +func SortHeadersByNonce(headers []data.HeaderHandler) { + if len(headers) > 1 { + sort.Slice(headers, func(i, j int) bool { + return headers[i].GetNonce() < headers[j].GetNonce() + }) + } +} diff --git a/process/common_test.go b/process/common_test.go index c8baa447161..42bad1ed51f 100644 --- a/process/common_test.go +++ b/process/common_test.go @@ -7,6 +7,7 @@ import ( "testing" "time" + "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/data/transaction" "github.com/ElrondNetwork/elrond-go/dataRetriever" @@ -2110,3 +2111,21 @@ func TestGetTransactionHandlerFromStorageShouldWork(t *testing.T) { 
assert.Nil(t, err) assert.Equal(t, txFromPool, tx) } + +func TestSortHeadersByNonceShouldWork(t *testing.T) { + headers := []data.HeaderHandler{ + &block.Header{Nonce: 3}, + &block.Header{Nonce: 2}, + &block.Header{Nonce: 1}, + } + + assert.Equal(t, uint64(3), headers[0].GetNonce()) + assert.Equal(t, uint64(2), headers[1].GetNonce()) + assert.Equal(t, uint64(1), headers[2].GetNonce()) + + process.SortHeadersByNonce(headers) + + assert.Equal(t, uint64(1), headers[0].GetNonce()) + assert.Equal(t, uint64(2), headers[1].GetNonce()) + assert.Equal(t, uint64(3), headers[2].GetNonce()) +} diff --git a/process/interface.go b/process/interface.go index 3c124ceebbb..f0b6c932381 100644 --- a/process/interface.go +++ b/process/interface.go @@ -399,7 +399,7 @@ type PoolsCleaner interface { IsInterfaceNil() bool } -// InterceptorThrottler can determine if the a new go routine can start +// InterceptorThrottler can determine if a new go routine can start type InterceptorThrottler interface { CanProcess() bool StartProcessing() From 1d6a9f432810a61bf4a4df1fab115e172d3f2c9f Mon Sep 17 00:00:00 2001 From: miiu96 Date: Mon, 7 Oct 2019 11:03:44 +0300 Subject: [PATCH 207/234] EN-4222 - fix after review --- process/block/baseProcess.go | 33 ++++------- process/block/metablock.go | 32 +++------- process/block/metablock_test.go | 102 ++++++++++++++++---------------- process/block/shardblock.go | 35 +++-------- 4 files changed, 80 insertions(+), 122 deletions(-) diff --git a/process/block/baseProcess.go b/process/block/baseProcess.go index 8413b239092..3dd6cf7e0bf 100644 --- a/process/block/baseProcess.go +++ b/process/block/baseProcess.go @@ -492,45 +492,38 @@ func displayHeader(headerHandler data.HeaderHandler) []*display.LineData { } // checkProcessorNilParameters will check the imput parameters for nil values -func checkProcessorNilParameters( - accounts state.AccountsAdapter, - forkDetector process.ForkDetector, - hasher hashing.Hasher, - marshalizer marshal.Marshalizer, - store dataRetriever.StorageService, - shardCoordinator sharding.Coordinator, - nodesCoordinator sharding.NodesCoordinator, - specialAddressHandler process.SpecialAddressHandler, - uint64Converter typeConverters.Uint64ByteSliceConverter, -) error { +func checkProcessorNilParameters(arguments ArgBaseProcessor) error { - if accounts == nil || accounts.IsInterfaceNil() { + if arguments.Accounts == nil || arguments.Accounts.IsInterfaceNil() { return process.ErrNilAccountsAdapter } - if forkDetector == nil || forkDetector.IsInterfaceNil() { + if arguments.ForkDetector == nil || arguments.ForkDetector.IsInterfaceNil() { return process.ErrNilForkDetector } - if hasher == nil || hasher.IsInterfaceNil() { + if arguments.Hasher == nil || arguments.Hasher.IsInterfaceNil() { return process.ErrNilHasher } - if marshalizer == nil || marshalizer.IsInterfaceNil() { + if arguments.Marshalizer == nil || arguments.Marshalizer.IsInterfaceNil() { return process.ErrNilMarshalizer } - if store == nil || store.IsInterfaceNil() { + if arguments.Store == nil || arguments.Store.IsInterfaceNil() { return process.ErrNilStorage } - if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { + if arguments.ShardCoordinator == nil || arguments.ShardCoordinator.IsInterfaceNil() { return process.ErrNilShardCoordinator } - if nodesCoordinator == nil || nodesCoordinator.IsInterfaceNil() { + if arguments.NodesCoordinator == nil || arguments.NodesCoordinator.IsInterfaceNil() { return process.ErrNilNodesCoordinator } - if specialAddressHandler == nil || 
specialAddressHandler.IsInterfaceNil() { + if arguments.SpecialAddressHandler == nil || arguments.SpecialAddressHandler.IsInterfaceNil() { return process.ErrNilSpecialAddressHandler } - if uint64Converter == nil || uint64Converter.IsInterfaceNil() { + if arguments.Uint64Converter == nil || arguments.Uint64Converter.IsInterfaceNil() { return process.ErrNilUint64Converter } + if arguments.RequestHandler == nil || arguments.RequestHandler.IsInterfaceNil() { + return process.ErrNilRequestHandler + } return nil } diff --git a/process/block/metablock.go b/process/block/metablock.go index a7f8af847b6..8786b86e56b 100644 --- a/process/block/metablock.go +++ b/process/block/metablock.go @@ -21,36 +21,21 @@ import ( // metaProcessor implements metaProcessor interface and actually it tries to execute block type metaProcessor struct { *baseProcessor - core serviceContainer.Core - dataPool dataRetriever.MetaPoolsHolder - + core serviceContainer.Core + dataPool dataRetriever.MetaPoolsHolder currHighestShardHdrsNonces map[uint32]uint64 requestedShardHdrsHashes map[string]bool allNeededShardHdrsFound bool mutRequestedShardHdrsHashes sync.RWMutex - - shardsHeadersNonce *sync.Map - - nextKValidity uint32 - - chRcvAllHdrs chan bool - - headersCounter *headersCounter + shardsHeadersNonce *sync.Map + nextKValidity uint32 + chRcvAllHdrs chan bool + headersCounter *headersCounter } // NewMetaProcessor creates a new metaProcessor object func NewMetaProcessor(arguments ArgMetaProcessor) (*metaProcessor, error) { - - err := checkProcessorNilParameters( - arguments.Accounts, - arguments.ForkDetector, - arguments.Hasher, - arguments.Marshalizer, - arguments.Store, - arguments.ShardCoordinator, - arguments.NodesCoordinator, - arguments.SpecialAddressHandler, - arguments.Uint64Converter) + err := checkProcessorNilParameters(arguments.ArgBaseProcessor) if err != nil { return nil, err } @@ -61,9 +46,6 @@ func NewMetaProcessor(arguments ArgMetaProcessor) (*metaProcessor, error) { if arguments.DataPool.ShardHeaders() == nil || arguments.DataPool.ShardHeaders().IsInterfaceNil() { return nil, process.ErrNilHeadersDataPool } - if arguments.RequestHandler == nil || arguments.RequestHandler.IsInterfaceNil() { - return nil, process.ErrNilRequestHandler - } blockSizeThrottler, err := throttle.NewBlockSizeThrottle() if err != nil { diff --git a/process/block/metablock_test.go b/process/block/metablock_test.go index d9d93a7d322..0711087bc7b 100644 --- a/process/block/metablock_test.go +++ b/process/block/metablock_test.go @@ -20,7 +20,7 @@ import ( "github.com/stretchr/testify/assert" ) -func CreateMockMetaArguments() blproc.ArgMetaProcessor { +func createMockMetaArguments() blproc.ArgMetaProcessor { mdp := initMetaDataPool() shardCoordinator := mock.NewOneShardCoordinatorMock() arguments := blproc.ArgMetaProcessor{ @@ -147,7 +147,7 @@ func setLastNotarizedHdr( func TestNewMetaProcessor_NilAccountsAdapterShouldErr(t *testing.T) { t.Parallel() - arguments := CreateMockMetaArguments() + arguments := createMockMetaArguments() arguments.Accounts = nil be, err := blproc.NewMetaProcessor(arguments) @@ -158,7 +158,7 @@ func TestNewMetaProcessor_NilAccountsAdapterShouldErr(t *testing.T) { func TestNewMetaProcessor_NilDataPoolShouldErr(t *testing.T) { t.Parallel() - arguments := CreateMockMetaArguments() + arguments := createMockMetaArguments() arguments.DataPool = nil be, err := blproc.NewMetaProcessor(arguments) @@ -169,7 +169,7 @@ func TestNewMetaProcessor_NilDataPoolShouldErr(t *testing.T) { func 
TestNewMetaProcessor_NilForkDetectorShouldErr(t *testing.T) { t.Parallel() - arguments := CreateMockMetaArguments() + arguments := createMockMetaArguments() arguments.ForkDetector = nil be, err := blproc.NewMetaProcessor(arguments) @@ -180,7 +180,7 @@ func TestNewMetaProcessor_NilForkDetectorShouldErr(t *testing.T) { func TestNewMetaProcessor_NilShardCoordinatorShouldErr(t *testing.T) { t.Parallel() - arguments := CreateMockMetaArguments() + arguments := createMockMetaArguments() arguments.ShardCoordinator = nil be, err := blproc.NewMetaProcessor(arguments) @@ -191,7 +191,7 @@ func TestNewMetaProcessor_NilShardCoordinatorShouldErr(t *testing.T) { func TestNewMetaProcessor_NilHasherShouldErr(t *testing.T) { t.Parallel() - arguments := CreateMockMetaArguments() + arguments := createMockMetaArguments() arguments.Hasher = nil be, err := blproc.NewMetaProcessor(arguments) @@ -202,7 +202,7 @@ func TestNewMetaProcessor_NilHasherShouldErr(t *testing.T) { func TestNewMetaProcessor_NilMarshalizerShouldErr(t *testing.T) { t.Parallel() - arguments := CreateMockMetaArguments() + arguments := createMockMetaArguments() arguments.Marshalizer = nil be, err := blproc.NewMetaProcessor(arguments) @@ -213,7 +213,7 @@ func TestNewMetaProcessor_NilMarshalizerShouldErr(t *testing.T) { func TestNewMetaProcessor_NilChainStorerShouldErr(t *testing.T) { t.Parallel() - arguments := CreateMockMetaArguments() + arguments := createMockMetaArguments() arguments.Store = nil be, err := blproc.NewMetaProcessor(arguments) @@ -224,7 +224,7 @@ func TestNewMetaProcessor_NilChainStorerShouldErr(t *testing.T) { func TestNewMetaProcessor_NilRequestHeaderHandlerShouldErr(t *testing.T) { t.Parallel() - arguments := CreateMockMetaArguments() + arguments := createMockMetaArguments() arguments.RequestHandler = nil be, err := blproc.NewMetaProcessor(arguments) @@ -235,7 +235,7 @@ func TestNewMetaProcessor_NilRequestHeaderHandlerShouldErr(t *testing.T) { func TestNewMetaProcessor_OkValsShouldWork(t *testing.T) { t.Parallel() - arguments := CreateMockMetaArguments() + arguments := createMockMetaArguments() mp, err := blproc.NewMetaProcessor(arguments) assert.Nil(t, err) @@ -247,7 +247,7 @@ func TestNewMetaProcessor_OkValsShouldWork(t *testing.T) { func TestMetaProcessor_ProcessBlockWithNilBlockchainShouldErr(t *testing.T) { t.Parallel() - arguments := CreateMockMetaArguments() + arguments := createMockMetaArguments() mp, _ := blproc.NewMetaProcessor(arguments) blk := &block.MetaBlockBody{} @@ -258,7 +258,7 @@ func TestMetaProcessor_ProcessBlockWithNilBlockchainShouldErr(t *testing.T) { func TestMetaProcessor_ProcessBlockWithNilHeaderShouldErr(t *testing.T) { t.Parallel() - arguments := CreateMockMetaArguments() + arguments := createMockMetaArguments() mp, _ := blproc.NewMetaProcessor(arguments) blk := &block.MetaBlockBody{} @@ -270,7 +270,7 @@ func TestMetaProcessor_ProcessBlockWithNilHeaderShouldErr(t *testing.T) { func TestMetaProcessor_ProcessBlockWithNilBlockBodyShouldErr(t *testing.T) { t.Parallel() - arguments := CreateMockMetaArguments() + arguments := createMockMetaArguments() mp, _ := blproc.NewMetaProcessor(arguments) err := mp.ProcessBlock(&blockchain.MetaChain{}, &block.MetaBlock{}, nil, haveTime) @@ -280,7 +280,7 @@ func TestMetaProcessor_ProcessBlockWithNilBlockBodyShouldErr(t *testing.T) { func TestMetaProcessor_ProcessBlockWithNilHaveTimeFuncShouldErr(t *testing.T) { t.Parallel() - arguments := CreateMockMetaArguments() + arguments := createMockMetaArguments() mp, _ := blproc.NewMetaProcessor(arguments) blk := 
&block.MetaBlockBody{} @@ -303,7 +303,7 @@ func TestMetaProcessor_ProcessWithDirtyAccountShouldErr(t *testing.T) { RootHash: []byte("roothash"), } body := &block.MetaBlockBody{} - arguments := CreateMockMetaArguments() + arguments := createMockMetaArguments() arguments.Accounts = &mock.AccountsStub{ JournalLenCalled: journalLen, RevertToSnapshotCalled: revToSnapshot, @@ -319,7 +319,7 @@ func TestMetaProcessor_ProcessWithDirtyAccountShouldErr(t *testing.T) { func TestMetaProcessor_ProcessWithHeaderNotFirstShouldErr(t *testing.T) { t.Parallel() - arguments := CreateMockMetaArguments() + arguments := createMockMetaArguments() mp, _ := blproc.NewMetaProcessor(arguments) blkc := &blockchain.MetaChain{} @@ -334,7 +334,7 @@ func TestMetaProcessor_ProcessWithHeaderNotFirstShouldErr(t *testing.T) { func TestMetaProcessor_ProcessWithHeaderNotCorrectNonceShouldErr(t *testing.T) { t.Parallel() - arguments := CreateMockMetaArguments() + arguments := createMockMetaArguments() mp, _ := blproc.NewMetaProcessor(arguments) blkc := &blockchain.MetaChain{ CurrentBlock: &block.MetaBlock{ @@ -355,7 +355,7 @@ func TestMetaProcessor_ProcessWithHeaderNotCorrectNonceShouldErr(t *testing.T) { func TestMetaProcessor_ProcessWithHeaderNotCorrectPrevHashShouldErr(t *testing.T) { t.Parallel() - arguments := CreateMockMetaArguments() + arguments := createMockMetaArguments() mp, _ := blproc.NewMetaProcessor(arguments) blkc := &blockchain.MetaChain{ CurrentBlock: &block.MetaBlock{ @@ -395,7 +395,7 @@ func TestMetaProcessor_ProcessBlockWithErrOnVerifyStateRootCallShouldRevertState rootHashCalled := func() ([]byte, error) { return []byte("rootHashX"), nil } - arguments := CreateMockMetaArguments() + arguments := createMockMetaArguments() arguments.Accounts = &mock.AccountsStub{ JournalLenCalled: journalLen, RevertToSnapshotCalled: revertToSnapshot, @@ -421,7 +421,7 @@ func TestMetaProcessor_ProcessBlockWithErrOnVerifyStateRootCallShouldRevertState func TestMetaProcessor_ProcessBlockHeaderShouldPass(t *testing.T) { t.Parallel() - arguments := CreateMockMetaArguments() + arguments := createMockMetaArguments() arguments.Accounts = &mock.AccountsStub{ RevertToSnapshotCalled: func(snapshot int) error { return nil @@ -469,7 +469,7 @@ func TestMetaProcessor_RequestFinalMissingHeaderShouldPass(t *testing.T) { accounts.RevertToSnapshotCalled = func(snapshot int) error { return nil } - arguments := CreateMockMetaArguments() + arguments := createMockMetaArguments() arguments.ShardCoordinator = mock.NewMultiShardsCoordinatorMock(3) arguments.StartHeaders = createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)) arguments.DataPool = mdp @@ -497,7 +497,7 @@ func TestMetaProcessor_RequestFinalMissingHeaderShouldPass(t *testing.T) { func TestMetaProcessor_CommitBlockNilBlockchainShouldErr(t *testing.T) { t.Parallel() - arguments := CreateMockMetaArguments() + arguments := createMockMetaArguments() arguments.Accounts = &mock.AccountsStub{ RevertToSnapshotCalled: func(snapshot int) error { return nil @@ -529,7 +529,7 @@ func TestMetaProcessor_CommitBlockMarshalizerFailForHeaderShouldErr(t *testing.T return []byte("obj"), nil }, } - arguments := CreateMockMetaArguments() + arguments := createMockMetaArguments() arguments.Accounts = accounts arguments.Marshalizer = marshalizer mp, _ := blproc.NewMetaProcessor(arguments) @@ -559,7 +559,7 @@ func TestMetaProcessor_CommitBlockStorageFailsForHeaderShouldErr(t *testing.T) { store := initStore() store.AddStorer(dataRetriever.MetaBlockUnit, hdrUnit) - arguments := CreateMockMetaArguments() + 
arguments := createMockMetaArguments() arguments.Accounts = accounts arguments.Store = store arguments.ForkDetector = &mock.ForkDetectorMock{ @@ -593,7 +593,7 @@ func TestMetaProcessor_CommitBlockNilNoncesDataPoolShouldErr(t *testing.T) { body := &block.MetaBlockBody{} store := initStore() - arguments := CreateMockMetaArguments() + arguments := createMockMetaArguments() arguments.Accounts = accounts arguments.Store = store arguments.DataPool = mdp @@ -623,7 +623,7 @@ func TestMetaProcessor_CommitBlockNoTxInPoolShouldErr(t *testing.T) { hasher := &mock.HasherStub{} store := initStore() - arguments := CreateMockMetaArguments() + arguments := createMockMetaArguments() arguments.DataPool = mdp arguments.Accounts = accounts arguments.ForkDetector = fd @@ -682,7 +682,7 @@ func TestMetaProcessor_CommitBlockOkValsShouldWork(t *testing.T) { store := initStore() store.AddStorer(dataRetriever.BlockHeaderUnit, blockHeaderUnit) - arguments := CreateMockMetaArguments() + arguments := createMockMetaArguments() arguments.DataPool = mdp arguments.Accounts = accounts arguments.ForkDetector = fd @@ -724,7 +724,7 @@ func TestBlockProc_RequestTransactionFromNetwork(t *testing.T) { mdp := initMetaDataPool() - arguments := CreateMockMetaArguments() + arguments := createMockMetaArguments() arguments.DataPool = mdp arguments.Store = initStore() mp, _ := blproc.NewMetaProcessor(arguments) @@ -752,7 +752,7 @@ func TestBlockProc_RequestTransactionFromNetwork(t *testing.T) { func TestMetaProcessor_RemoveBlockInfoFromPoolShouldErrNilMetaBlockHeader(t *testing.T) { t.Parallel() - arguments := CreateMockMetaArguments() + arguments := createMockMetaArguments() arguments.DataPool = initMetaDataPool() arguments.Store = initStore() mp, _ := blproc.NewMetaProcessor(arguments) @@ -765,7 +765,7 @@ func TestMetaProcessor_RemoveBlockInfoFromPoolShouldErrNilMetaBlockHeader(t *tes func TestMetaProcessor_RemoveBlockInfoFromPoolShouldWork(t *testing.T) { t.Parallel() - arguments := CreateMockMetaArguments() + arguments := createMockMetaArguments() arguments.DataPool = initMetaDataPool() arguments.Store = initStore() mp, _ := blproc.NewMetaProcessor(arguments) @@ -778,7 +778,7 @@ func TestMetaProcessor_RemoveBlockInfoFromPoolShouldWork(t *testing.T) { func TestMetaProcessor_CreateBlockHeaderShouldNotReturnNilWhenCreateShardInfoFail(t *testing.T) { t.Parallel() - arguments := CreateMockMetaArguments() + arguments := createMockMetaArguments() arguments.Accounts = &mock.AccountsStub{ JournalLenCalled: func() int { return 1 @@ -797,7 +797,7 @@ func TestMetaProcessor_CreateBlockHeaderShouldNotReturnNilWhenCreateShardInfoFai func TestMetaProcessor_CreateBlockHeaderShouldWork(t *testing.T) { t.Parallel() - arguments := CreateMockMetaArguments() + arguments := createMockMetaArguments() arguments.Accounts = &mock.AccountsStub{ JournalLenCalled: func() int { return 0 @@ -826,7 +826,7 @@ func TestMetaProcessor_CommitBlockShouldRevertAccountStateWhenErr(t *testing.T) return nil } - arguments := CreateMockMetaArguments() + arguments := createMockMetaArguments() arguments.Accounts = &mock.AccountsStub{ RevertToSnapshotCalled: revToSnapshot, } @@ -842,7 +842,7 @@ func TestMetaProcessor_CommitBlockShouldRevertAccountStateWhenErr(t *testing.T) func TestMetaProcessor_MarshalizedDataToBroadcastShouldWork(t *testing.T) { t.Parallel() - arguments := CreateMockMetaArguments() + arguments := createMockMetaArguments() arguments.Store = initStore() mp, _ := blproc.NewMetaProcessor(arguments) @@ -858,7 +858,7 @@ func 
TestMetaProcessor_ReceivedHeaderShouldEraseRequested(t *testing.T) { t.Parallel() pool := mock.NewMetaPoolsHolderFake() - arguments := CreateMockMetaArguments() + arguments := createMockMetaArguments() arguments.DataPool = pool arguments.Store = initStore() mp, _ := blproc.NewMetaProcessor(arguments) @@ -931,7 +931,7 @@ func TestMetaProcessor_CreateShardInfoShouldWorkNoHdrAddedNotValid(t *testing.T) MiniBlockHeaders: miniBlockHeaders3}) noOfShards := uint32(5) - arguments := CreateMockMetaArguments() + arguments := createMockMetaArguments() arguments.Accounts = &mock.AccountsStub{ RevertToSnapshotCalled: func(snapshot int) error { assert.Fail(t, "revert should have not been called") @@ -1000,7 +1000,7 @@ func TestMetaProcessor_CreateShardInfoShouldWorkNoHdrAddedNotFinal(t *testing.T) miniBlockHeaders3 = append(miniBlockHeaders3, miniBlockHeader1) noOfShards := uint32(5) - arguments := CreateMockMetaArguments() + arguments := createMockMetaArguments() arguments.Accounts = &mock.AccountsStub{ RevertToSnapshotCalled: func(snapshot int) error { assert.Fail(t, "revert should have not been called") @@ -1107,7 +1107,7 @@ func TestMetaProcessor_CreateShardInfoShouldWorkHdrsAdded(t *testing.T) { miniBlockHeaders3 = append(miniBlockHeaders3, miniBlockHeader1) noOfShards := uint32(5) - arguments := CreateMockMetaArguments() + arguments := createMockMetaArguments() arguments.Accounts = &mock.AccountsStub{ RevertToSnapshotCalled: func(snapshot int) error { assert.Fail(t, "revert should have not been called") @@ -1263,7 +1263,7 @@ func TestMetaProcessor_CreateShardInfoEmptyBlockHDRRoundTooHigh(t *testing.T) { miniBlockHeaders3 = append(miniBlockHeaders3, miniBlockHeader1) noOfShards := uint32(5) - arguments := CreateMockMetaArguments() + arguments := createMockMetaArguments() arguments.Accounts = &mock.AccountsStub{ RevertToSnapshotCalled: func(snapshot int) error { assert.Fail(t, "revert should have not been called") @@ -1388,7 +1388,7 @@ func TestMetaProcessor_CreateShardInfoEmptyBlockHDRRoundTooHigh(t *testing.T) { func TestMetaProcessor_RestoreBlockIntoPoolsShouldErrNilMetaBlockHeader(t *testing.T) { t.Parallel() - arguments := CreateMockMetaArguments() + arguments := createMockMetaArguments() arguments.Store = initStore() mp, _ := blproc.NewMetaProcessor(arguments) @@ -1420,7 +1420,7 @@ func TestMetaProcessor_RestoreBlockIntoPoolsShouldWork(t *testing.T) { }, } - arguments := CreateMockMetaArguments() + arguments := createMockMetaArguments() arguments.DataPool = pool arguments.Store = store mp, _ := blproc.NewMetaProcessor(arguments) @@ -1439,7 +1439,7 @@ func TestMetaProcessor_CreateLastNotarizedHdrs(t *testing.T) { pool := mock.NewMetaPoolsHolderFake() noOfShards := uint32(5) - arguments := CreateMockMetaArguments() + arguments := createMockMetaArguments() arguments.Accounts = &mock.AccountsStub{ RevertToSnapshotCalled: func(snapshot int) error { assert.Fail(t, "revert should have not been called") @@ -1524,7 +1524,7 @@ func TestMetaProcessor_CheckShardHeadersValidity(t *testing.T) { pool := mock.NewMetaPoolsHolderFake() noOfShards := uint32(5) - arguments := CreateMockMetaArguments() + arguments := createMockMetaArguments() arguments.Accounts = &mock.AccountsStub{ RevertToSnapshotCalled: func(snapshot int) error { assert.Fail(t, "revert should have not been called") @@ -1610,7 +1610,7 @@ func TestMetaProcessor_CheckShardHeadersValidityWrongNonceFromLastNoted(t *testi pool := mock.NewMetaPoolsHolderFake() noOfShards := uint32(5) - arguments := CreateMockMetaArguments() + arguments := 
createMockMetaArguments() arguments.Accounts = &mock.AccountsStub{ RevertToSnapshotCalled: func(snapshot int) error { assert.Fail(t, "revert should have not been called") @@ -1659,7 +1659,7 @@ func TestMetaProcessor_CheckShardHeadersValidityRoundZeroLastNoted(t *testing.T) pool := mock.NewMetaPoolsHolderFake() noOfShards := uint32(5) - arguments := CreateMockMetaArguments() + arguments := createMockMetaArguments() arguments.Accounts = &mock.AccountsStub{ RevertToSnapshotCalled: func(snapshot int) error { assert.Fail(t, "revert should have not been called") @@ -1713,7 +1713,7 @@ func TestMetaProcessor_CheckShardHeadersFinality(t *testing.T) { pool := mock.NewMetaPoolsHolderFake() noOfShards := uint32(5) - arguments := CreateMockMetaArguments() + arguments := createMockMetaArguments() arguments.Accounts = &mock.AccountsStub{ RevertToSnapshotCalled: func(snapshot int) error { assert.Fail(t, "revert should have not been called") @@ -1816,7 +1816,7 @@ func TestMetaProcessor_IsHdrConstructionValid(t *testing.T) { pool := mock.NewMetaPoolsHolderFake() noOfShards := uint32(5) - arguments := CreateMockMetaArguments() + arguments := createMockMetaArguments() arguments.Accounts = &mock.AccountsStub{ RevertToSnapshotCalled: func(snapshot int) error { assert.Fail(t, "revert should have not been called") @@ -1915,7 +1915,7 @@ func TestMetaProcessor_IsShardHeaderValidFinal(t *testing.T) { pool := mock.NewMetaPoolsHolderFake() noOfShards := uint32(5) - arguments := CreateMockMetaArguments() + arguments := createMockMetaArguments() arguments.Accounts = &mock.AccountsStub{ RevertToSnapshotCalled: func(snapshot int) error { assert.Fail(t, "revert should have not been called") @@ -2026,7 +2026,7 @@ func TestMetaProcessor_DecodeBlockBody(t *testing.T) { t.Parallel() marshalizerMock := &mock.MarshalizerMock{} - arguments := CreateMockMetaArguments() + arguments := createMockMetaArguments() mp, _ := blproc.NewMetaProcessor(arguments) body := &block.MetaBlockBody{} message, err := marshalizerMock.Marshal(body) @@ -2043,7 +2043,7 @@ func TestMetaProcessor_DecodeBlockHeader(t *testing.T) { t.Parallel() marshalizerMock := &mock.MarshalizerMock{} - arguments := CreateMockMetaArguments() + arguments := createMockMetaArguments() mp, _ := blproc.NewMetaProcessor(arguments) hdr := &block.MetaBlock{} hdr.Nonce = 1 @@ -2066,7 +2066,7 @@ func TestMetaProcessor_DecodeBlockHeader(t *testing.T) { func TestMetaProcessor_UpdateShardsHeadersNonce_ShouldWork(t *testing.T) { t.Parallel() - arguments := CreateMockMetaArguments() + arguments := createMockMetaArguments() mp, _ := blproc.NewMetaProcessor(arguments) numberOfShards := uint32(4) diff --git a/process/block/shardblock.go b/process/block/shardblock.go index b5e3c76b3f8..c2a1b46368b 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -42,35 +42,21 @@ type hdrForBlock struct { // shardProcessor implements shardProcessor interface and actually it tries to execute block type shardProcessor struct { *baseProcessor - dataPool dataRetriever.PoolsHolder - metaBlockFinality int - - chRcvAllMetaHdrs chan bool - hdrsForCurrBlock hdrForBlock - + dataPool dataRetriever.PoolsHolder + metaBlockFinality int + chRcvAllMetaHdrs chan bool + hdrsForCurrBlock hdrForBlock processedMiniBlocks map[string]map[string]struct{} mutProcessedMiniBlocks sync.RWMutex - - core serviceContainer.Core - txCoordinator process.TransactionCoordinator - txCounter *transactionCounter - - txsPoolsCleaner process.PoolsCleaner + core serviceContainer.Core + txCoordinator 
process.TransactionCoordinator + txCounter *transactionCounter + txsPoolsCleaner process.PoolsCleaner } // NewShardProcessor creates a new shardProcessor object func NewShardProcessor(arguments ArgShardProcessor) (*shardProcessor, error) { - - err := checkProcessorNilParameters( - arguments.Accounts, - arguments.ForkDetector, - arguments.Hasher, - arguments.Marshalizer, - arguments.Store, - arguments.ShardCoordinator, - arguments.NodesCoordinator, - arguments.SpecialAddressHandler, - arguments.Uint64Converter) + err := checkProcessorNilParameters(arguments.ArgBaseProcessor) if err != nil { return nil, err } @@ -78,9 +64,6 @@ func NewShardProcessor(arguments ArgShardProcessor) (*shardProcessor, error) { if arguments.DataPool == nil || arguments.DataPool.IsInterfaceNil() { return nil, process.ErrNilDataPoolHolder } - if arguments.RequestHandler == nil || arguments.RequestHandler.IsInterfaceNil() { - return nil, process.ErrNilRequestHandler - } if arguments.TxCoordinator == nil || arguments.TxCoordinator.IsInterfaceNil() { return nil, process.ErrNilTransactionCoordinator } From fe14867a3aa3f78ced4d6de8cab79f51f09bd8e1 Mon Sep 17 00:00:00 2001 From: Sebastian Marian Date: Mon, 7 Oct 2019 12:23:15 +0300 Subject: [PATCH 208/234] * Started implementation of local shard headers cache --- cmd/node/config/economics.toml | 11 +- cmd/node/config/p2p.toml | 2 +- process/block/baseProcess.go | 29 +++++ process/block/export_test.go | 2 +- process/block/metablock.go | 186 ++++++++++++++++----------------- process/block/shardblock.go | 62 ++++------- 6 files changed, 147 insertions(+), 145 deletions(-) diff --git a/cmd/node/config/economics.toml b/cmd/node/config/economics.toml index e90b4bce29f..d22149ba812 100644 --- a/cmd/node/config/economics.toml +++ b/cmd/node/config/economics.toml @@ -4,15 +4,12 @@ BurnAddress = "deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef" [RewardsSettings] - RewardsValue = 1000 + RewardsValue = 10000000000000000000 CommunityPercentage = 0.10 LeaderPercentage = 0.50 BurnPercentage = 0.40 [FeeSettings] - MinGasPrice = 0 - MinGasLimitForTx = 5 - MinTxFee = 0 - - - + MinGasPrice = 1000000000000000 + MinGasLimitForTx = 1000 + MinTxFee = 1000000000000000000 diff --git a/cmd/node/config/p2p.toml b/cmd/node/config/p2p.toml index 9c8f144138a..b60b68bf405 100644 --- a/cmd/node/config/p2p.toml +++ b/cmd/node/config/p2p.toml @@ -44,4 +44,4 @@ # #If the initial peers list is left empty, the node will not try to connect to other peers during initial bootstrap #phase but will accept connections and will do the network discovery if another peer connects to it - InitialPeerList = ["/ip4/127.0.0.1/tcp/10000/p2p/16Uiu2HAmAzokH1ozUF52Vy3RKqRfCMr9ZdNDkUQFEkXRs9DqvmKf"] + InitialPeerList = ["/ip4/127.0.0.1/tcp/10000/p2p/16Uiu2HAmAPaxcnVCfC7F59LTbBU2UwWNWfJHmuxwQEdDDyjmaSW4"] diff --git a/process/block/baseProcess.go b/process/block/baseProcess.go index 8413b239092..c7870919a71 100644 --- a/process/block/baseProcess.go +++ b/process/block/baseProcess.go @@ -27,6 +27,24 @@ type hashAndHdr struct { hash []byte } +type nonceAndHashInfo struct { + hash []byte + nonce uint64 +} + +type hdrInfo struct { + usedInBlock bool + hdr data.HeaderHandler +} + +type hdrForBlock struct { + missingHdrs uint32 + missingFinalityAttestingHdrs uint32 + highestHdrNonce map[uint32]uint64 + mutHdrsForBlock sync.RWMutex + hdrHashAndInfo map[string]*hdrInfo +} + type mapShardHeaders map[uint32][]data.HeaderHandler type baseProcessor struct { @@ -41,6 +59,8 @@ type baseProcessor struct { uint64Converter 
typeConverters.Uint64ByteSliceConverter blockSizeThrottler process.BlockSizeThrottler + hdrsForCurrBlock hdrForBlock + mutNotarizedHdrs sync.RWMutex notarizedHdrs mapShardHeaders @@ -534,3 +554,12 @@ func checkProcessorNilParameters( return nil } + +func (bp *baseProcessor) createBlockStarted() { + bp.hdrsForCurrBlock.mutHdrsForBlock.Lock() + bp.hdrsForCurrBlock.missingHdrs = 0 + bp.hdrsForCurrBlock.missingFinalityAttestingHdrs = 0 + bp.hdrsForCurrBlock.hdrHashAndInfo = make(map[string]*hdrInfo) + bp.hdrsForCurrBlock.highestHdrNonce = make(map[uint32]uint64) + bp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() +} diff --git a/process/block/export_test.go b/process/block/export_test.go index 1e4b5b3db2e..df3abe95936 100644 --- a/process/block/export_test.go +++ b/process/block/export_test.go @@ -181,7 +181,7 @@ func (bp *baseProcessor) SetHasher(hasher hashing.Hasher) { func (mp *metaProcessor) SetNextKValidity(val uint32) { mp.mutRequestedShardHdrsHashes.Lock() - mp.nextKValidity = val + mp.shardBlockFinality = val mp.mutRequestedShardHdrsHashes.Unlock() } diff --git a/process/block/metablock.go b/process/block/metablock.go index 8a10f9a8d34..5f4bf43c15f 100644 --- a/process/block/metablock.go +++ b/process/block/metablock.go @@ -29,14 +29,9 @@ type metaProcessor struct { core serviceContainer.Core dataPool dataRetriever.MetaPoolsHolder - currHighestShardHdrsNonces map[uint32]uint64 - requestedShardHdrsHashes map[string]bool - allNeededShardHdrsFound bool - mutRequestedShardHdrsHashes sync.RWMutex - shardsHeadersNonce *sync.Map - nextKValidity uint32 + shardBlockFinality uint32 chRcvAllHdrs chan bool @@ -117,15 +112,15 @@ func NewMetaProcessor( headersCounter: NewHeaderCounter(), } - mp.requestedShardHdrsHashes = make(map[string]bool) + mp.hdrsForCurrBlock.hdrHashAndInfo = make(map[string]*hdrInfo) + mp.hdrsForCurrBlock.highestHdrNonce = make(map[uint32]uint64) headerPool := mp.dataPool.ShardHeaders() headerPool.RegisterHandler(mp.receivedHeader) mp.chRcvAllHdrs = make(chan bool) - mp.nextKValidity = process.ShardBlockFinality - mp.allNeededShardHdrsFound = true + mp.shardBlockFinality = process.ShardBlockFinality mp.shardsHeadersNonce = &sync.Map{} @@ -174,22 +169,31 @@ func (mp *metaProcessor) ProcessBlock( mp.headersCounter.getNumShardMBHeadersTotalProcessed(), ) - requestedShardHdrs, requestedFinalShardHdrs := mp.requestShardHeaders(header) + mp.createBlockStarted() + requestedShardHdrs, requestedFinalityAttestingShardHdrs := mp.requestShardHeaders(header) if haveTime() < 0 { return process.ErrTimeIsOut } - if requestedShardHdrs > 0 || requestedFinalShardHdrs > 0 { - log.Info(fmt.Sprintf("requested %d missing shard headers and %d final shard headers\n", requestedShardHdrs, requestedFinalShardHdrs)) + haveMissingShardHeaders := requestedShardHdrs > 0 || requestedFinalityAttestingShardHdrs > 0 + if haveMissingShardHeaders { + log.Info(fmt.Sprintf("requested %d missing shard headers and %d finality attesting shard headers\n", + requestedShardHdrs, + requestedFinalityAttestingShardHdrs)) + err = mp.waitForBlockHeaders(haveTime()) - mp.mutRequestedShardHdrsHashes.Lock() - mp.allNeededShardHdrsFound = true - unreceivedShardHdrs := len(mp.requestedShardHdrsHashes) - mp.mutRequestedShardHdrsHashes.Unlock() + + mp.hdrsForCurrBlock.mutHdrsForBlock.Lock() + missingShardHdrs := mp.hdrsForCurrBlock.missingHdrs + mp.hdrsForCurrBlock.missingHdrs = 0 + mp.hdrsForCurrBlock.missingFinalityAttestingHdrs = 0 + mp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() + if requestedShardHdrs > 0 { - 
log.Info(fmt.Sprintf("received %d missing shard headers\n", int(requestedShardHdrs)-unreceivedShardHdrs)) + log.Info(fmt.Sprintf("received %d missing shard headers\n", requestedShardHdrs-missingShardHdrs)) } + if err != nil { return err } @@ -377,6 +381,7 @@ func (mp *metaProcessor) RestoreBlockIntoPools(headerHandler data.HeaderHandler, // CreateBlockBody creates block body of metachain func (mp *metaProcessor) CreateBlockBody(round uint64, haveTime func() bool) (data.BodyHandler, error) { log.Debug(fmt.Sprintf("started creating block body in round %d\n", round)) + mp.createBlockStarted() mp.blockSizeThrottler.ComputeMaxItems() return &block.MetaBlockBody{}, nil } @@ -538,7 +543,7 @@ func (mp *metaProcessor) CommitBlock( mp.forkDetector.GetHighestFinalBlockNonce(), mp.shardCoordinator.SelfId())) - hdrsToAttestPreviousFinal := mp.nextKValidity + 1 + hdrsToAttestPreviousFinal := mp.shardBlockFinality + 1 mp.removeNotarizedHdrsBehindPreviousFinal(hdrsToAttestPreviousFinal) err = chainHandler.SetCurrentBlockBody(body) @@ -739,7 +744,7 @@ func (mp *metaProcessor) checkShardHeadersFinality(header *block.MetaBlock, high nextBlocksVerified := uint32(0) shId := lastVerifiedHdr.GetShardID() for i := 0; i < len(sortedHdrPerShard[shId]); i++ { - if nextBlocksVerified >= mp.nextKValidity { + if nextBlocksVerified >= mp.shardBlockFinality { break } @@ -756,7 +761,7 @@ func (mp *metaProcessor) checkShardHeadersFinality(header *block.MetaBlock, high } } - if nextBlocksVerified < mp.nextKValidity { + if nextBlocksVerified < mp.shardBlockFinality { go mp.onRequestHeaderHandlerByNonce(lastVerifiedHdr.GetShardID(), lastVerifiedHdr.GetNonce()+1) return process.ErrHeaderNotFinal } @@ -837,7 +842,7 @@ func (mp *metaProcessor) isShardHeaderValidFinal(currHdr *block.Header, lastHdr nextBlocksVerified := uint32(0) hdrIds := make([]uint32, 0) for i := 0; i < len(sortedShardHdrs); i++ { - if nextBlocksVerified >= mp.nextKValidity { + if nextBlocksVerified >= mp.shardBlockFinality { return true, hdrIds } @@ -855,7 +860,7 @@ func (mp *metaProcessor) isShardHeaderValidFinal(currHdr *block.Header, lastHdr } } - if nextBlocksVerified >= mp.nextKValidity { + if nextBlocksVerified >= mp.shardBlockFinality { return true, hdrIds } @@ -864,86 +869,90 @@ func (mp *metaProcessor) isShardHeaderValidFinal(currHdr *block.Header, lastHdr // receivedHeader is a call back function which is called when a new header // is added in the headers pool -func (mp *metaProcessor) receivedHeader(headerHash []byte) { - shardHdrsCache := mp.dataPool.ShardHeaders() - if shardHdrsCache == nil { +func (mp *metaProcessor) receivedHeader(shardBlockHash []byte) { + shardBlockPool := mp.dataPool.ShardHeaders() + if shardBlockPool == nil { return } - shardHdrsNoncesCache := mp.dataPool.HeadersNonces() - if shardHdrsNoncesCache == nil && mp.nextKValidity > 0 { - return - } - - obj, ok := shardHdrsCache.Peek(headerHash) + obj, ok := shardBlockPool.Peek(shardBlockHash) if !ok { return } - header, ok := obj.(data.HeaderHandler) + shardBlock, ok := obj.(*block.Header) if !ok { return } - log.Debug(fmt.Sprintf("received header with hash %s and nonce %d from network\n", - core.ToB64(headerHash), - header.GetNonce())) + log.Debug(fmt.Sprintf("received shard block with hash %s and nonce %d from network\n", + core.ToB64(shardBlockHash), + shardBlock.Nonce)) - mp.mutRequestedShardHdrsHashes.Lock() + mp.hdrsForCurrBlock.mutHdrsForBlock.Lock() - if !mp.allNeededShardHdrsFound { - if mp.requestedShardHdrsHashes[string(headerHash)] { - 
delete(mp.requestedShardHdrsHashes, string(headerHash)) + haveMissingShardHeaders := mp.hdrsForCurrBlock.missingHdrs > 0 || mp.hdrsForCurrBlock.missingFinalityAttestingHdrs > 0 + if haveMissingShardHeaders { + hdrInfoForHash := mp.hdrsForCurrBlock.hdrHashAndInfo[string(shardBlockHash)] + receivedMissingShardHeader := hdrInfoForHash != nil && (hdrInfoForHash.hdr == nil || hdrInfoForHash.hdr.IsInterfaceNil()) + if receivedMissingShardHeader { + hdrInfoForHash.hdr = shardBlock + mp.hdrsForCurrBlock.missingHdrs-- - if header.GetNonce() > mp.currHighestShardHdrsNonces[header.GetShardID()] { - mp.currHighestShardHdrsNonces[header.GetShardID()] = header.GetNonce() + if shardBlock.Nonce > mp.hdrsForCurrBlock.highestHdrNonce[shardBlock.ShardId] { + mp.hdrsForCurrBlock.highestHdrNonce[shardBlock.ShardId] = shardBlock.Nonce } } - lenReqShardHdrsHashes := len(mp.requestedShardHdrsHashes) - areFinalAttestingHdrsInCache := false - if lenReqShardHdrsHashes == 0 { - requestedBlockHeaders := mp.requestFinalMissingHeaders() - if requestedBlockHeaders == 0 { - log.Info(fmt.Sprintf("received all final shard headers\n")) - areFinalAttestingHdrsInCache = true + if mp.hdrsForCurrBlock.missingHdrs == 0 { + missingFinalityAttestingShardHdrs := mp.hdrsForCurrBlock.missingFinalityAttestingHdrs + mp.hdrsForCurrBlock.missingFinalityAttestingHdrs = mp.requestMissingFinalityAttestingHeaders() + if mp.hdrsForCurrBlock.missingFinalityAttestingHdrs == 0 { + log.Info(fmt.Sprintf("received %d missing finality attesting shard headers\n", missingFinalityAttestingShardHdrs)) } else { - log.Info(fmt.Sprintf("requested %d missing final shard headers\n", requestedBlockHeaders)) + log.Info(fmt.Sprintf("requested %d missing finality attesting shard headers\n", mp.hdrsForCurrBlock.missingFinalityAttestingHdrs)) } } - mp.allNeededShardHdrsFound = lenReqShardHdrsHashes == 0 && areFinalAttestingHdrsInCache - - mp.mutRequestedShardHdrsHashes.Unlock() + missingShardHdrs := mp.hdrsForCurrBlock.missingHdrs + missingFinalityAttestingShardHdrs := mp.hdrsForCurrBlock.missingFinalityAttestingHdrs + mp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() - if lenReqShardHdrsHashes == 0 && areFinalAttestingHdrsInCache { + allMissingShardHeadersReceived := missingShardHdrs == 0 && missingFinalityAttestingShardHdrs == 0 + if allMissingShardHeadersReceived { mp.chRcvAllHdrs <- true } } else { - mp.mutRequestedShardHdrsHashes.Unlock() + mp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() } } -// requestFinalMissingHeaders requests the headers needed to accept the current selected headers for processing the -// current block. It requests the nextKValidity headers greater than the highest shard header, for each shard, related +// requestMissingFinalityAttestingHeaders requests the headers needed to accept the current selected headers for processing the +// current block. 
It requests the shardBlockFinality headers greater than the highest shard header, for each shard, related // to the block which should be processed -func (mp *metaProcessor) requestFinalMissingHeaders() uint32 { +func (mp *metaProcessor) requestMissingFinalityAttestingHeaders() uint32 { requestedBlockHeaders := uint32(0) for shardId := uint32(0); shardId < mp.shardCoordinator.NumberOfShards(); shardId++ { - for i := mp.currHighestShardHdrsNonces[shardId] + 1; i <= mp.currHighestShardHdrsNonces[shardId]+uint64(mp.nextKValidity); i++ { - if mp.currHighestShardHdrsNonces[shardId] == uint64(0) { + firstFinalityAttestingHeader := mp.hdrsForCurrBlock.highestHdrNonce[shardId] + 1 + lastFinalityAttestingHeader := mp.hdrsForCurrBlock.highestHdrNonce[shardId] + uint64(mp.shardBlockFinality) + for i := firstFinalityAttestingHeader; i <= lastFinalityAttestingHeader; i++ { + if mp.hdrsForCurrBlock.highestHdrNonce[shardId] == uint64(0) { continue } - _, _, err := process.GetShardHeaderFromPoolWithNonce( + shardBlock, shardBlockHash, err := process.GetShardHeaderFromPoolWithNonce( i, shardId, mp.dataPool.ShardHeaders(), mp.dataPool.HeadersNonces()) + if err != nil { requestedBlockHeaders++ go mp.onRequestHeaderHandlerByNonce(shardId, i) + continue } + + mp.hdrsForCurrBlock.hdrHashAndInfo[string(shardBlockHash)] = &hdrInfo{hdr: shardBlock, usedInBlock: false} } } @@ -953,65 +962,56 @@ func (mp *metaProcessor) requestFinalMissingHeaders() uint32 { func (mp *metaProcessor) requestShardHeaders(metaBlock *block.MetaBlock) (uint32, uint32) { _ = process.EmptyChannel(mp.chRcvAllHdrs) - mp.mutRequestedShardHdrsHashes.Lock() - - mp.requestedShardHdrsHashes = make(map[string]bool) - mp.allNeededShardHdrsFound = true - if len(metaBlock.ShardInfo) == 0 { - mp.mutRequestedShardHdrsHashes.Unlock() return 0, 0 } - missingHeaderHashes := mp.computeMissingHeaders(metaBlock) + missingHeaderHashes := mp.computeMissingAndExistingShardHeaders(metaBlock) - requestedBlockHeaders := uint32(0) - for shardId, headerHashes := range missingHeaderHashes { - for _, headerHash := range headerHashes { - requestedBlockHeaders++ - mp.requestedShardHdrsHashes[string(headerHash)] = true - go mp.onRequestHeaderHandler(shardId, headerHash) + mp.hdrsForCurrBlock.mutHdrsForBlock.Lock() + for shardId, shardHeaderHashes := range missingHeaderHashes { + for _, hash := range shardHeaderHashes { + mp.hdrsForCurrBlock.hdrHashAndInfo[string(hash)] = &hdrInfo{hdr: nil, usedInBlock: true} + go mp.onRequestHeaderHandler(shardId, hash) } } - requestedFinalBlockHeaders := uint32(0) - if requestedBlockHeaders > 0 { - mp.allNeededShardHdrsFound = false - } else { - requestedFinalBlockHeaders = mp.requestFinalMissingHeaders() - if requestedFinalBlockHeaders > 0 { - mp.allNeededShardHdrsFound = false - } + if mp.hdrsForCurrBlock.missingHdrs == 0 { + mp.hdrsForCurrBlock.missingFinalityAttestingHdrs = mp.requestMissingFinalityAttestingHeaders() } - mp.mutRequestedShardHdrsHashes.Unlock() + requestedHdrs := mp.hdrsForCurrBlock.missingHdrs + requestedFinalityAttestingHdrs := mp.hdrsForCurrBlock.missingFinalityAttestingHdrs + mp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() - return requestedBlockHeaders, requestedFinalBlockHeaders + return requestedHdrs, requestedFinalityAttestingHdrs } -func (mp *metaProcessor) computeMissingHeaders(metaBlock *block.MetaBlock) map[uint32][][]byte { - missingHeaders := make(map[uint32][][]byte) - mp.currHighestShardHdrsNonces = make(map[uint32]uint64, mp.shardCoordinator.NumberOfShards()) - for i := uint32(0); i < 
mp.shardCoordinator.NumberOfShards(); i++ { - mp.currHighestShardHdrsNonces[i] = uint64(0) - } +func (mp *metaProcessor) computeMissingAndExistingShardHeaders(metaBlock *block.MetaBlock) map[uint32][][]byte { + missingHeadersHashes := make(map[uint32][][]byte) + mp.hdrsForCurrBlock.mutHdrsForBlock.Lock() for i := 0; i < len(metaBlock.ShardInfo); i++ { shardData := metaBlock.ShardInfo[i] hdr, err := process.GetShardHeaderFromPool( shardData.HeaderHash, mp.dataPool.ShardHeaders()) + if err != nil { - missingHeaders[shardData.ShardId] = append(missingHeaders[shardData.ShardId], shardData.HeaderHash) + missingHeadersHashes[shardData.ShardId] = append(missingHeadersHashes[shardData.ShardId], shardData.HeaderHash) + mp.hdrsForCurrBlock.missingHdrs++ continue } - if hdr.Nonce > mp.currHighestShardHdrsNonces[shardData.ShardId] { - mp.currHighestShardHdrsNonces[shardData.ShardId] = hdr.Nonce + mp.hdrsForCurrBlock.hdrHashAndInfo[string(shardData.HeaderHash)] = &hdrInfo{hdr: hdr, usedInBlock: true} + + if hdr.Nonce > mp.hdrsForCurrBlock.highestHdrNonce[shardData.ShardId] { + mp.hdrsForCurrBlock.highestHdrNonce[shardData.ShardId] = hdr.Nonce } } + mp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() - return missingHeaders + return missingHeadersHashes } func (mp *metaProcessor) checkAndProcessShardMiniBlockHeader( diff --git a/process/block/shardblock.go b/process/block/shardblock.go index b5e3c76b3f8..c8c078ac61b 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -21,24 +21,6 @@ import ( const maxCleanTime = time.Second -type nonceAndHashInfo struct { - hash []byte - nonce uint64 -} - -type hdrInfo struct { - usedInBlock bool - hdr data.HeaderHandler -} - -type hdrForBlock struct { - missingHdrs uint32 - missingFinalityAttestingHdrs uint32 - highestHdrNonce uint64 - mutHdrsForBlock sync.RWMutex - hdrHashAndInfo map[string]*hdrInfo -} - // shardProcessor implements shardProcessor interface and actually it tries to execute block type shardProcessor struct { *baseProcessor @@ -46,7 +28,6 @@ type shardProcessor struct { metaBlockFinality int chRcvAllMetaHdrs chan bool - hdrsForCurrBlock hdrForBlock processedMiniBlocks map[string]map[string]struct{} mutProcessedMiniBlocks sync.RWMutex @@ -129,6 +110,7 @@ func NewShardProcessor(arguments ArgShardProcessor) (*shardProcessor, error) { } sp.hdrsForCurrBlock.hdrHashAndInfo = make(map[string]*hdrInfo) + sp.hdrsForCurrBlock.highestHdrNonce = make(map[uint32]uint64) sp.processedMiniBlocks = make(map[string]map[string]struct{}) metaBlockPool := sp.dataPool.MetaBlocks() @@ -205,6 +187,7 @@ func (sp *shardProcessor) ProcessBlock( return err } + sp.txCoordinator.CreateBlockStarted() sp.createBlockStarted() sp.txCoordinator.RequestBlockTransactions(body) requestedMetaHdrs, requestedFinalityAttestingMetaHdrs := sp.requestMetaHeaders(header) @@ -218,7 +201,8 @@ func (sp *shardProcessor) ProcessBlock( return err } - if requestedMetaHdrs > 0 || requestedFinalityAttestingMetaHdrs > 0 { + haveMissingMetaHeaders := requestedMetaHdrs > 0 || requestedFinalityAttestingMetaHdrs > 0 + if haveMissingMetaHeaders { log.Info(fmt.Sprintf("requested %d missing meta headers and %d finality attesting meta headers\n", requestedMetaHdrs, requestedFinalityAttestingMetaHdrs)) @@ -226,13 +210,13 @@ func (sp *shardProcessor) ProcessBlock( err = sp.waitForMetaHdrHashes(haveTime()) sp.hdrsForCurrBlock.mutHdrsForBlock.Lock() - missingHdrs := sp.hdrsForCurrBlock.missingHdrs + missingMetaHdrs := sp.hdrsForCurrBlock.missingHdrs sp.hdrsForCurrBlock.missingHdrs = 0 
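	// note: missingMetaHdrs captured above keeps the pre-reset value for the log statement below,
	// while both missing-header counters are cleared here so a retried ProcessBlock starts from a
	// clean hdrsForCurrBlock state.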
sp.hdrsForCurrBlock.missingFinalityAttestingHdrs = 0 sp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() if requestedMetaHdrs > 0 { - log.Info(fmt.Sprintf("received %d missing meta headers\n", requestedMetaHdrs-missingHdrs)) + log.Info(fmt.Sprintf("received %d missing meta headers\n", requestedMetaHdrs-missingMetaHdrs)) } if err != nil { @@ -632,6 +616,7 @@ func (sp *shardProcessor) restoreMetaBlockIntoPool(miniBlockHashes map[string]ui // as long as the transactions limit for the block has not been reached and there is still time to add transactions func (sp *shardProcessor) CreateBlockBody(round uint64, haveTime func() bool) (data.BodyHandler, error) { log.Debug(fmt.Sprintf("started creating block body in round %d\n", round)) + sp.txCoordinator.CreateBlockStarted() sp.createBlockStarted() sp.blockSizeThrottler.ComputeMaxItems() @@ -1114,7 +1099,7 @@ func (sp *shardProcessor) receivedMetaBlock(metaBlockHash []byte) { return } - log.Debug(fmt.Sprintf("received metablock with hash %s and nonce %d from network\n", + log.Debug(fmt.Sprintf("received meta block with hash %s and nonce %d from network\n", core.ToB64(metaBlockHash), metaBlock.Nonce)) @@ -1128,8 +1113,8 @@ func (sp *shardProcessor) receivedMetaBlock(metaBlockHash []byte) { hdrInfoForHash.hdr = metaBlock sp.hdrsForCurrBlock.missingHdrs-- - if metaBlock.Nonce > sp.hdrsForCurrBlock.highestHdrNonce { - sp.hdrsForCurrBlock.highestHdrNonce = metaBlock.Nonce + if metaBlock.Nonce > sp.hdrsForCurrBlock.highestHdrNonce[sharding.MetachainShardId] { + sp.hdrsForCurrBlock.highestHdrNonce[sharding.MetachainShardId] = metaBlock.Nonce } } @@ -1175,8 +1160,10 @@ func (sp *shardProcessor) receivedMetaBlock(metaBlockHash []byte) { // which should be processed func (sp *shardProcessor) requestMissingFinalityAttestingHeaders() uint32 { requestedBlockHeaders := uint32(0) - for i := sp.hdrsForCurrBlock.highestHdrNonce + 1; i <= sp.hdrsForCurrBlock.highestHdrNonce+uint64(sp.metaBlockFinality); i++ { - if sp.hdrsForCurrBlock.highestHdrNonce == uint64(0) { + firstFinalityAttestingHeader := sp.hdrsForCurrBlock.highestHdrNonce[sharding.MetachainShardId] + 1 + lastFinalityAttestingHeader := sp.hdrsForCurrBlock.highestHdrNonce[sharding.MetachainShardId] + uint64(sp.metaBlockFinality) + for i := firstFinalityAttestingHeader; i <= lastFinalityAttestingHeader; i++ { + if sp.hdrsForCurrBlock.highestHdrNonce[sharding.MetachainShardId] == uint64(0) { continue } @@ -1197,14 +1184,14 @@ func (sp *shardProcessor) requestMissingFinalityAttestingHeaders() uint32 { return requestedBlockHeaders } -func (sp *shardProcessor) requestMetaHeaders(header *block.Header) (uint32, uint32) { +func (sp *shardProcessor) requestMetaHeaders(shardBlock *block.Header) (uint32, uint32) { _ = process.EmptyChannel(sp.chRcvAllMetaHdrs) - if len(header.MetaBlockHashes) == 0 { + if len(shardBlock.MetaBlockHashes) == 0 { return 0, 0 } - missingHeadersHashes := sp.computeMissingAndExistingMetaHeaders(header) + missingHeadersHashes := sp.computeMissingAndExistingMetaHeaders(shardBlock) sp.hdrsForCurrBlock.mutHdrsForBlock.Lock() for _, hash := range missingHeadersHashes { @@ -1240,8 +1227,8 @@ func (sp *shardProcessor) computeMissingAndExistingMetaHeaders(header *block.Hea sp.hdrsForCurrBlock.hdrHashAndInfo[string(header.MetaBlockHashes[i])] = &hdrInfo{hdr: hdr, usedInBlock: true} - if hdr.Nonce > sp.hdrsForCurrBlock.highestHdrNonce { - sp.hdrsForCurrBlock.highestHdrNonce = hdr.Nonce + if hdr.Nonce > sp.hdrsForCurrBlock.highestHdrNonce[sharding.MetachainShardId] { + 
sp.hdrsForCurrBlock.highestHdrNonce[sharding.MetachainShardId] = hdr.Nonce } } sp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() @@ -1763,17 +1750,6 @@ func (sp *shardProcessor) getMaxMiniBlocksSpaceRemained( return maxMbSpaceRemained } -func (sp *shardProcessor) createBlockStarted() { - sp.txCoordinator.CreateBlockStarted() - - sp.hdrsForCurrBlock.mutHdrsForBlock.Lock() - sp.hdrsForCurrBlock.missingHdrs = 0 - sp.hdrsForCurrBlock.missingFinalityAttestingHdrs = 0 - sp.hdrsForCurrBlock.highestHdrNonce = 0 - sp.hdrsForCurrBlock.hdrHashAndInfo = make(map[string]*hdrInfo) - sp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() -} - //TODO: remove bool parameter and give instead the set to sort func (sp *shardProcessor) sortMetaHeadersForCurrentBlockByNonce(usedInBlock bool) ([]*block.MetaBlock, error) { hdrsForCurrentBlock := make([]*block.MetaBlock, 0) From 5ef23391e00b87f6b1d010d45f344d182cd057e9 Mon Sep 17 00:00:00 2001 From: Radu Chis Date: Mon, 7 Oct 2019 12:39:07 +0300 Subject: [PATCH 209/234] En 4164 heartbeat refactor (#508) * EN-4164: heartbeat refactor * EN-1754: fix failing tests * heartbeatMessageInfo refactor * EN-4164: added a component * EN-4164: some changes * heartbeatStorer proposal * EN-4164: storer * EN-4164: added tests for storer * Update monitor * EN-4164: small fixes * updated heartbeat tests * EN-4164: merge with dev * Updated tests for heartbeat and monitor * Updated tests for heartbeat and monitor edge cases * Fix after review * Fix after review 2 * Updated batchSize for HeartBeatDb to 1 fro 100 * Moved heartbeatMessagInfo_test package from heartbeat to heartbeat_test --- cmd/node/config/config.toml | 10 + cmd/node/factory/structs.go | 18 + config/config.go | 1 + dataRetriever/interface.go | 2 + integrationTests/node/heartbeat_test.go | 34 +- node/heartbeat/errors.go | 24 ++ node/heartbeat/export_test.go | 72 ++++ node/heartbeat/hearbeatMessageInfo.go | 81 +++-- node/heartbeat/hearbeatMessageInfo_test.go | 204 +++++++++--- node/heartbeat/heartbeat.go | 21 +- node/heartbeat/interface.go | 29 ++ node/heartbeat/messageProcessor.go | 86 +++++ node/heartbeat/monitor.go | 264 ++++++++++----- node/heartbeat/monitor_test.go | 361 +++++++++++---------- node/heartbeat/realTimer.go | 20 ++ node/heartbeat/storage/heartbeatStorer.go | 160 +++++++++ node/mock/heartbeatStorerStub.go | 44 +++ node/mock/messageHandlerStub.go | 18 + node/mock/mockTimer.go | 26 ++ node/mock/storerMock.go | 62 ++++ node/mock/storerStub.go | 17 +- node/node.go | 26 +- node/node_test.go | 45 +++ 23 files changed, 1293 insertions(+), 332 deletions(-) create mode 100644 node/heartbeat/export_test.go create mode 100644 node/heartbeat/messageProcessor.go create mode 100644 node/heartbeat/realTimer.go create mode 100644 node/heartbeat/storage/heartbeatStorer.go create mode 100644 node/mock/heartbeatStorerStub.go create mode 100644 node/mock/messageHandlerStub.go create mode 100644 node/mock/mockTimer.go create mode 100644 node/mock/storerMock.go diff --git a/cmd/node/config/config.toml b/cmd/node/config/config.toml index 80160628fa6..83748a5413b 100644 --- a/cmd/node/config/config.toml +++ b/cmd/node/config/config.toml @@ -233,6 +233,16 @@ MinTimeToWaitBetweenBroadcastsInSec = 20 MaxTimeToWaitBetweenBroadcastsInSec = 25 DurationInSecToConsiderUnresponsive = 60 + [Heartbeat.HeartbeatStorage] + [Heartbeat.HeartbeatStorage.Cache] + Size = 100 + Type = "LRU" + [Heartbeat.HeartbeatStorage.DB] + FilePath = "HeartbeatStorage" + Type = "LvlDBSerial" + BatchDelaySeconds = 15 + MaxBatchSize = 1 + MaxOpenFiles = 10 # Consensus type 
which will be used (the current implementation can manage "bn" and "bls") # When consensus type is "bls" the multisig hasher type should be "blake2b" diff --git a/cmd/node/factory/structs.go b/cmd/node/factory/structs.go index a6d23d5dd1c..fff86ee4e11 100644 --- a/cmd/node/factory/structs.go +++ b/cmd/node/factory/structs.go @@ -860,6 +860,14 @@ func createShardDataStoreFromConfig( return nil, err } + heartbeatStorageUnit, err := storageUnit.NewStorageUnitFromConf( + getCacherFromConfig(config.Heartbeat.HeartbeatStorage.Cache), + getDBFromConfig(config.Heartbeat.HeartbeatStorage.DB, uniqueID), + getBloomFromConfig(config.Heartbeat.HeartbeatStorage.Bloom)) + if err != nil { + return nil, err + } + store := dataRetriever.NewChainStorer() store.AddStorer(dataRetriever.TransactionUnit, txUnit) store.AddStorer(dataRetriever.MiniBlockUnit, miniBlockUnit) @@ -871,6 +879,7 @@ func createShardDataStoreFromConfig( store.AddStorer(dataRetriever.MetaHdrNonceHashDataUnit, metaHdrHashNonceUnit) hdrNonceHashDataUnit := dataRetriever.ShardHdrNonceHashDataUnit + dataRetriever.UnitType(shardCoordinator.SelfId()) store.AddStorer(hdrNonceHashDataUnit, shardHdrHashNonceUnit) + store.AddStorer(dataRetriever.HeartbeatUnit, heartbeatStorageUnit) return store, err } @@ -964,6 +973,14 @@ func createMetaChainDataStoreFromConfig( } } + heartbeatStorageUnit, err := storageUnit.NewStorageUnitFromConf( + getCacherFromConfig(config.Heartbeat.HeartbeatStorage.Cache), + getDBFromConfig(config.Heartbeat.HeartbeatStorage.DB, uniqueID), + getBloomFromConfig(config.Heartbeat.HeartbeatStorage.Bloom)) + if err != nil { + return nil, err + } + store := dataRetriever.NewChainStorer() store.AddStorer(dataRetriever.MetaBlockUnit, metaBlockUnit) store.AddStorer(dataRetriever.MetaShardDataUnit, shardDataUnit) @@ -974,6 +991,7 @@ func createMetaChainDataStoreFromConfig( hdrNonceHashDataUnit := dataRetriever.ShardHdrNonceHashDataUnit + dataRetriever.UnitType(i) store.AddStorer(hdrNonceHashDataUnit, shardHdrHashNonceUnits[i]) } + store.AddStorer(dataRetriever.HeartbeatUnit, heartbeatStorageUnit) return store, err } diff --git a/config/config.go b/config/config.go index 472c1156f8b..40249c12445 100644 --- a/config/config.go +++ b/config/config.go @@ -135,6 +135,7 @@ type HeartbeatConfig struct { MinTimeToWaitBetweenBroadcastsInSec int MaxTimeToWaitBetweenBroadcastsInSec int DurationInSecToConsiderUnresponsive int + HeartbeatStorage StorageConfig } // GeneralSettingsConfig will hold the general settings for a node diff --git a/dataRetriever/interface.go b/dataRetriever/interface.go index 202ab369088..c054fd36ea3 100644 --- a/dataRetriever/interface.go +++ b/dataRetriever/interface.go @@ -32,6 +32,8 @@ const ( RewardTransactionUnit UnitType = 8 // MetaHdrNonceHashDataUnit is the meta header nonce-hash pair data unit identifier MetaHdrNonceHashDataUnit UnitType = 9 + // HeartbeatUnit is the heartbeat storage unit identifier + HeartbeatUnit UnitType = 10 // ShardHdrNonceHashDataUnit is the header nonce-hash pair data unit identifier //TODO: Add only unit types lower than 100 diff --git a/integrationTests/node/heartbeat_test.go b/integrationTests/node/heartbeat_test.go index c78b3c715a9..883665cb7e5 100644 --- a/integrationTests/node/heartbeat_test.go +++ b/integrationTests/node/heartbeat_test.go @@ -3,6 +3,8 @@ package node import ( "context" "encoding/hex" + "encoding/json" + "errors" "fmt" "testing" "time" @@ -13,6 +15,7 @@ import ( "github.com/ElrondNetwork/elrond-go/crypto/signing/kyber/singlesig" 
"github.com/ElrondNetwork/elrond-go/integrationTests" "github.com/ElrondNetwork/elrond-go/node/heartbeat" + "github.com/ElrondNetwork/elrond-go/node/mock" "github.com/ElrondNetwork/elrond-go/p2p" "github.com/ElrondNetwork/elrond-go/sharding" "github.com/stretchr/testify/assert" @@ -159,16 +162,37 @@ func createSender(messenger p2p.Messenger, topic string) (*heartbeat.Sender, cry } func createMonitor(maxDurationPeerUnresponsive time.Duration) *heartbeat.Monitor { - suite := kyber.NewBlakeSHA256Ed25519() - signer := &singlesig.SchnorrSigner{} - keyGen := signing.NewKeyGenerator(suite) monitor, _ := heartbeat.NewMonitor( - signer, - keyGen, integrationTests.TestMarshalizer, maxDurationPeerUnresponsive, map[uint32][]string{0: {""}}, + time.Now(), + &mock.MessageHandlerStub{ + CreateHeartbeatFromP2pMessageCalled: func(message p2p.MessageP2P) (*heartbeat.Heartbeat, error) { + var hb heartbeat.Heartbeat + _ = json.Unmarshal(message.Data(), &hb) + return &hb, nil + }, + }, + &mock.HeartbeatStorerStub{ + UpdateGenesisTimeCalled: func(genesisTime time.Time) error { + return nil + }, + LoadHbmiDTOCalled: func(pubKey string) (*heartbeat.HeartbeatDTO, error) { + return nil, errors.New("not found") + }, + LoadKeysCalled: func() ([][]byte, error) { + return nil, nil + }, + SavePubkeyDataCalled: func(pubkey []byte, heartbeat *heartbeat.HeartbeatDTO) error { + return nil + }, + SaveKeysCalled: func(peersSlice [][]byte) error { + return nil + }, + }, + &heartbeat.RealTimer{}, ) return monitor diff --git a/node/heartbeat/errors.go b/node/heartbeat/errors.go index 4313c32ee16..37c929a94a3 100644 --- a/node/heartbeat/errors.go +++ b/node/heartbeat/errors.go @@ -34,3 +34,27 @@ var ErrNilAppStatusHandler = errors.New("nil AppStatusHandler") // ErrNilShardCoordinator signals that an operation has been attempted to or with a nil shard coordinator var ErrNilShardCoordinator = errors.New("nil shard coordinator") + +// ErrNilTimer signals that a nil time getter handler has been provided +var ErrNilTimer = errors.New("nil time getter handler") + +// ErrNilMonitorDb signals that a nil monitor db was provided +var ErrNilMonitorDb = errors.New("nil monitor db") + +// ErrNilMessageHandler signals that the provided message handler is nil +var ErrNilMessageHandler = errors.New("nil message handler") + +// ErrNilHeartbeatStorer signals that the provided heartbeat storer is nil +var ErrNilHeartbeatStorer = errors.New("nil heartbeat storer") + +// ErrFetchGenesisTimeFromDb signals that the genesis time cannot be fetched from db +var ErrFetchGenesisTimeFromDb = errors.New("monitor: can't get genesis time from db") + +// ErrStoreGenesisTimeToDb signals that the genesis time cannot be store to db +var ErrStoreGenesisTimeToDb = errors.New("monitor: can't store genesis time") + +// ErrUnmarshalGenesisTime signals that the unmarshaling of the genesis time didn't work +var ErrUnmarshalGenesisTime = errors.New("monitor: can't unmarshal genesis time") + +// ErrMarshalGenesisTime signals that the marshaling of the genesis time didn't work +var ErrMarshalGenesisTime = errors.New("monitor: can't marshal genesis time") diff --git a/node/heartbeat/export_test.go b/node/heartbeat/export_test.go new file mode 100644 index 00000000000..ba086141946 --- /dev/null +++ b/node/heartbeat/export_test.go @@ -0,0 +1,72 @@ +package heartbeat + +import "time" + +func (m *Monitor) GetMessages() map[string]*heartbeatMessageInfo { + return m.heartbeatMessages +} + +func (m *Monitor) SetMessages(messages map[string]*heartbeatMessageInfo) { + 
m.heartbeatMessages = messages +} + +func (m *Monitor) GetHbmi(tmstp time.Time) *heartbeatMessageInfo { + return &heartbeatMessageInfo{ + maxDurationPeerUnresponsive: 0, + maxInactiveTime: Duration{}, + totalUpTime: Duration{}, + totalDownTime: Duration{}, + getTimeHandler: nil, + timeStamp: time.Time{}, + isActive: false, + receivedShardID: 0, + computedShardID: 0, + versionNumber: "", + nodeDisplayName: "", + isValidator: false, + lastUptimeDowntime: time.Time{}, + genesisTime: time.Time{}, + } +} + +func (m *Monitor) SendHeartbeatMessage(hb *Heartbeat) { + m.addHeartbeatMessageToMap(hb) +} + +func (m *Monitor) AddHeartbeatMessageToMap(hb *Heartbeat) { + m.addHeartbeatMessageToMap(hb) +} + +func NewHeartbeatMessageInfo( + maxDurationPeerUnresponsive time.Duration, + isValidator bool, + genesisTime time.Time, + timer Timer, +) (*heartbeatMessageInfo, error) { + return newHeartbeatMessageInfo( + maxDurationPeerUnresponsive, + isValidator, + genesisTime, + timer, + ) +} + +func (hbmi *heartbeatMessageInfo) GetTimeStamp() time.Time { + return hbmi.timeStamp +} + +func (hbmi *heartbeatMessageInfo) GetReceiverShardId() uint32 { + return hbmi.receivedShardID +} + +func (hbmi *heartbeatMessageInfo) GetTotalUpTime() Duration { + return hbmi.totalUpTime +} + +func (hbmi *heartbeatMessageInfo) GetTotalDownTime() Duration { + return hbmi.totalDownTime +} + +func (hbmi *heartbeatMessageInfo) GetIsActive() bool { + return hbmi.isActive +} diff --git a/node/heartbeat/hearbeatMessageInfo.go b/node/heartbeat/hearbeatMessageInfo.go index 56944d040d5..54f304883c1 100644 --- a/node/heartbeat/hearbeatMessageInfo.go +++ b/node/heartbeat/hearbeatMessageInfo.go @@ -4,8 +4,6 @@ import ( "time" ) -var emptyTimestamp = time.Time{} - // heartbeatMessageInfo retain the message info received from another node (identified by a public key) type heartbeatMessageInfo struct { maxDurationPeerUnresponsive time.Duration @@ -22,81 +20,114 @@ type heartbeatMessageInfo struct { nodeDisplayName string isValidator bool lastUptimeDowntime time.Time + genesisTime time.Time } // newHeartbeatMessageInfo returns a new instance of a heartbeatMessageInfo func newHeartbeatMessageInfo( maxDurationPeerUnresponsive time.Duration, isValidator bool, + genesisTime time.Time, + timer Timer, ) (*heartbeatMessageInfo, error) { if maxDurationPeerUnresponsive == 0 { return nil, ErrInvalidMaxDurationPeerUnresponsive } + if timer == nil || timer.IsInterfaceNil() { + return nil, ErrNilTimer + } hbmi := &heartbeatMessageInfo{ maxDurationPeerUnresponsive: maxDurationPeerUnresponsive, maxInactiveTime: Duration{0}, isActive: false, receivedShardID: uint32(0), - timeStamp: emptyTimestamp, - lastUptimeDowntime: time.Now(), + timeStamp: genesisTime, + lastUptimeDowntime: timer.Now(), totalUpTime: Duration{0}, totalDownTime: Duration{0}, versionNumber: "", nodeDisplayName: "", isValidator: isValidator, + genesisTime: genesisTime, + getTimeHandler: timer.Now, } - hbmi.getTimeHandler = hbmi.clockTime return hbmi, nil } -func (hbmi *heartbeatMessageInfo) clockTime() time.Time { - return time.Now() +func (hbmi *heartbeatMessageInfo) updateFields(crtTime time.Time) { + if crtTime.Sub(hbmi.genesisTime) < 0 { + return + } + validDuration := computeValidDuration(crtTime, hbmi) + previousActive := hbmi.isActive && validDuration + hbmi.isActive = true + hbmi.updateMaxInactiveTimeDuration(crtTime) + hbmi.updateUpAndDownTime(previousActive, crtTime) } -func (hbmi *heartbeatMessageInfo) updateFields() { - crtDuration := hbmi.getTimeHandler().Sub(hbmi.timeStamp) - 
crtDuration = maxDuration(0, crtDuration) +func (hbmi *heartbeatMessageInfo) computeActive(crtTime time.Time) { + if crtTime.Sub(hbmi.genesisTime) < 0 { + return + } + validDuration := computeValidDuration(crtTime, hbmi) + hbmi.isActive = hbmi.isActive && validDuration + hbmi.updateUpAndDownTime(hbmi.isActive, crtTime) - hbmi.isActive = crtDuration < hbmi.maxDurationPeerUnresponsive - hbmi.updateUpAndDownTime() - hbmi.updateMaxInactiveTimeDuration() } -// Wil update the total time a node was up and down -func (hbmi *heartbeatMessageInfo) updateUpAndDownTime() { - lastDuration := hbmi.clockTime().Sub(hbmi.lastUptimeDowntime) +func computeValidDuration(crtTime time.Time, hbmi *heartbeatMessageInfo) bool { + crtDuration := crtTime.Sub(hbmi.timeStamp) + crtDuration = maxDuration(0, crtDuration) + validDuration := crtDuration <= hbmi.maxDurationPeerUnresponsive + return validDuration +} + +// Will update the total time a node was up and down +func (hbmi *heartbeatMessageInfo) updateUpAndDownTime(previousActive bool, crtTime time.Time) { + if hbmi.lastUptimeDowntime.Sub(hbmi.genesisTime) < 0 { + hbmi.lastUptimeDowntime = hbmi.genesisTime + } + + lastDuration := crtTime.Sub(hbmi.lastUptimeDowntime) lastDuration = maxDuration(0, lastDuration) - if hbmi.isActive { + if previousActive && hbmi.isActive { hbmi.totalUpTime.Duration += lastDuration } else { hbmi.totalDownTime.Duration += lastDuration } - hbmi.lastUptimeDowntime = time.Now() + hbmi.lastUptimeDowntime = crtTime } // HeartbeatReceived processes a new message arrived from a peer -func (hbmi *heartbeatMessageInfo) HeartbeatReceived(computedShardID, receivedshardID uint32, version string, - nodeDisplayName string) { +func (hbmi *heartbeatMessageInfo) HeartbeatReceived( + computedShardID uint32, + receivedshardID uint32, + version string, + nodeDisplayName string, +) { crtTime := hbmi.getTimeHandler() - hbmi.updateFields() + hbmi.updateFields(crtTime) hbmi.computedShardID = computedShardID hbmi.receivedShardID = receivedshardID - hbmi.updateMaxInactiveTimeDuration() + hbmi.updateMaxInactiveTimeDuration(crtTime) hbmi.timeStamp = crtTime hbmi.versionNumber = version hbmi.nodeDisplayName = nodeDisplayName } -func (hbmi *heartbeatMessageInfo) updateMaxInactiveTimeDuration() { - crtDuration := hbmi.getTimeHandler().Sub(hbmi.timeStamp) +func (hbmi *heartbeatMessageInfo) updateMaxInactiveTimeDuration(currentTime time.Time) { + crtDuration := currentTime.Sub(hbmi.timeStamp) crtDuration = maxDuration(0, crtDuration) - if hbmi.maxInactiveTime.Duration < crtDuration && hbmi.timeStamp != emptyTimestamp { + greaterDurationThanMax := hbmi.maxInactiveTime.Duration < crtDuration + currentTimeAfterGenesis := hbmi.genesisTime.Sub(currentTime) < 0 + + if greaterDurationThanMax && currentTimeAfterGenesis { hbmi.maxInactiveTime.Duration = crtDuration } } diff --git a/node/heartbeat/hearbeatMessageInfo_test.go b/node/heartbeat/hearbeatMessageInfo_test.go index 7e8929df8cb..d58eb2c0e48 100644 --- a/node/heartbeat/hearbeatMessageInfo_test.go +++ b/node/heartbeat/hearbeatMessageInfo_test.go @@ -1,91 +1,217 @@ -package heartbeat +package heartbeat_test import ( "testing" "time" + "github.com/ElrondNetwork/elrond-go/node/heartbeat" + "github.com/ElrondNetwork/elrond-go/node/mock" "github.com/stretchr/testify/assert" ) -//------ newHeartbeatMessageInfo +//------- newHeartbeatMessageInfo + func TestNewHeartbeatMessageInfo_InvalidDurationShouldErr(t *testing.T) { t.Parallel() - hbmi, err := newHeartbeatMessageInfo(0, false) + hbmi, err := heartbeat.NewHeartbeatMessageInfo( 
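		// arguments, in order: maxDurationPeerUnresponsive, isValidator, genesisTime, timer;
		// the zero duration passed below is what triggers ErrInvalidMaxDurationPeerUnresponsive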
+ 0, + false, + time.Time{}, + &mock.MockTimer{}, + ) assert.Nil(t, hbmi) - assert.Equal(t, ErrInvalidMaxDurationPeerUnresponsive, err) + assert.Equal(t, heartbeat.ErrInvalidMaxDurationPeerUnresponsive, err) +} + +func TestNewHeartbeatMessageInfo_NilGetTimeHandlerShouldErr(t *testing.T) { + t.Parallel() + + hbmi, err := heartbeat.NewHeartbeatMessageInfo( + 1, + false, + time.Time{}, + nil, + ) + + assert.Nil(t, hbmi) + assert.Equal(t, heartbeat.ErrNilTimer, err) } func TestNewHeartbeatMessageInfo_OkValsShouldWork(t *testing.T) { t.Parallel() - hbmi, err := newHeartbeatMessageInfo(1, false) + hbmi, err := heartbeat.NewHeartbeatMessageInfo( + 1, + false, + time.Time{}, + &mock.MockTimer{}, + ) assert.NotNil(t, hbmi) assert.Nil(t, err) } +//------- HeartbeatReceived + func TestHeartbeatMessageInfo_HeartbeatReceivedShouldUpdate(t *testing.T) { t.Parallel() - hbmi, _ := newHeartbeatMessageInfo(time.Duration(10), false) - incrementalTime := int64(0) - hbmi.getTimeHandler = func() time.Time { - if incrementalTime < 2 { - incrementalTime++ - } - return time.Unix(0, incrementalTime) - } + mockTimer := &mock.MockTimer{} + genesisTime := mockTimer.Now() + + hbmi, _ := heartbeat.NewHeartbeatMessageInfo( + 10*time.Second, + false, + genesisTime, + mockTimer, + ) - assert.Equal(t, emptyTimestamp, hbmi.timeStamp) + assert.Equal(t, genesisTime, hbmi.GetTimeStamp()) + mockTimer.IncrementSeconds(1) + + expectedTime := time.Unix(1, 0) hbmi.HeartbeatReceived(uint32(0), uint32(0), "v0.1", "undefined") - assert.NotEqual(t, emptyTimestamp, hbmi.timeStamp) - assert.Equal(t, uint32(0), hbmi.receivedShardID) + assert.Equal(t, expectedTime, hbmi.GetTimeStamp()) + assert.Equal(t, uint32(0), hbmi.GetReceiverShardId()) + mockTimer.IncrementSeconds(1) + expectedTime = time.Unix(2, 0) hbmi.HeartbeatReceived(uint32(0), uint32(1), "v0.1", "undefined") - assert.NotEqual(t, emptyTimestamp, hbmi.timeStamp) - assert.Equal(t, uint32(1), hbmi.receivedShardID) + assert.Equal(t, expectedTime, hbmi.GetTimeStamp()) + assert.Equal(t, uint32(1), hbmi.GetReceiverShardId()) } func TestHeartbeatMessageInfo_HeartbeatUpdateFieldsShouldWork(t *testing.T) { t.Parallel() - hbmi, _ := newHeartbeatMessageInfo(time.Duration(1), false) - incrementalTime := int64(0) - hbmi.getTimeHandler = func() time.Time { - tReturned := time.Unix(0, incrementalTime) - incrementalTime += 10 + mockTimer := &mock.MockTimer{} + genesisTime := mockTimer.Now() + hbmi, _ := heartbeat.NewHeartbeatMessageInfo( + 100*time.Second, + false, + genesisTime, + mockTimer, + ) - return tReturned - } + assert.Equal(t, genesisTime, hbmi.GetTimeStamp()) - assert.Equal(t, emptyTimestamp, hbmi.timeStamp) + mockTimer.IncrementSeconds(1) + expectedTime := time.Unix(1, 0) + expectedUptime := time.Duration(0) + expectedDownTime := time.Duration(1 * time.Second) hbmi.HeartbeatReceived(uint32(0), uint32(3), "v0.1", "undefined") - assert.NotEqual(t, emptyTimestamp, hbmi.timeStamp) + assert.Equal(t, expectedTime, hbmi.GetTimeStamp()) + assert.Equal(t, true, hbmi.GetIsActive()) + assert.Equal(t, expectedUptime, hbmi.GetTotalUpTime().Duration) + assert.Equal(t, expectedDownTime, hbmi.GetTotalDownTime().Duration) } -func TestHeartbeatMessageInfo_HeartbeatShouldUpdateUpTime(t *testing.T) { +func TestHeartbeatMessageInfo_HeartbeatShouldUpdateUpDownTime(t *testing.T) { t.Parallel() - hbmi, _ := newHeartbeatMessageInfo(time.Duration(10), false) - incrementalTime := int64(0) - hbmi.getTimeHandler = func() time.Time { - tReturned := time.Unix(0, incrementalTime) - incrementalTime += 1 + mockTimer := 
&mock.MockTimer{} + genesisTime := mockTimer.Now() + hbmi, _ := heartbeat.NewHeartbeatMessageInfo( + 100*time.Second, + false, + genesisTime, + mockTimer, + ) + + assert.Equal(t, genesisTime, hbmi.GetTimeStamp()) - return tReturned - } + // send heartbeat twice in order to calculate the duration between thm + mockTimer.IncrementSeconds(1) + hbmi.HeartbeatReceived(uint32(0), uint32(1), "v0.1", "undefined") + mockTimer.IncrementSeconds(1) + hbmi.HeartbeatReceived(uint32(0), uint32(2), "v0.1", "undefined") + + expectedDownDuration := time.Duration(1 * time.Second) + expectedUpDuration := time.Duration(1 * time.Second) + assert.Equal(t, expectedUpDuration, hbmi.GetTotalUpTime().Duration) + assert.Equal(t, expectedDownDuration, hbmi.GetTotalDownTime().Duration) + expectedTime := time.Unix(2, 0) + assert.Equal(t, expectedTime, hbmi.GetTimeStamp()) +} + +func TestHeartbeatMessageInfo_HeartbeatLongerDurationThanMaxShouldUpdateDownTime(t *testing.T) { + t.Parallel() - assert.Equal(t, emptyTimestamp, hbmi.timeStamp) + mockTimer := &mock.MockTimer{} + genesisTime := mockTimer.Now() + hbmi, _ := heartbeat.NewHeartbeatMessageInfo( + 500*time.Millisecond, + false, + genesisTime, + mockTimer, + ) + + assert.Equal(t, genesisTime, hbmi.GetTimeStamp()) // send heartbeat twice in order to calculate the duration between thm + mockTimer.IncrementSeconds(1) hbmi.HeartbeatReceived(uint32(0), uint32(1), "v0.1", "undefined") - time.Sleep(10 * time.Millisecond) + mockTimer.IncrementSeconds(1) hbmi.HeartbeatReceived(uint32(0), uint32(2), "v0.1", "undefined") - assert.True(t, hbmi.totalUpTime.Duration > time.Duration(0)) - assert.NotEqual(t, emptyTimestamp, hbmi.timeStamp) + expectedDownDuration := time.Duration(2 * time.Second) + expectedUpDuration := time.Duration(0) + assert.Equal(t, expectedDownDuration, hbmi.GetTotalDownTime().Duration) + assert.Equal(t, expectedUpDuration, hbmi.GetTotalUpTime().Duration) + expectedTime := time.Unix(2, 0) + assert.Equal(t, expectedTime, hbmi.GetTimeStamp()) +} + +func TestHeartbeatMessageInfo_HeartbeatBeforeGenesisShouldNotUpdateUpDownTime(t *testing.T) { + t.Parallel() + + mockTimer := &mock.MockTimer{} + genesisTime := time.Unix(5, 0) + hbmi, _ := heartbeat.NewHeartbeatMessageInfo( + 100*time.Second, + false, + genesisTime, + mockTimer, + ) + + assert.Equal(t, genesisTime, hbmi.GetTimeStamp()) + + // send heartbeat twice in order to calculate the duration between thm + mockTimer.IncrementSeconds(1) + hbmi.HeartbeatReceived(uint32(0), uint32(1), "v0.1", "undefined") + mockTimer.IncrementSeconds(1) + hbmi.HeartbeatReceived(uint32(0), uint32(2), "v0.1", "undefined") + + expectedDuration := time.Duration(0) + assert.Equal(t, expectedDuration, hbmi.GetTotalDownTime().Duration) + assert.Equal(t, expectedDuration, hbmi.GetTotalUpTime().Duration) + expectedTime := time.Unix(2, 0) + assert.Equal(t, expectedTime, hbmi.GetTimeStamp()) +} + +func TestHeartbeatMessageInfo_HeartbeatEqualGenesisShouldHaveUpDownTimeZero(t *testing.T) { + t.Parallel() + + mockTimer := &mock.MockTimer{} + genesisTime := time.Unix(1, 0) + hbmi, _ := heartbeat.NewHeartbeatMessageInfo( + 100*time.Second, + false, + genesisTime, + mockTimer, + ) + + assert.Equal(t, genesisTime, hbmi.GetTimeStamp()) + mockTimer.IncrementSeconds(1) + hbmi.HeartbeatReceived(uint32(0), uint32(1), "v0.1", "undefined") + + expectedDuration := time.Duration(0) + assert.Equal(t, expectedDuration, hbmi.GetTotalUpTime().Duration) + assert.Equal(t, expectedDuration, hbmi.GetTotalDownTime().Duration) + expectedTime := time.Unix(1, 0) + 
assert.Equal(t, expectedTime, hbmi.GetTimeStamp()) } diff --git a/node/heartbeat/heartbeat.go b/node/heartbeat/heartbeat.go index 4b549e57d8f..e8f6ca23b32 100644 --- a/node/heartbeat/heartbeat.go +++ b/node/heartbeat/heartbeat.go @@ -22,9 +22,26 @@ type PubKeyHeartbeat struct { IsActive bool `json:"isActive"` ReceivedShardID uint32 `json:"receivedShardID"` ComputedShardID uint32 `json:"computedShardID"` - TotalUpTime Duration `json:"totalUpTime"` - TotalDownTime Duration `json:"totalDownTime"` + TotalUpTime int `json:"totalUpTimeSec"` + TotalDownTime int `json:"totalDownTimeSec"` VersionNumber string `json:"versionNumber"` IsValidator bool `json:"isValidator"` NodeDisplayName string `json:"nodeDisplayName"` } + +// HeartbeatDTO is the struct used for handling DB operations for heartbeatMessageInfo struct +type HeartbeatDTO struct { + MaxDurationPeerUnresponsive time.Duration + MaxInactiveTime Duration + TotalUpTime Duration + TotalDownTime Duration + TimeStamp time.Time + IsActive bool + ReceivedShardID uint32 + ComputedShardID uint32 + VersionNumber string + NodeDisplayName string + IsValidator bool + LastUptimeDowntime time.Time + GenesisTime time.Time +} diff --git a/node/heartbeat/interface.go b/node/heartbeat/interface.go index 12833c36901..550e6914965 100644 --- a/node/heartbeat/interface.go +++ b/node/heartbeat/interface.go @@ -1,7 +1,36 @@ package heartbeat +import ( + "time" + + "github.com/ElrondNetwork/elrond-go/p2p" +) + // PeerMessenger defines a subset of the p2p.Messenger interface type PeerMessenger interface { Broadcast(topic string, buff []byte) IsInterfaceNil() bool } + +// MessageHandler defines what a message processor for heartbeat should do +type MessageHandler interface { + CreateHeartbeatFromP2pMessage(message p2p.MessageP2P) (*Heartbeat, error) + IsInterfaceNil() bool +} + +//Timer defines an interface for tracking time +type Timer interface { + Now() time.Time + IsInterfaceNil() bool +} + +// HeartbeatStorageHandler defines what a heartbeat's storer should do +type HeartbeatStorageHandler interface { + LoadGenesisTime() (time.Time, error) + UpdateGenesisTime(genesisTime time.Time) error + LoadHbmiDTO(pubKey string) (*HeartbeatDTO, error) + SavePubkeyData(pubkey []byte, heartbeat *HeartbeatDTO) error + LoadKeys() ([][]byte, error) + SaveKeys(peersSlice [][]byte) error + IsInterfaceNil() bool +} diff --git a/node/heartbeat/messageProcessor.go b/node/heartbeat/messageProcessor.go new file mode 100644 index 00000000000..4c21686bcc7 --- /dev/null +++ b/node/heartbeat/messageProcessor.go @@ -0,0 +1,86 @@ +package heartbeat + +import ( + "github.com/ElrondNetwork/elrond-go/crypto" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/p2p" +) + +// MessageProcessor is the struct that will handle heartbeat message verifications and conversion between +// heartbeatMessageInfo and HeartbeatDTO +type MessageProcessor struct { + singleSigner crypto.SingleSigner + keygen crypto.KeyGenerator + marshalizer marshal.Marshalizer +} + +// NewMessageProcessor will return a new instance of MessageProcessor +func NewMessageProcessor( + singleSigner crypto.SingleSigner, + keygen crypto.KeyGenerator, + marshalizer marshal.Marshalizer, +) (*MessageProcessor, error) { + if singleSigner == nil || singleSigner.IsInterfaceNil() { + return nil, ErrNilSingleSigner + } + if keygen == nil || keygen.IsInterfaceNil() { + return nil, ErrNilKeyGenerator + } + if marshalizer == nil || marshalizer.IsInterfaceNil() { + return nil, ErrNilMarshalizer + } + + return 
&MessageProcessor{ + singleSigner: singleSigner, + keygen: keygen, + marshalizer: marshalizer, + }, nil +} + +// CreateHeartbeatFromP2pMessage will return a heartbeat if all the checks pass +func (mp *MessageProcessor) CreateHeartbeatFromP2pMessage(message p2p.MessageP2P) (*Heartbeat, error) { + if message == nil || message.IsInterfaceNil() { + return nil, ErrNilMessage + } + if message.Data() == nil { + return nil, ErrNilDataToProcess + } + + hbRecv := &Heartbeat{} + + err := mp.marshalizer.Unmarshal(hbRecv, message.Data()) + if err != nil { + return nil, err + } + + err = mp.verifySignature(hbRecv) + if err != nil { + return nil, err + } + + return hbRecv, nil +} + +func (mp *MessageProcessor) verifySignature(hbRecv *Heartbeat) error { + senderPubKey, err := mp.keygen.PublicKeyFromByteArray(hbRecv.Pubkey) + if err != nil { + return err + } + + copiedHeartbeat := *hbRecv + copiedHeartbeat.Signature = nil + buffCopiedHeartbeat, err := mp.marshalizer.Marshal(copiedHeartbeat) + if err != nil { + return err + } + + return mp.singleSigner.Verify(senderPubKey, buffCopiedHeartbeat, hbRecv.Signature) +} + +// IsInterfaceNil returns true if there is no value under the interface +func (mp *MessageProcessor) IsInterfaceNil() bool { + if mp == nil { + return true + } + return false +} diff --git a/node/heartbeat/monitor.go b/node/heartbeat/monitor.go index edb941c3083..1d845587772 100644 --- a/node/heartbeat/monitor.go +++ b/node/heartbeat/monitor.go @@ -1,7 +1,9 @@ package heartbeat import ( + "bytes" "encoding/hex" + "fmt" "sort" "strings" "sync" @@ -9,7 +11,6 @@ import ( "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/core/logger" - "github.com/ElrondNetwork/elrond-go/crypto" "github.com/ElrondNetwork/elrond-go/marshal" "github.com/ElrondNetwork/elrond-go/p2p" "github.com/ElrondNetwork/elrond-go/statusHandler" @@ -19,64 +20,133 @@ var log = logger.DefaultLogger() // Monitor represents the heartbeat component that processes received heartbeat messages type Monitor struct { - singleSigner crypto.SingleSigner maxDurationPeerUnresponsive time.Duration - keygen crypto.KeyGenerator marshalizer marshal.Marshalizer heartbeatMessages map[string]*heartbeatMessageInfo mutHeartbeatMessages sync.RWMutex pubKeysMap map[uint32][]string + fullPeersSlice [][]byte mutPubKeysMap sync.RWMutex appStatusHandler core.AppStatusHandler + genesisTime time.Time + messageHandler MessageHandler + storer HeartbeatStorageHandler + timer Timer } // NewMonitor returns a new monitor instance func NewMonitor( - singleSigner crypto.SingleSigner, - keygen crypto.KeyGenerator, marshalizer marshal.Marshalizer, maxDurationPeerUnresponsive time.Duration, pubKeysMap map[uint32][]string, + genesisTime time.Time, + messageHandler MessageHandler, + storer HeartbeatStorageHandler, + timer Timer, ) (*Monitor, error) { - if singleSigner == nil || singleSigner.IsInterfaceNil() { - return nil, ErrNilSingleSigner - } - if keygen == nil || keygen.IsInterfaceNil() { - return nil, ErrNilKeyGenerator - } if marshalizer == nil || marshalizer.IsInterfaceNil() { return nil, ErrNilMarshalizer } if len(pubKeysMap) == 0 { return nil, ErrEmptyPublicKeysMap } - - pubKeysMapCopy := make(map[uint32][]string, 0) + if messageHandler == nil || messageHandler.IsInterfaceNil() { + return nil, ErrNilMessageHandler + } + if storer == nil || storer.IsInterfaceNil() { + return nil, ErrNilHeartbeatStorer + } + if timer == nil || timer.IsInterfaceNil() { + return nil, ErrNilTimer + } mon := &Monitor{ - singleSigner: singleSigner, - keygen: 
keygen, marshalizer: marshalizer, heartbeatMessages: make(map[string]*heartbeatMessageInfo), maxDurationPeerUnresponsive: maxDurationPeerUnresponsive, appStatusHandler: &statusHandler.NilStatusHandler{}, + genesisTime: genesisTime, + messageHandler: messageHandler, + storer: storer, + timer: timer, + } + + err := mon.storer.UpdateGenesisTime(genesisTime) + if err != nil { + return nil, err + } + + err = mon.initializeHeartbeatMessagesInfo(pubKeysMap) + if err != nil { + return nil, err + } + + err = mon.loadRestOfPubKeysFromStorage() + if err != nil { + log.Warn(fmt.Sprintf("heartbeat can't load public keys from storage: %s", err.Error())) } + return mon, nil +} + +func (m *Monitor) initializeHeartbeatMessagesInfo(pubKeysMap map[uint32][]string) error { + pubKeysMapCopy := make(map[uint32][]string, 0) for shardId, pubKeys := range pubKeysMap { for _, pubkey := range pubKeys { + err := m.loadHbmiFromStorer(pubkey) + if err != nil { // if pubKey not found in DB, create a new instance + mhbi, errNewHbmi := newHeartbeatMessageInfo(m.maxDurationPeerUnresponsive, true, m.genesisTime, m.timer) + if errNewHbmi != nil { + return errNewHbmi + } + + mhbi.genesisTime = m.genesisTime + mhbi.computedShardID = shardId + m.heartbeatMessages[pubkey] = mhbi + } pubKeysMapCopy[shardId] = append(pubKeysMapCopy[shardId], pubkey) - mhbi, err := newHeartbeatMessageInfo(maxDurationPeerUnresponsive, true) + } + } + + m.pubKeysMap = pubKeysMapCopy + return nil +} + +func (m *Monitor) loadRestOfPubKeysFromStorage() error { + peersSlice, err := m.storer.LoadKeys() + if err != nil { + return err + } + + for _, peer := range peersSlice { + _, ok := m.heartbeatMessages[string(peer)] + if !ok { // peer not in nodes map + err = m.loadHbmiFromStorer(string(peer)) if err != nil { - return nil, err + continue } - - mhbi.computedShardID = shardId - mon.heartbeatMessages[pubkey] = mhbi } } - mon.pubKeysMap = pubKeysMapCopy - return mon, nil + + return nil +} + +func (m *Monitor) loadHbmiFromStorer(pubKey string) error { + hbmiDTO, err := m.storer.LoadHbmiDTO(pubKey) + if err != nil { + return err + } + + receivedHbmi := m.convertFromExportedStruct(*hbmiDTO, m.maxDurationPeerUnresponsive) + receivedHbmi.getTimeHandler = m.timer.Now + receivedHbmi.isActive = m.timer.Now().Sub(receivedHbmi.lastUptimeDowntime) <= m.maxDurationPeerUnresponsive + receivedHbmi.lastUptimeDowntime = m.timer.Now() + receivedHbmi.genesisTime = m.genesisTime + + m.heartbeatMessages[pubKey] = &receivedHbmi + + return nil } // SetAppStatusHandler will set the AppStatusHandler which will be used for monitoring @@ -92,46 +162,61 @@ func (m *Monitor) SetAppStatusHandler(ash core.AppStatusHandler) error { // ProcessReceivedMessage satisfies the p2p.MessageProcessor interface so it can be called // by the p2p subsystem each time a new heartbeat message arrives func (m *Monitor) ProcessReceivedMessage(message p2p.MessageP2P) error { - if message == nil || message.IsInterfaceNil() { - return ErrNilMessage - } - if message.Data() == nil { - return ErrNilDataToProcess + hbRecv, err := m.messageHandler.CreateHeartbeatFromP2pMessage(message) + if err != nil { + return err } - hbRecv := &Heartbeat{} + //message is validated, process should be done async, method can return nil + go m.addHeartbeatMessageToMap(hbRecv) - err := m.marshalizer.Unmarshal(hbRecv, message.Data()) - if err != nil { - return err + return nil +} + +func (m *Monitor) addHeartbeatMessageToMap(hb *Heartbeat) { + m.mutHeartbeatMessages.Lock() + defer m.mutHeartbeatMessages.Unlock() + + pubKeyStr := 
string(hb.Pubkey) + hbmi, ok := m.heartbeatMessages[pubKeyStr] + if hbmi == nil || !ok { + var err error + hbmi, err = newHeartbeatMessageInfo(m.maxDurationPeerUnresponsive, false, m.genesisTime, m.timer) + if err != nil { + log.Error(err.Error()) + return + } + m.heartbeatMessages[pubKeyStr] = hbmi } - err = m.verifySignature(hbRecv) + computedShardID := m.computeShardID(pubKeyStr) + hbmi.HeartbeatReceived(computedShardID, hb.ShardID, hb.VersionNumber, hb.NodeDisplayName) + hbDTO := m.convertToExportedStruct(hbmi) + err := m.storer.SavePubkeyData(hb.Pubkey, &hbDTO) if err != nil { - return err + log.Warn(fmt.Sprintf("cannot save heartbeat to db: %s", err.Error())) } + m.addPeerToFullPeersSlice(hb.Pubkey) +} - //message is validated, process should be done async, method can return nil - go func(msg p2p.MessageP2P, hb *Heartbeat) { - m.mutHeartbeatMessages.Lock() - defer m.mutHeartbeatMessages.Unlock() - - pe := m.heartbeatMessages[string(hb.Pubkey)] - if pe == nil { - pe, err = newHeartbeatMessageInfo(m.maxDurationPeerUnresponsive, false) - if err != nil { - log.Error(err.Error()) - return - } - m.heartbeatMessages[string(hb.Pubkey)] = pe +func (m *Monitor) addPeerToFullPeersSlice(pubKey []byte) { + if !m.isPeerInFullPeersSlice(pubKey) { + m.fullPeersSlice = append(m.fullPeersSlice, pubKey) + err := m.storer.SaveKeys(m.fullPeersSlice) + if err != nil { + log.Warn(fmt.Sprintf("can't store the keys slice: %s", err.Error())) } + } +} - computedShardID := m.computeShardID(string(hb.Pubkey)) - pe.HeartbeatReceived(computedShardID, hb.ShardID, hb.VersionNumber, hb.NodeDisplayName) - m.updateAllHeartbeatMessages() - }(message, hbRecv) +func (m *Monitor) isPeerInFullPeersSlice(pubKey []byte) bool { + for _, peer := range m.fullPeersSlice { + if bytes.Equal(peer, pubKey) { + return true + } + } - return nil + return false } func (m *Monitor) computeShardID(pubkey string) uint32 { @@ -151,27 +236,18 @@ func (m *Monitor) computeShardID(pubkey string) uint32 { return m.heartbeatMessages[pubkey].computedShardID } -func (m *Monitor) verifySignature(hbRecv *Heartbeat) error { - senderPubKey, err := m.keygen.PublicKeyFromByteArray(hbRecv.Pubkey) - if err != nil { - return err - } - - copiedHeartbeat := *hbRecv - copiedHeartbeat.Signature = nil - buffCopiedHeartbeat, err := m.marshalizer.Marshal(copiedHeartbeat) - if err != nil { - return err - } - - return m.singleSigner.Verify(senderPubKey, buffCopiedHeartbeat, hbRecv.Signature) -} - -func (m *Monitor) updateAllHeartbeatMessages() { +func (m *Monitor) computeAllHeartbeatMessages() { counterActiveValidators := 0 counterConnectedNodes := 0 - for _, v := range m.heartbeatMessages { - v.updateFields() + for pk, v := range m.heartbeatMessages { + //TODO change here + v.computeActive(m.timer.Now()) + + hbDTO := m.convertToExportedStruct(v) + err := m.storer.SavePubkeyData([]byte(pk), &hbDTO) + if err != nil { + log.Warn(fmt.Sprintf("cannot save heartbeat to db: %s", err.Error())) + } if v.isActive { counterConnectedNodes++ @@ -188,9 +264,11 @@ func (m *Monitor) updateAllHeartbeatMessages() { // GetHeartbeats returns the heartbeat status func (m *Monitor) GetHeartbeats() []PubKeyHeartbeat { - m.mutHeartbeatMessages.RLock() + m.mutHeartbeatMessages.Lock() status := make([]PubKeyHeartbeat, len(m.heartbeatMessages)) + m.computeAllHeartbeatMessages() + idx := 0 for k, v := range m.heartbeatMessages { status[idx] = PubKeyHeartbeat{ @@ -200,16 +278,15 @@ func (m *Monitor) GetHeartbeats() []PubKeyHeartbeat { IsActive: v.isActive, ReceivedShardID: v.receivedShardID, 
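			// receivedShardID is taken from the heartbeat message itself, whereas
			// computedShardID is resolved locally from the eligible-validators map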
ComputedShardID: v.computedShardID, - TotalUpTime: v.totalUpTime, - TotalDownTime: v.totalDownTime, + TotalUpTime: int(v.totalUpTime.Seconds()), + TotalDownTime: int(v.totalDownTime.Seconds()), VersionNumber: v.versionNumber, IsValidator: v.isValidator, NodeDisplayName: v.nodeDisplayName, } idx++ - } - m.mutHeartbeatMessages.RUnlock() + m.mutHeartbeatMessages.Unlock() sort.Slice(status, func(i, j int) bool { return strings.Compare(status[i].HexPublicKey, status[j].HexPublicKey) < 0 @@ -225,3 +302,40 @@ func (m *Monitor) IsInterfaceNil() bool { } return false } + +func (m *Monitor) convertToExportedStruct(v *heartbeatMessageInfo) HeartbeatDTO { + return HeartbeatDTO{ + TimeStamp: v.timeStamp, + MaxInactiveTime: v.maxInactiveTime, + IsActive: v.isActive, + ReceivedShardID: v.receivedShardID, + ComputedShardID: v.computedShardID, + TotalUpTime: v.totalUpTime, + TotalDownTime: v.totalDownTime, + VersionNumber: v.versionNumber, + IsValidator: v.isValidator, + NodeDisplayName: v.nodeDisplayName, + LastUptimeDowntime: v.lastUptimeDowntime, + GenesisTime: v.genesisTime, + } +} + +func (m *Monitor) convertFromExportedStruct(hbDTO HeartbeatDTO, maxDuration time.Duration) heartbeatMessageInfo { + hbmi := heartbeatMessageInfo{ + maxDurationPeerUnresponsive: maxDuration, + maxInactiveTime: hbDTO.MaxInactiveTime, + timeStamp: hbDTO.TimeStamp, + isActive: hbDTO.IsActive, + totalUpTime: hbDTO.TotalUpTime, + totalDownTime: hbDTO.TotalDownTime, + receivedShardID: hbDTO.ReceivedShardID, + computedShardID: hbDTO.ComputedShardID, + versionNumber: hbDTO.VersionNumber, + nodeDisplayName: hbDTO.NodeDisplayName, + isValidator: hbDTO.IsValidator, + lastUptimeDowntime: hbDTO.LastUptimeDowntime, + genesisTime: hbDTO.GenesisTime, + } + + return hbmi +} diff --git a/node/heartbeat/monitor_test.go b/node/heartbeat/monitor_test.go index e5f9fa13ae8..a5e033c8530 100644 --- a/node/heartbeat/monitor_test.go +++ b/node/heartbeat/monitor_test.go @@ -7,83 +7,129 @@ import ( "testing" "time" - "github.com/ElrondNetwork/elrond-go/crypto" "github.com/ElrondNetwork/elrond-go/node/heartbeat" + "github.com/ElrondNetwork/elrond-go/node/heartbeat/storage" "github.com/ElrondNetwork/elrond-go/node/mock" + "github.com/ElrondNetwork/elrond-go/p2p" "github.com/stretchr/testify/assert" ) //------- NewMonitor -func TestNewMonitor_NilSingleSignerShouldErr(t *testing.T) { +func TestNewMonitor_NilMarshalizerShouldErr(t *testing.T) { t.Parallel() + th := &mock.MockTimer{} mon, err := heartbeat.NewMonitor( nil, - &mock.KeyGenMock{}, - &mock.MarshalizerMock{}, 0, map[uint32][]string{0: {""}}, + time.Now(), + &mock.MessageHandlerStub{}, + &mock.HeartbeatStorerStub{}, + th, ) assert.Nil(t, mon) - assert.Equal(t, heartbeat.ErrNilSingleSigner, err) + assert.Equal(t, heartbeat.ErrNilMarshalizer, err) } -func TestNewMonitor_NilKeygenShouldErr(t *testing.T) { +func TestNewMonitor_EmptyPublicKeyListShouldErr(t *testing.T) { t.Parallel() + th := &mock.MockTimer{} mon, err := heartbeat.NewMonitor( - &mock.SinglesignMock{}, - nil, &mock.MarshalizerMock{}, 0, - map[uint32][]string{0: {""}}, + make(map[uint32][]string), + time.Now(), + &mock.MessageHandlerStub{}, + &mock.HeartbeatStorerStub{}, + th, ) assert.Nil(t, mon) - assert.Equal(t, heartbeat.ErrNilKeyGenerator, err) + assert.Equal(t, heartbeat.ErrEmptyPublicKeysMap, err) } -func TestNewMonitor_NilMarshalizerShouldErr(t *testing.T) { +func TestNewMonitor_NilMessageHandlerShouldErr(t *testing.T) { t.Parallel() + th := &mock.MockTimer{} mon, err := heartbeat.NewMonitor( - &mock.SinglesignMock{}, - 
&mock.KeyGenMock{}, + &mock.MarshalizerMock{}, + 0, + map[uint32][]string{0: {""}}, + time.Now(), nil, + &mock.HeartbeatStorerStub{}, + th, + ) + + assert.Nil(t, mon) + assert.Equal(t, heartbeat.ErrNilMessageHandler, err) +} + +func TestNewMonitor_NilHeartbeatStorerShouldErr(t *testing.T) { + t.Parallel() + + th := &mock.MockTimer{} + mon, err := heartbeat.NewMonitor( + &mock.MarshalizerMock{}, 0, map[uint32][]string{0: {""}}, + time.Now(), + &mock.MessageHandlerStub{}, + nil, + th, ) assert.Nil(t, mon) - assert.Equal(t, heartbeat.ErrNilMarshalizer, err) + assert.Equal(t, heartbeat.ErrNilHeartbeatStorer, err) } -func TestNewMonitor_EmptyPublicKeyListShouldErr(t *testing.T) { +func TestNewMonitor_NilTimeHandlerShouldErr(t *testing.T) { t.Parallel() mon, err := heartbeat.NewMonitor( - &mock.SinglesignMock{}, - &mock.KeyGenMock{}, &mock.MarshalizerMock{}, 0, - make(map[uint32][]string), + map[uint32][]string{0: {""}}, + time.Now(), + &mock.MessageHandlerStub{}, + &mock.HeartbeatStorerStub{}, + nil, ) assert.Nil(t, mon) - assert.Equal(t, heartbeat.ErrEmptyPublicKeysMap, err) + assert.Equal(t, heartbeat.ErrNilTimer, err) } func TestNewMonitor_OkValsShouldCreatePubkeyMap(t *testing.T) { t.Parallel() + th := &mock.MockTimer{} mon, err := heartbeat.NewMonitor( - &mock.SinglesignMock{}, - &mock.KeyGenMock{}, &mock.MarshalizerMock{}, 1, map[uint32][]string{0: {"pk1", "pk2"}}, + time.Now(), + &mock.MessageHandlerStub{}, + &mock.HeartbeatStorerStub{ + UpdateGenesisTimeCalled: func(genesisTime time.Time) error { + return nil + }, + LoadHbmiDTOCalled: func(pubKey string) (*heartbeat.HeartbeatDTO, error) { + return nil, errors.New("not found") + }, + LoadKeysCalled: func() ([][]byte, error) { + return nil, nil + }, + SavePubkeyDataCalled: func(pubkey []byte, heartbeat *heartbeat.HeartbeatDTO) error { + return nil + }, + }, + th, ) assert.NotNil(t, mon) @@ -95,6 +141,7 @@ func TestNewMonitor_OkValsShouldCreatePubkeyMap(t *testing.T) { func TestNewMonitor_ShouldComputeShardId(t *testing.T) { t.Parallel() + th := &mock.MockTimer{} pksPerShards := map[uint32][]string{ 0: {"pk0"}, 1: {"pk1"}, @@ -102,11 +149,26 @@ func TestNewMonitor_ShouldComputeShardId(t *testing.T) { maxDuration := time.Millisecond mon, err := heartbeat.NewMonitor( - &mock.SinglesignMock{}, - &mock.KeyGenMock{}, &mock.MarshalizerMock{}, maxDuration, pksPerShards, + time.Now(), + &mock.MessageHandlerStub{}, + &mock.HeartbeatStorerStub{ + UpdateGenesisTimeCalled: func(genesisTime time.Time) error { + return nil + }, + LoadHbmiDTOCalled: func(pubKey string) (*heartbeat.HeartbeatDTO, error) { + return nil, errors.New("not found") + }, + LoadKeysCalled: func() ([][]byte, error) { + return nil, nil + }, + SavePubkeyDataCalled: func(pubkey []byte, heartbeat *heartbeat.HeartbeatDTO) error { + return nil + }, + }, + th, ) assert.NotNil(t, mon) @@ -119,143 +181,54 @@ func TestNewMonitor_ShouldComputeShardId(t *testing.T) { //------- ProcessReceivedMessage -func TestMonitor_ProcessReceivedMessageNilMessageShouldErr(t *testing.T) { - t.Parallel() - - mon, _ := heartbeat.NewMonitor( - &mock.SinglesignMock{}, - &mock.KeyGenMock{}, - &mock.MarshalizerMock{}, - 1, - map[uint32][]string{0: {"pk1"}}, - ) - - err := mon.ProcessReceivedMessage(nil) - - assert.Equal(t, heartbeat.ErrNilMessage, err) -} - -func TestMonitor_ProcessReceivedMessageNilDataShouldErr(t *testing.T) { - t.Parallel() - - mon, _ := heartbeat.NewMonitor( - &mock.SinglesignMock{}, - &mock.KeyGenMock{}, - &mock.MarshalizerMock{}, - 0, - map[uint32][]string{0: {"pk1"}}, - ) - - err := 
mon.ProcessReceivedMessage(&mock.P2PMessageStub{}) - - assert.Equal(t, heartbeat.ErrNilDataToProcess, err) -} - -func TestMonitor_ProcessReceivedMessageMarshalFailsShouldErr(t *testing.T) { +func TestMonitor_ProcessReceivedMessageShouldWork(t *testing.T) { t.Parallel() - errExpected := errors.New("expected err") + pubKey := "pk1" + th := &mock.MockTimer{} mon, _ := heartbeat.NewMonitor( - &mock.SinglesignMock{}, - &mock.KeyGenMock{}, &mock.MarshalizerMock{ UnmarshalHandler: func(obj interface{}, buff []byte) error { - return errExpected + (obj.(*heartbeat.Heartbeat)).Pubkey = []byte(pubKey) + return nil }, }, - 1, - map[uint32][]string{0: {"pk1"}}, - ) - - err := mon.ProcessReceivedMessage(&mock.P2PMessageStub{DataField: []byte("")}) - - assert.Equal(t, errExpected, err) -} - -func TestMonitor_ProcessReceivedMessageWrongPubkeyShouldErr(t *testing.T) { - t.Parallel() - - errExpected := errors.New("expected err") - - mon, _ := heartbeat.NewMonitor( - &mock.SinglesignMock{}, - &mock.KeyGenMock{ - PublicKeyFromByteArrayMock: func(b []byte) (key crypto.PublicKey, e error) { - return nil, errExpected + time.Second*1000, + map[uint32][]string{0: {pubKey}}, + time.Now(), + &mock.MessageHandlerStub{ + CreateHeartbeatFromP2pMessageCalled: func(message p2p.MessageP2P) (*heartbeat.Heartbeat, error) { + var rcvHb heartbeat.Heartbeat + _ = json.Unmarshal(message.Data(), &rcvHb) + return &rcvHb, nil }, }, - &mock.MarshalizerMock{ - UnmarshalHandler: func(obj interface{}, buff []byte) error { + &mock.HeartbeatStorerStub{ + UpdateGenesisTimeCalled: func(genesisTime time.Time) error { return nil }, - }, - 1, - map[uint32][]string{0: {"pk1"}}, - ) - - err := mon.ProcessReceivedMessage(&mock.P2PMessageStub{DataField: []byte("")}) - - assert.Equal(t, errExpected, err) -} - -func TestMonitor_ProcessReceivedMessageVerifyFailsShouldErr(t *testing.T) { - t.Parallel() - - errExpected := errors.New("expected err") - - mon, _ := heartbeat.NewMonitor( - &mock.SinglesignStub{ - VerifyCalled: func(public crypto.PublicKey, msg []byte, sig []byte) error { - return errExpected + LoadHbmiDTOCalled: func(pubKey string) (*heartbeat.HeartbeatDTO, error) { + return nil, errors.New("not found") }, - }, - &mock.KeyGenMock{ - PublicKeyFromByteArrayMock: func(b []byte) (key crypto.PublicKey, e error) { + LoadKeysCalled: func() ([][]byte, error) { return nil, nil }, - }, - &mock.MarshalizerMock{ - UnmarshalHandler: func(obj interface{}, buff []byte) error { - return nil - }, - }, - 1, - map[uint32][]string{0: {"pk1"}}, - ) - - err := mon.ProcessReceivedMessage(&mock.P2PMessageStub{DataField: []byte("")}) - - assert.Equal(t, errExpected, err) -} - -func TestMonitor_ProcessReceivedMessageShouldWork(t *testing.T) { - t.Parallel() - - pubKey := "pk1" - - mon, _ := heartbeat.NewMonitor( - &mock.SinglesignStub{ - VerifyCalled: func(public crypto.PublicKey, msg []byte, sig []byte) error { + SavePubkeyDataCalled: func(pubkey []byte, heartbeat *heartbeat.HeartbeatDTO) error { return nil }, - }, - &mock.KeyGenMock{ - PublicKeyFromByteArrayMock: func(b []byte) (key crypto.PublicKey, e error) { - return nil, nil - }, - }, - &mock.MarshalizerMock{ - UnmarshalHandler: func(obj interface{}, buff []byte) error { - (obj.(*heartbeat.Heartbeat)).Pubkey = []byte(pubKey) + SaveKeysCalled: func(peersSlice [][]byte) error { return nil }, }, - time.Second*1000, - map[uint32][]string{0: {pubKey}}, + th, ) - err := mon.ProcessReceivedMessage(&mock.P2PMessageStub{DataField: []byte("")}) + hb := heartbeat.Heartbeat{ + Pubkey: []byte(pubKey), + } + hbBytes, _ 
:= json.Marshal(hb) + err := mon.ProcessReceivedMessage(&mock.P2PMessageStub{DataField: hbBytes}) assert.Nil(t, err) //a delay is mandatory for the go routine to finish its job @@ -271,17 +244,8 @@ func TestMonitor_ProcessReceivedMessageWithNewPublicKey(t *testing.T) { pubKey := "pk1" + th := &mock.MockTimer{} mon, _ := heartbeat.NewMonitor( - &mock.SinglesignStub{ - VerifyCalled: func(public crypto.PublicKey, msg []byte, sig []byte) error { - return nil - }, - }, - &mock.KeyGenMock{ - PublicKeyFromByteArrayMock: func(b []byte) (key crypto.PublicKey, e error) { - return nil, nil - }, - }, &mock.MarshalizerMock{ UnmarshalHandler: func(obj interface{}, buff []byte) error { (obj.(*heartbeat.Heartbeat)).Pubkey = []byte(pubKey) @@ -290,9 +254,39 @@ func TestMonitor_ProcessReceivedMessageWithNewPublicKey(t *testing.T) { }, time.Second*1000, map[uint32][]string{0: {"pk2"}}, + time.Now(), + &mock.MessageHandlerStub{ + CreateHeartbeatFromP2pMessageCalled: func(message p2p.MessageP2P) (*heartbeat.Heartbeat, error) { + var rcvHb heartbeat.Heartbeat + _ = json.Unmarshal(message.Data(), &rcvHb) + return &rcvHb, nil + }, + }, + &mock.HeartbeatStorerStub{ + UpdateGenesisTimeCalled: func(genesisTime time.Time) error { + return nil + }, + LoadHbmiDTOCalled: func(pubKey string) (*heartbeat.HeartbeatDTO, error) { + return nil, errors.New("not found") + }, + LoadKeysCalled: func() ([][]byte, error) { + return nil, nil + }, + SavePubkeyDataCalled: func(pubkey []byte, heartbeat *heartbeat.HeartbeatDTO) error { + return nil + }, + SaveKeysCalled: func(peersSlice [][]byte) error { + return nil + }, + }, + th, ) - err := mon.ProcessReceivedMessage(&mock.P2PMessageStub{DataField: []byte("")}) + hb := heartbeat.Heartbeat{ + Pubkey: []byte(pubKey), + } + hbBytes, _ := json.Marshal(hb) + err := mon.ProcessReceivedMessage(&mock.P2PMessageStub{DataField: hbBytes}) assert.Nil(t, err) //a delay is mandatory for the go routine to finish its job @@ -309,17 +303,8 @@ func TestMonitor_ProcessReceivedMessageWithNewShardID(t *testing.T) { pubKey := []byte("pk1") + th := &mock.MockTimer{} mon, _ := heartbeat.NewMonitor( - &mock.SinglesignStub{ - VerifyCalled: func(public crypto.PublicKey, msg []byte, sig []byte) error { - return nil - }, - }, - &mock.KeyGenMock{ - PublicKeyFromByteArrayMock: func(b []byte) (key crypto.PublicKey, e error) { - return nil, nil - }, - }, &mock.MarshalizerMock{ UnmarshalHandler: func(obj interface{}, buff []byte) error { var rcvdHb heartbeat.Heartbeat @@ -331,6 +316,32 @@ func TestMonitor_ProcessReceivedMessageWithNewShardID(t *testing.T) { }, time.Second*1000, map[uint32][]string{0: {"pk1"}}, + time.Now(), + &mock.MessageHandlerStub{ + CreateHeartbeatFromP2pMessageCalled: func(message p2p.MessageP2P) (*heartbeat.Heartbeat, error) { + var rcvHb heartbeat.Heartbeat + _ = json.Unmarshal(message.Data(), &rcvHb) + return &rcvHb, nil + }, + }, + &mock.HeartbeatStorerStub{ + UpdateGenesisTimeCalled: func(genesisTime time.Time) error { + return nil + }, + LoadHbmiDTOCalled: func(pubKey string) (*heartbeat.HeartbeatDTO, error) { + return nil, errors.New("not found") + }, + LoadKeysCalled: func() ([][]byte, error) { + return nil, nil + }, + SavePubkeyDataCalled: func(pubkey []byte, heartbeat *heartbeat.HeartbeatDTO) error { + return nil + }, + SaveKeysCalled: func(peersSlice [][]byte) error { + return nil + }, + }, + th, ) // First send from pk1 from shard 0 @@ -378,18 +389,9 @@ func TestMonitor_ProcessReceivedMessageShouldSetPeerInactive(t *testing.T) { pubKey1 := "pk1-should-stay-online" pubKey2 := 
"pk2-should-go-offline" - + storer, _ := storage.NewHeartbeatDbStorer(mock.NewStorerMock(), &mock.MarshalizerFake{}) + th := &mock.MockTimer{} mon, _ := heartbeat.NewMonitor( - &mock.SinglesignStub{ - VerifyCalled: func(public crypto.PublicKey, msg []byte, sig []byte) error { - return nil - }, - }, - &mock.KeyGenMock{ - PublicKeyFromByteArrayMock: func(b []byte) (key crypto.PublicKey, e error) { - return nil, nil - }, - }, &mock.MarshalizerMock{ UnmarshalHandler: func(obj interface{}, buff []byte) error { var rcvdHb heartbeat.Heartbeat @@ -399,8 +401,18 @@ func TestMonitor_ProcessReceivedMessageShouldSetPeerInactive(t *testing.T) { return nil }, }, - time.Millisecond*5, + time.Second*5, map[uint32][]string{0: {pubKey1, pubKey2}}, + th.Now(), + &mock.MessageHandlerStub{ + CreateHeartbeatFromP2pMessageCalled: func(message p2p.MessageP2P) (*heartbeat.Heartbeat, error) { + var rcvHb heartbeat.Heartbeat + _ = json.Unmarshal(message.Data(), &rcvHb) + return &rcvHb, nil + }, + }, + storer, + th, ) // First send from pk1 @@ -412,17 +424,20 @@ func TestMonitor_ProcessReceivedMessageShouldSetPeerInactive(t *testing.T) { assert.Nil(t, err) // set pk2 to inactive as max inactive time is lower - time.Sleep(6 * time.Millisecond) + time.Sleep(10 * time.Millisecond) + th.IncrementSeconds(6) // Check that both are added hbStatus := mon.GetHeartbeats() assert.Equal(t, 2, len(hbStatus)) + //assert.False(t, hbStatus[1].IsActive) // Now send a message from pk1 in order to see that pk2 is not active anymore err = sendHbMessageFromPubKey(pubKey1, mon) + time.Sleep(5 * time.Millisecond) assert.Nil(t, err) - time.Sleep(5 * time.Millisecond) + th.IncrementSeconds(4) hbStatus = mon.GetHeartbeats() diff --git a/node/heartbeat/realTimer.go b/node/heartbeat/realTimer.go new file mode 100644 index 00000000000..1fac46c253f --- /dev/null +++ b/node/heartbeat/realTimer.go @@ -0,0 +1,20 @@ +package heartbeat + +import "time" + +// RealTimer is an implementation of Timer and uses real time.now +type RealTimer struct { +} + +// Now returns the time.Now() Time +func (m *RealTimer) Now() time.Time { + return time.Now() +} + +// IsInterfaceNil verifies if the interface is nil +func (m *RealTimer) IsInterfaceNil() bool { + if m == nil { + return true + } + return false +} diff --git a/node/heartbeat/storage/heartbeatStorer.go b/node/heartbeat/storage/heartbeatStorer.go new file mode 100644 index 00000000000..bfe9aace5da --- /dev/null +++ b/node/heartbeat/storage/heartbeatStorer.go @@ -0,0 +1,160 @@ +package storage + +import ( + "fmt" + "time" + + "github.com/ElrondNetwork/elrond-go/core/logger" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/node/heartbeat" + "github.com/ElrondNetwork/elrond-go/storage" +) + +var log = logger.DefaultLogger() + +const peersKeysDbEntry = "keys" +const genesisTimeDbEntry = "genesisTime" + +// HeartbeatDbStorer is the struct which will handle storage operations for heartbeat +type HeartbeatDbStorer struct { + storer storage.Storer + marshalizer marshal.Marshalizer +} + +// NewHeartbeatDbStorer will create an instance of HeartbeatDbStorer +func NewHeartbeatDbStorer( + storer storage.Storer, + marshalizer marshal.Marshalizer, +) (*HeartbeatDbStorer, error) { + if storer == nil || storer.IsInterfaceNil() { + return nil, heartbeat.ErrNilMonitorDb + } + if marshalizer == nil || marshalizer.IsInterfaceNil() { + return nil, heartbeat.ErrNilMarshalizer + } + + return &HeartbeatDbStorer{ + storer: storer, + marshalizer: marshalizer, + }, nil +} + +// LoadGenesisTime will 
return the genesis time saved in the storer +func (hs *HeartbeatDbStorer) LoadGenesisTime() (time.Time, error) { + genesisTimeFromDbBytes, err := hs.storer.Get([]byte(genesisTimeDbEntry)) + if err != nil { + return time.Time{}, heartbeat.ErrFetchGenesisTimeFromDb + } + + var genesisTimeFromDb time.Time + err = hs.marshalizer.Unmarshal(&genesisTimeFromDb, genesisTimeFromDbBytes) + if err != nil { + return time.Time{}, heartbeat.ErrUnmarshalGenesisTime + } + + return genesisTimeFromDb, nil +} + +// UpdateGenesisTime will update the saved genesis time and will log if the genesis time changed +func (hs *HeartbeatDbStorer) UpdateGenesisTime(genesisTime time.Time) error { + + genesisTimeFromDb, err := hs.LoadGenesisTime() + if err != nil && err != heartbeat.ErrFetchGenesisTimeFromDb { + return err + } + + err = hs.saveGenesisTimeToDb(genesisTime) + if err != nil { + return err + } + + if genesisTimeFromDb != genesisTime { + log.Info(fmt.Sprintf("updated heartbeat's genesis time to %s", genesisTimeFromDb)) + } + + return nil +} + +func (hs *HeartbeatDbStorer) saveGenesisTimeToDb(genesisTime time.Time) error { + genesisTimeBytes, err := hs.marshalizer.Marshal(genesisTime) + if err != nil { + return heartbeat.ErrMarshalGenesisTime + } + + err = hs.storer.Put([]byte(genesisTimeDbEntry), genesisTimeBytes) + if err != nil { + return heartbeat.ErrStoreGenesisTimeToDb + } + + return nil +} + +// LoadHbmiDTO will return the HeartbeatDTO for the given public key from storage +func (hs *HeartbeatDbStorer) LoadHbmiDTO(pubKey string) (*heartbeat.HeartbeatDTO, error) { + pkbytes := []byte(pubKey) + + hbFromDB, err := hs.storer.Get(pkbytes) + if err != nil { + return nil, err + } + + heartbeatDto := heartbeat.HeartbeatDTO{} + err = hs.marshalizer.Unmarshal(&heartbeatDto, hbFromDB) + if err != nil { + return nil, err + } + + return &heartbeatDto, nil +} + +// LoadKeys will return the keys saved in the storer, representing public keys of all peers the node is connected to +func (hs *HeartbeatDbStorer) LoadKeys() ([][]byte, error) { + allKeysBytes, err := hs.storer.Get([]byte(peersKeysDbEntry)) + if err != nil { + return nil, err + } + + var peersSlice [][]byte + err = hs.marshalizer.Unmarshal(&peersSlice, allKeysBytes) + if err != nil { + return nil, err + } + + return peersSlice, nil +} + +// SaveKeys will update the keys for all connected peers +func (hs *HeartbeatDbStorer) SaveKeys(peersSlice [][]byte) error { + marshalizedFullPeersSlice, errMarsh := hs.marshalizer.Marshal(peersSlice) + if errMarsh != nil { + return errMarsh + } + + return hs.storer.Put([]byte(peersKeysDbEntry), marshalizedFullPeersSlice) +} + +// SavePubkeyData will add or update a HeartbeatDTO in the storer +func (hs *HeartbeatDbStorer) SavePubkeyData( + pubkey []byte, + heartbeat *heartbeat.HeartbeatDTO, +) error { + marshalizedHeartBeat, err := hs.marshalizer.Marshal(heartbeat) + if err != nil { + return err + } + + errStore := hs.storer.Put(pubkey, marshalizedHeartBeat) + if errStore != nil { + return errStore + } + + return nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (hs *HeartbeatDbStorer) IsInterfaceNil() bool { + if hs == nil { + return true + } + return false +} diff --git a/node/mock/heartbeatStorerStub.go b/node/mock/heartbeatStorerStub.go new file mode 100644 index 00000000000..b68fba294ba --- /dev/null +++ b/node/mock/heartbeatStorerStub.go @@ -0,0 +1,44 @@ +package mock + +import ( + "time" + + "github.com/ElrondNetwork/elrond-go/node/heartbeat" +) + +type HeartbeatStorerStub struct { 
+ LoadGenesisTimeCalled func() (time.Time, error) + UpdateGenesisTimeCalled func(genesisTime time.Time) error + LoadHbmiDTOCalled func(pubKey string) (*heartbeat.HeartbeatDTO, error) + SavePubkeyDataCalled func(pubkey []byte, heartbeat *heartbeat.HeartbeatDTO) error + LoadKeysCalled func() ([][]byte, error) + SaveKeysCalled func(peersSlice [][]byte) error +} + +func (hss *HeartbeatStorerStub) LoadGenesisTime() (time.Time, error) { + return hss.LoadGenesisTimeCalled() +} + +func (hss *HeartbeatStorerStub) UpdateGenesisTime(genesisTime time.Time) error { + return hss.UpdateGenesisTimeCalled(genesisTime) +} + +func (hss *HeartbeatStorerStub) LoadHbmiDTO(pubKey string) (*heartbeat.HeartbeatDTO, error) { + return hss.LoadHbmiDTOCalled(pubKey) +} + +func (hss *HeartbeatStorerStub) SavePubkeyData(pubkey []byte, heartbeat *heartbeat.HeartbeatDTO) error { + return hss.SavePubkeyDataCalled(pubkey, heartbeat) +} + +func (hss *HeartbeatStorerStub) LoadKeys() ([][]byte, error) { + return hss.LoadKeysCalled() +} + +func (hss *HeartbeatStorerStub) SaveKeys(peersSlice [][]byte) error { + return hss.SaveKeysCalled(peersSlice) +} + +func (hss *HeartbeatStorerStub) IsInterfaceNil() bool { + return false +} diff --git a/node/mock/messageHandlerStub.go b/node/mock/messageHandlerStub.go new file mode 100644 index 00000000000..815f6265ab1 --- /dev/null +++ b/node/mock/messageHandlerStub.go @@ -0,0 +1,18 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go/node/heartbeat" + "github.com/ElrondNetwork/elrond-go/p2p" +) + +type MessageHandlerStub struct { + CreateHeartbeatFromP2pMessageCalled func(message p2p.MessageP2P) (*heartbeat.Heartbeat, error) +} + +func (mhs *MessageHandlerStub) IsInterfaceNil() bool { + return false +} + +func (mhs *MessageHandlerStub) CreateHeartbeatFromP2pMessage(message p2p.MessageP2P) (*heartbeat.Heartbeat, error) { + return mhs.CreateHeartbeatFromP2pMessageCalled(message) +} diff --git a/node/mock/mockTimer.go b/node/mock/mockTimer.go new file mode 100644 index 00000000000..b86029048a8 --- /dev/null +++ b/node/mock/mockTimer.go @@ -0,0 +1,26 @@ +package mock + +import "time" + +type MockTimer struct { + seconds int64 +} + +func (m *MockTimer) Now() time.Time { + return time.Unix(m.seconds, 0) +} + +func (m *MockTimer) IsInterfaceNil() bool { + if m == nil { + return true + } + return false +} + +func (m *MockTimer) IncrementSeconds(value int) { + m.seconds += int64(value) +} + +func (m *MockTimer) SetSeconds(value int) { + m.seconds = int64(value) +} diff --git a/node/mock/storerMock.go b/node/mock/storerMock.go new file mode 100644 index 00000000000..fb22eadfd00 --- /dev/null +++ b/node/mock/storerMock.go @@ -0,0 +1,62 @@ +package mock + +import ( + "encoding/base64" + "errors" + "fmt" + "sync" +) + +type StorerMock struct { + mut sync.Mutex + data map[string][]byte +} + +func NewStorerMock() *StorerMock { + return &StorerMock{ + data: make(map[string][]byte), + } +} + +func (sm *StorerMock) Put(key, data []byte) error { + sm.mut.Lock() + defer sm.mut.Unlock() + sm.data[string(key)] = data + + return nil +} + +func (sm *StorerMock) Get(key []byte) ([]byte, error) { + sm.mut.Lock() + defer sm.mut.Unlock() + + val, ok := sm.data[string(key)] + if !ok { + return nil, errors.New(fmt.Sprintf("key: %s not found", base64.StdEncoding.EncodeToString(key))) + } + + return val, nil +} + +func (sm *StorerMock) Has(key []byte) error { + return errors.New("not implemented") +} + +func (sm *StorerMock) Remove(key []byte) error { + return errors.New("not implemented") +} + +func (sm 
*StorerMock) ClearCache() { +} + +func (sm *StorerMock) DestroyUnit() error { + return nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (sm *StorerMock) IsInterfaceNil() bool { + if sm == nil { + return true + } + return false +} diff --git a/node/mock/storerStub.go b/node/mock/storerStub.go index e91427e4e76..af7d1b3ee16 100644 --- a/node/mock/storerStub.go +++ b/node/mock/storerStub.go @@ -3,8 +3,7 @@ package mock type StorerStub struct { PutCalled func(key, data []byte) error GetCalled func(key []byte) ([]byte, error) - HasCalled func(key []byte) (bool, error) - HasOrAddCalled func(key []byte, value []byte) (bool, error) + HasCalled func(key []byte) error RemoveCalled func(key []byte) error ClearCacheCalled func() DestroyUnitCalled func() error @@ -18,14 +17,10 @@ func (ss *StorerStub) Get(key []byte) ([]byte, error) { return ss.GetCalled(key) } -func (ss *StorerStub) Has(key []byte) (bool, error) { +func (ss *StorerStub) Has(key []byte) error { return ss.HasCalled(key) } -func (ss *StorerStub) HasOrAdd(key []byte, value []byte) (bool, error) { - return ss.HasOrAddCalled(key, value) -} - func (ss *StorerStub) Remove(key []byte) error { return ss.RemoveCalled(key) } @@ -37,3 +32,11 @@ func (ss *StorerStub) ClearCache() { func (ss *StorerStub) DestroyUnit() error { return ss.DestroyUnitCalled() } + +// IsInterfaceNil returns true if there is no value under the interface +func (ss *StorerStub) IsInterfaceNil() bool { + if ss == nil { + return true + } + return false +} diff --git a/node/node.go b/node/node.go index 428ba1ef0b1..4e10c5bc929 100644 --- a/node/node.go +++ b/node/node.go @@ -28,6 +28,7 @@ import ( "github.com/ElrondNetwork/elrond-go/hashing" "github.com/ElrondNetwork/elrond-go/marshal" "github.com/ElrondNetwork/elrond-go/node/heartbeat" + "github.com/ElrondNetwork/elrond-go/node/heartbeat/storage" "github.com/ElrondNetwork/elrond-go/ntp" "github.com/ElrondNetwork/elrond-go/p2p" "github.com/ElrondNetwork/elrond-go/process" @@ -701,12 +702,12 @@ func (n *Node) GetAccount(address string) (*state.Account, error) { } // StartHeartbeat starts the node's heartbeat processing/signaling module -func (n *Node) StartHeartbeat(config config.HeartbeatConfig, versionNumber string, nodeDisplayName string) error { - if !config.Enabled { +func (n *Node) StartHeartbeat(hbConfig config.HeartbeatConfig, versionNumber string, nodeDisplayName string) error { + if !hbConfig.Enabled { return nil } - err := n.checkConfigParams(config) + err := n.checkConfigParams(hbConfig) if err != nil { return err } @@ -736,12 +737,25 @@ func (n *Node) StartHeartbeat(config config.HeartbeatConfig, versionNumber strin return err } - n.heartbeatMonitor, err = heartbeat.NewMonitor( + heartbeatStorageUnit := n.store.GetStorer(dataRetriever.HeartbeatUnit) + heartBeatMsgProcessor, err := heartbeat.NewMessageProcessor( n.singleSigner, n.keyGen, + n.marshalizer) + if err != nil { + return err + } + + heartbeatStorer, err := storage.NewHeartbeatDbStorer(heartbeatStorageUnit, n.marshalizer) + timer := &heartbeat.RealTimer{} + n.heartbeatMonitor, err = heartbeat.NewMonitor( n.marshalizer, - time.Second*time.Duration(config.DurationInSecToConsiderUnresponsive), + time.Second*time.Duration(hbConfig.DurationInSecToConsiderUnresponsive), n.initialNodesPubkeys, + n.genesisTime, + heartBeatMsgProcessor, + heartbeatStorer, + timer, ) if err != nil { return err @@ -757,7 +771,7 @@ func (n *Node) StartHeartbeat(config config.HeartbeatConfig, versionNumber strin return err } - go 
n.startSendingHeartbeats(config) + go n.startSendingHeartbeats(hbConfig) return nil } diff --git a/node/node_test.go b/node/node_test.go index bcc69c3795c..4a851404342 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -1015,6 +1015,11 @@ func TestNode_StartHeartbeatNilMarshalizerShouldErr(t *testing.T) { node.WithInitialNodesPubKeys(map[uint32][]string{0: {"pk1"}}), node.WithPrivKey(&mock.PrivateKeyStub{}), node.WithShardCoordinator(mock.NewOneShardCoordinatorMock()), + node.WithDataStore(&mock.ChainStorerMock{ + GetStorerCalled: func(unitType dataRetriever.UnitType) storage.Storer { + return mock.NewStorerMock() + }, + }), ) err := n.StartHeartbeat(config.HeartbeatConfig{ MinTimeToWaitBetweenBroadcastsInSec: 1, @@ -1052,6 +1057,11 @@ func TestNode_StartHeartbeatNilKeygenShouldErr(t *testing.T) { node.WithInitialNodesPubKeys(map[uint32][]string{0: {"pk1"}}), node.WithPrivKey(&mock.PrivateKeyStub{}), node.WithShardCoordinator(mock.NewOneShardCoordinatorMock()), + node.WithDataStore(&mock.ChainStorerMock{ + GetStorerCalled: func(unitType dataRetriever.UnitType) storage.Storer { + return mock.NewStorerMock() + }, + }), ) err := n.StartHeartbeat(config.HeartbeatConfig{ MinTimeToWaitBetweenBroadcastsInSec: 1, @@ -1081,6 +1091,11 @@ func TestNode_StartHeartbeatHasTopicValidatorShouldErr(t *testing.T) { node.WithInitialNodesPubKeys(map[uint32][]string{0: {"pk1"}}), node.WithTxSignPrivKey(&mock.PrivateKeyStub{}), node.WithShardCoordinator(mock.NewOneShardCoordinatorMock()), + node.WithDataStore(&mock.ChainStorerMock{ + GetStorerCalled: func(unitType dataRetriever.UnitType) storage.Storer { + return mock.NewStorerMock() + }, + }), ) err := n.StartHeartbeat(config.HeartbeatConfig{ MinTimeToWaitBetweenBroadcastsInSec: 1, @@ -1116,6 +1131,11 @@ func TestNode_StartHeartbeatCreateTopicFailsShouldErr(t *testing.T) { node.WithInitialNodesPubKeys(map[uint32][]string{0: {"pk1"}}), node.WithTxSignPrivKey(&mock.PrivateKeyStub{}), node.WithShardCoordinator(mock.NewOneShardCoordinatorMock()), + node.WithDataStore(&mock.ChainStorerMock{ + GetStorerCalled: func(unitType dataRetriever.UnitType) storage.Storer { + return mock.NewStorerMock() + }, + }), ) err := n.StartHeartbeat(config.HeartbeatConfig{ MinTimeToWaitBetweenBroadcastsInSec: 1, @@ -1154,6 +1174,11 @@ func TestNode_StartHeartbeatRegisterMessageProcessorFailsShouldErr(t *testing.T) node.WithInitialNodesPubKeys(map[uint32][]string{0: {"pk1"}}), node.WithPrivKey(&mock.PrivateKeyStub{}), node.WithShardCoordinator(mock.NewOneShardCoordinatorMock()), + node.WithDataStore(&mock.ChainStorerMock{ + GetStorerCalled: func(unitType dataRetriever.UnitType) storage.Storer { + return mock.NewStorerMock() + }, + }), ) err := n.StartHeartbeat(config.HeartbeatConfig{ MinTimeToWaitBetweenBroadcastsInSec: 1, @@ -1211,6 +1236,11 @@ func TestNode_StartHeartbeatShouldWorkAndCallSendHeartbeat(t *testing.T) { }, }), node.WithShardCoordinator(mock.NewOneShardCoordinatorMock()), + node.WithDataStore(&mock.ChainStorerMock{ + GetStorerCalled: func(unitType dataRetriever.UnitType) storage.Storer { + return mock.NewStorerMock() + }, + }), ) err := n.StartHeartbeat(config.HeartbeatConfig{ MinTimeToWaitBetweenBroadcastsInSec: 1, @@ -1264,6 +1294,11 @@ func TestNode_StartHeartbeatShouldWorkAndHaveAllPublicKeys(t *testing.T) { }, }), node.WithShardCoordinator(mock.NewOneShardCoordinatorMock()), + node.WithDataStore(&mock.ChainStorerMock{ + GetStorerCalled: func(unitType dataRetriever.UnitType) storage.Storer { + return mock.NewStorerMock() + }, + }), ) err := 
n.StartHeartbeat(config.HeartbeatConfig{ @@ -1318,6 +1353,11 @@ func TestNode_StartHeartbeatShouldSetNodesFromInitialPubKeysAsValidators(t *test }, }), node.WithShardCoordinator(mock.NewOneShardCoordinatorMock()), + node.WithDataStore(&mock.ChainStorerMock{ + GetStorerCalled: func(unitType dataRetriever.UnitType) storage.Storer { + return mock.NewStorerMock() + }, + }), ) err := n.StartHeartbeat(config.HeartbeatConfig{ @@ -1377,6 +1417,11 @@ func TestNode_StartHeartbeatShouldWorkAndCanCallProcessMessage(t *testing.T) { }, }), node.WithShardCoordinator(mock.NewOneShardCoordinatorMock()), + node.WithDataStore(&mock.ChainStorerMock{ + GetStorerCalled: func(unitType dataRetriever.UnitType) storage.Storer { + return mock.NewStorerMock() + }, + }), ) err := n.StartHeartbeat(config.HeartbeatConfig{ From d4f36ef1e15c2c2de38d7654feb3c10d421e03b8 Mon Sep 17 00:00:00 2001 From: Radu Chis Date: Mon, 7 Oct 2019 18:38:09 +0300 Subject: [PATCH 210/234] update heartbeat isactive --- node/heartbeat/hearbeatMessageInfo.go | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/node/heartbeat/hearbeatMessageInfo.go b/node/heartbeat/hearbeatMessageInfo.go index 54f304883c1..8d80921d0cb 100644 --- a/node/heartbeat/hearbeatMessageInfo.go +++ b/node/heartbeat/hearbeatMessageInfo.go @@ -58,24 +58,26 @@ func newHeartbeatMessageInfo( } func (hbmi *heartbeatMessageInfo) updateFields(crtTime time.Time) { - if crtTime.Sub(hbmi.genesisTime) < 0 { - return - } validDuration := computeValidDuration(crtTime, hbmi) previousActive := hbmi.isActive && validDuration hbmi.isActive = true - hbmi.updateMaxInactiveTimeDuration(crtTime) - hbmi.updateUpAndDownTime(previousActive, crtTime) + + hbmi.updateTimes(crtTime, previousActive) } func (hbmi *heartbeatMessageInfo) computeActive(crtTime time.Time) { - if crtTime.Sub(hbmi.genesisTime) < 0 { - return - } validDuration := computeValidDuration(crtTime, hbmi) hbmi.isActive = hbmi.isActive && validDuration - hbmi.updateUpAndDownTime(hbmi.isActive, crtTime) + hbmi.updateTimes(crtTime, hbmi.isActive) +} + +func (hbmi *heartbeatMessageInfo) updateTimes(crtTime time.Time, previousActive bool) { + if crtTime.Sub(hbmi.genesisTime) < 0 { + return + } + hbmi.updateMaxInactiveTimeDuration(crtTime) + hbmi.updateUpAndDownTime(previousActive, crtTime) } func computeValidDuration(crtTime time.Time, hbmi *heartbeatMessageInfo) bool { From 33b7f53e1f02b7572183d940feb656f0c1525fbd Mon Sep 17 00:00:00 2001 From: iulianpascalau Date: Mon, 7 Oct 2019 19:42:33 +0300 Subject: [PATCH 211/234] fixed heartbeat counters to be called when a new message is received --- node/heartbeat/monitor.go | 26 ++++++++++++++++++-------- 1 file changed, 18 insertions(+), 8 deletions(-) diff --git a/node/heartbeat/monitor.go b/node/heartbeat/monitor.go index 1d845587772..6550e9bedfc 100644 --- a/node/heartbeat/monitor.go +++ b/node/heartbeat/monitor.go @@ -169,6 +169,12 @@ func (m *Monitor) ProcessReceivedMessage(message p2p.MessageP2P) error { //message is validated, process should be done async, method can return nil go m.addHeartbeatMessageToMap(hbRecv) + go func() { + m.mutHeartbeatMessages.Lock() + defer m.mutHeartbeatMessages.Unlock() + + m.computeAllHeartbeatMessages() + }() return nil } @@ -239,16 +245,9 @@ func (m *Monitor) computeShardID(pubkey string) uint32 { func (m *Monitor) computeAllHeartbeatMessages() { counterActiveValidators := 0 counterConnectedNodes := 0 - for pk, v := range m.heartbeatMessages { - //TODO change here + for _, v := range m.heartbeatMessages { 
v.computeActive(m.timer.Now()) - hbDTO := m.convertToExportedStruct(v) - err := m.storer.SavePubkeyData([]byte(pk), &hbDTO) - if err != nil { - log.Warn(fmt.Sprintf("cannot save heartbeat to db: %s", err.Error())) - } - if v.isActive { counterConnectedNodes++ @@ -262,12 +261,23 @@ func (m *Monitor) computeAllHeartbeatMessages() { m.appStatusHandler.SetUInt64Value(core.MetricConnectedNodes, uint64(counterConnectedNodes)) } +func (m *Monitor) saveHeartbeats() { + for pk, v := range m.heartbeatMessages { + hbDTO := m.convertToExportedStruct(v) + err := m.storer.SavePubkeyData([]byte(pk), &hbDTO) + if err != nil { + log.Warn(fmt.Sprintf("cannot save heartbeat to db: %s", err.Error())) + } + } +} + // GetHeartbeats returns the heartbeat status func (m *Monitor) GetHeartbeats() []PubKeyHeartbeat { m.mutHeartbeatMessages.Lock() status := make([]PubKeyHeartbeat, len(m.heartbeatMessages)) m.computeAllHeartbeatMessages() + m.saveHeartbeats() idx := 0 for k, v := range m.heartbeatMessages { From 5d7693bb235faae8cb62692217ea6c65a51976ef Mon Sep 17 00:00:00 2001 From: iulianpascalau Date: Tue, 8 Oct 2019 10:27:29 +0300 Subject: [PATCH 212/234] removed heartbeat storage when serving a rest API request --- node/heartbeat/monitor.go | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/node/heartbeat/monitor.go b/node/heartbeat/monitor.go index 6550e9bedfc..347c3059c51 100644 --- a/node/heartbeat/monitor.go +++ b/node/heartbeat/monitor.go @@ -261,23 +261,12 @@ func (m *Monitor) computeAllHeartbeatMessages() { m.appStatusHandler.SetUInt64Value(core.MetricConnectedNodes, uint64(counterConnectedNodes)) } -func (m *Monitor) saveHeartbeats() { - for pk, v := range m.heartbeatMessages { - hbDTO := m.convertToExportedStruct(v) - err := m.storer.SavePubkeyData([]byte(pk), &hbDTO) - if err != nil { - log.Warn(fmt.Sprintf("cannot save heartbeat to db: %s", err.Error())) - } - } -} - // GetHeartbeats returns the heartbeat status func (m *Monitor) GetHeartbeats() []PubKeyHeartbeat { m.mutHeartbeatMessages.Lock() status := make([]PubKeyHeartbeat, len(m.heartbeatMessages)) m.computeAllHeartbeatMessages() - m.saveHeartbeats() idx := 0 for k, v := range m.heartbeatMessages { From a0ac4e55e77aefa56750bdc29fe804501f29e4f2 Mon Sep 17 00:00:00 2001 From: miiu96 Date: Tue, 8 Oct 2019 11:14:53 +0300 Subject: [PATCH 213/234] EN-4257 - modify economics parameters to work with big values --- cmd/node/config/economics.toml | 7 +- cmd/node/main.go | 5 +- config/economicsConfig.go | 7 +- config/tomlConfig_test.go | 15 +- .../block/executingRewardMiniblocks_test.go | 14 +- .../smartContract/testInitializer.go | 7 +- integrationTests/testProcessorNode.go | 13 +- process/block/preprocess/rewardsHandler.go | 2 +- .../block/preprocess/rewardsHandler_test.go | 4 +- process/coordinator/process_test.go | 25 +- process/economics/economicsData.go | 105 ++++++-- process/economics/economicsData_test.go | 238 +++++++++++++----- process/errors.go | 15 +- process/interface.go | 3 +- process/mock/rewardsHandlerMock.go | 6 +- process/transaction/process.go | 5 +- 16 files changed, 327 insertions(+), 144 deletions(-) diff --git a/cmd/node/config/economics.toml b/cmd/node/config/economics.toml index e90b4bce29f..e562d387574 100644 --- a/cmd/node/config/economics.toml +++ b/cmd/node/config/economics.toml @@ -4,15 +4,14 @@ BurnAddress = "deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef" [RewardsSettings] - RewardsValue = 1000 + RewardsValue = "1000" CommunityPercentage = 0.10 LeaderPercentage = 0.50 BurnPercentage = 0.40 
[FeeSettings] - MinGasPrice = 0 - MinGasLimitForTx = 5 - MinTxFee = 0 + MinGasPrice = "0" + MinGasLimitForTx = "5" diff --git a/cmd/node/main.go b/cmd/node/main.go index 61d3e54277a..b84645fe5bb 100644 --- a/cmd/node/main.go +++ b/cmd/node/main.go @@ -647,7 +647,10 @@ func startNode(ctx *cli.Context, log *logger.Logger, version string) error { } } - economicsData := economics.NewEconomicsData(economicsConfig) + economicsData, err := economics.NewEconomicsData(economicsConfig) + if err != nil { + return err + } processArgs := factory.NewProcessComponentsFactoryArgs( genesisConfig, diff --git a/config/economicsConfig.go b/config/economicsConfig.go index d1c1623e127..dfb4fbad4bc 100644 --- a/config/economicsConfig.go +++ b/config/economicsConfig.go @@ -8,7 +8,7 @@ type EconomicsAddresses struct { // RewardsSettings will hold economics rewards settings type RewardsSettings struct { - RewardsValue uint64 + RewardsValue string CommunityPercentage float64 LeaderPercentage float64 BurnPercentage float64 @@ -16,9 +16,8 @@ type RewardsSettings struct { // FeeSettings will hold economics fee settings type FeeSettings struct { - MinGasPrice uint64 - MinGasLimitForTx uint64 - MinTxFee uint64 + MinGasPrice string + MinGasLimitForTx string } // ConfigEconomics will hold economics config diff --git a/config/tomlConfig_test.go b/config/tomlConfig_test.go index d8bcaebcb0e..4f740f8daa5 100644 --- a/config/tomlConfig_test.go +++ b/config/tomlConfig_test.go @@ -121,13 +121,12 @@ func TestTomlParser(t *testing.T) { func TestTomlEconomicsParser(t *testing.T) { communityAddress := "commAddr" burnAddress := "burnAddr" - rewardsValue := uint64(500) + rewardsValue := "500" communityPercentage := 0.1 leaderPercentage := 0.1 burnPercentage := 0.8 - minGasPrice := uint64(1) - minGasLimitForTx := uint64(2) - minTxFee := uint64(3) + minGasPrice := "1" + minGasLimitForTx := "2" cfgEconomicsExpected := ConfigEconomics{ EconomicsAddresses: EconomicsAddresses{ @@ -143,7 +142,6 @@ func TestTomlEconomicsParser(t *testing.T) { FeeSettings: FeeSettings{ MinGasPrice: minGasPrice, MinGasLimitForTx: minGasLimitForTx, - MinTxFee: minTxFee, }, } @@ -152,14 +150,13 @@ func TestTomlEconomicsParser(t *testing.T) { CommunityAddress = "` + communityAddress + `" BurnAddress = "` + burnAddress + `" [RewardsSettings] - RewardsValue = ` + strconv.FormatUint(rewardsValue, 10) + ` + RewardsValue = "` + rewardsValue + `" CommunityPercentage = ` + fmt.Sprintf("%.6f", communityPercentage) + ` LeaderPercentage = ` + fmt.Sprintf("%.6f", leaderPercentage) + ` BurnPercentage = ` + fmt.Sprintf("%.6f", burnPercentage) + ` [FeeSettings] - MinGasPrice = ` + strconv.FormatUint(minGasPrice, 10) + ` - MinGasLimitForTx = ` + strconv.FormatUint(minGasLimitForTx, 10) + ` - MinTxFee = ` + strconv.FormatUint(minTxFee, 10) + ` + MinGasPrice = "` + minGasPrice + `" + MinGasLimitForTx = "` + minGasLimitForTx + `" ` cfg := ConfigEconomics{} diff --git a/integrationTests/multiShard/block/executingRewardMiniblocks_test.go b/integrationTests/multiShard/block/executingRewardMiniblocks_test.go index 2d4cf4e7a91..b76d4c1d382 100644 --- a/integrationTests/multiShard/block/executingRewardMiniblocks_test.go +++ b/integrationTests/multiShard/block/executingRewardMiniblocks_test.go @@ -19,7 +19,7 @@ import ( "github.com/stretchr/testify/assert" ) -func getRewardValue(node *integrationTests.TestProcessorNode) uint64 { +func getRewardValue(node *integrationTests.TestProcessorNode) *big.Int { return node.EconomicsData.RewardsValue() } @@ -334,7 +334,8 @@ func 
verifyRewardsForMetachain( acc, err := nodes[0][0].AccntState.GetExistingAccount(addrContainer) assert.Nil(t, err) - expectedBalance := big.NewInt(int64(uint64(numOfTimesRewarded) * rewardValue)) + expectedBalance := big.NewInt(0).SetUint64(uint64(numOfTimesRewarded)) + expectedBalance = expectedBalance.Mul(expectedBalance, rewardValue) assert.Equal(t, expectedBalance, acc.(*state.Account).Balance) } } @@ -359,9 +360,14 @@ func verifyRewardsForShards( assert.Nil(t, err) nbProposedTxs := nbTxsForLeaderAddress[address] - expectedBalance := int64(nbRewards)*int64(rewardValue) + int64(nbProposedTxs)*int64(feePerTxForLeader) + expectedBalance := big.NewInt(0).SetUint64(uint64(nbRewards)) + expectedBalance = expectedBalance.Mul(expectedBalance, rewardValue) + totalFees := big.NewInt(0).SetUint64(uint64(nbProposedTxs)) + totalFees = totalFees.Mul(totalFees, big.NewInt(0).SetUint64(uint64(feePerTxForLeader))) + + expectedBalance = expectedBalance.Add(expectedBalance, totalFees) fmt.Println(fmt.Sprintf("checking account %s has balance %d", core.ToB64(acc.AddressContainer().Bytes()), expectedBalance)) - assert.Equal(t, big.NewInt(expectedBalance), acc.(*state.Account).Balance) + assert.Equal(t, expectedBalance, acc.(*state.Account).Balance) } } } diff --git a/integrationTests/multiShard/smartContract/testInitializer.go b/integrationTests/multiShard/smartContract/testInitializer.go index 16d04c7be28..bdd24178118 100644 --- a/integrationTests/multiShard/smartContract/testInitializer.go +++ b/integrationTests/multiShard/smartContract/testInitializer.go @@ -13,7 +13,6 @@ import ( "sync/atomic" "time" - "github.com/ElrondNetwork/elrond-go/config" "github.com/ElrondNetwork/elrond-go/consensus" "github.com/ElrondNetwork/elrond-go/consensus/spos/sposFactory" "github.com/ElrondNetwork/elrond-go/core/partitioning" @@ -357,11 +356,7 @@ func createNetNode( 100, ) - economicsData := economics.NewEconomicsData(&config.ConfigEconomics{ - EconomicsAddresses: config.EconomicsAddresses{}, - RewardsSettings: config.RewardsSettings{}, - FeeSettings: config.FeeSettings{}, - }) + economicsData := &economics.EconomicsData{} interimProcFactory, _ := shard.NewIntermediateProcessorsContainerFactory( shardCoordinator, diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index a2619f131f5..aa3884f39a5 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -4,6 +4,7 @@ import ( "context" "encoding/hex" "fmt" + "strconv" "sync/atomic" "time" @@ -254,22 +255,24 @@ func (tpn *TestProcessorNode) initChainHandler() { } func (tpn *TestProcessorNode) initEconomicsData() { - economicsData := economics.NewEconomicsData( + mingGasPrice := strconv.FormatUint(MinTxGasPrice, 10) + minGasLimitForTx := strconv.FormatUint(MinTxGasLimit, 10) + + economicsData, _ := economics.NewEconomicsData( &config.ConfigEconomics{ EconomicsAddresses: config.EconomicsAddresses{ CommunityAddress: "addr1", BurnAddress: "addr2", }, RewardsSettings: config.RewardsSettings{ - RewardsValue: 1000, + RewardsValue: "1000", CommunityPercentage: 0.10, LeaderPercentage: 0.50, BurnPercentage: 0.40, }, FeeSettings: config.FeeSettings{ - MinGasPrice: MinTxGasPrice, - MinGasLimitForTx: MinTxGasLimit, - MinTxFee: MinTxGasPrice * MinTxGasLimit, + MinGasPrice: mingGasPrice, + MinGasLimitForTx: minGasLimitForTx, }, }, ) diff --git a/process/block/preprocess/rewardsHandler.go b/process/block/preprocess/rewardsHandler.go index c866a26b25e..4f2bd7f46d6 100644 --- 
a/process/block/preprocess/rewardsHandler.go +++ b/process/block/preprocess/rewardsHandler.go @@ -73,7 +73,7 @@ func NewRewardTxHandler( return nil, process.ErrNilEconomicsRewardsHandler } - rewardValue := big.NewInt(int64(economicsRewards.RewardsValue())) + rewardValue := economicsRewards.RewardsValue() rtxh := &rewardsHandler{ address: address, diff --git a/process/block/preprocess/rewardsHandler_test.go b/process/block/preprocess/rewardsHandler_test.go index b343f56b5bd..60d7292af5b 100644 --- a/process/block/preprocess/rewardsHandler_test.go +++ b/process/block/preprocess/rewardsHandler_test.go @@ -17,8 +17,8 @@ import ( func RewandsHandlerMock() *mock.RewardsHandlerMock { return &mock.RewardsHandlerMock{ - RewardsValueCalled: func() uint64 { - return 1000 + RewardsValueCalled: func() *big.Int { + return big.NewInt(1000) }, CommunityPercentageCalled: func() float64 { return 0.10 diff --git a/process/coordinator/process_test.go b/process/coordinator/process_test.go index 0665ebe6484..dec953e334a 100644 --- a/process/coordinator/process_test.go +++ b/process/coordinator/process_test.go @@ -11,7 +11,6 @@ import ( "testing" "time" - "github.com/ElrondNetwork/elrond-go/config" "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/block" @@ -393,13 +392,7 @@ func createPreProcessorContainer() process.PreProcessorsContainer { } func createInterimProcessorContainer() process.IntermediateProcessorContainer { - economicsData := economics.NewEconomicsData( - &config.ConfigEconomics{ - EconomicsAddresses: config.EconomicsAddresses{}, - RewardsSettings: config.RewardsSettings{}, - FeeSettings: config.FeeSettings{}, - }, - ) + economicsData := &economics.EconomicsData{} preFactory, _ := shard.NewIntermediateProcessorsContainerFactory( mock.NewMultiShardsCoordinatorMock(5), &mock.MarshalizerMock{}, @@ -1689,13 +1682,7 @@ func TestShardProcessor_ProcessMiniBlockCompleteWithErrorWhileProcessShouldCallR func TestTransactionCoordinator_VerifyCreatedBlockTransactionsNilOrMiss(t *testing.T) { t.Parallel() - economicsData := economics.NewEconomicsData( - &config.ConfigEconomics{ - EconomicsAddresses: config.EconomicsAddresses{}, - RewardsSettings: config.RewardsSettings{}, - FeeSettings: config.FeeSettings{}, - }, - ) + economicsData := &economics.EconomicsData{} txHash := []byte("txHash") tdp := initDataPool(txHash) shardCoordinator := mock.NewMultiShardsCoordinatorMock(5) @@ -1742,13 +1729,7 @@ func TestTransactionCoordinator_VerifyCreatedBlockTransactionsNilOrMiss(t *testi func TestTransactionCoordinator_VerifyCreatedBlockTransactionsOk(t *testing.T) { t.Parallel() - economicsData := economics.NewEconomicsData( - &config.ConfigEconomics{ - EconomicsAddresses: config.EconomicsAddresses{}, - RewardsSettings: config.RewardsSettings{}, - FeeSettings: config.FeeSettings{}, - }, - ) + economicsData := &economics.EconomicsData{} txHash := []byte("txHash") tdp := initDataPool(txHash) shardCoordinator := mock.NewMultiShardsCoordinatorMock(5) diff --git a/process/economics/economicsData.go b/process/economics/economicsData.go index 0d9b63b3d96..987ac3e6ea9 100644 --- a/process/economics/economicsData.go +++ b/process/economics/economicsData.go @@ -1,41 +1,113 @@ package economics import ( + "math/big" + "strconv" + "github.com/ElrondNetwork/elrond-go/config" + "github.com/ElrondNetwork/elrond-go/process" ) // EconomicsData will store information about economics type EconomicsData struct { - rewardsValue uint64 + rewardsValue *big.Int 
communityPercentage float64 leaderPercentage float64 burnPercentage float64 - - minGasPrice uint64 - minGasLimitForTx uint64 - minTxFee uint64 - - communityAddress string - burnAddress string + minGasPrice uint64 + minGasLimitForTx uint64 + communityAddress string + burnAddress string } // NewEconomicsData will create and object with information about economics parameters -func NewEconomicsData(economics *config.ConfigEconomics) *EconomicsData { +func NewEconomicsData(economics *config.ConfigEconomics) (*EconomicsData, error) { + //TODO check addresses what happens if addresses are wrong + rewardsValue, minGasPrice, minGasLimitForTx, err := convertValues(economics) + if err != nil { + return nil, err + } + + notGreaterThanZero := rewardsValue.Cmp(big.NewInt(0)) + if notGreaterThanZero < 0 { + return nil, process.ErrInvalidRewardsValue + } + + err = checkValues(economics) + if err != nil { + return nil, err + } + return &EconomicsData{ - rewardsValue: economics.RewardsSettings.RewardsValue, + rewardsValue: rewardsValue, communityPercentage: economics.RewardsSettings.CommunityPercentage, leaderPercentage: economics.RewardsSettings.LeaderPercentage, burnPercentage: economics.RewardsSettings.BurnPercentage, - minGasPrice: economics.FeeSettings.MinGasPrice, - minGasLimitForTx: economics.FeeSettings.MinGasLimitForTx, - minTxFee: economics.FeeSettings.MinTxFee, + minGasPrice: minGasPrice, + minGasLimitForTx: minGasLimitForTx, communityAddress: economics.EconomicsAddresses.CommunityAddress, burnAddress: economics.EconomicsAddresses.BurnAddress, + }, nil +} + +func convertValues(economics *config.ConfigEconomics) (*big.Int, uint64, uint64, error) { + conversionBase := 10 + bitConversionSize := 64 + + rewardsValue := new(big.Int) + rewardsValue, ok := rewardsValue.SetString(economics.RewardsSettings.RewardsValue, conversionBase) + if !ok { + return nil, 0, 0, process.ErrInvalidRewardsValue } + minGasPrice, err := strconv.ParseUint(economics.FeeSettings.MinGasPrice, conversionBase, bitConversionSize) + if err != nil { + return nil, 0, 0, process.ErrInvalidMinimumGasPrice + } + minGasLimitForTx, err := strconv.ParseUint(economics.FeeSettings.MinGasLimitForTx, conversionBase, bitConversionSize) + if err != nil { + return nil, 0, 0, process.ErrInvalidMinimumGasLimitForTx + } + + return rewardsValue, minGasPrice, minGasLimitForTx, nil +} + +func checkValues(economics *config.ConfigEconomics) error { + bigBurnPercentage := big.NewFloat(economics.RewardsSettings.BurnPercentage) + bigCommunityPercentage := big.NewFloat(economics.RewardsSettings.CommunityPercentage) + bigLeaderPercentage := big.NewFloat(economics.RewardsSettings.LeaderPercentage) + + notGreaterOrEqualWithZero := bigBurnPercentage.Cmp(big.NewFloat(0.0)) + notLessThanOne := big.NewFloat(1.0).Cmp(bigBurnPercentage) + if notGreaterOrEqualWithZero < 0 || notLessThanOne < 0 { + return process.ErrInvalidRewardsPercentages + } + + notGreaterOrEqualWithZero = bigCommunityPercentage.Cmp(big.NewFloat(0.0)) + notLessThanOne = big.NewFloat(1.0).Cmp(bigCommunityPercentage) + if notGreaterOrEqualWithZero < 0 || notLessThanOne < 0 { + return process.ErrInvalidRewardsPercentages + } + + notGreaterOrEqualWithZero = bigLeaderPercentage.Cmp(big.NewFloat(0.0)) + notLessThanOne = big.NewFloat(1.0).Cmp(bigLeaderPercentage) + if notGreaterOrEqualWithZero < 0 || notLessThanOne < 0 { + return process.ErrInvalidRewardsPercentages + } + + sumPercentage := new(big.Float) + sumPercentage = sumPercentage.Add(bigBurnPercentage, bigCommunityPercentage) + sumPercentage = 
sumPercentage.Add(sumPercentage, bigLeaderPercentage) + + equalsWithOne := sumPercentage.Cmp(big.NewFloat(1.0)) + if equalsWithOne != 0 { + return process.ErrInvalidRewardsPercentages + } + + return nil } // RewardsValue will return rewards value -func (ed *EconomicsData) RewardsValue() uint64 { +func (ed *EconomicsData) RewardsValue() *big.Int { return ed.rewardsValue } @@ -64,11 +136,6 @@ func (ed *EconomicsData) MinGasLimitForTx() uint64 { return ed.minGasLimitForTx } -// MinTxFee will return minimum transaction fee -func (ed *EconomicsData) MinTxFee() uint64 { - return ed.minTxFee -} - // CommunityAddress will return community address func (ed *EconomicsData) CommunityAddress() string { return ed.communityAddress diff --git a/process/economics/economicsData_test.go b/process/economics/economicsData_test.go index 2cb35ac7243..9053f097aaa 100644 --- a/process/economics/economicsData_test.go +++ b/process/economics/economicsData_test.go @@ -1,43 +1,185 @@ package economics_test import ( + "math/big" + "strconv" "testing" "github.com/ElrondNetwork/elrond-go/config" + "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/economics" "github.com/stretchr/testify/assert" ) -func TestEconomicsData_ShouldWork(t *testing.T) { +func createDummyEconomicsConfig() *config.ConfigEconomics { + return &config.ConfigEconomics{ + EconomicsAddresses: config.EconomicsAddresses{ + CommunityAddress: "addr1", + BurnAddress: "addr2", + }, + RewardsSettings: config.RewardsSettings{ + RewardsValue: "1000", + CommunityPercentage: 0.1, + LeaderPercentage: 0.1, + BurnPercentage: 0.8, + }, + FeeSettings: config.FeeSettings{ + MinGasPrice: "100", + MinGasLimitForTx: "500", + }, + } +} + +func TestNewEconomicsData_InvalidRewardsValueShouldErr(t *testing.T) { + t.Parallel() + + economicsConfig := createDummyEconomicsConfig() + badRewardsValues := []string{ + "-1", + "-100000000000000000000", + "badValue", + "", + "#########", + "11112S", + "1111O0000", + "10ERD", + } + + for _, rewardsValue := range badRewardsValues { + economicsConfig.RewardsSettings.RewardsValue = rewardsValue + _, err := economics.NewEconomicsData(economicsConfig) + assert.Equal(t, process.ErrInvalidRewardsValue, err) + } +} + +func TestNewEconomicsData_InvalidMinGasPriceShouldErr(t *testing.T) { + t.Parallel() + + economicsConfig := createDummyEconomicsConfig() + badGasPrice := []string{ + "-1", + "-100000000000000000000", + "badValue", + "", + "#########", + "11112S", + "1111O0000", + "10ERD", + "10000000000000000000000000000000000000000000000000000000000000", + } + + for _, gasPrice := range badGasPrice { + economicsConfig.FeeSettings.MinGasPrice = gasPrice + _, err := economics.NewEconomicsData(economicsConfig) + assert.Equal(t, process.ErrInvalidMinimumGasPrice, err) + } + +} + +func TestNewEconomicsData_InvalidMinGasLimitShouldErr(t *testing.T) { + t.Parallel() + + economicsConfig := createDummyEconomicsConfig() + bagMinGasLimit := []string{ + "-1", + "-100000000000000000000", + "badValue", + "", + "#########", + "11112S", + "1111O0000", + "10ERD", + "10000000000000000000000000000000000000000000000000000000000000", + } + + for _, minGasLimit := range bagMinGasLimit { + economicsConfig.FeeSettings.MinGasLimitForTx = minGasLimit + _, err := economics.NewEconomicsData(economicsConfig) + assert.Equal(t, process.ErrInvalidMinimumGasLimitForTx, err) + } + +} + +func TestNewEconomicsData_InvalidBurnPercentageShouldErr(t *testing.T) { + t.Parallel() + + economicsConfig := createDummyEconomicsConfig() + 
economicsConfig.RewardsSettings.BurnPercentage = -1.0 + economicsConfig.RewardsSettings.CommunityPercentage = 0.1 + economicsConfig.RewardsSettings.LeaderPercentage = 0.1 + + _, err := economics.NewEconomicsData(economicsConfig) + assert.Equal(t, process.ErrInvalidRewardsPercentages, err) + +} + +func TestNewEconomicsData_InvalidCommunityPercentageShouldErr(t *testing.T) { t.Parallel() - economicsData := economics.NewEconomicsData(&config.ConfigEconomics{}) + economicsConfig := createDummyEconomicsConfig() + economicsConfig.RewardsSettings.BurnPercentage = 0.1 + economicsConfig.RewardsSettings.CommunityPercentage = -0.1 + economicsConfig.RewardsSettings.LeaderPercentage = 0.1 + + _, err := economics.NewEconomicsData(economicsConfig) + assert.Equal(t, process.ErrInvalidRewardsPercentages, err) + +} + +func TestNewEconomicsData_InvalidLeaderPercentageShouldErr(t *testing.T) { + t.Parallel() + + economicsConfig := createDummyEconomicsConfig() + economicsConfig.RewardsSettings.BurnPercentage = 0.1 + economicsConfig.RewardsSettings.CommunityPercentage = 0.1 + economicsConfig.RewardsSettings.LeaderPercentage = -0.1 + + _, err := economics.NewEconomicsData(economicsConfig) + assert.Equal(t, process.ErrInvalidRewardsPercentages, err) + +} +func TestNewEconomicsData_InvalidRewardsPercentageSumShouldErr(t *testing.T) { + t.Parallel() + + economicsConfig := createDummyEconomicsConfig() + economicsConfig.RewardsSettings.BurnPercentage = 0.5 + economicsConfig.RewardsSettings.CommunityPercentage = 0.2 + economicsConfig.RewardsSettings.LeaderPercentage = 0.5 + + _, err := economics.NewEconomicsData(economicsConfig) + assert.Equal(t, process.ErrInvalidRewardsPercentages, err) + +} + +func TestNewEconomicsData_ShouldWork(t *testing.T) { + t.Parallel() + + economicsConfig := createDummyEconomicsConfig() + economicsData, _ := economics.NewEconomicsData(economicsConfig) assert.NotNil(t, economicsData) } func TestEconomicsData_RewardsValue(t *testing.T) { t.Parallel() - rewardsValue := uint64(100) - economicsData := economics.NewEconomicsData(&config.ConfigEconomics{ - RewardsSettings: config.RewardsSettings{ - RewardsValue: rewardsValue, - }, - }) + rewardsValue := int64(100) + economicsConfig := createDummyEconomicsConfig() + economicsConfig.RewardsSettings.RewardsValue = strconv.FormatInt(rewardsValue, 10) + economicsData, _ := economics.NewEconomicsData(economicsConfig) value := economicsData.RewardsValue() - assert.Equal(t, rewardsValue, value) + assert.Equal(t, big.NewInt(rewardsValue), value) } func TestEconomicsData_CommunityPercentage(t *testing.T) { t.Parallel() communityPercentage := 0.50 - economicsData := economics.NewEconomicsData(&config.ConfigEconomics{ - RewardsSettings: config.RewardsSettings{ - CommunityPercentage: communityPercentage, - }, - }) + economicsConfig := createDummyEconomicsConfig() + economicsConfig.RewardsSettings.CommunityPercentage = communityPercentage + economicsConfig.RewardsSettings.BurnPercentage = 0.2 + economicsConfig.RewardsSettings.LeaderPercentage = 0.3 + economicsData, _ := economics.NewEconomicsData(economicsConfig) value := economicsData.CommunityPercentage() assert.Equal(t, communityPercentage, value) @@ -47,11 +189,11 @@ func TestEconomicsData_LeaderPercentage(t *testing.T) { t.Parallel() leaderPercentage := 0.40 - economicsData := economics.NewEconomicsData(&config.ConfigEconomics{ - RewardsSettings: config.RewardsSettings{ - LeaderPercentage: leaderPercentage, - }, - }) + economicsConfig := createDummyEconomicsConfig() + 
economicsConfig.RewardsSettings.CommunityPercentage = 0.30 + economicsConfig.RewardsSettings.BurnPercentage = 0.30 + economicsConfig.RewardsSettings.LeaderPercentage = leaderPercentage + economicsData, _ := economics.NewEconomicsData(economicsConfig) value := economicsData.LeaderPercentage() assert.Equal(t, leaderPercentage, value) @@ -61,11 +203,11 @@ func TestEconomicsData_BurnPercentage(t *testing.T) { t.Parallel() burnPercentage := 0.41 - economicsData := economics.NewEconomicsData(&config.ConfigEconomics{ - RewardsSettings: config.RewardsSettings{ - BurnPercentage: burnPercentage, - }, - }) + economicsConfig := createDummyEconomicsConfig() + economicsConfig.RewardsSettings.BurnPercentage = burnPercentage + economicsConfig.RewardsSettings.CommunityPercentage = 0.29 + economicsConfig.RewardsSettings.LeaderPercentage = 0.3 + economicsData, _ := economics.NewEconomicsData(economicsConfig) value := economicsData.BurnPercentage() assert.Equal(t, burnPercentage, value) @@ -75,11 +217,9 @@ func TestEconomicsData_MinGasPrice(t *testing.T) { t.Parallel() minGasPrice := uint64(500) - economicsData := economics.NewEconomicsData(&config.ConfigEconomics{ - FeeSettings: config.FeeSettings{ - MinGasPrice: minGasPrice, - }, - }) + economicsConfig := createDummyEconomicsConfig() + economicsConfig.FeeSettings.MinGasPrice = strconv.FormatUint(minGasPrice, 10) + economicsData, _ := economics.NewEconomicsData(economicsConfig) value := economicsData.MinGasPrice() assert.Equal(t, minGasPrice, value) @@ -88,40 +228,22 @@ func TestEconomicsData_MinGasPrice(t *testing.T) { func TestEconomicsData_MinGasLimitForTx(t *testing.T) { t.Parallel() - minGasLimitForTx := uint64(1500) - economicsData := economics.NewEconomicsData(&config.ConfigEconomics{ - FeeSettings: config.FeeSettings{ - MinGasLimitForTx: minGasLimitForTx, - }, - }) + minGasLimitForTx := uint64(1000) + economicsConfig := createDummyEconomicsConfig() + economicsConfig.FeeSettings.MinGasLimitForTx = strconv.FormatUint(minGasLimitForTx, 10) + economicsData, _ := economics.NewEconomicsData(economicsConfig) value := economicsData.MinGasLimitForTx() assert.Equal(t, minGasLimitForTx, value) } -func TestEconomicsData_MinTxFee(t *testing.T) { - t.Parallel() - - minTxFee := uint64(502) - economicsData := economics.NewEconomicsData(&config.ConfigEconomics{ - FeeSettings: config.FeeSettings{ - MinTxFee: minTxFee, - }, - }) - - value := economicsData.MinTxFee() - assert.Equal(t, minTxFee, value) -} - func TestEconomicsData_CommunityAddress(t *testing.T) { t.Parallel() communityAddress := "addr1" - economicsData := economics.NewEconomicsData(&config.ConfigEconomics{ - EconomicsAddresses: config.EconomicsAddresses{ - CommunityAddress: communityAddress, - }, - }) + economicsConfig := createDummyEconomicsConfig() + economicsConfig.EconomicsAddresses.CommunityAddress = communityAddress + economicsData, _ := economics.NewEconomicsData(economicsConfig) value := economicsData.CommunityAddress() assert.Equal(t, communityAddress, value) @@ -131,11 +253,9 @@ func TestEconomicsData_BurnAddress(t *testing.T) { t.Parallel() burnAddress := "addr2" - economicsData := economics.NewEconomicsData(&config.ConfigEconomics{ - EconomicsAddresses: config.EconomicsAddresses{ - BurnAddress: burnAddress, - }, - }) + economicsConfig := createDummyEconomicsConfig() + economicsConfig.EconomicsAddresses.BurnAddress = burnAddress + economicsData, _ := economics.NewEconomicsData(economicsConfig) value := economicsData.BurnAddress() assert.Equal(t, burnAddress, value) diff --git 
a/process/errors.go b/process/errors.go index 782bac3ff2b..f6bfa673ee2 100644 --- a/process/errors.go +++ b/process/errors.go @@ -208,9 +208,6 @@ var ErrNilStorage = errors.New("nil storage") // ErrNilShardedDataCacherNotifier signals that a nil sharded data cacher notifier has been provided var ErrNilShardedDataCacherNotifier = errors.New("nil sharded data cacher notifier") -// ErrNilBlocksTracker signals that a nil blocks tracker has been provided -var ErrNilBlocksTracker = errors.New("nil blocks tracker") - // ErrInvalidTxInPool signals an invalid transaction in the transactions pool var ErrInvalidTxInPool = errors.New("invalid transaction in the transactions pool") @@ -498,3 +495,15 @@ var ErrInsufficientGasPriceInTx = errors.New("insufficient gas price in tx") // ErrInsufficientGasLimitInTx signals that a lower gas limit than required was provided var ErrInsufficientGasLimitInTx = errors.New("insufficient gas limit in tx") + +// ErrInvalidMinimumGasPrice signals that a invalid gas price has been read from config file +var ErrInvalidMinimumGasPrice = errors.New("invalid minimum gas price") + +// ErrInvalidMinimumGasLimitForTx signals that a invalid minimum gas limit for transactions has been read from config file +var ErrInvalidMinimumGasLimitForTx = errors.New("invalid minimum gas limit for transactions") + +// ErrInvalidRewardsValue signals that a invalid rewards value has been read from config file +var ErrInvalidRewardsValue = errors.New("invalid rewards value") + +// ErrInvalidRewardsPercentages signal that rewards percentages are not correct +var ErrInvalidRewardsPercentages = errors.New("invalid rewards percentages") diff --git a/process/interface.go b/process/interface.go index f0b6c932381..1b97e570646 100644 --- a/process/interface.go +++ b/process/interface.go @@ -409,7 +409,7 @@ type InterceptorThrottler interface { // RewardsHandler will return information about rewards type RewardsHandler interface { - RewardsValue() uint64 + RewardsValue() *big.Int CommunityPercentage() float64 LeaderPercentage() float64 BurnPercentage() float64 @@ -420,7 +420,6 @@ type RewardsHandler interface { type FeeHandler interface { MinGasPrice() uint64 MinGasLimitForTx() uint64 - MinTxFee() uint64 IsInterfaceNil() bool } diff --git a/process/mock/rewardsHandlerMock.go b/process/mock/rewardsHandlerMock.go index f6e29d90424..d9a1a696c22 100644 --- a/process/mock/rewardsHandlerMock.go +++ b/process/mock/rewardsHandlerMock.go @@ -1,13 +1,15 @@ package mock +import "math/big" + type RewardsHandlerMock struct { - RewardsValueCalled func() uint64 + RewardsValueCalled func() *big.Int CommunityPercentageCalled func() float64 LeaderPercentageCalled func() float64 BurnPercentageCalled func() float64 } -func (rhm *RewardsHandlerMock) RewardsValue() uint64 { +func (rhm *RewardsHandlerMock) RewardsValue() *big.Int { return rhm.RewardsValueCalled() } diff --git a/process/transaction/process.go b/process/transaction/process.go index 31203ee5806..d158f516c56 100644 --- a/process/transaction/process.go +++ b/process/transaction/process.go @@ -133,9 +133,12 @@ func (txProc *txProcessor) processTxFee(tx *transaction.Transaction, acntSnd *st txDataLen := int64(len(tx.Data)) txProc.mutTxFee.RLock() + minTxFee := big.NewInt(0).SetUint64(txProc.economicsFee.MinGasLimitForTx()) + minTxFee = minTxFee.Mul(minTxFee, big.NewInt(0).SetUint64(txProc.economicsFee.MinGasPrice())) + minFee := big.NewInt(0) minFee = minFee.Mul(big.NewInt(txDataLen), big.NewInt(0).SetUint64(txProc.economicsFee.MinGasPrice())) - minFee = 
minFee.Add(minFee, big.NewInt(0).SetUint64(txProc.economicsFee.MinTxFee())) + minFee = minFee.Add(minFee, minTxFee) txProc.mutTxFee.RUnlock() if minFee.Cmp(cost) > 0 { From b4812213124fdf067bea078fc19a2852fbca3596 Mon Sep 17 00:00:00 2001 From: iulianpascalau Date: Tue, 8 Oct 2019 12:52:04 +0300 Subject: [PATCH 214/234] updated batch size for heartbeat storage --- cmd/node/config/config.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/node/config/config.toml b/cmd/node/config/config.toml index 83748a5413b..9786f4fe034 100644 --- a/cmd/node/config/config.toml +++ b/cmd/node/config/config.toml @@ -241,7 +241,7 @@ FilePath = "HeartbeatStorage" Type = "LvlDBSerial" BatchDelaySeconds = 15 - MaxBatchSize = 1 + MaxBatchSize = 300 MaxOpenFiles = 10 # Consensus type which will be used (the current implementation can manage "bn" and "bls") From cc74f2c3e5b70d7fe55963191f270216b80d72ad Mon Sep 17 00:00:00 2001 From: miiu96 Date: Tue, 8 Oct 2019 13:05:14 +0300 Subject: [PATCH 215/234] EN-4270 - update round info in elastic search for metachain --- process/block/export_test.go | 9 ----- process/block/metablock.go | 19 +++++++-- process/block/metrics.go | 69 ++++++++++++++++++++++++++++++++ process/block/metrics_test.go | 20 +++++++++ process/block/shardblock.go | 56 +------------------------- process/block/shardblock_test.go | 23 ++--------- 6 files changed, 109 insertions(+), 87 deletions(-) create mode 100644 process/block/metrics_test.go diff --git a/process/block/export_test.go b/process/block/export_test.go index 1e4b5b3db2e..83d19d5fb79 100644 --- a/process/block/export_test.go +++ b/process/block/export_test.go @@ -316,15 +316,6 @@ func (sp *shardProcessor) SetHdrForCurrentBlock(headerHash []byte, headerHandler sp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() } -func (sp *shardProcessor) CalculateRoundDuration( - lastBlockTimestamp uint64, - currentBlockTimestamp uint64, - lastBlockRound uint64, - currentBlockRound uint64, -) uint64 { - return sp.calculateRoundDuration(lastBlockTimestamp, currentBlockTimestamp, lastBlockRound, currentBlockRound) -} - func (sp *shardProcessor) CreateBlockStarted() { sp.createBlockStarted() } diff --git a/process/block/metablock.go b/process/block/metablock.go index 8a10f9a8d34..79cb2651b9a 100644 --- a/process/block/metablock.go +++ b/process/block/metablock.go @@ -261,18 +261,27 @@ func (mp *metaProcessor) checkAndRequestIfShardHeadersMissing(round uint64) { return } -func (mp *metaProcessor) indexBlock(metaBlock *block.MetaBlock, headerPool map[string]*block.Header) { +func (mp *metaProcessor) indexBlock( + metaBlock data.HeaderHandler, + headerPool map[string]*block.Header, + lastMetaBlock data.HeaderHandler, +) { if mp.core == nil || mp.core.Indexer() == nil { return } - // Update tps benchmarks in the DB tpsBenchmark := mp.core.TPSBenchmark() if tpsBenchmark != nil { go mp.core.Indexer().UpdateTPS(tpsBenchmark) } - //TODO: maybe index metablocks also? 
+	publicKeys, err := mp.nodesCoordinator.GetValidatorsPublicKeys(metaBlock.GetPrevRandSeed(), metaBlock.GetRound(), sharding.MetachainShardId)
+	if err != nil {
+		return
+	}
+	signersIndexes := mp.nodesCoordinator.GetValidatorsIndexes(publicKeys)
+
+	saveRoundInfoInElastic(mp.core.Indexer(), mp.nodesCoordinator, sharding.MetachainShardId, metaBlock, lastMetaBlock, signersIndexes)
 }
 
 // removeBlockInfoFromPool removes the block info from associated pools
@@ -541,6 +550,8 @@ func (mp *metaProcessor) CommitBlock(
 	hdrsToAttestPreviousFinal := mp.nextKValidity + 1
 	mp.removeNotarizedHdrsBehindPreviousFinal(hdrsToAttestPreviousFinal)
 
+	lastMetaBlock := chainHandler.GetCurrentBlockHeader()
+
 	err = chainHandler.SetCurrentBlockBody(body)
 	if err != nil {
 		return err
@@ -557,7 +568,7 @@ func (mp *metaProcessor) CommitBlock(
 		mp.core.TPSBenchmark().Update(header)
 	}
 
-	mp.indexBlock(header, tempHeaderPool)
+	mp.indexBlock(header, tempHeaderPool, lastMetaBlock)
 
 	mp.appStatusHandler.SetStringValue(core.MetricCurrentBlockHash, core.ToB64(headerHash))
 
diff --git a/process/block/metrics.go b/process/block/metrics.go
index 72e3b7656ae..5d9f7cf2d54 100644
--- a/process/block/metrics.go
+++ b/process/block/metrics.go
@@ -1,9 +1,14 @@
 package block
 
 import (
+	"time"
+
 	"github.com/ElrondNetwork/elrond-go/core"
+	"github.com/ElrondNetwork/elrond-go/core/indexer"
+	"github.com/ElrondNetwork/elrond-go/data"
 	"github.com/ElrondNetwork/elrond-go/data/block"
 	"github.com/ElrondNetwork/elrond-go/marshal"
+	"github.com/ElrondNetwork/elrond-go/sharding"
 )
 
 func getMetricsFromMetaHeader(
@@ -70,3 +75,67 @@ func getMetricsFromHeader(
 	appStatusHandler.SetUInt64Value(core.MetricTxPoolLoad, numTxWithDst)
 	appStatusHandler.SetUInt64Value(core.MetricNumProcessedTxs, uint64(totalTx))
 }
+
+func saveRoundInfoInElastic(
+	elasticIndexer indexer.Indexer,
+	nodesCoordinator sharding.NodesCoordinator,
+	shardId uint32,
+	header data.HeaderHandler,
+	lastHeader data.HeaderHandler,
+	signersIndexes []uint64,
+) {
+	roundInfo := indexer.RoundInfo{
+		Index: header.GetRound(),
+		SignersIndexes: signersIndexes,
+		BlockWasProposed: true,
+		ShardId: shardId,
+		Timestamp: time.Duration(header.GetTimeStamp()),
+	}
+
+	go elasticIndexer.SaveRoundInfo(roundInfo)
+
+	if lastHeader == nil {
+		return
+	}
+
+	lastBlockRound := lastHeader.GetRound()
+	currentBlockRound := header.GetRound()
+	roundDuration := calculateRoundDuration(lastHeader.GetTimeStamp(), header.GetTimeStamp(), lastBlockRound, currentBlockRound)
+	for i := lastBlockRound + 1; i < currentBlockRound; i++ {
+		publicKeys, err := nodesCoordinator.GetValidatorsPublicKeys(lastHeader.GetRandSeed(), i, shardId)
+		if err != nil {
+			continue
+		}
+		signersIndexes = nodesCoordinator.GetValidatorsIndexes(publicKeys)
+		roundInfo = indexer.RoundInfo{
+			Index: i,
+			SignersIndexes: signersIndexes,
+			BlockWasProposed: true,
+			ShardId: shardId,
+			Timestamp: time.Duration(header.GetTimeStamp() - ((currentBlockRound - i) * roundDuration)),
+		}
+
+		go elasticIndexer.SaveRoundInfo(roundInfo)
+	}
+}
+
+func calculateRoundDuration(
+	lastBlockTimestamp uint64,
+	currentBlockTimestamp uint64,
+	lastBlockRound uint64,
+	currentBlockRound uint64,
+) uint64 {
+	if lastBlockTimestamp >= currentBlockTimestamp {
+		log.Error("last block timestamp is greater than or equal to current block timestamp")
+		return 0
+	}
+	if lastBlockRound >= currentBlockRound {
+		log.Error("last block round is greater than or equal to current block round")
+		return 0
+	}
+
+	diffTimeStamp := currentBlockTimestamp - lastBlockTimestamp
+	diffRounds := 
currentBlockRound - lastBlockRound + + return diffTimeStamp / diffRounds +} diff --git a/process/block/metrics_test.go b/process/block/metrics_test.go new file mode 100644 index 00000000000..64bbe78822a --- /dev/null +++ b/process/block/metrics_test.go @@ -0,0 +1,20 @@ +package block + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestMetrics_CalculateRoundDuration(t *testing.T) { + t.Parallel() + + lastBlockTimestamp := uint64(80) + currentBlockTimestamp := uint64(100) + lastBlockRound := uint64(5) + currentBlockRound := uint64(10) + expectedRoundDuration := uint64(4) + + roundDuration := calculateRoundDuration(lastBlockTimestamp, currentBlockTimestamp, lastBlockRound, currentBlockRound) + assert.Equal(t, expectedRoundDuration, roundDuration) +} diff --git a/process/block/shardblock.go b/process/block/shardblock.go index b5e3c76b3f8..47311acd041 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -7,7 +7,6 @@ import ( "time" "github.com/ElrondNetwork/elrond-go/core" - "github.com/ElrondNetwork/elrond-go/core/indexer" "github.com/ElrondNetwork/elrond-go/core/serviceContainer" "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/block" @@ -481,62 +480,9 @@ func (sp *shardProcessor) indexBlockIfNeeded( } signersIndexes := sp.nodesCoordinator.GetValidatorsIndexes(pubKeys) - roundInfo := indexer.RoundInfo{ - Index: header.GetRound(), - SignersIndexes: signersIndexes, - BlockWasProposed: true, - ShardId: shardId, - Timestamp: time.Duration(header.GetTimeStamp()), - } - go sp.core.Indexer().SaveBlock(body, header, txPool, signersIndexes) - go sp.core.Indexer().SaveRoundInfo(roundInfo) - - if lastBlockHeader == nil { - return - } - - lastBlockRound := lastBlockHeader.GetRound() - currentBlockRound := header.GetRound() - - roundDuration := sp.calculateRoundDuration(lastBlockHeader.GetTimeStamp(), header.GetTimeStamp(), lastBlockRound, currentBlockRound) - for i := lastBlockRound + 1; i < currentBlockRound; i++ { - publicKeys, err := sp.nodesCoordinator.GetValidatorsPublicKeys(lastBlockHeader.GetRandSeed(), i, shardId) - if err != nil { - continue - } - signersIndexes = sp.nodesCoordinator.GetValidatorsIndexes(publicKeys) - roundInfo = indexer.RoundInfo{ - Index: i, - SignersIndexes: signersIndexes, - BlockWasProposed: true, - ShardId: shardId, - Timestamp: time.Duration(header.GetTimeStamp() - ((currentBlockRound - i) * roundDuration)), - } - - go sp.core.Indexer().SaveRoundInfo(roundInfo) - } -} - -func (sp *shardProcessor) calculateRoundDuration( - lastBlockTimestamp uint64, - currentBlockTimestamp uint64, - lastBlockRound uint64, - currentBlockRound uint64, -) uint64 { - if lastBlockTimestamp >= currentBlockTimestamp { - log.Error("last block timestamp is greater or equals than current block timestamp") - return 0 - } - if lastBlockRound >= currentBlockRound { - log.Error("last block round is greater or equals than current block round") - return 0 - } - - diffTimeStamp := currentBlockTimestamp - lastBlockTimestamp - diffRounds := currentBlockRound - lastBlockRound - return diffTimeStamp / diffRounds + saveRoundInfoInElastic(sp.core.Indexer(), sp.nodesCoordinator, shardId, header, lastBlockHeader, signersIndexes) } // RestoreBlockIntoPools restores the TxBlock and MetaBlock into associated pools diff --git a/process/block/shardblock_test.go b/process/block/shardblock_test.go index d1e8da10797..90ad9e14ed1 100644 --- a/process/block/shardblock_test.go +++ b/process/block/shardblock_test.go @@ -2470,12 +2470,12 @@ 
func TestShardProcessor_ReceivedMetaBlockShouldRequestMissingMiniBlocks(t *testi Nonce: 1, Round: 1, ShardInfo: []block.ShardData{ - block.ShardData{ + { ShardId: 1, ShardMiniBlockHeaders: []block.ShardMiniBlockHeader{ - block.ShardMiniBlockHeader{Hash: miniBlockHash1, SenderShardId: 1, ReceiverShardId: 0}, - block.ShardMiniBlockHeader{Hash: miniBlockHash2, SenderShardId: 1, ReceiverShardId: 0}, - block.ShardMiniBlockHeader{Hash: miniBlockHash3, SenderShardId: 1, ReceiverShardId: 0}, + {Hash: miniBlockHash1, SenderShardId: 1, ReceiverShardId: 0}, + {Hash: miniBlockHash2, SenderShardId: 1, ReceiverShardId: 0}, + {Hash: miniBlockHash3, SenderShardId: 1, ReceiverShardId: 0}, }}, }} @@ -4172,18 +4172,3 @@ func TestShardProcessor_RestoreMetaBlockIntoPoolVerifyMiniblocks(t *testing.T) { assert.Nil(t, err) assert.True(t, sp.IsMiniBlockProcessed(metaHash, testMBHash)) } - -func TestNewShardProcessor_CalculateRoundDuration(t *testing.T) { - t.Parallel() - - arguments := CreateMockArguments() - sp, _ := blproc.NewShardProcessor(arguments) - lastBlockTimestamp := uint64(80) - currentBlockTimestamp := uint64(100) - lastBlockRound := uint64(5) - currentBlockRound := uint64(10) - expectedRoundDuration := uint64(4) - - roundDuration := sp.CalculateRoundDuration(lastBlockTimestamp, currentBlockTimestamp, lastBlockRound, currentBlockRound) - assert.Equal(t, expectedRoundDuration, roundDuration) -} From f4cb3a2d049049140150f93367609ef789a46564 Mon Sep 17 00:00:00 2001 From: Sebastian Marian Date: Tue, 8 Oct 2019 13:36:49 +0300 Subject: [PATCH 216/234] * Removed duplicate code --- cmd/node/config/economics.toml | 13 +- process/block/baseProcess.go | 51 ++++++ process/block/export_test.go | 91 +++++------ process/block/metablock.go | 262 ++++++++++--------------------- process/block/metablock_test.go | 179 ++++++++------------- process/block/shardblock.go | 109 +++---------- process/block/shardblock_test.go | 2 +- 7 files changed, 278 insertions(+), 429 deletions(-) diff --git a/cmd/node/config/economics.toml b/cmd/node/config/economics.toml index d22149ba812..e57a168557a 100644 --- a/cmd/node/config/economics.toml +++ b/cmd/node/config/economics.toml @@ -4,12 +4,15 @@ BurnAddress = "deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef" [RewardsSettings] - RewardsValue = 10000000000000000000 + RewardsValue = 1000 CommunityPercentage = 0.10 LeaderPercentage = 0.50 - BurnPercentage = 0.40 + BurnPercentage = 0.40 [FeeSettings] - MinGasPrice = 1000000000000000 - MinGasLimitForTx = 1000 - MinTxFee = 1000000000000000000 + MinGasPrice = 0 + MinGasLimitForTx = 5 + MinTxFee = 0 + + + diff --git a/process/block/baseProcess.go b/process/block/baseProcess.go index c7870919a71..b8ec6deb16d 100644 --- a/process/block/baseProcess.go +++ b/process/block/baseProcess.go @@ -563,3 +563,54 @@ func (bp *baseProcessor) createBlockStarted() { bp.hdrsForCurrBlock.highestHdrNonce = make(map[uint32]uint64) bp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() } + +//TODO: remove bool parameter and give instead the set to sort +func (bp *baseProcessor) sortHeadersForCurrentBlockByNonce(usedInBlock bool) map[uint32][]data.HeaderHandler { + hdrsForCurrentBlock := make(map[uint32][]data.HeaderHandler) + + bp.hdrsForCurrBlock.mutHdrsForBlock.RLock() + for _, hdrInfo := range bp.hdrsForCurrBlock.hdrHashAndInfo { + if hdrInfo.usedInBlock != usedInBlock { + continue + } + + hdrsForCurrentBlock[hdrInfo.hdr.GetShardID()] = append(hdrsForCurrentBlock[hdrInfo.hdr.GetShardID()], hdrInfo.hdr) + } + bp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() + 
+ // sort headers for each shard + for shardId := uint32(0); shardId < bp.shardCoordinator.NumberOfShards(); shardId++ { + hdrsForShard := hdrsForCurrentBlock[shardId] + process.SortHeadersByNonce(hdrsForShard) + } + + return hdrsForCurrentBlock +} + +//TODO: remove bool parameter and give instead the set to sort +func (bp *baseProcessor) sortHeaderHashesForCurrentBlockByNonce(usedInBlock bool) [][]byte { + hdrsForCurrentBlockInfo := make([]*nonceAndHashInfo, 0) + + bp.hdrsForCurrBlock.mutHdrsForBlock.RLock() + for metaBlockHash, hdrInfo := range bp.hdrsForCurrBlock.hdrHashAndInfo { + if hdrInfo.usedInBlock != usedInBlock { + continue + } + + hdrsForCurrentBlockInfo = append(hdrsForCurrentBlockInfo, &nonceAndHashInfo{nonce: hdrInfo.hdr.GetNonce(), hash: []byte(metaBlockHash)}) + } + bp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() + + if len(hdrsForCurrentBlockInfo) > 1 { + sort.Slice(hdrsForCurrentBlockInfo, func(i, j int) bool { + return hdrsForCurrentBlockInfo[i].nonce < hdrsForCurrentBlockInfo[j].nonce + }) + } + + hdrsHashesForCurrentBlock := make([][]byte, len(hdrsForCurrentBlockInfo)) + for i := 0; i < len(hdrsForCurrentBlockInfo); i++ { + hdrsHashesForCurrentBlock[i] = hdrsForCurrentBlockInfo[i].hash + } + + return hdrsHashesForCurrentBlock +} diff --git a/process/block/export_test.go b/process/block/export_test.go index df3abe95936..0f251c7d98b 100644 --- a/process/block/export_test.go +++ b/process/block/export_test.go @@ -106,45 +106,40 @@ func (mp *metaProcessor) RequestBlockHeaders(header *block.MetaBlock) (uint32, u return mp.requestShardHeaders(header) } -func (mp *metaProcessor) RemoveBlockInfoFromPool(header *block.MetaBlock) error { - return mp.removeBlockInfoFromPool(header) +func (mp *metaProcessor) RemoveBlockInfoFromPool() error { + return mp.removeBlockInfoFromPool() } func (mp *metaProcessor) ReceivedHeader(hdrHash []byte) { mp.receivedHeader(hdrHash) } -func (mp *metaProcessor) AddHdrHashToRequestedList(hdrHash []byte) { - mp.mutRequestedShardHdrsHashes.Lock() - defer mp.mutRequestedShardHdrsHashes.Unlock() +func (mp *metaProcessor) AddHdrHashToRequestedList(hdr *block.Header, hdrHash []byte) { + mp.hdrsForCurrBlock.mutHdrsForBlock.Lock() + defer mp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() - if mp.requestedShardHdrsHashes == nil { - mp.requestedShardHdrsHashes = make(map[string]bool) - mp.allNeededShardHdrsFound = true + if mp.hdrsForCurrBlock.hdrHashAndInfo == nil { + mp.hdrsForCurrBlock.hdrHashAndInfo = make(map[string]*hdrInfo) } - if mp.currHighestShardHdrsNonces == nil { - mp.currHighestShardHdrsNonces = make(map[uint32]uint64, mp.shardCoordinator.NumberOfShards()) - for i := uint32(0); i < mp.shardCoordinator.NumberOfShards(); i++ { - mp.currHighestShardHdrsNonces[i] = uint64(0) - } + if mp.hdrsForCurrBlock.highestHdrNonce == nil { + mp.hdrsForCurrBlock.highestHdrNonce = make(map[uint32]uint64, mp.shardCoordinator.NumberOfShards()) } - mp.requestedShardHdrsHashes[string(hdrHash)] = true - mp.allNeededShardHdrsFound = false + mp.hdrsForCurrBlock.hdrHashAndInfo[string(hdrHash)] = &hdrInfo{hdr: hdr, usedInBlock: true} + mp.hdrsForCurrBlock.missingHdrs++ } -func (mp *metaProcessor) SetCurrHighestShardHdrsNonces(key uint32, value uint64) { - mp.currHighestShardHdrsNonces[key] = value -} - -func (mp *metaProcessor) IsHdrHashRequested(hdrHash []byte) bool { - mp.mutRequestedShardHdrsHashes.Lock() - defer mp.mutRequestedShardHdrsHashes.Unlock() +func (mp *metaProcessor) IsHdrMissing(hdrHash []byte) bool { + mp.hdrsForCurrBlock.mutHdrsForBlock.RLock() + defer 
mp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() - _, found := mp.requestedShardHdrsHashes[string(hdrHash)] + hdrInfo, ok := mp.hdrsForCurrBlock.hdrHashAndInfo[string(hdrHash)] + if !ok { + return true + } - return found + return hdrInfo.hdr == nil || hdrInfo.hdr.IsInterfaceNil() } func (mp *metaProcessor) CreateShardInfo(maxItemsInBlock uint32, round uint64, haveTime func() bool) ([]block.ShardData, error) { @@ -155,8 +150,11 @@ func (mp *metaProcessor) ProcessBlockHeaders(header *block.MetaBlock, round uint return mp.processBlockHeaders(header, round, haveTime) } -func (mp *metaProcessor) RequestFinalMissingHeaders() uint32 { - return mp.requestFinalMissingHeaders() +func (mp *metaProcessor) RequestMissingFinalityAttestingHeaders() uint32 { + mp.hdrsForCurrBlock.mutHdrsForBlock.Lock() + defer mp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() + + return mp.requestMissingFinalityAttestingHeaders() } func (bp *baseProcessor) NotarizedHdrs() map[uint32][]data.HeaderHandler { @@ -179,22 +177,20 @@ func (bp *baseProcessor) SetHasher(hasher hashing.Hasher) { bp.hasher = hasher } -func (mp *metaProcessor) SetNextKValidity(val uint32) { - mp.mutRequestedShardHdrsHashes.Lock() +func (mp *metaProcessor) SetShardBlockFinality(val uint32) { mp.shardBlockFinality = val - mp.mutRequestedShardHdrsHashes.Unlock() } -func (mp *metaProcessor) SaveLastNotarizedHeader(header *block.MetaBlock) error { - return mp.saveLastNotarizedHeader(header) +func (mp *metaProcessor) SaveLastNotarizedHeader() error { + return mp.saveLastNotarizedHeader() } -func (mp *metaProcessor) CheckShardHeadersValidity(header *block.MetaBlock) (map[uint32]data.HeaderHandler, error) { - return mp.checkShardHeadersValidity(header) +func (mp *metaProcessor) CheckShardHeadersValidity() (map[uint32]data.HeaderHandler, error) { + return mp.checkShardHeadersValidity() } -func (mp *metaProcessor) CheckShardHeadersFinality(header *block.MetaBlock, highestNonceHdrs map[uint32]data.HeaderHandler) error { - return mp.checkShardHeadersFinality(header, highestNonceHdrs) +func (mp *metaProcessor) CheckShardHeadersFinality(highestNonceHdrs map[uint32]data.HeaderHandler) error { + return mp.checkShardHeadersFinality(highestNonceHdrs) } func (bp *baseProcessor) IsHdrConstructionValid(currHdr, prevHdr data.HeaderHandler) error { @@ -246,6 +242,9 @@ func (sp *shardProcessor) GetHashAndHdrStruct(header data.HeaderHandler, hash [] } func (sp *shardProcessor) RequestMissingFinalityAttestingHeaders() uint32 { + sp.hdrsForCurrBlock.mutHdrsForBlock.Lock() + defer sp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() + return sp.requestMissingFinalityAttestingHeaders() } @@ -270,10 +269,6 @@ func (bp *baseProcessor) SetBlockSizeThrottler(blockSizeThrottler process.BlockS bp.blockSizeThrottler = blockSizeThrottler } -func (sp *shardProcessor) SetHighestHdrNonceForCurrentBlock(value uint64) { - sp.hdrsForCurrBlock.highestHdrNonce = value -} - func (sp *shardProcessor) DisplayLogInfo( header *block.Header, body block.Body, @@ -310,10 +305,16 @@ func (sp *shardProcessor) AddProcessedMiniBlock(metaBlockHash []byte, miniBlockH sp.addProcessedMiniBlock(metaBlockHash, miniBlockHash) } -func (sp *shardProcessor) SetHdrForCurrentBlock(headerHash []byte, headerHandler data.HeaderHandler, usedInBlock bool) { - sp.hdrsForCurrBlock.mutHdrsForBlock.Lock() - sp.hdrsForCurrBlock.hdrHashAndInfo[string(headerHash)] = &hdrInfo{hdr: headerHandler, usedInBlock: usedInBlock} - sp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() +func (bp *baseProcessor) SetHdrForCurrentBlock(headerHash []byte, headerHandler 
data.HeaderHandler, usedInBlock bool) { + bp.hdrsForCurrBlock.mutHdrsForBlock.Lock() + bp.hdrsForCurrBlock.hdrHashAndInfo[string(headerHash)] = &hdrInfo{hdr: headerHandler, usedInBlock: usedInBlock} + bp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() +} + +func (bp *baseProcessor) SetHighestHdrNonceForCurrentBlock(shardId uint32, value uint64) { + bp.hdrsForCurrBlock.mutHdrsForBlock.Lock() + bp.hdrsForCurrBlock.highestHdrNonce[shardId] = value + bp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() } func (sp *shardProcessor) CalculateRoundDuration( @@ -325,8 +326,8 @@ func (sp *shardProcessor) CalculateRoundDuration( return sp.calculateRoundDuration(lastBlockTimestamp, currentBlockTimestamp, lastBlockRound, currentBlockRound) } -func (sp *shardProcessor) CreateBlockStarted() { - sp.createBlockStarted() +func (bp *baseProcessor) CreateBlockStarted() { + bp.createBlockStarted() } func (sp *shardProcessor) AddProcessedCrossMiniBlocksFromHeader(header *block.Header) error { diff --git a/process/block/metablock.go b/process/block/metablock.go index 5f4bf43c15f..c5f1b18f8a3 100644 --- a/process/block/metablock.go +++ b/process/block/metablock.go @@ -20,7 +20,6 @@ import ( "github.com/ElrondNetwork/elrond-go/process/throttle" "github.com/ElrondNetwork/elrond-go/sharding" "github.com/ElrondNetwork/elrond-go/statusHandler" - "github.com/ElrondNetwork/elrond-go/storage" ) // metaProcessor implements metaProcessor interface and actually it tries to execute block @@ -207,12 +206,12 @@ func (mp *metaProcessor) ProcessBlock( go mp.checkAndRequestIfShardHeadersMissing(header.Round) }() - highestNonceHdrs, err := mp.checkShardHeadersValidity(header) + highestNonceHdrs, err := mp.checkShardHeadersValidity() if err != nil { return err } - err = mp.checkShardHeadersFinality(header, highestNonceHdrs) + err = mp.checkShardHeadersFinality(highestNonceHdrs) if err != nil { return err } @@ -250,9 +249,9 @@ func (mp *metaProcessor) checkAndRequestIfShardHeadersMissing(round uint64) { for i := uint32(0); i < mp.shardCoordinator.NumberOfShards(); i++ { // map from *block.Header to dataHandler - sortedHdrs := make([]data.HeaderHandler, 0) + sortedHdrs := make([]data.HeaderHandler, len(sortedHdrPerShard[i])) for j := 0; j < len(sortedHdrPerShard[i]); j++ { - sortedHdrs = append(sortedHdrs, sortedHdrPerShard[i][j]) + sortedHdrs[j] = sortedHdrPerShard[i][j] } err := mp.requestHeadersIfMissing(sortedHdrs, i, round) @@ -265,7 +264,7 @@ func (mp *metaProcessor) checkAndRequestIfShardHeadersMissing(round uint64) { return } -func (mp *metaProcessor) indexBlock(metaBlock *block.MetaBlock, headerPool map[string]*block.Header) { +func (mp *metaProcessor) indexBlock() { if mp.core == nil || mp.core.Indexer() == nil { return } @@ -280,11 +279,7 @@ func (mp *metaProcessor) indexBlock(metaBlock *block.MetaBlock, headerPool map[s } // removeBlockInfoFromPool removes the block info from associated pools -func (mp *metaProcessor) removeBlockInfoFromPool(header *block.MetaBlock) error { - if header == nil || header.IsInterfaceNil() { - return process.ErrNilMetaBlockHeader - } - +func (mp *metaProcessor) removeBlockInfoFromPool() error { headerPool := mp.dataPool.ShardHeaders() if headerPool == nil || headerPool.IsInterfaceNil() { return process.ErrNilHeadersDataPool @@ -295,22 +290,22 @@ func (mp *metaProcessor) removeBlockInfoFromPool(header *block.MetaBlock) error return process.ErrNilHeadersNoncesDataPool } - for i := 0; i < len(header.ShardInfo); i++ { - shardData := header.ShardInfo[i] - - obj, ok := headerPool.Peek(shardData.HeaderHash) - if 
!ok { + mp.hdrsForCurrBlock.mutHdrsForBlock.RLock() + for shardBlockHash, hdrInfo := range mp.hdrsForCurrBlock.hdrHashAndInfo { + if !hdrInfo.usedInBlock { continue } - hdr, ok := obj.(*block.Header) + shardBlock, ok := hdrInfo.hdr.(*block.Header) if !ok { + mp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() return process.ErrWrongTypeAssertion } - headerPool.Remove(shardData.HeaderHash) - headerNoncesPool.Remove(hdr.Nonce, hdr.ShardId) + headerPool.Remove([]byte(shardBlockHash)) + headerNoncesPool.Remove(shardBlock.Nonce, shardBlock.ShardId) } + mp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() return nil } @@ -338,10 +333,9 @@ func (mp *metaProcessor) RestoreBlockIntoPools(headerHandler data.HeaderHandler, return process.ErrNilHeadersNoncesDataPool } - hdrHashes := make([][]byte, 0) + hdrHashes := make([][]byte, len(header.ShardInfo)) for i := 0; i < len(header.ShardInfo); i++ { - shardData := header.ShardInfo[i] - hdrHashes = append(hdrHashes, shardData.HeaderHash) + hdrHashes[i] = header.ShardInfo[i].HeaderHash } for _, hdrHash := range hdrHashes { @@ -387,10 +381,7 @@ func (mp *metaProcessor) CreateBlockBody(round uint64, haveTime func() bool) (da } func (mp *metaProcessor) processBlockHeaders(header *block.MetaBlock, round uint64, haveTime func() time.Duration) error { - hdrPool := mp.dataPool.ShardHeaders() - msg := "" - for i := 0; i < len(header.ShardInfo); i++ { shardData := header.ShardInfo[i] for j := 0; j < len(shardData.ShardMiniBlockHeaders); j++ { @@ -403,10 +394,10 @@ func (mp *metaProcessor) processBlockHeaders(header *block.MetaBlock, round uint err := mp.checkAndProcessShardMiniBlockHeader( headerHash, shardMiniBlockHeader, - hdrPool, round, shardData.ShardId, ) + if err != nil { return err } @@ -436,8 +427,6 @@ func (mp *metaProcessor) CommitBlock( } }() - tempHeaderPool := make(map[string]*block.Header) - err = checkForNils(chainHandler, headerHandler, bodyHandler) if err != nil { return err @@ -488,34 +477,39 @@ func (mp *metaProcessor) CommitBlock( return err } - for i := 0; i < len(header.ShardInfo); i++ { - shardData := header.ShardInfo[i] - header, err := process.GetShardHeaderFromPool(shardData.HeaderHash, mp.dataPool.ShardHeaders()) - if header == nil { - return err + mp.hdrsForCurrBlock.mutHdrsForBlock.RLock() + for shardBlockHash, hdrInfo := range mp.hdrsForCurrBlock.hdrHashAndInfo { + if !hdrInfo.usedInBlock { + continue } - mp.updateShardHeadersNonce(shardData.ShardId, header.Nonce) + shardBlock, ok := hdrInfo.hdr.(*block.Header) + if !ok { + mp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() + return process.ErrWrongTypeAssertion + } - tempHeaderPool[string(shardData.HeaderHash)] = header + mp.updateShardHeadersNonce(shardBlock.ShardId, shardBlock.Nonce) - buff, err = mp.marshalizer.Marshal(header) + buff, err = mp.marshalizer.Marshal(shardBlock) if err != nil { + mp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() return err } - nonceToByteSlice := mp.uint64Converter.ToByteSlice(header.Nonce) - hdrNonceHashDataUnit := dataRetriever.ShardHdrNonceHashDataUnit + dataRetriever.UnitType(header.ShardId) - errNotCritical = mp.store.Put(hdrNonceHashDataUnit, nonceToByteSlice, shardData.HeaderHash) + nonceToByteSlice := mp.uint64Converter.ToByteSlice(shardBlock.Nonce) + hdrNonceHashDataUnit := dataRetriever.ShardHdrNonceHashDataUnit + dataRetriever.UnitType(shardBlock.ShardId) + errNotCritical = mp.store.Put(hdrNonceHashDataUnit, nonceToByteSlice, []byte(shardBlockHash)) log.LogIfError(errNotCritical) - errNotCritical = mp.store.Put(dataRetriever.BlockHeaderUnit, 
shardData.HeaderHash, buff) + errNotCritical = mp.store.Put(dataRetriever.BlockHeaderUnit, []byte(shardBlockHash), buff) log.LogIfError(errNotCritical) } + mp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() mp.saveMetricCrossCheckBlockHeight() - err = mp.saveLastNotarizedHeader(header) + err = mp.saveLastNotarizedHeader() if err != nil { return err } @@ -529,7 +523,7 @@ func (mp *metaProcessor) CommitBlock( header.Nonce, core.ToB64(headerHash))) - errNotCritical = mp.removeBlockInfoFromPool(header) + errNotCritical = mp.removeBlockInfoFromPool() if errNotCritical != nil { log.Info(errNotCritical.Error()) } @@ -562,7 +556,7 @@ func (mp *metaProcessor) CommitBlock( mp.core.TPSBenchmark().Update(header) } - mp.indexBlock(header, tempHeaderPool) + mp.indexBlock() mp.appStatusHandler.SetStringValue(core.MetricCurrentBlockHash, core.ToB64(headerHash)) @@ -614,7 +608,7 @@ func (mp *metaProcessor) saveMetricCrossCheckBlockHeight() { mp.appStatusHandler.SetStringValue(core.MetricCrossCheckBlockHeight, crossCheckBlockHeight) } -func (mp *metaProcessor) saveLastNotarizedHeader(header *block.MetaBlock) error { +func (mp *metaProcessor) saveLastNotarizedHeader() error { mp.mutNotarizedHdrs.Lock() defer mp.mutNotarizedHdrs.Unlock() @@ -627,17 +621,23 @@ func (mp *metaProcessor) saveLastNotarizedHeader(header *block.MetaBlock) error tmpLastNotarizedHdrForShard[i] = mp.lastNotarizedHdrForShard(i) } - for i := 0; i < len(header.ShardInfo); i++ { - shardData := header.ShardInfo[i] - header, err := process.GetShardHeaderFromPool(shardData.HeaderHash, mp.dataPool.ShardHeaders()) - if err != nil { - return err + mp.hdrsForCurrBlock.mutHdrsForBlock.RLock() + for _, hdrInfo := range mp.hdrsForCurrBlock.hdrHashAndInfo { + if !hdrInfo.usedInBlock { + continue } - if tmpLastNotarizedHdrForShard[header.ShardId].GetNonce() < header.Nonce { - tmpLastNotarizedHdrForShard[header.ShardId] = header + shardHdr, ok := hdrInfo.hdr.(*block.Header) + if !ok { + mp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() + return process.ErrWrongTypeAssertion + } + + if tmpLastNotarizedHdrForShard[shardHdr.ShardId].GetNonce() < shardHdr.Nonce { + tmpLastNotarizedHdrForShard[shardHdr.ShardId] = shardHdr } } + mp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() for i := uint32(0); i < mp.shardCoordinator.NumberOfShards(); i++ { mp.notarizedHdrs[i] = append(mp.notarizedHdrs[i], tmpLastNotarizedHdrForShard[i]) @@ -647,45 +647,9 @@ func (mp *metaProcessor) saveLastNotarizedHeader(header *block.MetaBlock) error return nil } -// gets all the headers from the metablock in sorted order per shard -func (mp *metaProcessor) getSortedShardHdrsFromMetablock(metaBlock *block.MetaBlock) (map[uint32][]*block.Header, error) { - sortedShardHdrs := make(map[uint32][]*block.Header, mp.shardCoordinator.NumberOfShards()) - - requestedHeaders := 0 - for i := 0; i < len(metaBlock.ShardInfo); i++ { - shardData := metaBlock.ShardInfo[i] - header, err := process.GetShardHeaderFromPool(shardData.HeaderHash, mp.dataPool.ShardHeaders()) - if err != nil { - log.Debug(err.Error()) - requestedHeaders++ - go mp.onRequestHeaderHandler(shardData.ShardId, shardData.HeaderHash) - continue - } - - sortedShardHdrs[shardData.ShardId] = append(sortedShardHdrs[shardData.ShardId], header) - } - - if requestedHeaders > 0 { - return nil, process.ErrMissingHeader - } - - for shId := uint32(0); shId < mp.shardCoordinator.NumberOfShards(); shId++ { - hdrsForShard := sortedShardHdrs[shId] - if len(hdrsForShard) <= 1 { - continue - } - - sort.Slice(hdrsForShard, func(i, j int) bool { - return 
hdrsForShard[i].GetNonce() < hdrsForShard[j].GetNonce() - }) - } - - return sortedShardHdrs, nil -} - // check if shard headers were signed and constructed correctly and returns headers which has to be // checked for finality -func (mp *metaProcessor) checkShardHeadersValidity(header *block.MetaBlock) (map[uint32]data.HeaderHandler, error) { +func (mp *metaProcessor) checkShardHeadersValidity() (map[uint32]data.HeaderHandler, error) { mp.mutNotarizedHdrs.RLock() if mp.notarizedHdrs == nil { mp.mutNotarizedHdrs.RUnlock() @@ -698,25 +662,27 @@ func (mp *metaProcessor) checkShardHeadersValidity(header *block.MetaBlock) (map } mp.mutNotarizedHdrs.RUnlock() - sortedShardHdrs, err := mp.getSortedShardHdrsFromMetablock(header) - if err != nil { - return nil, err + highestNonceHdrs := make(map[uint32]data.HeaderHandler) + + usedShardHdrs := mp.sortHeadersForCurrentBlockByNonce(true) + if len(usedShardHdrs) == 0 { + return highestNonceHdrs, nil } - highestNonceHdrs := make(map[uint32]data.HeaderHandler, mp.shardCoordinator.NumberOfShards()) - for shId := uint32(0); shId < mp.shardCoordinator.NumberOfShards(); shId++ { - hdrsForShard := sortedShardHdrs[shId] + for shardId := uint32(0); shardId < mp.shardCoordinator.NumberOfShards(); shardId++ { + hdrsForShard := usedShardHdrs[shardId] if len(hdrsForShard) == 0 { continue } - for i := 0; i < len(hdrsForShard); i++ { - err := mp.isHdrConstructionValid(hdrsForShard[i], tmpLastNotarized[shId]) + for _, shardHdr := range hdrsForShard { + err := mp.isHdrConstructionValid(shardHdr, tmpLastNotarized[shardId]) if err != nil { return nil, err } - tmpLastNotarized[shId] = hdrsForShard[i] - highestNonceHdrs[shId] = hdrsForShard[i] + + tmpLastNotarized[shardId] = shardHdr + highestNonceHdrs[shardId] = shardHdr } } @@ -724,39 +690,34 @@ func (mp *metaProcessor) checkShardHeadersValidity(header *block.MetaBlock) (map } // check if shard headers are final by checking if newer headers were constructed upon them -func (mp *metaProcessor) checkShardHeadersFinality(header *block.MetaBlock, highestNonceHdrs map[uint32]data.HeaderHandler) error { - if header == nil { - return process.ErrNilBlockHeader - } - - //TODO: change this to look at the pool where values are saved by prevHash. 
can be done after resolver is done - sortedHdrPerShard, err := mp.getFinalityAttestingHeaders(highestNonceHdrs, process.ShardBlockFinality) - if err != nil { - return err - } +func (mp *metaProcessor) checkShardHeadersFinality(highestNonceHdrs map[uint32]data.HeaderHandler) error { + finalityAttestingShardHdrs := mp.sortHeadersForCurrentBlockByNonce(false) for index, lastVerifiedHdr := range highestNonceHdrs { - if index != lastVerifiedHdr.GetShardID() { + if lastVerifiedHdr == nil || lastVerifiedHdr.IsInterfaceNil() { + return process.ErrNilBlockHeader + } + if lastVerifiedHdr.GetShardID() != index { return process.ErrShardIdMissmatch } // verify if there are "K" block after current to make this one final nextBlocksVerified := uint32(0) - shId := lastVerifiedHdr.GetShardID() - for i := 0; i < len(sortedHdrPerShard[shId]); i++ { + shardId := lastVerifiedHdr.GetShardID() + for _, shardHdr := range finalityAttestingShardHdrs[shardId] { if nextBlocksVerified >= mp.shardBlockFinality { break } // found a header with the next nonce - tmpHdr := sortedHdrPerShard[shId][i] - if tmpHdr.GetNonce() == lastVerifiedHdr.GetNonce()+1 { - err := mp.isHdrConstructionValid(tmpHdr, lastVerifiedHdr) + if shardHdr.GetNonce() == lastVerifiedHdr.GetNonce()+1 { + err := mp.isHdrConstructionValid(shardHdr, lastVerifiedHdr) if err != nil { + log.Debug(err.Error()) continue } - lastVerifiedHdr = tmpHdr + lastVerifiedHdr = shardHdr nextBlocksVerified += 1 } } @@ -770,57 +731,6 @@ func (mp *metaProcessor) checkShardHeadersFinality(header *block.MetaBlock, high return nil } -func (mp *metaProcessor) getFinalityAttestingHeaders( - highestNonceHdrs map[uint32]data.HeaderHandler, - finality uint64, -) (map[uint32][]*block.Header, error) { - - shardHeadersPool := mp.dataPool.ShardHeaders() - if shardHeadersPool == nil { - return nil, process.ErrNilShardBlockPool - } - - headersMap := make(map[uint32][]*block.Header) - // get keys and arrange them into shards - for _, key := range shardHeadersPool.Keys() { - val, _ := shardHeadersPool.Peek(key) - if val == nil { - continue - } - - hdr, ok := val.(*block.Header) - if !ok { - continue - } - - if highestNonceHdrs[hdr.ShardId] == nil { - continue - } - - isHdrNonceLowerOrEqualThanHighestNonce := hdr.GetNonce() <= highestNonceHdrs[hdr.ShardId].GetNonce() - isHdrNonceHigherThanFinalNonce := hdr.GetNonce() > highestNonceHdrs[hdr.ShardId].GetNonce()+finality - - if isHdrNonceLowerOrEqualThanHighestNonce || - isHdrNonceHigherThanFinalNonce { - continue - } - - headersMap[hdr.ShardId] = append(headersMap[hdr.ShardId], hdr) - } - - // sort headers for each shard - for shardId := uint32(0); shardId < mp.shardCoordinator.NumberOfShards(); shardId++ { - hdrsForShard := headersMap[shardId] - if len(hdrsForShard) > 1 { - sort.Slice(hdrsForShard, func(i, j int) bool { - return hdrsForShard[i].GetNonce() < hdrsForShard[j].GetNonce() - }) - } - } - - return headersMap, nil -} - func (mp *metaProcessor) isShardHeaderValidFinal(currHdr *block.Header, lastHdr *block.Header, sortedShardHdrs []*block.Header) (bool, []uint32) { if currHdr == nil { return false, nil @@ -1017,14 +927,9 @@ func (mp *metaProcessor) computeMissingAndExistingShardHeaders(metaBlock *block. 
func (mp *metaProcessor) checkAndProcessShardMiniBlockHeader( headerHash []byte, shardMiniBlockHeader *block.ShardMiniBlockHeader, - hdrPool storage.Cacher, round uint64, shardId uint32, ) error { - - if hdrPool == nil || hdrPool.IsInterfaceNil() { - return process.ErrNilHeadersDataPool - } // TODO: real processing has to be done here, using metachain state return nil } @@ -1047,11 +952,6 @@ func (mp *metaProcessor) createShardInfo( return shardInfo, nil } - hdrPool := mp.dataPool.ShardHeaders() - if hdrPool == nil { - return nil, process.ErrNilHeadersDataPool - } - mbHdrs := uint32(0) timeBefore := time.Now() @@ -1082,6 +982,7 @@ func (mp *metaProcessor) createShardInfo( } mp.mutNotarizedHdrs.RUnlock() + mp.hdrsForCurrBlock.mutHdrsForBlock.Lock() for index := range orderedHdrs { shId := orderedHdrs[index].ShardId @@ -1120,7 +1021,6 @@ func (mp *metaProcessor) createShardInfo( err := mp.checkAndProcessShardMiniBlockHeader( orderedHdrHashes[index], &shardMiniBlockHeader, - hdrPool, round, shardData.ShardId, ) @@ -1145,9 +1045,11 @@ func (mp *metaProcessor) createShardInfo( if len(shardData.ShardMiniBlockHeaders) == len(orderedHdrs[index].MiniBlockHeaders) { shardInfo = append(shardInfo, shardData) + mp.hdrsForCurrBlock.hdrHashAndInfo[string(orderedHdrHashes[index])] = &hdrInfo{hdr: orderedHdrs[index], usedInBlock: true} } log.Info(fmt.Sprintf("creating shard info has been finished: created %d shard data\n", len(shardInfo))) + mp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() return shardInfo, nil } } @@ -1157,16 +1059,20 @@ func (mp *metaProcessor) createShardInfo( if len(shardData.ShardMiniBlockHeaders) == len(orderedHdrs[index].MiniBlockHeaders) { shardInfo = append(shardInfo, shardData) + mp.hdrsForCurrBlock.hdrHashAndInfo[string(orderedHdrHashes[index])] = &hdrInfo{hdr: orderedHdrs[index], usedInBlock: true} } log.Info(fmt.Sprintf("creating shard info has been finished: created %d shard data\n", len(shardInfo))) + mp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() return shardInfo, nil } if len(shardData.ShardMiniBlockHeaders) == len(orderedHdrs[index].MiniBlockHeaders) { shardInfo = append(shardInfo, shardData) + mp.hdrsForCurrBlock.hdrHashAndInfo[string(orderedHdrHashes[index])] = &hdrInfo{hdr: orderedHdrs[index], usedInBlock: true} } } + mp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() log.Info(fmt.Sprintf("creating shard info has been finished: created %d shard data\n", len(shardInfo))) return shardInfo, nil @@ -1244,7 +1150,7 @@ func (mp *metaProcessor) getOrderedHdrs(round uint64) ([]*block.Header, [][]byte return nil, nil, nil, process.ErrNilShardBlockPool } - hashAndBlockMap := make(map[uint32][]*hashAndHdr, mp.shardCoordinator.NumberOfShards()) + hashAndBlockMap := make(map[uint32][]*hashAndHdr) headersMap := make(map[uint32][]*block.Header) headers := make([]*block.Header, 0) hdrHashes := make([][]byte, 0) diff --git a/process/block/metablock_test.go b/process/block/metablock_test.go index 271413ff620..5342270817d 100644 --- a/process/block/metablock_test.go +++ b/process/block/metablock_test.go @@ -509,7 +509,7 @@ func TestMetaProcessor_ProcessBlockWithErrOnVerifyStateRootCallShouldRevertState }() // should return err - mp.SetNextKValidity(0) + mp.SetShardBlockFinality(0) hdr.ShardInfo = make([]block.ShardData, 0) err := mp.ProcessBlock(blkc, hdr, body, haveTime) @@ -608,11 +608,11 @@ func TestMetaProcessor_RequestFinalMissingHeaderShouldPass(t *testing.T) { } return cs } - mp.AddHdrHashToRequestedList([]byte("header_hash")) - mp.SetCurrHighestShardHdrsNonces(0, 1) - 
mp.SetCurrHighestShardHdrsNonces(1, 2) - mp.SetCurrHighestShardHdrsNonces(2, 3) - res := mp.RequestFinalMissingHeaders() + mp.AddHdrHashToRequestedList(&block.Header{}, []byte("header_hash")) + mp.SetHighestHdrNonceForCurrentBlock(0, 1) + mp.SetHighestHdrNonceForCurrentBlock(1, 2) + mp.SetHighestHdrNonceForCurrentBlock(2, 3) + res := mp.RequestMissingFinalityAttestingHeaders() assert.Equal(t, res, uint32(3)) } @@ -778,50 +778,6 @@ func TestMetaProcessor_CommitBlockNilNoncesDataPoolShouldErr(t *testing.T) { assert.Equal(t, process.ErrNilDataPoolHolder, err) } -func TestMetaProcessor_CommitBlockNoTxInPoolShouldErr(t *testing.T) { - t.Parallel() - - mdp := initMetaDataPool() - hdr := createMetaBlockHeader() - body := &block.MetaBlockBody{} - accounts := &mock.AccountsStub{ - RevertToSnapshotCalled: func(snapshot int) error { - return nil - }, - } - fd := &mock.ForkDetectorMock{} - hasher := &mock.HasherStub{} - store := initStore() - - mp, _ := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - accounts, - mdp, - fd, - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - hasher, - &mock.MarshalizerMock{}, - store, - createGenesisBlocks(mock.NewOneShardCoordinatorMock()), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - - mdp.ShardHeadersCalled = func() storage.Cacher { - return &mock.CacherStub{ - PeekCalled: func(key []byte) (value interface{}, ok bool) { - return nil, false - }, - } - } - - blkc := createTestBlockchain() - err := mp.CommitBlock(blkc, hdr, body) - assert.Equal(t, process.ErrMissingHeader, err) -} - func TestMetaProcessor_CommitBlockOkValsShouldWork(t *testing.T) { t.Parallel() @@ -897,6 +853,7 @@ func TestMetaProcessor_CommitBlockOkValsShouldWork(t *testing.T) { blkc := createTestBlockchain() + mp.SetHdrForCurrentBlock([]byte("hdr_hash1"), &block.Header{}, true) err := mp.CommitBlock(blkc, hdr, body) assert.Nil(t, err) assert.True(t, removeHdrWasCalled) @@ -944,30 +901,6 @@ func TestBlockProc_RequestTransactionFromNetwork(t *testing.T) { assert.Equal(t, uint32(1), hdrsRequested) } -func TestMetaProcessor_RemoveBlockInfoFromPoolShouldErrNilMetaBlockHeader(t *testing.T) { - t.Parallel() - - mdp := initMetaDataPool() - mp, _ := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - &mock.AccountsStub{}, - mdp, - &mock.ForkDetectorMock{}, - mock.NewOneShardCoordinatorMock(), - mock.NewNodesCoordinatorMock(), - &mock.SpecialAddressHandlerMock{}, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - initStore(), - createGenesisBlocks(mock.NewOneShardCoordinatorMock()), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - err := mp.RemoveBlockInfoFromPool(nil) - assert.NotNil(t, err) - assert.Equal(t, err, process.ErrNilMetaBlockHeader) -} - func TestMetaProcessor_RemoveBlockInfoFromPoolShouldWork(t *testing.T) { t.Parallel() @@ -987,8 +920,8 @@ func TestMetaProcessor_RemoveBlockInfoFromPoolShouldWork(t *testing.T) { &mock.RequestHandlerMock{}, &mock.Uint64ByteSliceConverterMock{}, ) - header := createMetaBlockHeader() - err := mp.RemoveBlockInfoFromPool(header) + mp.SetHdrForCurrentBlock([]byte("hdr_hash1"), &block.Header{}, true) + err := mp.RemoveBlockInfoFromPool() assert.Nil(t, err) } @@ -1111,7 +1044,7 @@ func TestMetaProcessor_MarshalizedDataToBroadcastShouldWork(t *testing.T) { //------- receivedHeader -func TestMetaProcessor_ReceivedHeaderShouldEraseRequested(t *testing.T) { +func TestMetaProcessor_ReceivedHeaderShouldDecreaseMissing(t *testing.T) { 
t.Parallel() hasher := mock.HasherMock{} @@ -1139,18 +1072,20 @@ func TestMetaProcessor_ReceivedHeaderShouldEraseRequested(t *testing.T) { hdrHash2 := []byte("hdr hash 2") hdrHash3 := []byte("hdr hash 3") - mp.AddHdrHashToRequestedList(hdrHash1) - mp.AddHdrHashToRequestedList(hdrHash2) - mp.AddHdrHashToRequestedList(hdrHash3) + hdr2 := &block.Header{Nonce: 2} + + mp.AddHdrHashToRequestedList(nil, hdrHash1) + mp.AddHdrHashToRequestedList(nil, hdrHash2) + mp.AddHdrHashToRequestedList(nil, hdrHash3) //received txHash2 - hdr := &block.Header{Nonce: 1} - pool.ShardHeaders().Put(hdrHash2, hdr) - mp.ReceivedHeader(hdrHash2) + pool.ShardHeaders().Put(hdrHash2, hdr2) + + time.Sleep(100 * time.Millisecond) - assert.True(t, mp.IsHdrHashRequested(hdrHash1)) - assert.False(t, mp.IsHdrHashRequested(hdrHash2)) - assert.True(t, mp.IsHdrHashRequested(hdrHash3)) + assert.True(t, mp.IsHdrMissing(hdrHash1)) + assert.False(t, mp.IsHdrMissing(hdrHash2)) + assert.True(t, mp.IsHdrMissing(hdrHash3)) } //------- createShardInfo @@ -1344,7 +1279,7 @@ func TestMetaProcessor_CreateShardInfoShouldWorkNoHdrAddedNotFinal(t *testing.T) PrevHash: prevHash, MiniBlockHeaders: miniBlockHeaders3}) - mp.SetNextKValidity(0) + mp.SetShardBlockFinality(0) round := uint64(40) shardInfo, err := mp.CreateShardInfo(3, round, haveTime) assert.Nil(t, err) @@ -1512,7 +1447,7 @@ func TestMetaProcessor_CreateShardInfoShouldWorkHdrsAdded(t *testing.T) { pool.ShardHeaders().Put(hdrHash3, headers[4]) pool.ShardHeaders().Put(hdrHash33, headers[5]) - mp.SetNextKValidity(1) + mp.SetShardBlockFinality(1) round := uint64(15) shardInfo, err := mp.CreateShardInfo(3, round, haveTime) assert.Nil(t, err) @@ -1680,7 +1615,7 @@ func TestMetaProcessor_CreateShardInfoEmptyBlockHDRRoundTooHigh(t *testing.T) { pool.ShardHeaders().Put(hdrHash3, headers[4]) pool.ShardHeaders().Put(hdrHash33, headers[5]) - mp.SetNextKValidity(1) + mp.SetShardBlockFinality(1) round := uint64(20) shardInfo, err := mp.CreateShardInfo(3, round, haveTime) assert.Nil(t, err) @@ -1848,16 +1783,17 @@ func TestMetaProcessor_CreateLastNotarizedHdrs(t *testing.T) { metaHdr.ShardInfo = append(metaHdr.ShardInfo, shDataPrev) // test header not in pool and defer called - err := mp.SaveLastNotarizedHeader(metaHdr) - assert.Equal(t, process.ErrMissingHeader, err) + err := mp.SaveLastNotarizedHeader() notarizedHdrs = mp.NotarizedHdrs() assert.Equal(t, firstNonce, mp.LastNotarizedHdrForShard(currHdr.ShardId).GetNonce()) // wrong header type in pool and defer called pool.ShardHeaders().Put(currHash, metaHdr) pool.ShardHeaders().Put(prevHash, prevHdr) + mp.SetHdrForCurrentBlock(currHash, metaHdr, true) + mp.SetHdrForCurrentBlock(prevHash, prevHdr, true) - err = mp.SaveLastNotarizedHeader(metaHdr) + err = mp.SaveLastNotarizedHeader() assert.Equal(t, process.ErrWrongTypeAssertion, err) notarizedHdrs = mp.NotarizedHdrs() assert.Equal(t, firstNonce, mp.LastNotarizedHdrForShard(currHdr.ShardId).GetNonce()) @@ -1865,8 +1801,11 @@ func TestMetaProcessor_CreateLastNotarizedHdrs(t *testing.T) { // put headers in pool pool.ShardHeaders().Put(currHash, currHdr) pool.ShardHeaders().Put(prevHash, prevHdr) + mp.CreateBlockStarted() + mp.SetHdrForCurrentBlock(currHash, currHdr, true) + mp.SetHdrForCurrentBlock(prevHash, prevHdr, true) - err = mp.SaveLastNotarizedHeader(metaHdr) + err = mp.SaveLastNotarizedHeader() assert.Nil(t, err) notarizedHdrs = mp.NotarizedHdrs() assert.Equal(t, currHdr, mp.LastNotarizedHdrForShard(currHdr.ShardId)) @@ -1953,7 +1892,10 @@ func TestMetaProcessor_CheckShardHeadersValidity(t 
*testing.T) { shDataPrev := block.ShardData{ShardId: 0, HeaderHash: prevHash} metaHdr.ShardInfo = append(metaHdr.ShardInfo, shDataPrev) - _, err := mp.CheckShardHeadersValidity(metaHdr) + mp.SetHdrForCurrentBlock(wrongCurrHash, wrongCurrHdr, true) + mp.SetHdrForCurrentBlock(prevHash, prevHdr, true) + + _, err := mp.CheckShardHeadersValidity() assert.Equal(t, process.ErrWrongNonceInBlock, err) shDataCurr = block.ShardData{ShardId: 0, HeaderHash: currHash} @@ -1962,7 +1904,11 @@ func TestMetaProcessor_CheckShardHeadersValidity(t *testing.T) { shDataPrev = block.ShardData{ShardId: 0, HeaderHash: prevHash} metaHdr.ShardInfo = append(metaHdr.ShardInfo, shDataPrev) - highestNonceHdrs, err := mp.CheckShardHeadersValidity(metaHdr) + mp.CreateBlockStarted() + mp.SetHdrForCurrentBlock(currHash, currHdr, true) + mp.SetHdrForCurrentBlock(prevHash, prevHdr, true) + + highestNonceHdrs, err := mp.CheckShardHeadersValidity() assert.Nil(t, err) assert.NotNil(t, highestNonceHdrs) assert.Equal(t, currHdr.Nonce, highestNonceHdrs[currHdr.ShardId].GetNonce()) @@ -2022,7 +1968,9 @@ func TestMetaProcessor_CheckShardHeadersValidityWrongNonceFromLastNoted(t *testi metaHdr.ShardInfo = make([]block.ShardData, 0) metaHdr.ShardInfo = append(metaHdr.ShardInfo, shDataCurr) - highestNonceHdrs, err := mp.CheckShardHeadersValidity(metaHdr) + mp.SetHdrForCurrentBlock(currHash, currHdr, true) + + highestNonceHdrs, err := mp.CheckShardHeadersValidity() assert.Nil(t, highestNonceHdrs) assert.Equal(t, process.ErrWrongNonceInBlock, err) } @@ -2081,12 +2029,12 @@ func TestMetaProcessor_CheckShardHeadersValidityRoundZeroLastNoted(t *testing.T) metaHdr.ShardInfo = make([]block.ShardData, 0) metaHdr.ShardInfo = append(metaHdr.ShardInfo, shDataCurr) - highestNonceHdrs, err := mp.CheckShardHeadersValidity(metaHdr) - assert.Nil(t, highestNonceHdrs) - assert.Equal(t, process.ErrMissingHeader, err) + highestNonceHdrs, err := mp.CheckShardHeadersValidity() + assert.Equal(t, 0, len(highestNonceHdrs)) pool.ShardHeaders().Put(currHash, currHdr) - highestNonceHdrs, err = mp.CheckShardHeadersValidity(metaHdr) + mp.SetHdrForCurrentBlock(currHash, currHdr, true) + highestNonceHdrs, err = mp.CheckShardHeadersValidity() assert.NotNil(t, highestNonceHdrs) assert.Nil(t, err) assert.Equal(t, currHdr.Nonce, highestNonceHdrs[currHdr.ShardId].GetNonce()) @@ -2163,29 +2111,33 @@ func TestMetaProcessor_CheckShardHeadersFinality(t *testing.T) { prevHash, _ = mp.ComputeHeaderHash(nextWrongHdr) pool.ShardHeaders().Put(prevHash, nextWrongHdr) - mp.SetNextKValidity(0) + mp.SetShardBlockFinality(0) metaHdr := &block.MetaBlock{Round: 1} highestNonceHdrs := make(map[uint32]data.HeaderHandler) for i := uint32(0); i < noOfShards; i++ { - highestNonceHdrs[i] = mp.LastNotarizedHdrForShard(i) + highestNonceHdrs[i] = nil } - err := mp.CheckShardHeadersFinality(nil, highestNonceHdrs) + err := mp.CheckShardHeadersFinality(highestNonceHdrs) assert.Equal(t, process.ErrNilBlockHeader, err) + for i := uint32(0); i < noOfShards; i++ { + highestNonceHdrs[i] = mp.LastNotarizedHdrForShard(i) + } + // should work for empty highest nonce hdrs - no hdrs added this round to metablock - err = mp.CheckShardHeadersFinality(metaHdr, nil) + err = mp.CheckShardHeadersFinality(nil) assert.Nil(t, err) - mp.SetNextKValidity(0) + mp.SetShardBlockFinality(0) highestNonceHdrs = make(map[uint32]data.HeaderHandler, 0) highestNonceHdrs[0] = currHdr - err = mp.CheckShardHeadersFinality(metaHdr, highestNonceHdrs) + err = mp.CheckShardHeadersFinality(highestNonceHdrs) assert.Nil(t, err) - 
mp.SetNextKValidity(1) - err = mp.CheckShardHeadersFinality(metaHdr, highestNonceHdrs) + mp.SetShardBlockFinality(1) + err = mp.CheckShardHeadersFinality(highestNonceHdrs) assert.Equal(t, process.ErrHeaderNotFinal, err) prevHash, _ = mp.ComputeHeaderHash(currHdr) @@ -2198,11 +2150,12 @@ func TestMetaProcessor_CheckShardHeadersFinality(t *testing.T) { PrevHash: prevHash, RootHash: []byte("currRootHash")} - prevHash, _ = mp.ComputeHeaderHash(nextHdr) - pool.ShardHeaders().Put(prevHash, nextHdr) + nextHash, _ := mp.ComputeHeaderHash(nextHdr) + pool.ShardHeaders().Put(nextHash, nextHdr) + mp.SetHdrForCurrentBlock(nextHash, nextHdr, false) metaHdr.Round = 20 - err = mp.CheckShardHeadersFinality(metaHdr, highestNonceHdrs) + err = mp.CheckShardHeadersFinality(highestNonceHdrs) assert.Nil(t, err) } @@ -2402,12 +2355,12 @@ func TestMetaProcessor_IsShardHeaderValidFinal(t *testing.T) { assert.False(t, valid) assert.Nil(t, hdrIds) - mp.SetNextKValidity(0) + mp.SetShardBlockFinality(0) valid, hdrIds = mp.IsShardHeaderValidFinal(currHdr, prevHdr, srtShardHdrs) assert.True(t, valid) assert.NotNil(t, hdrIds) - mp.SetNextKValidity(1) + mp.SetShardBlockFinality(1) nextWrongHdr := &block.Header{ Round: 12, Nonce: 44, diff --git a/process/block/shardblock.go b/process/block/shardblock.go index c8c078ac61b..eeac04e35b4 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -25,7 +25,7 @@ const maxCleanTime = time.Second type shardProcessor struct { *baseProcessor dataPool dataRetriever.PoolsHolder - metaBlockFinality int + metaBlockFinality uint32 chRcvAllMetaHdrs chan bool @@ -307,16 +307,12 @@ func (sp *shardProcessor) checkMetaHeadersValidityAndFinality() error { return err } - usedMetaHdrs, err := sp.sortMetaHeadersForCurrentBlockByNonce(true) - if err != nil { - return err - } - - if len(usedMetaHdrs) == 0 { + usedMetaHdrs := sp.sortHeadersForCurrentBlockByNonce(true) + if len(usedMetaHdrs[sharding.MetachainShardId]) == 0 { return nil } - for _, metaHdr := range usedMetaHdrs { + for _, metaHdr := range usedMetaHdrs[sharding.MetachainShardId] { err = sp.isHdrConstructionValid(metaHdr, tmpNotedHdr) if err != nil { return err @@ -339,22 +335,19 @@ func (sp *shardProcessor) checkMetaHdrFinality(header data.HeaderHandler) error return process.ErrNilBlockHeader } - finalityAttestingMetaHdrs, err := sp.sortMetaHeadersForCurrentBlockByNonce(false) - if err != nil { - return err - } + finalityAttestingMetaHdrs := sp.sortHeadersForCurrentBlockByNonce(false) lastVerifiedHdr := header // verify if there are "K" block after current to make this one final - nextBlocksVerified := 0 - for _, metaHdr := range finalityAttestingMetaHdrs { + nextBlocksVerified := uint32(0) + for _, metaHdr := range finalityAttestingMetaHdrs[sharding.MetachainShardId] { if nextBlocksVerified >= sp.metaBlockFinality { break } // found a header with the next nonce - if metaHdr.Nonce == lastVerifiedHdr.GetNonce()+1 { - err = sp.isHdrConstructionValid(metaHdr, lastVerifiedHdr) + if metaHdr.GetNonce() == lastVerifiedHdr.GetNonce()+1 { + err := sp.isHdrConstructionValid(metaHdr, lastVerifiedHdr) if err != nil { log.Debug(err.Error()) continue @@ -375,7 +368,7 @@ func (sp *shardProcessor) checkMetaHdrFinality(header data.HeaderHandler) error // check if header has the same miniblocks as presented in body func (sp *shardProcessor) checkHeaderBodyCorrelation(hdr *block.Header, body block.Body) error { - mbHashesFromHdr := make(map[string]*block.MiniBlockHeader) + mbHashesFromHdr := make(map[string]*block.MiniBlockHeader, 
len(hdr.MiniBlockHeaders)) for i := 0; i < len(hdr.MiniBlockHeaders); i++ { mbHashesFromHdr[string(hdr.MiniBlockHeaders[i].Hash)] = &hdr.MiniBlockHeaders[i] } @@ -839,10 +832,10 @@ func (sp *shardProcessor) getHighestHdrForOwnShardFromMetachain( process.SortHeadersByNonce(ownShIdHdrs) - ownShIdHdrsHashes := make([][]byte, 0) + ownShIdHdrsHashes := make([][]byte, len(ownShIdHdrs)) for i := 0; i < len(ownShIdHdrs); i++ { hash, _ := core.CalculateHash(sp.marshalizer, sp.hasher, ownShIdHdrs[i]) - ownShIdHdrsHashes = append(ownShIdHdrsHashes, hash) + ownShIdHdrsHashes[i] = hash } return ownShIdHdrs, ownShIdHdrsHashes, nil @@ -886,7 +879,7 @@ func (sp *shardProcessor) getOrderedProcessedMetaBlocksFromHeader(header *block. return nil, process.ErrNilBlockHeader } - miniBlockHashes := make(map[int][]byte, 0) + miniBlockHashes := make(map[int][]byte, len(header.MiniBlockHeaders)) for i := 0; i < len(header.MiniBlockHeaders); i++ { miniBlockHashes[i] = header.MiniBlockHeaders[i].Hash } @@ -906,7 +899,7 @@ func (sp *shardProcessor) addProcessedCrossMiniBlocksFromHeader(header *block.He return process.ErrNilBlockHeader } - miniBlockHashes := make(map[int][]byte, 0) + miniBlockHashes := make(map[int][]byte, len(header.MiniBlockHeaders)) for i := 0; i < len(header.MiniBlockHeaders); i++ { miniBlockHashes[i] = header.MiniBlockHeaders[i].Hash } @@ -945,7 +938,7 @@ func (sp *shardProcessor) getOrderedProcessedMetaBlocksFromMiniBlocks( usedMiniBlocks []*block.MiniBlock, ) ([]data.HeaderHandler, error) { - miniBlockHashes := make(map[int][]byte, 0) + miniBlockHashes := make(map[int][]byte) for i := 0; i < len(usedMiniBlocks); i++ { if usedMiniBlocks[i].SenderShardID == sp.shardCoordinator.SelfId() { continue @@ -1265,21 +1258,21 @@ func (sp *shardProcessor) getAllMiniBlockDstMeFromMeta(round uint64) (map[string if !hdrInfo.usedInBlock { continue } - hdr, ok := hdrInfo.hdr.(*block.MetaBlock) + metaBlock, ok := hdrInfo.hdr.(*block.MetaBlock) if !ok { continue } - if hdr.GetRound() > round { + if metaBlock.GetRound() > round { continue } - if hdr.GetRound() <= lastHdr.GetRound() { + if metaBlock.GetRound() <= lastHdr.GetRound() { continue } - if hdr.GetNonce() <= lastHdr.GetNonce() { + if metaBlock.GetNonce() <= lastHdr.GetNonce() { continue } - crossMiniBlockHashes := hdr.GetMiniBlockHeadersWithDst(sp.shardCoordinator.SelfId()) + crossMiniBlockHashes := metaBlock.GetMiniBlockHeadersWithDst(sp.shardCoordinator.SelfId()) for hash := range crossMiniBlockHashes { miniBlockMetaHashes[hash] = []byte(metaBlockHash) } @@ -1345,7 +1338,7 @@ func (sp *shardProcessor) isMetaHeaderFinal(currHdr data.HeaderHandler, sortedHd // verify if there are "K" block after current to make this one final lastVerifiedHdr := currHdr - nextBlocksVerified := 0 + nextBlocksVerified := uint32(0) for i := startPos; i < len(sortedHdrs); i++ { if nextBlocksVerified >= sp.metaBlockFinality { @@ -1590,12 +1583,11 @@ func (sp *shardProcessor) CreateBlockHeader(bodyHandler data.BodyHandler, round header.MiniBlockHeaders = miniBlockHeaders header.TxCount = uint32(totalTxCount) + header.MetaBlockHashes = sp.sortHeaderHashesForCurrentBlockByNonce(true) sp.appStatusHandler.SetUInt64Value(core.MetricNumTxInBlock, uint64(totalTxCount)) sp.appStatusHandler.SetUInt64Value(core.MetricNumMiniBlocks, uint64(len(body))) - header.MetaBlockHashes = sp.sortMetaHeaderHashesForCurrentBlockByNonce(true) - sp.blockSizeThrottler.Add( round, core.MaxUint32(header.ItemsInBody(), header.ItemsInHeader())) @@ -1749,60 +1741,3 @@ func (sp *shardProcessor) 
getMaxMiniBlocksSpaceRemained( return maxMbSpaceRemained } - -//TODO: remove bool parameter and give instead the set to sort -func (sp *shardProcessor) sortMetaHeadersForCurrentBlockByNonce(usedInBlock bool) ([]*block.MetaBlock, error) { - hdrsForCurrentBlock := make([]*block.MetaBlock, 0) - - sp.hdrsForCurrBlock.mutHdrsForBlock.RLock() - for _, hdrInfo := range sp.hdrsForCurrBlock.hdrHashAndInfo { - if hdrInfo.usedInBlock != usedInBlock { - continue - } - - metaHdr, ok := hdrInfo.hdr.(*block.MetaBlock) - if !ok { - sp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() - return nil, process.ErrWrongTypeAssertion - } - - hdrsForCurrentBlock = append(hdrsForCurrentBlock, metaHdr) - } - sp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() - - if len(hdrsForCurrentBlock) > 1 { - sort.Slice(hdrsForCurrentBlock, func(i, j int) bool { - return hdrsForCurrentBlock[i].Nonce < hdrsForCurrentBlock[j].Nonce - }) - } - - return hdrsForCurrentBlock, nil -} - -//TODO: remove bool parameter and give instead the set to sort -func (sp *shardProcessor) sortMetaHeaderHashesForCurrentBlockByNonce(usedInBlock bool) [][]byte { - hdrsForCurrentBlockInfo := make([]*nonceAndHashInfo, 0) - - sp.hdrsForCurrBlock.mutHdrsForBlock.RLock() - for metaBlockHash, hdrInfo := range sp.hdrsForCurrBlock.hdrHashAndInfo { - if hdrInfo.usedInBlock != usedInBlock { - continue - } - - hdrsForCurrentBlockInfo = append(hdrsForCurrentBlockInfo, &nonceAndHashInfo{nonce: hdrInfo.hdr.GetNonce(), hash: []byte(metaBlockHash)}) - } - sp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() - - if len(hdrsForCurrentBlockInfo) > 1 { - sort.Slice(hdrsForCurrentBlockInfo, func(i, j int) bool { - return hdrsForCurrentBlockInfo[i].nonce < hdrsForCurrentBlockInfo[j].nonce - }) - } - - hdrsHashesForCurrentBlock := make([][]byte, len(hdrsForCurrentBlockInfo)) - for i := 0; i < len(hdrsForCurrentBlockInfo); i++ { - hdrsHashesForCurrentBlock[i] = hdrsForCurrentBlockInfo[i].hash - } - - return hdrsHashesForCurrentBlock -} diff --git a/process/block/shardblock_test.go b/process/block/shardblock_test.go index d1e8da10797..f39df0ad628 100644 --- a/process/block/shardblock_test.go +++ b/process/block/shardblock_test.go @@ -1353,7 +1353,7 @@ func TestShardProcessor_RequestMissingFinalityAttestingHeaders(t *testing.T) { arguments.DataPool = tdp sp, _ := blproc.NewShardProcessor(arguments) - sp.SetHighestHdrNonceForCurrentBlock(1) + sp.SetHighestHdrNonceForCurrentBlock(sharding.MetachainShardId, 1) res := sp.RequestMissingFinalityAttestingHeaders() assert.Equal(t, res > 0, true) } From 0df200e26225b471aa71967e702cabd402f373de Mon Sep 17 00:00:00 2001 From: Sebastian Marian Date: Tue, 8 Oct 2019 13:40:24 +0300 Subject: [PATCH 217/234] * Removed empty rows --- cmd/node/config/economics.toml | 3 --- 1 file changed, 3 deletions(-) diff --git a/cmd/node/config/economics.toml b/cmd/node/config/economics.toml index e57a168557a..cc06fbf98f4 100644 --- a/cmd/node/config/economics.toml +++ b/cmd/node/config/economics.toml @@ -13,6 +13,3 @@ MinGasPrice = 0 MinGasLimitForTx = 5 MinTxFee = 0 - - - From a93fdc8eab6db1c23245552d9ff8b92d2202c80a Mon Sep 17 00:00:00 2001 From: Sebastian Marian Date: Tue, 8 Oct 2019 13:41:37 +0300 Subject: [PATCH 218/234] * Put back initial peer list --- cmd/node/config/p2p.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/node/config/p2p.toml b/cmd/node/config/p2p.toml index b60b68bf405..9c8f144138a 100644 --- a/cmd/node/config/p2p.toml +++ b/cmd/node/config/p2p.toml @@ -44,4 +44,4 @@ # #If the initial peers list is left empty, the node will 
not try to connect to other peers during initial bootstrap #phase but will accept connections and will do the network discovery if another peer connects to it - InitialPeerList = ["/ip4/127.0.0.1/tcp/10000/p2p/16Uiu2HAmAPaxcnVCfC7F59LTbBU2UwWNWfJHmuxwQEdDDyjmaSW4"] + InitialPeerList = ["/ip4/127.0.0.1/tcp/10000/p2p/16Uiu2HAmAzokH1ozUF52Vy3RKqRfCMr9ZdNDkUQFEkXRs9DqvmKf"]
From cbd8d6bf2eedb180bc5ff60a63d6e5bdb2f66098 Mon Sep 17 00:00:00 2001
From: Sebastian Marian
Date: Tue, 8 Oct 2019 15:14:37 +0300
Subject: [PATCH 219/234] * Increase max tx nonce delta allowed value from 100 to 15k, because of an edge case which could prevent the shard from advancing to the next block
---
 cmd/node/factory/structs.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/cmd/node/factory/structs.go b/cmd/node/factory/structs.go
index fff86ee4e11..75968cc6f11 100644
--- a/cmd/node/factory/structs.go
+++ b/cmd/node/factory/structs.go
@@ -92,7 +92,7 @@ const ( var log = logger.DefaultLogger() -const maxTxNonceDeltaAllowed = 100 +const maxTxNonceDeltaAllowed = 15000 // ErrCreateForkDetector signals that a fork detector could not be created //TODO: Extract all others error messages from this file in some defined errors
From 8d34cdb09a6ed5979fda5c4c6f509897b0977181 Mon Sep 17 00:00:00 2001
From: iulianpascalau
Date: Tue, 8 Oct 2019 15:16:46 +0300
Subject: [PATCH 220/234] modified the log level prints on monitor.go
---
 node/heartbeat/monitor.go | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/node/heartbeat/monitor.go b/node/heartbeat/monitor.go
index 347c3059c51..153d9676b0b 100644
--- a/node/heartbeat/monitor.go
+++ b/node/heartbeat/monitor.go
@@ -84,7 +84,7 @@ func NewMonitor( err = mon.loadRestOfPubKeysFromStorage() if err != nil { - log.Warn(fmt.Sprintf("heartbeat can't load public keys from storage: %s", err.Error())) + log.Debug(fmt.Sprintf("heartbeat can't load public keys from storage: %s", err.Error())) } return mon, nil
@@ -200,7 +200,7 @@ func (m *Monitor) addHeartbeatMessageToMap(hb *Heartbeat) { hbDTO := m.convertToExportedStruct(hbmi) err := m.storer.SavePubkeyData(hb.Pubkey, &hbDTO) if err != nil { - log.Warn(fmt.Sprintf("cannot save heartbeat to db: %s", err.Error())) + log.Error(fmt.Sprintf("cannot save heartbeat to db: %s", err.Error())) } m.addPeerToFullPeersSlice(hb.Pubkey) }
@@ -210,7 +210,7 @@ func (m *Monitor) addPeerToFullPeersSlice(pubKey []byte) { m.fullPeersSlice = append(m.fullPeersSlice, pubKey) err := m.storer.SaveKeys(m.fullPeersSlice) if err != nil { - log.Warn(fmt.Sprintf("can't store the keys slice: %s", err.Error())) + log.Error(fmt.Sprintf("can't store the keys slice: %s", err.Error())) } } }
From cee67091d11f5c6bee7e6aedd0dd9a266fe335b9 Mon Sep 17 00:00:00 2001
From: miiu96
Date: Tue, 8 Oct 2019 15:47:37 +0300
Subject: [PATCH 221/234] EN-4257 - fix after review
---
 config/tomlConfig_test.go | 6 ++--
 .../block/executingRewardMiniblocks_test.go | 8 ++---
 process/economics/economicsData.go | 36 +++++++++----------
 process/economics/economicsData_test.go | 4 +--
 process/errors.go | 6 ++--
 process/transaction/process.go | 6 ++--
 6 files changed, 32 insertions(+), 34 deletions(-)
diff --git a/config/tomlConfig_test.go b/config/tomlConfig_test.go
index 4f740f8daa5..5839d882b56 100644
--- a/config/tomlConfig_test.go
+++ b/config/tomlConfig_test.go
@@ -121,12 +121,12 @@ func TestTomlParser(t *testing.T) { func TestTomlEconomicsParser(t *testing.T) { communityAddress := "commAddr" burnAddress := "burnAddr" rewardsValue :=
"1000000000000000000000000000000000" communityPercentage := 0.1 leaderPercentage := 0.1 burnPercentage := 0.8 - minGasPrice := "1" - minGasLimitForTx := "2" + minGasPrice := "18446744073709551615" + minGasLimitForTx := "18446744073709551615" cfgEconomicsExpected := ConfigEconomics{ EconomicsAddresses: EconomicsAddresses{ diff --git a/integrationTests/multiShard/block/executingRewardMiniblocks_test.go b/integrationTests/multiShard/block/executingRewardMiniblocks_test.go index b76d4c1d382..070228eb130 100644 --- a/integrationTests/multiShard/block/executingRewardMiniblocks_test.go +++ b/integrationTests/multiShard/block/executingRewardMiniblocks_test.go @@ -335,7 +335,7 @@ func verifyRewardsForMetachain( assert.Nil(t, err) expectedBalance := big.NewInt(0).SetUint64(uint64(numOfTimesRewarded)) - expectedBalance = expectedBalance.Mul(expectedBalance, rewardValue) + expectedBalance.Mul(expectedBalance, rewardValue) assert.Equal(t, expectedBalance, acc.(*state.Account).Balance) } } @@ -361,11 +361,11 @@ func verifyRewardsForShards( nbProposedTxs := nbTxsForLeaderAddress[address] expectedBalance := big.NewInt(0).SetUint64(uint64(nbRewards)) - expectedBalance = expectedBalance.Mul(expectedBalance, rewardValue) + expectedBalance.Mul(expectedBalance, rewardValue) totalFees := big.NewInt(0).SetUint64(uint64(nbProposedTxs)) - totalFees = totalFees.Mul(totalFees, big.NewInt(0).SetUint64(uint64(feePerTxForLeader))) + totalFees.Mul(totalFees, big.NewInt(0).SetUint64(uint64(feePerTxForLeader))) - expectedBalance = expectedBalance.Add(expectedBalance, totalFees) + expectedBalance.Add(expectedBalance, totalFees) fmt.Println(fmt.Sprintf("checking account %s has balance %d", core.ToB64(acc.AddressContainer().Bytes()), expectedBalance)) assert.Equal(t, expectedBalance, acc.(*state.Account).Balance) } diff --git a/process/economics/economicsData.go b/process/economics/economicsData.go index 987ac3e6ea9..9eb1ff1d770 100644 --- a/process/economics/economicsData.go +++ b/process/economics/economicsData.go @@ -22,7 +22,7 @@ type EconomicsData struct { // NewEconomicsData will create and object with information about economics parameters func NewEconomicsData(economics *config.ConfigEconomics) (*EconomicsData, error) { - //TODO check addresses what happens if addresses are wrong + //TODO check what happens if addresses are wrong rewardsValue, minGasPrice, minGasLimitForTx, err := convertValues(economics) if err != nil { return nil, err @@ -59,10 +59,12 @@ func convertValues(economics *config.ConfigEconomics) (*big.Int, uint64, uint64, if !ok { return nil, 0, 0, process.ErrInvalidRewardsValue } + minGasPrice, err := strconv.ParseUint(economics.FeeSettings.MinGasPrice, conversionBase, bitConversionSize) if err != nil { return nil, 0, 0, process.ErrInvalidMinimumGasPrice } + minGasLimitForTx, err := strconv.ParseUint(economics.FeeSettings.MinGasLimitForTx, conversionBase, bitConversionSize) if err != nil { return nil, 0, 0, process.ErrInvalidMinimumGasLimitForTx @@ -76,34 +78,30 @@ func checkValues(economics *config.ConfigEconomics) error { bigCommunityPercentage := big.NewFloat(economics.RewardsSettings.CommunityPercentage) bigLeaderPercentage := big.NewFloat(economics.RewardsSettings.LeaderPercentage) - notGreaterOrEqualWithZero := bigBurnPercentage.Cmp(big.NewFloat(0.0)) - notLessThanOne := big.NewFloat(1.0).Cmp(bigBurnPercentage) - if notGreaterOrEqualWithZero < 0 || notLessThanOne < 0 { + if isNotPercentageValid(bigBurnPercentage) || isNotPercentageValid(bigCommunityPercentage) || 
isNotPercentageValid(bigLeaderPercentage) { return process.ErrInvalidRewardsPercentages } - notGreaterOrEqualWithZero = bigCommunityPercentage.Cmp(big.NewFloat(0.0)) - notLessThanOne = big.NewFloat(1.0).Cmp(bigCommunityPercentage) - if notGreaterOrEqualWithZero < 0 || notLessThanOne < 0 { - return process.ErrInvalidRewardsPercentages - } + sumPercentage := new(big.Float) + sumPercentage.Add(bigBurnPercentage, bigCommunityPercentage) + sumPercentage.Add(sumPercentage, bigLeaderPercentage) - notGreaterOrEqualWithZero = bigLeaderPercentage.Cmp(big.NewFloat(0.0)) - notLessThanOne = big.NewFloat(1.0).Cmp(bigLeaderPercentage) - if notGreaterOrEqualWithZero < 0 || notLessThanOne < 0 { + isNotEqualToOne := sumPercentage.Cmp(big.NewFloat(1.0)) != 0 + if isNotEqualToOne { return process.ErrInvalidRewardsPercentages } - sumPercentage := new(big.Float) - sumPercentage = sumPercentage.Add(bigBurnPercentage, bigCommunityPercentage) - sumPercentage = sumPercentage.Add(sumPercentage, bigLeaderPercentage) + return nil +} - equalsWithOne := sumPercentage.Cmp(big.NewFloat(1.0)) - if equalsWithOne != 0 { - return process.ErrInvalidRewardsPercentages +func isNotPercentageValid(percentage *big.Float) bool { + isLessThanZero := percentage.Cmp(big.NewFloat(0.0)) < 0 + isGreaterThanOne := big.NewFloat(1.0).Cmp(percentage) < 0 + if isLessThanZero || isGreaterThanOne { + return true } - return nil + return false } // RewardsValue will return rewards value diff --git a/process/economics/economicsData_test.go b/process/economics/economicsData_test.go index 9053f097aaa..50cbca84bee 100644 --- a/process/economics/economicsData_test.go +++ b/process/economics/economicsData_test.go @@ -18,13 +18,13 @@ func createDummyEconomicsConfig() *config.ConfigEconomics { BurnAddress: "addr2", }, RewardsSettings: config.RewardsSettings{ - RewardsValue: "1000", + RewardsValue: "1000000000000000000000000000000000", CommunityPercentage: 0.1, LeaderPercentage: 0.1, BurnPercentage: 0.8, }, FeeSettings: config.FeeSettings{ - MinGasPrice: "100", + MinGasPrice: "18446744073709551615", MinGasLimitForTx: "500", }, } diff --git a/process/errors.go b/process/errors.go index f6bfa673ee2..7cca2818df1 100644 --- a/process/errors.go +++ b/process/errors.go @@ -496,13 +496,13 @@ var ErrInsufficientGasPriceInTx = errors.New("insufficient gas price in tx") // ErrInsufficientGasLimitInTx signals that a lower gas limit than required was provided var ErrInsufficientGasLimitInTx = errors.New("insufficient gas limit in tx") -// ErrInvalidMinimumGasPrice signals that a invalid gas price has been read from config file +// ErrInvalidMinimumGasPrice signals that an invalid gas price has been read from config file var ErrInvalidMinimumGasPrice = errors.New("invalid minimum gas price") -// ErrInvalidMinimumGasLimitForTx signals that a invalid minimum gas limit for transactions has been read from config file +// ErrInvalidMinimumGasLimitForTx signals that an invalid minimum gas limit for transactions has been read from config file var ErrInvalidMinimumGasLimitForTx = errors.New("invalid minimum gas limit for transactions") -// ErrInvalidRewardsValue signals that a invalid rewards value has been read from config file +// ErrInvalidRewardsValue signals that an invalid rewards value has been read from config file var ErrInvalidRewardsValue = errors.New("invalid rewards value") // ErrInvalidRewardsPercentages signal that rewards percentages are not correct diff --git a/process/transaction/process.go b/process/transaction/process.go index d158f516c56..083026646b7 
100644 --- a/process/transaction/process.go +++ b/process/transaction/process.go @@ -134,11 +134,11 @@ func (txProc *txProcessor) processTxFee(tx *transaction.Transaction, acntSnd *st txDataLen := int64(len(tx.Data)) txProc.mutTxFee.RLock() minTxFee := big.NewInt(0).SetUint64(txProc.economicsFee.MinGasLimitForTx()) - minTxFee = minTxFee.Mul(minTxFee, big.NewInt(0).SetUint64(txProc.economicsFee.MinGasPrice())) + minTxFee.Mul(minTxFee, big.NewInt(0).SetUint64(txProc.economicsFee.MinGasPrice())) minFee := big.NewInt(0) - minFee = minFee.Mul(big.NewInt(txDataLen), big.NewInt(0).SetUint64(txProc.economicsFee.MinGasPrice())) - minFee = minFee.Add(minFee, minTxFee) + minFee.Mul(big.NewInt(txDataLen), big.NewInt(0).SetUint64(txProc.economicsFee.MinGasPrice())) + minFee.Add(minFee, minTxFee) txProc.mutTxFee.RUnlock() if minFee.Cmp(cost) > 0 { From d5596342c2ed3b14b51edf570eb190727d9ae6be Mon Sep 17 00:00:00 2001 From: Sebastian Marian Date: Tue, 8 Oct 2019 20:15:46 +0300 Subject: [PATCH 222/234] * Fixed after review --- process/block/baseProcess.go | 38 +++++++++++++++--------- process/block/export_test.go | 11 ++++--- process/block/metablock.go | 50 ++++++++++++++------------------ process/block/shardblock.go | 23 +++++++-------- process/block/shardblock_test.go | 6 ++-- 5 files changed, 65 insertions(+), 63 deletions(-) diff --git a/process/block/baseProcess.go b/process/block/baseProcess.go index b8ec6deb16d..c9e2bbe2582 100644 --- a/process/block/baseProcess.go +++ b/process/block/baseProcess.go @@ -556,14 +556,20 @@ func checkProcessorNilParameters( } func (bp *baseProcessor) createBlockStarted() { + bp.resetMissingHdrs() bp.hdrsForCurrBlock.mutHdrsForBlock.Lock() - bp.hdrsForCurrBlock.missingHdrs = 0 - bp.hdrsForCurrBlock.missingFinalityAttestingHdrs = 0 bp.hdrsForCurrBlock.hdrHashAndInfo = make(map[string]*hdrInfo) bp.hdrsForCurrBlock.highestHdrNonce = make(map[uint32]uint64) bp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() } +func (bp *baseProcessor) resetMissingHdrs() { + bp.hdrsForCurrBlock.mutHdrsForBlock.Lock() + bp.hdrsForCurrBlock.missingHdrs = 0 + bp.hdrsForCurrBlock.missingFinalityAttestingHdrs = 0 + bp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() +} + //TODO: remove bool parameter and give instead the set to sort func (bp *baseProcessor) sortHeadersForCurrentBlockByNonce(usedInBlock bool) map[uint32][]data.HeaderHandler { hdrsForCurrentBlock := make(map[uint32][]data.HeaderHandler) @@ -579,8 +585,7 @@ func (bp *baseProcessor) sortHeadersForCurrentBlockByNonce(usedInBlock bool) map bp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() // sort headers for each shard - for shardId := uint32(0); shardId < bp.shardCoordinator.NumberOfShards(); shardId++ { - hdrsForShard := hdrsForCurrentBlock[shardId] + for _, hdrsForShard := range hdrsForCurrentBlock { process.SortHeadersByNonce(hdrsForShard) } @@ -588,8 +593,8 @@ func (bp *baseProcessor) sortHeadersForCurrentBlockByNonce(usedInBlock bool) map } //TODO: remove bool parameter and give instead the set to sort -func (bp *baseProcessor) sortHeaderHashesForCurrentBlockByNonce(usedInBlock bool) [][]byte { - hdrsForCurrentBlockInfo := make([]*nonceAndHashInfo, 0) +func (bp *baseProcessor) sortHeaderHashesForCurrentBlockByNonce(usedInBlock bool) map[uint32][][]byte { + hdrsForCurrentBlockInfo := make(map[uint32][]*nonceAndHashInfo) bp.hdrsForCurrBlock.mutHdrsForBlock.RLock() for metaBlockHash, hdrInfo := range bp.hdrsForCurrBlock.hdrHashAndInfo { @@ -597,19 +602,24 @@ func (bp *baseProcessor) sortHeaderHashesForCurrentBlockByNonce(usedInBlock bool continue } 
- hdrsForCurrentBlockInfo = append(hdrsForCurrentBlockInfo, &nonceAndHashInfo{nonce: hdrInfo.hdr.GetNonce(), hash: []byte(metaBlockHash)}) + hdrsForCurrentBlockInfo[hdrInfo.hdr.GetShardID()] = append(hdrsForCurrentBlockInfo[hdrInfo.hdr.GetShardID()], + &nonceAndHashInfo{nonce: hdrInfo.hdr.GetNonce(), hash: []byte(metaBlockHash)}) } bp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() - if len(hdrsForCurrentBlockInfo) > 1 { - sort.Slice(hdrsForCurrentBlockInfo, func(i, j int) bool { - return hdrsForCurrentBlockInfo[i].nonce < hdrsForCurrentBlockInfo[j].nonce - }) + for _, hdrsForShard := range hdrsForCurrentBlockInfo { + if len(hdrsForShard) > 1 { + sort.Slice(hdrsForShard, func(i, j int) bool { + return hdrsForShard[i].nonce < hdrsForShard[j].nonce + }) + } } - hdrsHashesForCurrentBlock := make([][]byte, len(hdrsForCurrentBlockInfo)) - for i := 0; i < len(hdrsForCurrentBlockInfo); i++ { - hdrsHashesForCurrentBlock[i] = hdrsForCurrentBlockInfo[i].hash + hdrsHashesForCurrentBlock := make(map[uint32][][]byte) + for shardId, hdrsForShard := range hdrsForCurrentBlockInfo { + for _, hdrForShard := range hdrsForShard { + hdrsHashesForCurrentBlock[shardId] = append(hdrsHashesForCurrentBlock[shardId], hdrForShard.hash) + } } return hdrsHashesForCurrentBlock diff --git a/process/block/export_test.go b/process/block/export_test.go index 0f251c7d98b..ba5a98aa534 100644 --- a/process/block/export_test.go +++ b/process/block/export_test.go @@ -40,8 +40,8 @@ func (sp *shardProcessor) ReceivedMetaBlock(metaBlockHash []byte) { sp.receivedMetaBlock(metaBlockHash) } -func (sp *shardProcessor) CreateMiniBlocks(noShards uint32, maxItemsInBlock uint32, round uint64, haveTime func() bool) (block.Body, error) { - return sp.createMiniBlocks(noShards, maxItemsInBlock, round, haveTime) +func (sp *shardProcessor) CreateMiniBlocks(maxItemsInBlock uint32, round uint64, haveTime func() bool) (block.Body, error) { + return sp.createMiniBlocks(maxItemsInBlock, round, haveTime) } func (sp *shardProcessor) GetOrderedProcessedMetaBlocksFromHeader(header *block.Header) ([]data.HeaderHandler, error) { @@ -110,8 +110,8 @@ func (mp *metaProcessor) RemoveBlockInfoFromPool() error { return mp.removeBlockInfoFromPool() } -func (mp *metaProcessor) ReceivedHeader(hdrHash []byte) { - mp.receivedHeader(hdrHash) +func (mp *metaProcessor) ReceivedShardHeader(shardHeaderHash []byte) { + mp.receivedShardHeader(shardHeaderHash) } func (mp *metaProcessor) AddHdrHashToRequestedList(hdr *block.Header, hdrHash []byte) { @@ -257,12 +257,11 @@ func (sp *shardProcessor) GetOrderedMetaBlocks(round uint64) ([]*hashAndHdr, err } func (sp *shardProcessor) CreateAndProcessCrossMiniBlocksDstMe( - noShards uint32, maxItemsInBlock uint32, round uint64, haveTime func() bool, ) (block.MiniBlockSlice, uint32, uint32, error) { - return sp.createAndProcessCrossMiniBlocksDstMe(noShards, maxItemsInBlock, round, haveTime) + return sp.createAndProcessCrossMiniBlocksDstMe(maxItemsInBlock, round, haveTime) } func (bp *baseProcessor) SetBlockSizeThrottler(blockSizeThrottler process.BlockSizeThrottler) { diff --git a/process/block/metablock.go b/process/block/metablock.go index c5f1b18f8a3..ed9b9f094cd 100644 --- a/process/block/metablock.go +++ b/process/block/metablock.go @@ -115,7 +115,7 @@ func NewMetaProcessor( mp.hdrsForCurrBlock.highestHdrNonce = make(map[uint32]uint64) headerPool := mp.dataPool.ShardHeaders() - headerPool.RegisterHandler(mp.receivedHeader) + headerPool.RegisterHandler(mp.receivedShardHeader) mp.chRcvAllHdrs = make(chan bool) @@ -183,11 +183,11 @@ 
func (mp *metaProcessor) ProcessBlock( err = mp.waitForBlockHeaders(haveTime()) - mp.hdrsForCurrBlock.mutHdrsForBlock.Lock() + mp.hdrsForCurrBlock.mutHdrsForBlock.RLock() missingShardHdrs := mp.hdrsForCurrBlock.missingHdrs - mp.hdrsForCurrBlock.missingHdrs = 0 - mp.hdrsForCurrBlock.missingFinalityAttestingHdrs = 0 - mp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() + mp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() + + mp.resetMissingHdrs() if requestedShardHdrs > 0 { log.Info(fmt.Sprintf("received %d missing shard headers\n", requestedShardHdrs-missingShardHdrs)) @@ -669,12 +669,7 @@ func (mp *metaProcessor) checkShardHeadersValidity() (map[uint32]data.HeaderHand return highestNonceHdrs, nil } - for shardId := uint32(0); shardId < mp.shardCoordinator.NumberOfShards(); shardId++ { - hdrsForShard := usedShardHdrs[shardId] - if len(hdrsForShard) == 0 { - continue - } - + for shardId, hdrsForShard := range usedShardHdrs { for _, shardHdr := range hdrsForShard { err := mp.isHdrConstructionValid(shardHdr, tmpLastNotarized[shardId]) if err != nil { @@ -693,17 +688,16 @@ func (mp *metaProcessor) checkShardHeadersValidity() (map[uint32]data.HeaderHand func (mp *metaProcessor) checkShardHeadersFinality(highestNonceHdrs map[uint32]data.HeaderHandler) error { finalityAttestingShardHdrs := mp.sortHeadersForCurrentBlockByNonce(false) - for index, lastVerifiedHdr := range highestNonceHdrs { + for shardId, lastVerifiedHdr := range highestNonceHdrs { if lastVerifiedHdr == nil || lastVerifiedHdr.IsInterfaceNil() { return process.ErrNilBlockHeader } - if lastVerifiedHdr.GetShardID() != index { + if lastVerifiedHdr.GetShardID() != shardId { return process.ErrShardIdMissmatch } // verify if there are "K" block after current to make this one final nextBlocksVerified := uint32(0) - shardId := lastVerifiedHdr.GetShardID() for _, shardHdr := range finalityAttestingShardHdrs[shardId] { if nextBlocksVerified >= mp.shardBlockFinality { break @@ -777,40 +771,40 @@ func (mp *metaProcessor) isShardHeaderValidFinal(currHdr *block.Header, lastHdr return false, nil } -// receivedHeader is a call back function which is called when a new header +// receivedShardHeader is a call back function which is called when a new header // is added in the headers pool -func (mp *metaProcessor) receivedHeader(shardBlockHash []byte) { - shardBlockPool := mp.dataPool.ShardHeaders() - if shardBlockPool == nil { +func (mp *metaProcessor) receivedShardHeader(shardHeaderHash []byte) { + shardHeaderPool := mp.dataPool.ShardHeaders() + if shardHeaderPool == nil { return } - obj, ok := shardBlockPool.Peek(shardBlockHash) + obj, ok := shardHeaderPool.Peek(shardHeaderHash) if !ok { return } - shardBlock, ok := obj.(*block.Header) + shardHeader, ok := obj.(*block.Header) if !ok { return } log.Debug(fmt.Sprintf("received shard block with hash %s and nonce %d from network\n", - core.ToB64(shardBlockHash), - shardBlock.Nonce)) + core.ToB64(shardHeaderHash), + shardHeader.Nonce)) mp.hdrsForCurrBlock.mutHdrsForBlock.Lock() haveMissingShardHeaders := mp.hdrsForCurrBlock.missingHdrs > 0 || mp.hdrsForCurrBlock.missingFinalityAttestingHdrs > 0 if haveMissingShardHeaders { - hdrInfoForHash := mp.hdrsForCurrBlock.hdrHashAndInfo[string(shardBlockHash)] + hdrInfoForHash := mp.hdrsForCurrBlock.hdrHashAndInfo[string(shardHeaderHash)] receivedMissingShardHeader := hdrInfoForHash != nil && (hdrInfoForHash.hdr == nil || hdrInfoForHash.hdr.IsInterfaceNil()) if receivedMissingShardHeader { - hdrInfoForHash.hdr = shardBlock + hdrInfoForHash.hdr = shardHeader 
mp.hdrsForCurrBlock.missingHdrs-- - if shardBlock.Nonce > mp.hdrsForCurrBlock.highestHdrNonce[shardBlock.ShardId] { - mp.hdrsForCurrBlock.highestHdrNonce[shardBlock.ShardId] = shardBlock.Nonce + if shardHeader.Nonce > mp.hdrsForCurrBlock.highestHdrNonce[shardHeader.ShardId] { + mp.hdrsForCurrBlock.highestHdrNonce[shardHeader.ShardId] = shardHeader.Nonce } } @@ -850,7 +844,7 @@ func (mp *metaProcessor) requestMissingFinalityAttestingHeaders() uint32 { continue } - shardBlock, shardBlockHash, err := process.GetShardHeaderFromPoolWithNonce( + shardHeader, shardHeaderHash, err := process.GetShardHeaderFromPoolWithNonce( i, shardId, mp.dataPool.ShardHeaders(), @@ -862,7 +856,7 @@ func (mp *metaProcessor) requestMissingFinalityAttestingHeaders() uint32 { continue } - mp.hdrsForCurrBlock.hdrHashAndInfo[string(shardBlockHash)] = &hdrInfo{hdr: shardBlock, usedInBlock: false} + mp.hdrsForCurrBlock.hdrHashAndInfo[string(shardHeaderHash)] = &hdrInfo{hdr: shardHeader, usedInBlock: false} } } diff --git a/process/block/shardblock.go b/process/block/shardblock.go index eeac04e35b4..143f3ec5c2e 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -209,11 +209,11 @@ func (sp *shardProcessor) ProcessBlock( err = sp.waitForMetaHdrHashes(haveTime()) - sp.hdrsForCurrBlock.mutHdrsForBlock.Lock() + sp.hdrsForCurrBlock.mutHdrsForBlock.RLock() missingMetaHdrs := sp.hdrsForCurrBlock.missingHdrs - sp.hdrsForCurrBlock.missingHdrs = 0 - sp.hdrsForCurrBlock.missingFinalityAttestingHdrs = 0 - sp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() + sp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() + + sp.resetMissingHdrs() if requestedMetaHdrs > 0 { log.Info(fmt.Sprintf("received %d missing meta headers\n", requestedMetaHdrs-missingMetaHdrs)) @@ -613,7 +613,7 @@ func (sp *shardProcessor) CreateBlockBody(round uint64, haveTime func() bool) (d sp.createBlockStarted() sp.blockSizeThrottler.ComputeMaxItems() - miniBlocks, err := sp.createMiniBlocks(sp.shardCoordinator.NumberOfShards(), sp.blockSizeThrottler.MaxItemsToAdd(), round, haveTime) + miniBlocks, err := sp.createMiniBlocks(sp.blockSizeThrottler.MaxItemsToAdd(), round, haveTime) if err != nil { return nil, err } @@ -1177,14 +1177,14 @@ func (sp *shardProcessor) requestMissingFinalityAttestingHeaders() uint32 { return requestedBlockHeaders } -func (sp *shardProcessor) requestMetaHeaders(shardBlock *block.Header) (uint32, uint32) { +func (sp *shardProcessor) requestMetaHeaders(shardHeader *block.Header) (uint32, uint32) { _ = process.EmptyChannel(sp.chRcvAllMetaHdrs) - if len(shardBlock.MetaBlockHashes) == 0 { + if len(shardHeader.MetaBlockHashes) == 0 { return 0, 0 } - missingHeadersHashes := sp.computeMissingAndExistingMetaHeaders(shardBlock) + missingHeadersHashes := sp.computeMissingAndExistingMetaHeaders(shardHeader) sp.hdrsForCurrBlock.mutHdrsForBlock.Lock() for _, hash := range missingHeadersHashes { @@ -1367,7 +1367,6 @@ func (sp *shardProcessor) isMetaHeaderFinal(currHdr data.HeaderHandler, sortedHd // full verification through metachain header func (sp *shardProcessor) createAndProcessCrossMiniBlocksDstMe( - noShards uint32, maxItemsInBlock uint32, round uint64, haveTime func() bool, @@ -1473,7 +1472,6 @@ func (sp *shardProcessor) createAndProcessCrossMiniBlocksDstMe( } func (sp *shardProcessor) createMiniBlocks( - noShards uint32, maxItemsInBlock uint32, round uint64, haveTime func() bool, @@ -1495,7 +1493,7 @@ func (sp *shardProcessor) createMiniBlocks( return nil, process.ErrNilTransactionPool } - destMeMiniBlocks, nbTxs, nbHdrs, err := 
sp.createAndProcessCrossMiniBlocksDstMe(noShards, maxItemsInBlock, round, haveTime) + destMeMiniBlocks, nbTxs, nbHdrs, err := sp.createAndProcessCrossMiniBlocksDstMe(maxItemsInBlock, round, haveTime) if err != nil { log.Info(err.Error()) } @@ -1583,7 +1581,8 @@ func (sp *shardProcessor) CreateBlockHeader(bodyHandler data.BodyHandler, round header.MiniBlockHeaders = miniBlockHeaders header.TxCount = uint32(totalTxCount) - header.MetaBlockHashes = sp.sortHeaderHashesForCurrentBlockByNonce(true) + metaBlockHashes := sp.sortHeaderHashesForCurrentBlockByNonce(true) + header.MetaBlockHashes = metaBlockHashes[sharding.MetachainShardId] sp.appStatusHandler.SetUInt64Value(core.MetricNumTxInBlock, uint64(totalTxCount)) sp.appStatusHandler.SetUInt64Value(core.MetricNumMiniBlocks, uint64(len(body))) diff --git a/process/block/shardblock_test.go b/process/block/shardblock_test.go index f39df0ad628..2bb5628004b 100644 --- a/process/block/shardblock_test.go +++ b/process/block/shardblock_test.go @@ -2648,7 +2648,7 @@ func TestShardProcessor_CreateAndProcessCrossMiniBlocksDstMe(t *testing.T) { arguments := CreateMockArgumentsMultiShard() arguments.DataPool = tdp sp, _ := blproc.NewShardProcessor(arguments) - miniBlockSlice, usedMetaHdrsHashes, noOfTxs, err := sp.CreateAndProcessCrossMiniBlocksDstMe(3, 2, 2, haveTimeTrue) + miniBlockSlice, usedMetaHdrsHashes, noOfTxs, err := sp.CreateAndProcessCrossMiniBlocksDstMe(2, 2, haveTimeTrue) assert.Equal(t, err == nil, true) assert.Equal(t, len(miniBlockSlice) == 0, true) assert.Equal(t, usedMetaHdrsHashes, uint32(0)) @@ -2755,7 +2755,7 @@ func TestShardProcessor_CreateAndProcessCrossMiniBlocksDstMeProcessPartOfMiniBlo arguments.DataPool = tdp sp, _ := blproc.NewShardProcessor(arguments) - miniBlocksReturned, usedMetaHdrsHashes, nrTxAdded, err := sp.CreateAndProcessCrossMiniBlocksDstMe(3, 2, 2, haveTimeTrue) + miniBlocksReturned, usedMetaHdrsHashes, nrTxAdded, err := sp.CreateAndProcessCrossMiniBlocksDstMe(2, 2, haveTimeTrue) assert.Equal(t, 0, len(miniBlocksReturned)) assert.Equal(t, uint32(0), usedMetaHdrsHashes) @@ -2870,7 +2870,7 @@ func TestShardProcessor_CreateMiniBlocksShouldWorkWithIntraShardTxs(t *testing.T arguments.TxCoordinator = tc bp, _ := blproc.NewShardProcessor(arguments) - blockBody, err := bp.CreateMiniBlocks(1, 15000, 0, func() bool { return true }) + blockBody, err := bp.CreateMiniBlocks(15000, 0, func() bool { return true }) assert.Nil(t, err) //testing execution From 9dd9cc3bcbc9dc8251f223bb34c33b9e48100827 Mon Sep 17 00:00:00 2001 From: miiu96 Date: Wed, 9 Oct 2019 10:33:07 +0300 Subject: [PATCH 223/234] EN-4257 - fix after review --- cmd/node/config/economics.toml | 2 +- config/economicsConfig.go | 4 +- config/tomlConfig_test.go | 8 ++-- integrationTests/mock/feeHandlerStub.go | 10 ++--- .../smartContract/testInitializer.go | 6 +-- integrationTests/testInitializer.go | 40 +------------------ integrationTests/testProcessorNode.go | 10 ++--- process/block/preprocess/transactions.go | 2 +- process/block/preprocess/transactions_test.go | 2 +- process/block/shardblock_test.go | 2 +- process/coordinator/process_test.go | 2 +- process/economics/economicsData.go | 20 +++++----- process/economics/economicsData_test.go | 16 ++++---- process/interface.go | 2 +- process/mock/feeHandlerStub.go | 10 ++--- process/transaction/interceptedTransaction.go | 2 +- .../interceptedTransaction_test.go | 2 +- process/transaction/process.go | 2 +- process/transaction/process_test.go | 2 +- 19 files changed, 53 insertions(+), 91 deletions(-) diff --git 
a/cmd/node/config/economics.toml b/cmd/node/config/economics.toml index e562d387574..8050c94e0e1 100644 --- a/cmd/node/config/economics.toml +++ b/cmd/node/config/economics.toml @@ -11,7 +11,7 @@ [FeeSettings] MinGasPrice = "0" - MinGasLimitForTx = "5" + MinGasLimit = "5" diff --git a/config/economicsConfig.go b/config/economicsConfig.go index dfb4fbad4bc..54c28875c7f 100644 --- a/config/economicsConfig.go +++ b/config/economicsConfig.go @@ -16,8 +16,8 @@ type RewardsSettings struct { // FeeSettings will hold economics fee settings type FeeSettings struct { - MinGasPrice string - MinGasLimitForTx string + MinGasPrice string + MinGasLimit string } // ConfigEconomics will hold economics config diff --git a/config/tomlConfig_test.go b/config/tomlConfig_test.go index 5839d882b56..a63f3c769bc 100644 --- a/config/tomlConfig_test.go +++ b/config/tomlConfig_test.go @@ -126,7 +126,7 @@ func TestTomlEconomicsParser(t *testing.T) { leaderPercentage := 0.1 burnPercentage := 0.8 minGasPrice := "18446744073709551615" - minGasLimitForTx := "18446744073709551615" + minGasLimit := "18446744073709551615" cfgEconomicsExpected := ConfigEconomics{ EconomicsAddresses: EconomicsAddresses{ @@ -140,8 +140,8 @@ func TestTomlEconomicsParser(t *testing.T) { BurnPercentage: burnPercentage, }, FeeSettings: FeeSettings{ - MinGasPrice: minGasPrice, - MinGasLimitForTx: minGasLimitForTx, + MinGasPrice: minGasPrice, + MinGasLimit: minGasLimit, }, } @@ -156,7 +156,7 @@ func TestTomlEconomicsParser(t *testing.T) { BurnPercentage = ` + fmt.Sprintf("%.6f", burnPercentage) + ` [FeeSettings] MinGasPrice = "` + minGasPrice + `" - MinGasLimitForTx = "` + minGasLimitForTx + `" + MinGasLimit = "` + minGasLimit + `" ` cfg := ConfigEconomics{} diff --git a/integrationTests/mock/feeHandlerStub.go b/integrationTests/mock/feeHandlerStub.go index a2a1fe31c87..f6d983310e7 100644 --- a/integrationTests/mock/feeHandlerStub.go +++ b/integrationTests/mock/feeHandlerStub.go @@ -1,17 +1,17 @@ package mock type FeeHandlerStub struct { - MinGasPriceCalled func() uint64 - MinGasLimitForTxCalled func() uint64 - MinTxFeeCalled func() uint64 + MinGasPriceCalled func() uint64 + MinGasLimitCalled func() uint64 + MinTxFeeCalled func() uint64 } func (fhs *FeeHandlerStub) MinGasPrice() uint64 { return fhs.MinGasPriceCalled() } -func (fhs *FeeHandlerStub) MinGasLimitForTx() uint64 { - return fhs.MinGasLimitForTxCalled() +func (fhs *FeeHandlerStub) MinGasLimit() uint64 { + return fhs.MinGasLimitCalled() } func (fhs *FeeHandlerStub) MinTxFee() uint64 { diff --git a/integrationTests/multiShard/smartContract/testInitializer.go b/integrationTests/multiShard/smartContract/testInitializer.go index bdd24178118..bb272b90440 100644 --- a/integrationTests/multiShard/smartContract/testInitializer.go +++ b/integrationTests/multiShard/smartContract/testInitializer.go @@ -305,7 +305,7 @@ func createNetNode( MinGasPriceCalled: func() uint64 { return integrationTests.MinTxGasPrice }, - MinGasLimitForTxCalled: func() uint64 { + MinGasLimitCalled: func() uint64 { return integrationTests.MinTxGasLimit }, MinTxFeeCalled: func() uint64 { @@ -414,7 +414,7 @@ func createNetNode( rewardsHandler, txTypeHandler, &mock.FeeHandlerStub{ - MinGasLimitForTxCalled: func() uint64 { + MinGasLimitCalled: func() uint64 { return 5 }, MinTxFeeCalled: func() uint64 { @@ -441,7 +441,7 @@ func createNetNode( rewardProcessor, internalTxProducer, &mock.FeeHandlerStub{ - MinGasLimitForTxCalled: func() uint64 { + MinGasLimitCalled: func() uint64 { return 5 }, MinTxFeeCalled: func() uint64 { diff --git 
a/integrationTests/testInitializer.go b/integrationTests/testInitializer.go index bb2597f870a..b5f6a3f9a1e 100644 --- a/integrationTests/testInitializer.go +++ b/integrationTests/testInitializer.go @@ -435,7 +435,7 @@ func CreateSimpleTxProcessor(accnts state.AccountsAdapter) process.TransactionPr MinGasPriceCalled: func() uint64 { return 0 }, - MinGasLimitForTxCalled: func() uint64 { + MinGasLimitCalled: func() uint64 { return 5 }, MinTxFeeCalled: func() uint64 { @@ -1000,23 +1000,6 @@ func ComputeAndRequestMissingTransactions( } } -// ComputeAndRequestMissingRewardTxs computes the missing reward transactions for each node and requests them -func ComputeAndRequestMissingRewardTxs( - nodes []*TestProcessorNode, - generatedDataHashes [][]byte, - shardResolver uint32, - shardRequesters ...uint32, -) { - for _, n := range nodes { - if !Uint32InSlice(n.ShardCoordinator.SelfId(), shardRequesters) { - continue - } - - neededData := getMissingRewardTxsForNode(n, generatedDataHashes) - requestMissingRewardTxs(n, shardResolver, neededData) - } -} - func getMissingTxsForNode(n *TestProcessorNode, generatedTxHashes [][]byte) [][]byte { neededTxs := make([][]byte, 0) @@ -1030,19 +1013,6 @@ func getMissingTxsForNode(n *TestProcessorNode, generatedTxHashes [][]byte) [][] return neededTxs } -func getMissingRewardTxsForNode(n *TestProcessorNode, generatedTxHashes [][]byte) [][]byte { - neededTxs := make([][]byte, 0) - - for i := 0; i < len(generatedTxHashes); i++ { - _, ok := n.ShardDataPool.RewardTransactions().SearchFirstData(generatedTxHashes[i]) - if !ok { - neededTxs = append(neededTxs, generatedTxHashes[i]) - } - } - - return neededTxs -} - func requestMissingTransactions(n *TestProcessorNode, shardResolver uint32, neededTxs [][]byte) { txResolver, _ := n.ResolverFinder.CrossShardResolver(procFactory.TransactionTopic, shardResolver) @@ -1051,14 +1021,6 @@ func requestMissingTransactions(n *TestProcessorNode, shardResolver uint32, need } } -func requestMissingRewardTxs(n *TestProcessorNode, shardResolver uint32, neededData [][]byte) { - dataResolver, _ := n.ResolverFinder.CrossShardResolver(procFactory.RewardsTransactionTopic, shardResolver) - - for i := 0; i < len(neededData); i++ { - _ = dataResolver.RequestDataFromHash(neededData[i]) - } -} - // CreateRequesterDataPool creates a datapool with a mock txPool func CreateRequesterDataPool(t *testing.T, recvTxs map[int]map[string]struct{}, mutRecvTxs *sync.Mutex, nodeIndex int) dataRetriever.PoolsHolder { diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index aa3884f39a5..b1ec873735f 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -256,7 +256,7 @@ func (tpn *TestProcessorNode) initChainHandler() { func (tpn *TestProcessorNode) initEconomicsData() { mingGasPrice := strconv.FormatUint(MinTxGasPrice, 10) - minGasLimitForTx := strconv.FormatUint(MinTxGasLimit, 10) + minGasLimit := strconv.FormatUint(MinTxGasLimit, 10) economicsData, _ := economics.NewEconomicsData( &config.ConfigEconomics{ @@ -271,8 +271,8 @@ func (tpn *TestProcessorNode) initEconomicsData() { BurnPercentage: 0.40, }, FeeSettings: config.FeeSettings{ - MinGasPrice: mingGasPrice, - MinGasLimitForTx: minGasLimitForTx, + MinGasPrice: mingGasPrice, + MinGasLimit: minGasLimit, }, }, ) @@ -435,7 +435,7 @@ func (tpn *TestProcessorNode) initInnerProcessors() { MinGasPriceCalled: func() uint64 { return 0 }, - MinGasLimitForTxCalled: func() uint64 { + MinGasLimitCalled: func() uint64 { return 5 }, 
MinTxFeeCalled: func() uint64 { @@ -462,7 +462,7 @@ func (tpn *TestProcessorNode) initInnerProcessors() { MinGasPriceCalled: func() uint64 { return 0 }, - MinGasLimitForTxCalled: func() uint64 { + MinGasLimitCalled: func() uint64 { return 5 }, MinTxFeeCalled: func() uint64 { diff --git a/process/block/preprocess/transactions.go b/process/block/preprocess/transactions.go index 02c71b686df..7d2324687ce 100644 --- a/process/block/preprocess/transactions.go +++ b/process/block/preprocess/transactions.go @@ -519,7 +519,7 @@ func (txs *transactions) CreateAndProcessMiniBlock( continue } - currTxGasLimit := txs.economicsFee.MinGasLimitForTx() + currTxGasLimit := txs.economicsFee.MinGasLimit() if isSmartContractAddress(orderedTxs[index].RcvAddr) { currTxGasLimit = orderedTxs[index].GasLimit } diff --git a/process/block/preprocess/transactions_test.go b/process/block/preprocess/transactions_test.go index ea546b35421..2aa67e92738 100644 --- a/process/block/preprocess/transactions_test.go +++ b/process/block/preprocess/transactions_test.go @@ -32,7 +32,7 @@ func FeeHandlerMock() *mock.FeeHandlerStub { MinGasPriceCalled: func() uint64 { return 0 }, - MinGasLimitForTxCalled: func() uint64 { + MinGasLimitCalled: func() uint64 { return 5 }, MinTxFeeCalled: func() uint64 { diff --git a/process/block/shardblock_test.go b/process/block/shardblock_test.go index d1e8da10797..78ccaf72a23 100644 --- a/process/block/shardblock_test.go +++ b/process/block/shardblock_test.go @@ -2846,7 +2846,7 @@ func TestShardProcessor_CreateMiniBlocksShouldWorkWithIntraShardTxs(t *testing.T &mock.RewardTxProcessorMock{}, &mock.IntermediateTransactionHandlerMock{}, &mock.FeeHandlerStub{ - MinGasLimitForTxCalled: func() uint64 { + MinGasLimitCalled: func() uint64 { return 0 }, }, diff --git a/process/coordinator/process_test.go b/process/coordinator/process_test.go index dec953e334a..3829e1a94d4 100644 --- a/process/coordinator/process_test.go +++ b/process/coordinator/process_test.go @@ -36,7 +36,7 @@ func FeeHandlerMock() *mock.FeeHandlerStub { MinGasPriceCalled: func() uint64 { return 0 }, - MinGasLimitForTxCalled: func() uint64 { + MinGasLimitCalled: func() uint64 { return 5 }, MinTxFeeCalled: func() uint64 { diff --git a/process/economics/economicsData.go b/process/economics/economicsData.go index 9eb1ff1d770..797f7d49dfb 100644 --- a/process/economics/economicsData.go +++ b/process/economics/economicsData.go @@ -15,7 +15,7 @@ type EconomicsData struct { leaderPercentage float64 burnPercentage float64 minGasPrice uint64 - minGasLimitForTx uint64 + minGasLimit uint64 communityAddress string burnAddress string } @@ -23,7 +23,7 @@ type EconomicsData struct { // NewEconomicsData will create and object with information about economics parameters func NewEconomicsData(economics *config.ConfigEconomics) (*EconomicsData, error) { //TODO check what happens if addresses are wrong - rewardsValue, minGasPrice, minGasLimitForTx, err := convertValues(economics) + rewardsValue, minGasPrice, minGasLimit, err := convertValues(economics) if err != nil { return nil, err } @@ -44,7 +44,7 @@ func NewEconomicsData(economics *config.ConfigEconomics) (*EconomicsData, error) leaderPercentage: economics.RewardsSettings.LeaderPercentage, burnPercentage: economics.RewardsSettings.BurnPercentage, minGasPrice: minGasPrice, - minGasLimitForTx: minGasLimitForTx, + minGasLimit: minGasLimit, communityAddress: economics.EconomicsAddresses.CommunityAddress, burnAddress: economics.EconomicsAddresses.BurnAddress, }, nil @@ -65,12 +65,12 @@ func 
convertValues(economics *config.ConfigEconomics) (*big.Int, uint64, uint64, return nil, 0, 0, process.ErrInvalidMinimumGasPrice } - minGasLimitForTx, err := strconv.ParseUint(economics.FeeSettings.MinGasLimitForTx, conversionBase, bitConversionSize) + minGasLimit, err := strconv.ParseUint(economics.FeeSettings.MinGasLimit, conversionBase, bitConversionSize) if err != nil { return nil, 0, 0, process.ErrInvalidMinimumGasLimitForTx } - return rewardsValue, minGasPrice, minGasLimitForTx, nil + return rewardsValue, minGasPrice, minGasLimit, nil } func checkValues(economics *config.ConfigEconomics) error { @@ -78,7 +78,7 @@ func checkValues(economics *config.ConfigEconomics) error { bigCommunityPercentage := big.NewFloat(economics.RewardsSettings.CommunityPercentage) bigLeaderPercentage := big.NewFloat(economics.RewardsSettings.LeaderPercentage) - if isNotPercentageValid(bigBurnPercentage) || isNotPercentageValid(bigCommunityPercentage) || isNotPercentageValid(bigLeaderPercentage) { + if isPercentageInvalid(bigBurnPercentage) || isPercentageInvalid(bigCommunityPercentage) || isPercentageInvalid(bigLeaderPercentage) { return process.ErrInvalidRewardsPercentages } @@ -94,7 +94,7 @@ func checkValues(economics *config.ConfigEconomics) error { return nil } -func isNotPercentageValid(percentage *big.Float) bool { +func isPercentageInvalid(percentage *big.Float) bool { isLessThanZero := percentage.Cmp(big.NewFloat(0.0)) < 0 isGreaterThanOne := big.NewFloat(1.0).Cmp(percentage) < 0 if isLessThanZero || isGreaterThanOne { @@ -129,9 +129,9 @@ func (ed *EconomicsData) MinGasPrice() uint64 { return ed.minGasPrice } -// MinGasLimitForTx will return minimum gas limit -func (ed *EconomicsData) MinGasLimitForTx() uint64 { - return ed.minGasLimitForTx +// MinGasLimit will return minimum gas limit +func (ed *EconomicsData) MinGasLimit() uint64 { + return ed.minGasLimit } // CommunityAddress will return community address diff --git a/process/economics/economicsData_test.go b/process/economics/economicsData_test.go index 50cbca84bee..38d68a298c7 100644 --- a/process/economics/economicsData_test.go +++ b/process/economics/economicsData_test.go @@ -24,8 +24,8 @@ func createDummyEconomicsConfig() *config.ConfigEconomics { BurnPercentage: 0.8, }, FeeSettings: config.FeeSettings{ - MinGasPrice: "18446744073709551615", - MinGasLimitForTx: "500", + MinGasPrice: "18446744073709551615", + MinGasLimit: "500", }, } } @@ -93,7 +93,7 @@ func TestNewEconomicsData_InvalidMinGasLimitShouldErr(t *testing.T) { } for _, minGasLimit := range bagMinGasLimit { - economicsConfig.FeeSettings.MinGasLimitForTx = minGasLimit + economicsConfig.FeeSettings.MinGasLimit = minGasLimit _, err := economics.NewEconomicsData(economicsConfig) assert.Equal(t, process.ErrInvalidMinimumGasLimitForTx, err) } @@ -225,16 +225,16 @@ func TestEconomicsData_MinGasPrice(t *testing.T) { assert.Equal(t, minGasPrice, value) } -func TestEconomicsData_MinGasLimitForTx(t *testing.T) { +func TestEconomicsData_MinGasLimit(t *testing.T) { t.Parallel() - minGasLimitForTx := uint64(1000) + minGasLimit := uint64(1000) economicsConfig := createDummyEconomicsConfig() - economicsConfig.FeeSettings.MinGasLimitForTx = strconv.FormatUint(minGasLimitForTx, 10) + economicsConfig.FeeSettings.MinGasLimit = strconv.FormatUint(minGasLimit, 10) economicsData, _ := economics.NewEconomicsData(economicsConfig) - value := economicsData.MinGasLimitForTx() - assert.Equal(t, minGasLimitForTx, value) + value := economicsData.MinGasLimit() + assert.Equal(t, minGasLimit, value) } func 
TestEconomicsData_CommunityAddress(t *testing.T) { diff --git a/process/interface.go b/process/interface.go index 1b97e570646..db8127c07a4 100644 --- a/process/interface.go +++ b/process/interface.go @@ -419,7 +419,7 @@ type RewardsHandler interface { // FeeHandler will return information about fees type FeeHandler interface { MinGasPrice() uint64 - MinGasLimitForTx() uint64 + MinGasLimit() uint64 IsInterfaceNil() bool } diff --git a/process/mock/feeHandlerStub.go b/process/mock/feeHandlerStub.go index a2a1fe31c87..f6d983310e7 100644 --- a/process/mock/feeHandlerStub.go +++ b/process/mock/feeHandlerStub.go @@ -1,17 +1,17 @@ package mock type FeeHandlerStub struct { - MinGasPriceCalled func() uint64 - MinGasLimitForTxCalled func() uint64 - MinTxFeeCalled func() uint64 + MinGasPriceCalled func() uint64 + MinGasLimitCalled func() uint64 + MinTxFeeCalled func() uint64 } func (fhs *FeeHandlerStub) MinGasPrice() uint64 { return fhs.MinGasPriceCalled() } -func (fhs *FeeHandlerStub) MinGasLimitForTx() uint64 { - return fhs.MinGasLimitForTxCalled() +func (fhs *FeeHandlerStub) MinGasLimit() uint64 { + return fhs.MinGasLimitCalled() } func (fhs *FeeHandlerStub) MinTxFee() uint64 { diff --git a/process/transaction/interceptedTransaction.go b/process/transaction/interceptedTransaction.go index 398094e834a..7e5e3782060 100644 --- a/process/transaction/interceptedTransaction.go +++ b/process/transaction/interceptedTransaction.go @@ -156,7 +156,7 @@ func (inTx *InterceptedTransaction) integrity() error { } func (inTx *InterceptedTransaction) checkFeeValues() error { - isLowerGasLimitInTx := inTx.tx.GasLimit < inTx.feeHandler.MinGasLimitForTx() + isLowerGasLimitInTx := inTx.tx.GasLimit < inTx.feeHandler.MinGasLimit() if isLowerGasLimitInTx { return process.ErrInsufficientGasLimitInTx } diff --git a/process/transaction/interceptedTransaction_test.go b/process/transaction/interceptedTransaction_test.go index dc0ff9cae88..e4346da84a5 100644 --- a/process/transaction/interceptedTransaction_test.go +++ b/process/transaction/interceptedTransaction_test.go @@ -52,7 +52,7 @@ func createTxFeeHandler(gasPrice uint64, gasLimit uint64) process.FeeHandler { MinGasPriceCalled: func() uint64 { return gasPrice }, - MinGasLimitForTxCalled: func() uint64 { + MinGasLimitCalled: func() uint64 { return gasLimit }, } diff --git a/process/transaction/process.go b/process/transaction/process.go index 083026646b7..020fb06e254 100644 --- a/process/transaction/process.go +++ b/process/transaction/process.go @@ -133,7 +133,7 @@ func (txProc *txProcessor) processTxFee(tx *transaction.Transaction, acntSnd *st txDataLen := int64(len(tx.Data)) txProc.mutTxFee.RLock() - minTxFee := big.NewInt(0).SetUint64(txProc.economicsFee.MinGasLimitForTx()) + minTxFee := big.NewInt(0).SetUint64(txProc.economicsFee.MinGasLimit()) minTxFee.Mul(minTxFee, big.NewInt(0).SetUint64(txProc.economicsFee.MinGasPrice())) minFee := big.NewInt(0) diff --git a/process/transaction/process_test.go b/process/transaction/process_test.go index 8bcab0e79c1..073e59bef35 100644 --- a/process/transaction/process_test.go +++ b/process/transaction/process_test.go @@ -29,7 +29,7 @@ func FeeHandlerMock() *mock.FeeHandlerStub { MinGasPriceCalled: func() uint64 { return 0 }, - MinGasLimitForTxCalled: func() uint64 { + MinGasLimitCalled: func() uint64 { return 5 }, MinTxFeeCalled: func() uint64 { From e85983d24892e5dd741a76a0e8d8e8462dd29f7b Mon Sep 17 00:00:00 2001 From: miiu96 Date: Wed, 9 Oct 2019 12:04:37 +0300 Subject: [PATCH 224/234] EN-4257 - fix after review --- 
process/economics/economicsData.go | 29 ++++++++++++++---------------
 1 file changed, 14 insertions(+), 15 deletions(-)
diff --git a/process/economics/economicsData.go b/process/economics/economicsData.go
index 797f7d49dfb..fb796a8e2f7 100644
--- a/process/economics/economicsData.go
+++ b/process/economics/economicsData.go
@@ -1,6 +1,7 @@ package economics import ( + "math" "math/big" "strconv"
@@ -20,6 +21,8 @@ type EconomicsData struct { burnAddress string } +const float64EqualityThreshold = 1e-9 + // NewEconomicsData will create and object with information about economics parameters func NewEconomicsData(economics *config.ConfigEconomics) (*EconomicsData, error) { //TODO check what happens if addresses are wrong
@@ -74,33 +77,29 @@ func convertValues(economics *config.ConfigEconomics) (*big.Int, uint64, uint64, } func checkValues(economics *config.ConfigEconomics) error { - bigBurnPercentage := big.NewFloat(economics.RewardsSettings.BurnPercentage) - bigCommunityPercentage := big.NewFloat(economics.RewardsSettings.CommunityPercentage) - bigLeaderPercentage := big.NewFloat(economics.RewardsSettings.LeaderPercentage) - - if isPercentageInvalid(bigBurnPercentage) || isPercentageInvalid(bigCommunityPercentage) || isPercentageInvalid(bigLeaderPercentage) { + if isPercentageInvalid(economics.RewardsSettings.BurnPercentage) || + isPercentageInvalid(economics.RewardsSettings.CommunityPercentage) || + isPercentageInvalid(economics.RewardsSettings.LeaderPercentage) { return process.ErrInvalidRewardsPercentages } - sumPercentage := new(big.Float) - sumPercentage.Add(bigBurnPercentage, bigCommunityPercentage) - sumPercentage.Add(sumPercentage, bigLeaderPercentage) - - isNotEqualToOne := sumPercentage.Cmp(big.NewFloat(1.0)) != 0 - if isNotEqualToOne { + sumPercentage := economics.RewardsSettings.BurnPercentage + sumPercentage += economics.RewardsSettings.CommunityPercentage + sumPercentage += economics.RewardsSettings.LeaderPercentage + isEqualsToOne := math.Abs(sumPercentage-1.0) <= float64EqualityThreshold + if !isEqualsToOne { return process.ErrInvalidRewardsPercentages } return nil } -func isPercentageInvalid(percentage *big.Float) bool { - isLessThanZero := percentage.Cmp(big.NewFloat(0.0)) < 0 - isGreaterThanOne := big.NewFloat(1.0).Cmp(percentage) < 0 +func isPercentageInvalid(percentage float64) bool { + isLessThanZero := percentage < 0.0 + isGreaterThanOne := percentage > 1.0 if isLessThanZero || isGreaterThanOne { return true } - return false }
From 3b8e8e611bbd49ea06e1f681966bdea5777d25a9 Mon Sep 17 00:00:00 2001
From: miiu96
Date: Wed, 9 Oct 2019 13:46:43 +0300
Subject: [PATCH 225/234] EN-4270 - fix bug with document id for round
---
 core/indexer/elasticsearch.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/core/indexer/elasticsearch.go b/core/indexer/elasticsearch.go
index ae8de78fd35..d671d8747fd 100644
--- a/core/indexer/elasticsearch.go
+++ b/core/indexer/elasticsearch.go
@@ -255,7 +255,7 @@ func (ei *elasticIndexer) SaveRoundInfo(roundInfo RoundInfo) { req := esapi.IndexRequest{ Index: roundIndex, - DocumentID: strconv.FormatInt(int64(roundInfo.Index), 10), + DocumentID: strconv.FormatUint(uint64(roundInfo.ShardId), 10) + "_" + strconv.FormatUint(roundInfo.Index, 10), Body: bytes.NewReader(buff.Bytes()), Refresh: "true", }
From 8d83efff7de52ad7a396d9d62a8e0330d585c47c Mon Sep 17 00:00:00 2001
From: iulianpascalau
Date: Wed, 9 Oct 2019 17:21:54 +0300
Subject: [PATCH 226/234] removed concurrent issue on elrond-go/process/block/preprocess/rewardTxProcessor.go L245
function AddComputedRewardMiniBlocks now protects rtp.rewardTxsForBlock.txHashAndInfo map write access. --- process/block/preprocess/rewardTxPreProcessor.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/process/block/preprocess/rewardTxPreProcessor.go b/process/block/preprocess/rewardTxPreProcessor.go index a291f256b2d..a9a7b07e421 100644 --- a/process/block/preprocess/rewardTxPreProcessor.go +++ b/process/block/preprocess/rewardTxPreProcessor.go @@ -242,10 +242,12 @@ func (rtp *rewardTxPreprocessor) AddComputedRewardMiniBlocks(computedRewardMinib log.Error(process.ErrWrongTypeAssertion.Error()) } + rtp.rewardTxsForBlock.mutTxsForBlock.Lock() rtp.rewardTxsForBlock.txHashAndInfo[string(txHash)] = &txInfo{ tx: rTx, txShardInfo: txShardData, } + rtp.rewardTxsForBlock.mutTxsForBlock.Unlock() } } } From 22110db7629ecc549c4a7e14e19a205e68767e24 Mon Sep 17 00:00:00 2001 From: Sebastian Marian Date: Wed, 9 Oct 2019 14:34:14 +0300 Subject: [PATCH 227/234] * Implementation done --- core/computers.go | 16 +++ core/computers_test.go | 24 +++++ dataRetriever/errors.go | 4 +- dataRetriever/interface.go | 2 +- .../resolvers/genericBlockBodyResolver.go | 98 +++++++++---------- .../mock/miniBlocksResolverMock.go | 4 +- process/block/baseProcess.go | 14 +-- process/mock/miniBlocksResolverMock.go | 4 +- process/sync/baseSync.go | 49 +++++++++- process/sync/export_test.go | 2 +- process/sync/metablock.go | 7 +- process/sync/shardblock.go | 59 ++++++----- process/sync/shardblock_test.go | 20 ++-- 13 files changed, 197 insertions(+), 106 deletions(-) diff --git a/core/computers.go b/core/computers.go index d04b880699f..9552f3888b5 100644 --- a/core/computers.go +++ b/core/computers.go @@ -31,3 +31,19 @@ func MinUint32(a uint32, b uint32) uint32 { } return b } + +// MaxUint64 returns the maximum of two given numbers +func MaxUint64(a uint64, b uint64) uint64 { + if a > b { + return a + } + return b +} + +// MinUint64 returns the minimum of two given numbers +func MinUint64(a uint64, b uint64) uint64 { + if a < b { + return a + } + return b +} diff --git a/core/computers_test.go b/core/computers_test.go index 8b26be785c9..397f3382879 100644 --- a/core/computers_test.go +++ b/core/computers_test.go @@ -54,3 +54,27 @@ func TestMinUint32ShouldReturnA(t *testing.T) { b := uint32(11) assert.Equal(t, a, core.MinUint32(a, b)) } + +func TestMaxUint64ShouldReturnA(t *testing.T) { + a := uint64(11) + b := uint64(10) + assert.Equal(t, a, core.MaxUint64(a, b)) +} + +func TestMaxUint64ShouldReturnB(t *testing.T) { + a := uint64(10) + b := uint64(11) + assert.Equal(t, b, core.MaxUint64(a, b)) +} + +func TestMinUint64ShouldReturnB(t *testing.T) { + a := uint64(11) + b := uint64(10) + assert.Equal(t, b, core.MinUint64(a, b)) +} + +func TestMinUint64ShouldReturnA(t *testing.T) { + a := uint64(10) + b := uint64(11) + assert.Equal(t, a, core.MinUint64(a, b)) +} diff --git a/dataRetriever/errors.go b/dataRetriever/errors.go index bf32a10e690..5bcba87ff98 100644 --- a/dataRetriever/errors.go +++ b/dataRetriever/errors.go @@ -40,8 +40,8 @@ var ErrNilPublicKey = errors.New("nil public key") // ErrNilSignature signals that a operation has been attempted with a nil signature var ErrNilSignature = errors.New("nil signature") -// ErrNilMiniBlocks signals that an operation has been attempted with a nil mini-block -var ErrNilMiniBlocks = errors.New("nil mini blocks") +// ErrEmptyMiniBlockSlice signals that an operation has been attempted with an empty mini block slice +var ErrEmptyMiniBlockSlice = errors.New("empty mini block slice") 
// ErrInvalidShardId signals that the shard id is invalid var ErrInvalidShardId = errors.New("invalid shard id") diff --git a/dataRetriever/interface.go b/dataRetriever/interface.go index c054fd36ea3..e6c89486379 100644 --- a/dataRetriever/interface.go +++ b/dataRetriever/interface.go @@ -60,7 +60,7 @@ type HeaderResolver interface { type MiniBlocksResolver interface { Resolver RequestDataFromHashArray(hashes [][]byte) error - GetMiniBlocks(hashes [][]byte) block.MiniBlockSlice // TODO miniblockresolver should not know about miniblockslice + GetMiniBlocks(hashes [][]byte) (block.MiniBlockSlice, [][]byte) // TODO miniblockresolver should not know about miniblockslice } // TopicResolverSender defines what sending operations are allowed for a topic resolver diff --git a/dataRetriever/resolvers/genericBlockBodyResolver.go b/dataRetriever/resolvers/genericBlockBodyResolver.go index cacec11910b..838168fc55a 100644 --- a/dataRetriever/resolvers/genericBlockBodyResolver.go +++ b/dataRetriever/resolvers/genericBlockBodyResolver.go @@ -74,19 +74,18 @@ func (gbbRes *GenericBlockBodyResolver) ProcessReceivedMessage(message p2p.Messa } func (gbbRes *GenericBlockBodyResolver) resolveBlockBodyRequest(rd *dataRetriever.RequestData) ([]byte, error) { - if rd.Value == nil { return nil, dataRetriever.ErrNilValue } - miniBlockHashes, err := gbbRes.miniBlockHashesFromRequestType(rd) + hashes, err := gbbRes.miniBlockHashesFromRequestType(rd) if err != nil { return nil, err } - miniBlocks := gbbRes.GetMiniBlocks(miniBlockHashes) - if miniBlocks == nil { - return nil, dataRetriever.ErrNilMiniBlocks + miniBlocks, _ := gbbRes.GetMiniBlocks(hashes) + if len(miniBlocks) == 0 { + return nil, dataRetriever.ErrEmptyMiniBlockSlice } buff, err := gbbRes.marshalizer.Marshal(miniBlocks) @@ -141,89 +140,88 @@ func (gbbRes *GenericBlockBodyResolver) RequestDataFromHashArray(hashes [][]byte } // GetMiniBlocks method returns a list of deserialized mini blocks from a given hash list either from data pool or from storage -func (gbbRes *GenericBlockBodyResolver) GetMiniBlocks(hashes [][]byte) block.MiniBlockSlice { - miniBlocks := gbbRes.getMiniBlocks(hashes) - if miniBlocks == nil { - return nil - } - - mbLength := len(hashes) - expandedMiniBlocks := make(block.MiniBlockSlice, mbLength) - - for i := 0; i < mbLength; i++ { - mb := &block.MiniBlock{} - err := gbbRes.marshalizer.Unmarshal(mb, miniBlocks[i]) +func (gbbRes *GenericBlockBodyResolver) GetMiniBlocks(hashes [][]byte) (block.MiniBlockSlice, [][]byte) { + marshalizedMiniBlocks, missingMiniBlocksHashes := gbbRes.getMiniBlocks(hashes) + miniBlocks := make(block.MiniBlockSlice, 0) + for hash, marshalizedMiniBlock := range marshalizedMiniBlocks { + miniBlock := &block.MiniBlock{} + err := gbbRes.marshalizer.Unmarshal(miniBlock, marshalizedMiniBlock) if err != nil { log.Debug(err.Error()) - gbbRes.miniBlockPool.Remove(hashes[i]) - err = gbbRes.miniBlockStorage.Remove(hashes[i]) + gbbRes.miniBlockPool.Remove([]byte(hash)) + err = gbbRes.miniBlockStorage.Remove([]byte(hash)) if err != nil { log.Debug(err.Error()) } - return nil + missingMiniBlocksHashes = append(missingMiniBlocksHashes, []byte(hash)) + continue } - expandedMiniBlocks[i] = mb + miniBlocks = append(miniBlocks, miniBlock) } - return expandedMiniBlocks + return miniBlocks, missingMiniBlocksHashes } // getMiniBlocks method returns a list of serialized mini blocks from a given hash list either from data pool or from storage -func (gbbRes *GenericBlockBodyResolver) getMiniBlocks(hashes [][]byte) [][]byte { - miniBlocks 
:= gbbRes.getMiniBlocksFromCache(hashes) +func (gbbRes *GenericBlockBodyResolver) getMiniBlocks(hashes [][]byte) (map[string][]byte, [][]byte) { + marshalizedMiniBlocks, missingMiniBlocksHashes := gbbRes.getMiniBlocksFromCache(hashes) + if len(missingMiniBlocksHashes) == 0 { + return marshalizedMiniBlocks, missingMiniBlocksHashes + } - if miniBlocks != nil { - return miniBlocks + marshalizedMiniBlocksFromStorer, missingMiniBlocksHashes := gbbRes.getMiniBlocksFromStorer(missingMiniBlocksHashes) + for hash, marshalizedMiniBlockFromStorer := range marshalizedMiniBlocksFromStorer { + marshalizedMiniBlocks[hash] = marshalizedMiniBlockFromStorer } - return gbbRes.getMiniBlocksFromStorer(hashes) + return marshalizedMiniBlocks, missingMiniBlocksHashes } -// getMiniBlocksFromCache returns a full list of miniblocks from cache. -// If any of the miniblocks is missing the function returns nil -func (gbbRes *GenericBlockBodyResolver) getMiniBlocksFromCache(hashes [][]byte) [][]byte { - miniBlocksLen := len(hashes) - miniBlocks := make([][]byte, miniBlocksLen) - - for i := 0; i < miniBlocksLen; i++ { - cachedMB, _ := gbbRes.miniBlockPool.Peek(hashes[i]) +// getMiniBlocksFromCache returns a list of marshalized mini blocks from cache and a list of missing hashes +func (gbbRes *GenericBlockBodyResolver) getMiniBlocksFromCache(hashes [][]byte) (map[string][]byte, [][]byte) { + marshalizedMiniBlocks := make(map[string][]byte) + missingMiniBlocksHashes := make([][]byte, 0) - if cachedMB == nil { - return nil + for i := 0; i < len(hashes); i++ { + miniBlock, ok := gbbRes.miniBlockPool.Peek(hashes[i]) + if !ok { + missingMiniBlocksHashes = append(missingMiniBlocksHashes, hashes[i]) + continue } - buff, err := gbbRes.marshalizer.Marshal(cachedMB) + buff, err := gbbRes.marshalizer.Marshal(miniBlock) if err != nil { log.Debug(err.Error()) - return nil + missingMiniBlocksHashes = append(missingMiniBlocksHashes, hashes[i]) + continue } - miniBlocks[i] = buff + marshalizedMiniBlocks[string(hashes[i])] = buff } - return miniBlocks + return marshalizedMiniBlocks, missingMiniBlocksHashes } -// getMiniBlocksFromStorer returns a full list of MiniBlocks from the storage unit. 
-// If any MiniBlock is missing or is invalid, it is removed and the function returns nil -func (gbbRes *GenericBlockBodyResolver) getMiniBlocksFromStorer(hashes [][]byte) [][]byte { - miniBlocksLen := len(hashes) - miniBlocks := make([][]byte, miniBlocksLen) +// getMiniBlocksFromStorer returns a list of marshalized mini blocks from the storage unit and a list of missing hashes +func (gbbRes *GenericBlockBodyResolver) getMiniBlocksFromStorer(hashes [][]byte) (map[string][]byte, [][]byte) { + marshalizedMiniBlocks := make(map[string][]byte) + missingMiniBlocksHashes := make([][]byte, 0) - for i := 0; i < miniBlocksLen; i++ { + for i := 0; i < len(hashes); i++ { buff, err := gbbRes.miniBlockStorage.Get(hashes[i]) if err != nil { log.Debug(err.Error()) - return nil + missingMiniBlocksHashes = append(missingMiniBlocksHashes, hashes[i]) + continue } - miniBlocks[i] = buff + marshalizedMiniBlocks[string(hashes[i])] = buff } - return miniBlocks + return marshalizedMiniBlocks, missingMiniBlocksHashes } // IsInterfaceNil returns true if there is no value under the interface diff --git a/integrationTests/mock/miniBlocksResolverMock.go b/integrationTests/mock/miniBlocksResolverMock.go index 8b2a5a64518..52261bc7960 100644 --- a/integrationTests/mock/miniBlocksResolverMock.go +++ b/integrationTests/mock/miniBlocksResolverMock.go @@ -9,7 +9,7 @@ type MiniBlocksResolverMock struct { RequestDataFromHashCalled func(hash []byte) error RequestDataFromHashArrayCalled func(hashes [][]byte) error ProcessReceivedMessageCalled func(message p2p.MessageP2P) error - GetMiniBlocksCalled func(hashes [][]byte) block.MiniBlockSlice + GetMiniBlocksCalled func(hashes [][]byte) (block.MiniBlockSlice, [][]byte) } func (hrm *MiniBlocksResolverMock) RequestDataFromHash(hash []byte) error { @@ -24,7 +24,7 @@ func (hrm *MiniBlocksResolverMock) ProcessReceivedMessage(message p2p.MessageP2P return hrm.ProcessReceivedMessageCalled(message) } -func (hrm *MiniBlocksResolverMock) GetMiniBlocks(hashes [][]byte) block.MiniBlockSlice { +func (hrm *MiniBlocksResolverMock) GetMiniBlocks(hashes [][]byte) (block.MiniBlockSlice, [][]byte) { return hrm.GetMiniBlocksCalled(hashes) } diff --git a/process/block/baseProcess.go b/process/block/baseProcess.go index 8413b239092..d0989665e95 100644 --- a/process/block/baseProcess.go +++ b/process/block/baseProcess.go @@ -391,10 +391,7 @@ func (bp *baseProcessor) requestHeadersIfMissing(sortedHdrs []data.HeaderHandler return err } - isLastNotarizedCloseToOurRound := maxRound-prevHdr.GetRound() <= process.MaxHeaderRequestsAllowed - if len(sortedHdrs) == 0 && isLastNotarizedCloseToOurRound { - return process.ErrNoSortedHdrsForShard - } + highestHdr := prevHdr missingNonces := make([]uint64, 0) for i := 0; i < len(sortedHdrs); i++ { @@ -417,12 +414,15 @@ func (bp *baseProcessor) requestHeadersIfMissing(sortedHdrs []data.HeaderHandler missingNonces = append(missingNonces, j) } } + + highestHdr = currHdr } // ask for headers, if there most probably should be - if len(missingNonces) == 0 && !isLastNotarizedCloseToOurRound { - startNonce := prevHdr.GetNonce() + 1 - for nonce := startNonce; nonce < startNonce+process.MaxHeaderRequestsAllowed; nonce++ { + if maxRound > highestHdr.GetRound() { + nbHeaderRequests := maxRound - highestHdr.GetRound() + startNonce := highestHdr.GetNonce() + 1 + for nonce := startNonce; nonce < startNonce+nbHeaderRequests; nonce++ { missingNonces = append(missingNonces, nonce) } } diff --git a/process/mock/miniBlocksResolverMock.go b/process/mock/miniBlocksResolverMock.go index 
8b2a5a64518..52261bc7960 100644 --- a/process/mock/miniBlocksResolverMock.go +++ b/process/mock/miniBlocksResolverMock.go @@ -9,7 +9,7 @@ type MiniBlocksResolverMock struct { RequestDataFromHashCalled func(hash []byte) error RequestDataFromHashArrayCalled func(hashes [][]byte) error ProcessReceivedMessageCalled func(message p2p.MessageP2P) error - GetMiniBlocksCalled func(hashes [][]byte) block.MiniBlockSlice + GetMiniBlocksCalled func(hashes [][]byte) (block.MiniBlockSlice, [][]byte) } func (hrm *MiniBlocksResolverMock) RequestDataFromHash(hash []byte) error { @@ -24,7 +24,7 @@ func (hrm *MiniBlocksResolverMock) ProcessReceivedMessage(message p2p.MessageP2P return hrm.ProcessReceivedMessageCalled(message) } -func (hrm *MiniBlocksResolverMock) GetMiniBlocks(hashes [][]byte) block.MiniBlockSlice { +func (hrm *MiniBlocksResolverMock) GetMiniBlocks(hashes [][]byte) (block.MiniBlockSlice, [][]byte) { return hrm.GetMiniBlocksCalled(hashes) } diff --git a/process/sync/baseSync.go b/process/sync/baseSync.go index 11a219970d0..10cf7295c33 100644 --- a/process/sync/baseSync.go +++ b/process/sync/baseSync.go @@ -29,6 +29,9 @@ const sleepTime = 5 * time.Millisecond // block through recovery mechanism, if its block request is not resolved and no new block header is received meantime const maxRoundsToWait = 5 +// maxHeadersToRequestInAdvance defines the maximum number of headers which will be requested in advance if they are missing +const maxHeadersToRequestInAdvance = 10 + type notarizedInfo struct { lastNotarized map[uint32]uint64 finalNotarized map[uint32]uint64 @@ -331,7 +334,7 @@ func (boot *baseBootstrap) requestedHeaderHash() []byte { } func (boot *baseBootstrap) processReceivedHeader(headerHandler data.HeaderHandler, headerHash []byte) { - log.Debug(fmt.Sprintf("receivedHeaders: received header with hash %s and nonce %d from network\n", + log.Debug(fmt.Sprintf("received header with hash %s and nonce %d from network\n", core.ToB64(headerHash), headerHandler.GetNonce())) @@ -357,7 +360,7 @@ func (boot *baseBootstrap) processReceivedHeader(headerHandler data.HeaderHandle // receivedHeaderNonce method is a call back function which is called when a new header is added // in the block headers pool func (boot *baseBootstrap) receivedHeaderNonce(nonce uint64, shardId uint32, hash []byte) { - log.Debug(fmt.Sprintf("receivedHeaderNonce: received header with nonce %d and hash %s from network\n", + log.Debug(fmt.Sprintf("received header with nonce %d and hash %s from network\n", nonce, core.ToB64(hash))) @@ -367,9 +370,9 @@ func (boot *baseBootstrap) receivedHeaderNonce(nonce uint64, shardId uint32, has } if *n == nonce { - log.Info(fmt.Sprintf("received requested header with nonce %d from network and probable highest nonce is %d\n", + log.Info(fmt.Sprintf("received requested header with nonce %d and hash %s from network\n", nonce, - boot.forkDetector.ProbableHighestNonce())) + core.ToB64(hash))) boot.setRequestedHeaderNonce(nil) boot.chRcvHdrNonce <- true } @@ -562,3 +565,41 @@ func isRandomSeedValid(header data.HeaderHandler) bool { return !isPrevRandSeedNilOrEmpty && !isRandSeedNilOrEmpty } + +func (boot *baseBootstrap) requestHeadersFromNonceIfMissing(nonce uint64, hdrRes dataRetriever.HeaderResolver) { + var err error + nbRequestedHdrs := 0 + maxNonce := core.MinUint64(nonce+maxHeadersToRequestInAdvance-1, boot.forkDetector.ProbableHighestNonce()) + for currentNonce := nonce; currentNonce <= maxNonce; currentNonce++ { + if boot.shardCoordinator.SelfId() == sharding.MetachainShardId { + _, _, err = 
process.GetMetaHeaderFromPoolWithNonce( + currentNonce, + boot.headers, + boot.headersNonces) + } else { + _, _, err = process.GetShardHeaderFromPoolWithNonce( + currentNonce, + boot.shardCoordinator.SelfId(), + boot.headers, + boot.headersNonces) + } + + if err != nil { + err = hdrRes.RequestDataFromNonce(currentNonce) + if err != nil { + log.Error(err.Error()) + continue + } + + nbRequestedHdrs++ + } + } + + if nbRequestedHdrs > 0 { + log.Info(fmt.Sprintf("requested in advance %d headers from nonce %d to nonce %d and probable highest nonce is %d\n", + nbRequestedHdrs, + nonce, + maxNonce, + boot.forkDetector.ProbableHighestNonce())) + } +} diff --git a/process/sync/export_test.go b/process/sync/export_test.go index 44a6a37fb24..f568daee595 100644 --- a/process/sync/export_test.go +++ b/process/sync/export_test.go @@ -11,7 +11,7 @@ func (boot *ShardBootstrap) RequestHeaderWithNonce(nonce uint64) { boot.requestHeaderWithNonce(nonce) } -func (boot *ShardBootstrap) GetMiniBlocks(hashes [][]byte) interface{} { +func (boot *ShardBootstrap) GetMiniBlocks(hashes [][]byte) (block.MiniBlockSlice, [][]byte) { return boot.miniBlockResolver.GetMiniBlocks(hashes) } diff --git a/process/sync/metablock.go b/process/sync/metablock.go index 29660b934e5..c9342f1ad19 100644 --- a/process/sync/metablock.go +++ b/process/sync/metablock.go @@ -451,6 +451,9 @@ func (boot *MetaBootstrap) SyncBlock() error { return err } + // request in advance next meta header if missing + go boot.requestHeadersFromNonceIfMissing(hdr.GetNonce()+1, boot.hdrRes) + haveTime := func() time.Duration { return boot.rounder.TimeDuration() } @@ -483,7 +486,9 @@ func (boot *MetaBootstrap) requestHeaderWithNonce(nonce uint64) { boot.setRequestedHeaderNonce(&nonce) err := boot.hdrRes.RequestDataFromNonce(nonce) - log.Info(fmt.Sprintf("requested header with nonce %d from network\n", nonce)) + log.Info(fmt.Sprintf("requested header with nonce %d from network and probable highest nonce is %d\n", + nonce, + boot.forkDetector.ProbableHighestNonce())) if err != nil { log.Error(err.Error()) diff --git a/process/sync/shardblock.go b/process/sync/shardblock.go index d381596d6b8..c9c71459693 100644 --- a/process/sync/shardblock.go +++ b/process/sync/shardblock.go @@ -252,14 +252,17 @@ func (boot *ShardBootstrap) getBlockBody(headerHandler data.HeaderHandler) (data return nil, process.ErrWrongTypeAssertion } - miniBlockHashes := make([][]byte, 0) + hashes := make([][]byte, len(header.MiniBlockHeaders)) for i := 0; i < len(header.MiniBlockHeaders); i++ { - miniBlockHashes = append(miniBlockHashes, header.MiniBlockHeaders[i].Hash) + hashes[i] = header.MiniBlockHeaders[i].Hash } - miniBlockSlice := boot.miniBlockResolver.GetMiniBlocks(miniBlockHashes) + miniBlocks, missingMiniBlocksHashes := boot.miniBlockResolver.GetMiniBlocks(hashes) + if len(missingMiniBlocksHashes) > 0 { + return nil, process.ErrMissingBody + } - return block.Body(miniBlockSlice), nil + return block.Body(miniBlocks), nil } func (boot *ShardBootstrap) removeBlockBody( @@ -672,12 +675,15 @@ func (boot *ShardBootstrap) SyncBlock() error { return err } - miniBlockHashes := make([][]byte, 0) + // request in advance next shard header if missing + go boot.requestHeadersFromNonceIfMissing(hdr.GetNonce()+1, boot.hdrRes) + + hashes := make([][]byte, len(hdr.MiniBlockHeaders)) for i := 0; i < len(hdr.MiniBlockHeaders); i++ { - miniBlockHashes = append(miniBlockHashes, hdr.MiniBlockHeaders[i].Hash) + hashes[i] = hdr.MiniBlockHeaders[i].Hash } - blk, err := 
boot.getMiniBlocksRequestingIfMissing(miniBlockHashes) + miniBlockSlice, err := boot.getMiniBlocksRequestingIfMissing(hashes) if err != nil { return err } @@ -686,12 +692,6 @@ func (boot *ShardBootstrap) SyncBlock() error { return boot.rounder.TimeDuration() } - miniBlockSlice, ok := blk.(block.MiniBlockSlice) - if !ok { - err = process.ErrWrongTypeAssertion - return err - } - blockBody := block.Body(miniBlockSlice) timeBefore := time.Now() err = boot.blkExecutor.ProcessBlock(boot.blkc, hdr, blockBody, haveTime) @@ -720,7 +720,9 @@ func (boot *ShardBootstrap) requestHeaderWithNonce(nonce uint64) { boot.setRequestedHeaderNonce(&nonce) err := boot.hdrRes.RequestDataFromNonce(nonce) - log.Info(fmt.Sprintf("requested header with nonce %d from network\n", nonce)) + log.Info(fmt.Sprintf("requested header with nonce %d from network and probable highest nonce is %d\n", + nonce, + boot.forkDetector.ProbableHighestNonce())) if err != nil { log.Error(err.Error()) @@ -800,7 +802,7 @@ func (boot *ShardBootstrap) requestMiniBlocks(hashes [][]byte) { boot.setRequestedMiniBlocks(hashes) err = boot.miniBlockResolver.RequestDataFromHashArray(hashes) - log.Info(fmt.Sprintf("requested %v miniblocks from network\n", len(hashes))) + log.Info(fmt.Sprintf("requested %d miniblocks from network\n", len(hashes))) if err != nil { log.Error(err.Error()) @@ -812,20 +814,22 @@ func (boot *ShardBootstrap) requestMiniBlocks(hashes [][]byte) { // the func returns interface{} as to match the next implementations for block body fetchers // that will be added. The block executor should decide by parsing the header block body type value // what kind of block body received. -func (boot *ShardBootstrap) getMiniBlocksRequestingIfMissing(hashes [][]byte) (interface{}, error) { - miniBlocks := boot.miniBlockResolver.GetMiniBlocks(hashes) - if miniBlocks == nil { +func (boot *ShardBootstrap) getMiniBlocksRequestingIfMissing(hashes [][]byte) (block.MiniBlockSlice, error) { + miniBlocks, missingMiniBlocksHashes := boot.miniBlockResolver.GetMiniBlocks(hashes) + if len(missingMiniBlocksHashes) > 0 { _ = process.EmptyChannel(boot.chRcvMiniBlocks) - boot.requestMiniBlocks(hashes) + boot.requestMiniBlocks(missingMiniBlocksHashes) err := boot.waitForMiniBlocks() if err != nil { return nil, err } - miniBlocks = boot.miniBlockResolver.GetMiniBlocks(hashes) - if miniBlocks == nil { + receivedMiniBlocks, unreceivedMiniBlocksHashes := boot.miniBlockResolver.GetMiniBlocks(missingMiniBlocksHashes) + if len(unreceivedMiniBlocksHashes) > 0 { return nil, process.ErrMissingBody } + + miniBlocks = append(miniBlocks, receivedMiniBlocks...) 
} return miniBlocks, nil @@ -962,14 +966,17 @@ func (boot *ShardBootstrap) getPrevHeader(headerStore storage.Storer, header *bl } func (boot *ShardBootstrap) getTxBlockBody(header *block.Header) (block.Body, error) { - mbLength := len(header.MiniBlockHeaders) - hashes := make([][]byte, mbLength) - for i := 0; i < mbLength; i++ { + hashes := make([][]byte, len(header.MiniBlockHeaders)) + for i := 0; i < len(header.MiniBlockHeaders); i++ { hashes[i] = header.MiniBlockHeaders[i].Hash } - bodyMiniBlocks := boot.miniBlockResolver.GetMiniBlocks(hashes) - return block.Body(bodyMiniBlocks), nil + miniBlocks, missingMiniBlocksHashes := boot.miniBlockResolver.GetMiniBlocks(hashes) + if len(missingMiniBlocksHashes) > 0 { + return nil, process.ErrMissingBody + } + + return block.Body(miniBlocks), nil } func (boot *ShardBootstrap) getCurrentHeader() (*block.Header, error) { diff --git a/process/sync/shardblock_test.go b/process/sync/shardblock_test.go index 2742fe76148..f90f1e2b1c4 100644 --- a/process/sync/shardblock_test.go +++ b/process/sync/shardblock_test.go @@ -55,8 +55,8 @@ func createMockResolversFinder() *mock.ResolversFinderStub { if strings.Contains(baseTopic, factory.MiniBlocksTopic) { return &mock.MiniBlocksResolverMock{ - GetMiniBlocksCalled: func(hashes [][]byte) block.MiniBlockSlice { - return make(block.MiniBlockSlice, 0) + GetMiniBlocksCalled: func(hashes [][]byte) (block.MiniBlockSlice, [][]byte) { + return make(block.MiniBlockSlice, 0), make([][]byte, 0) }, }, nil } @@ -88,8 +88,8 @@ func createMockResolversFinderNilMiniBlocks() *mock.ResolversFinderStub { RequestDataFromHashArrayCalled: func(hash [][]byte) error { return nil }, - GetMiniBlocksCalled: func(hashes [][]byte) block.MiniBlockSlice { - return nil + GetMiniBlocksCalled: func(hashes [][]byte) (block.MiniBlockSlice, [][]byte) { + return make(block.MiniBlockSlice, 0), [][]byte{[]byte("hash")} }, }, nil } @@ -1963,7 +1963,7 @@ func TestShardGetBlockFromPoolShouldReturnBlock(t *testing.T) { mbHashes := make([][]byte, 0) mbHashes = append(mbHashes, []byte("aaaa")) - mb := bs.GetMiniBlocks(mbHashes) + mb, _ := bs.GetMiniBlocks(mbHashes) assert.True(t, reflect.DeepEqual(blk, mb)) } @@ -2625,12 +2625,12 @@ func TestBootstrap_GetTxBodyHavingHashReturnsFromCacherShouldWork(t *testing.T) account, math.MaxUint32, ) - txBlockRecovered := bs.GetMiniBlocks(requestedHash) + txBlockRecovered, _ := bs.GetMiniBlocks(requestedHash) assert.True(t, reflect.DeepEqual(txBlockRecovered, txBlock)) } -func TestBootstrap_GetTxBodyHavingHashNotFoundInCacherOrStorageShouldRetNil(t *testing.T) { +func TestBootstrap_GetTxBodyHavingHashNotFoundInCacherOrStorageShouldRetEmptySlice(t *testing.T) { t.Parallel() mbh := []byte("requested hash") @@ -2679,9 +2679,9 @@ func TestBootstrap_GetTxBodyHavingHashNotFoundInCacherOrStorageShouldRetNil(t *t account, math.MaxUint32, ) - txBlockRecovered := bs.GetMiniBlocks(requestedHash) + txBlockRecovered, _ := bs.GetMiniBlocks(requestedHash) - assert.Nil(t, txBlockRecovered) + assert.Equal(t, 0, len(txBlockRecovered)) } func TestBootstrap_GetTxBodyHavingHashFoundInStorageShouldWork(t *testing.T) { @@ -2739,7 +2739,7 @@ func TestBootstrap_GetTxBodyHavingHashFoundInStorageShouldWork(t *testing.T) { account, math.MaxUint32, ) - txBlockRecovered := bs.GetMiniBlocks(requestedHash) + txBlockRecovered, _ := bs.GetMiniBlocks(requestedHash) assert.Equal(t, txBlock, txBlockRecovered) } From 8a312cc43d1fde8bbc48eeafa56792aa29315c27 Mon Sep 17 00:00:00 2001 From: Sebastian Marian Date: Thu, 10 Oct 2019 12:18:32 +0300 Subject: [PATCH 
228/234] * Fixed after review --- dataRetriever/interface.go | 3 +- .../resolvers/genericBlockBodyResolver.go | 83 +++++++++---------- .../mock/miniBlocksResolverMock.go | 5 ++ process/mock/miniBlocksResolverMock.go | 5 ++ process/sync/baseSync.go | 21 ++--- process/sync/metablock.go | 12 ++- process/sync/shardblock.go | 17 +++- process/sync/shardblock_test.go | 6 ++ 8 files changed, 86 insertions(+), 66 deletions(-) diff --git a/dataRetriever/interface.go b/dataRetriever/interface.go index e6c89486379..ea44f301cae 100644 --- a/dataRetriever/interface.go +++ b/dataRetriever/interface.go @@ -60,7 +60,8 @@ type HeaderResolver interface { type MiniBlocksResolver interface { Resolver RequestDataFromHashArray(hashes [][]byte) error - GetMiniBlocks(hashes [][]byte) (block.MiniBlockSlice, [][]byte) // TODO miniblockresolver should not know about miniblockslice + GetMiniBlocks(hashes [][]byte) (block.MiniBlockSlice, [][]byte) + GetMiniBlocksFromPool(hashes [][]byte) (block.MiniBlockSlice, [][]byte) } // TopicResolverSender defines what sending operations are allowed for a topic resolver diff --git a/dataRetriever/resolvers/genericBlockBodyResolver.go b/dataRetriever/resolvers/genericBlockBodyResolver.go index 838168fc55a..b5d1ce0eec1 100644 --- a/dataRetriever/resolvers/genericBlockBodyResolver.go +++ b/dataRetriever/resolvers/genericBlockBodyResolver.go @@ -83,7 +83,7 @@ func (gbbRes *GenericBlockBodyResolver) resolveBlockBodyRequest(rd *dataRetrieve return nil, err } - miniBlocks, _ := gbbRes.GetMiniBlocks(hashes) + miniBlocks, _ := gbbRes.getMiniBlocks(hashes) if len(miniBlocks) == 0 { return nil, dataRetriever.ErrEmptyMiniBlockSlice } @@ -141,73 +141,54 @@ func (gbbRes *GenericBlockBodyResolver) RequestDataFromHashArray(hashes [][]byte // GetMiniBlocks method returns a list of deserialized mini blocks from a given hash list either from data pool or from storage func (gbbRes *GenericBlockBodyResolver) GetMiniBlocks(hashes [][]byte) (block.MiniBlockSlice, [][]byte) { - marshalizedMiniBlocks, missingMiniBlocksHashes := gbbRes.getMiniBlocks(hashes) - miniBlocks := make(block.MiniBlockSlice, 0) - - for hash, marshalizedMiniBlock := range marshalizedMiniBlocks { - miniBlock := &block.MiniBlock{} - err := gbbRes.marshalizer.Unmarshal(miniBlock, marshalizedMiniBlock) - if err != nil { - log.Debug(err.Error()) - gbbRes.miniBlockPool.Remove([]byte(hash)) - err = gbbRes.miniBlockStorage.Remove([]byte(hash)) - if err != nil { - log.Debug(err.Error()) - } - - missingMiniBlocksHashes = append(missingMiniBlocksHashes, []byte(hash)) - continue - } - - miniBlocks = append(miniBlocks, miniBlock) - } + return gbbRes.getMiniBlocks(hashes) +} - return miniBlocks, missingMiniBlocksHashes +// GetMiniBlocks method returns a list of deserialized mini blocks from a given hash list from data pool +func (gbbRes *GenericBlockBodyResolver) GetMiniBlocksFromPool(hashes [][]byte) (block.MiniBlockSlice, [][]byte) { + return gbbRes.getMiniBlocksFromPool(hashes) } // getMiniBlocks method returns a list of serialized mini blocks from a given hash list either from data pool or from storage -func (gbbRes *GenericBlockBodyResolver) getMiniBlocks(hashes [][]byte) (map[string][]byte, [][]byte) { - marshalizedMiniBlocks, missingMiniBlocksHashes := gbbRes.getMiniBlocksFromCache(hashes) +func (gbbRes *GenericBlockBodyResolver) getMiniBlocks(hashes [][]byte) (block.MiniBlockSlice, [][]byte) { + miniBlocks, missingMiniBlocksHashes := gbbRes.getMiniBlocksFromPool(hashes) if len(missingMiniBlocksHashes) == 0 { - return 
marshalizedMiniBlocks, missingMiniBlocksHashes + return miniBlocks, missingMiniBlocksHashes } - marshalizedMiniBlocksFromStorer, missingMiniBlocksHashes := gbbRes.getMiniBlocksFromStorer(missingMiniBlocksHashes) - for hash, marshalizedMiniBlockFromStorer := range marshalizedMiniBlocksFromStorer { - marshalizedMiniBlocks[hash] = marshalizedMiniBlockFromStorer - } + miniBlocksFromStorer, missingMiniBlocksHashes := gbbRes.getMiniBlocksFromStorer(missingMiniBlocksHashes) + miniBlocks = append(miniBlocks, miniBlocksFromStorer...) - return marshalizedMiniBlocks, missingMiniBlocksHashes + return miniBlocks, missingMiniBlocksHashes } -// getMiniBlocksFromCache returns a list of marshalized mini blocks from cache and a list of missing hashes -func (gbbRes *GenericBlockBodyResolver) getMiniBlocksFromCache(hashes [][]byte) (map[string][]byte, [][]byte) { - marshalizedMiniBlocks := make(map[string][]byte) +// getMiniBlocksFromPool returns a list of mini blocks from cache and a list of missing hashes +func (gbbRes *GenericBlockBodyResolver) getMiniBlocksFromPool(hashes [][]byte) (block.MiniBlockSlice, [][]byte) { + miniBlocks := make(block.MiniBlockSlice, 0) missingMiniBlocksHashes := make([][]byte, 0) for i := 0; i < len(hashes); i++ { - miniBlock, ok := gbbRes.miniBlockPool.Peek(hashes[i]) + obj, ok := gbbRes.miniBlockPool.Peek(hashes[i]) if !ok { missingMiniBlocksHashes = append(missingMiniBlocksHashes, hashes[i]) continue } - buff, err := gbbRes.marshalizer.Marshal(miniBlock) - if err != nil { - log.Debug(err.Error()) + miniBlock, ok := obj.(*block.MiniBlock) + if !ok { missingMiniBlocksHashes = append(missingMiniBlocksHashes, hashes[i]) continue } - marshalizedMiniBlocks[string(hashes[i])] = buff + miniBlocks = append(miniBlocks, miniBlock) } - return marshalizedMiniBlocks, missingMiniBlocksHashes + return miniBlocks, missingMiniBlocksHashes } -// getMiniBlocksFromStorer returns a list of marshalized mini blocks from the storage unit and a list of missing hashes -func (gbbRes *GenericBlockBodyResolver) getMiniBlocksFromStorer(hashes [][]byte) (map[string][]byte, [][]byte) { - marshalizedMiniBlocks := make(map[string][]byte) +// getMiniBlocksFromStorer returns a list of mini blocks from storage and a list of missing hashes +func (gbbRes *GenericBlockBodyResolver) getMiniBlocksFromStorer(hashes [][]byte) (block.MiniBlockSlice, [][]byte) { + miniBlocks := make(block.MiniBlockSlice, 0) missingMiniBlocksHashes := make([][]byte, 0) for i := 0; i < len(hashes); i++ { @@ -218,10 +199,24 @@ func (gbbRes *GenericBlockBodyResolver) getMiniBlocksFromStorer(hashes [][]byte) continue } - marshalizedMiniBlocks[string(hashes[i])] = buff + miniBlock := &block.MiniBlock{} + err = gbbRes.marshalizer.Unmarshal(miniBlock, buff) + if err != nil { + log.Debug(err.Error()) + gbbRes.miniBlockPool.Remove([]byte(hashes[i])) + err = gbbRes.miniBlockStorage.Remove([]byte(hashes[i])) + if err != nil { + log.Debug(err.Error()) + } + + missingMiniBlocksHashes = append(missingMiniBlocksHashes, hashes[i]) + continue + } + + miniBlocks = append(miniBlocks, miniBlock) } - return marshalizedMiniBlocks, missingMiniBlocksHashes + return miniBlocks, missingMiniBlocksHashes } // IsInterfaceNil returns true if there is no value under the interface diff --git a/integrationTests/mock/miniBlocksResolverMock.go b/integrationTests/mock/miniBlocksResolverMock.go index 52261bc7960..9dc3364aa95 100644 --- a/integrationTests/mock/miniBlocksResolverMock.go +++ b/integrationTests/mock/miniBlocksResolverMock.go @@ -10,6 +10,7 @@ type 
MiniBlocksResolverMock struct { RequestDataFromHashArrayCalled func(hashes [][]byte) error ProcessReceivedMessageCalled func(message p2p.MessageP2P) error GetMiniBlocksCalled func(hashes [][]byte) (block.MiniBlockSlice, [][]byte) + GetMiniBlocksFromPoolCalled func(hashes [][]byte) (block.MiniBlockSlice, [][]byte) } func (hrm *MiniBlocksResolverMock) RequestDataFromHash(hash []byte) error { @@ -28,6 +29,10 @@ func (hrm *MiniBlocksResolverMock) GetMiniBlocks(hashes [][]byte) (block.MiniBlo return hrm.GetMiniBlocksCalled(hashes) } +func (hrm *MiniBlocksResolverMock) GetMiniBlocksFromPool(hashes [][]byte) (block.MiniBlockSlice, [][]byte) { + return hrm.GetMiniBlocksFromPoolCalled(hashes) +} + // IsInterfaceNil returns true if there is no value under the interface func (hrm *MiniBlocksResolverMock) IsInterfaceNil() bool { if hrm == nil { diff --git a/process/mock/miniBlocksResolverMock.go b/process/mock/miniBlocksResolverMock.go index 52261bc7960..9dc3364aa95 100644 --- a/process/mock/miniBlocksResolverMock.go +++ b/process/mock/miniBlocksResolverMock.go @@ -10,6 +10,7 @@ type MiniBlocksResolverMock struct { RequestDataFromHashArrayCalled func(hashes [][]byte) error ProcessReceivedMessageCalled func(message p2p.MessageP2P) error GetMiniBlocksCalled func(hashes [][]byte) (block.MiniBlockSlice, [][]byte) + GetMiniBlocksFromPoolCalled func(hashes [][]byte) (block.MiniBlockSlice, [][]byte) } func (hrm *MiniBlocksResolverMock) RequestDataFromHash(hash []byte) error { @@ -28,6 +29,10 @@ func (hrm *MiniBlocksResolverMock) GetMiniBlocks(hashes [][]byte) (block.MiniBlo return hrm.GetMiniBlocksCalled(hashes) } +func (hrm *MiniBlocksResolverMock) GetMiniBlocksFromPool(hashes [][]byte) (block.MiniBlockSlice, [][]byte) { + return hrm.GetMiniBlocksFromPoolCalled(hashes) +} + // IsInterfaceNil returns true if there is no value under the interface func (hrm *MiniBlocksResolverMock) IsInterfaceNil() bool { if hrm == nil { diff --git a/process/sync/baseSync.go b/process/sync/baseSync.go index 10cf7295c33..1f380a57f95 100644 --- a/process/sync/baseSync.go +++ b/process/sync/baseSync.go @@ -566,24 +566,15 @@ func isRandomSeedValid(header data.HeaderHandler) bool { return !isPrevRandSeedNilOrEmpty && !isRandSeedNilOrEmpty } -func (boot *baseBootstrap) requestHeadersFromNonceIfMissing(nonce uint64, hdrRes dataRetriever.HeaderResolver) { - var err error +func (boot *baseBootstrap) requestHeadersFromNonceIfMissing( + nonce uint64, + getHeaderFromPoolWithNonce func(uint64) error, + hdrRes dataRetriever.HeaderResolver) { + nbRequestedHdrs := 0 maxNonce := core.MinUint64(nonce+maxHeadersToRequestInAdvance-1, boot.forkDetector.ProbableHighestNonce()) for currentNonce := nonce; currentNonce <= maxNonce; currentNonce++ { - if boot.shardCoordinator.SelfId() == sharding.MetachainShardId { - _, _, err = process.GetMetaHeaderFromPoolWithNonce( - currentNonce, - boot.headers, - boot.headersNonces) - } else { - _, _, err = process.GetShardHeaderFromPoolWithNonce( - currentNonce, - boot.shardCoordinator.SelfId(), - boot.headers, - boot.headersNonces) - } - + err := getHeaderFromPoolWithNonce(nonce) if err != nil { err = hdrRes.RequestDataFromNonce(currentNonce) if err != nil { diff --git a/process/sync/metablock.go b/process/sync/metablock.go index c9342f1ad19..fec620c5020 100644 --- a/process/sync/metablock.go +++ b/process/sync/metablock.go @@ -451,8 +451,7 @@ func (boot *MetaBootstrap) SyncBlock() error { return err } - // request in advance next meta header if missing - go 
boot.requestHeadersFromNonceIfMissing(hdr.GetNonce()+1, boot.hdrRes) + go boot.requestHeadersFromNonceIfMissing(hdr.GetNonce()+1, boot.getMetaHeaderFromPoolWithNonce, boot.hdrRes) haveTime := func() time.Duration { return boot.rounder.TimeDuration() @@ -679,3 +678,12 @@ func (boot *MetaBootstrap) IsInterfaceNil() bool { } return false } + +func (boot *MetaBootstrap) getMetaHeaderFromPoolWithNonce(nonce uint64) error { + _, _, err := process.GetMetaHeaderFromPoolWithNonce( + nonce, + boot.headers, + boot.headersNonces) + + return err +} diff --git a/process/sync/shardblock.go b/process/sync/shardblock.go index c9c71459693..0e36aad143e 100644 --- a/process/sync/shardblock.go +++ b/process/sync/shardblock.go @@ -675,8 +675,7 @@ func (boot *ShardBootstrap) SyncBlock() error { return err } - // request in advance next shard header if missing - go boot.requestHeadersFromNonceIfMissing(hdr.GetNonce()+1, boot.hdrRes) + go boot.requestHeadersFromNonceIfMissing(hdr.GetNonce()+1, boot.getShardHeaderFromPoolWithNonce, boot.hdrRes) hashes := make([][]byte, len(hdr.MiniBlockHeaders)) for i := 0; i < len(hdr.MiniBlockHeaders); i++ { @@ -815,7 +814,7 @@ func (boot *ShardBootstrap) requestMiniBlocks(hashes [][]byte) { // that will be added. The block executor should decide by parsing the header block body type value // what kind of block body received. func (boot *ShardBootstrap) getMiniBlocksRequestingIfMissing(hashes [][]byte) (block.MiniBlockSlice, error) { - miniBlocks, missingMiniBlocksHashes := boot.miniBlockResolver.GetMiniBlocks(hashes) + miniBlocks, missingMiniBlocksHashes := boot.miniBlockResolver.GetMiniBlocksFromPool(hashes) if len(missingMiniBlocksHashes) > 0 { _ = process.EmptyChannel(boot.chRcvMiniBlocks) boot.requestMiniBlocks(missingMiniBlocksHashes) @@ -824,7 +823,7 @@ func (boot *ShardBootstrap) getMiniBlocksRequestingIfMissing(hashes [][]byte) (b return nil, err } - receivedMiniBlocks, unreceivedMiniBlocksHashes := boot.miniBlockResolver.GetMiniBlocks(missingMiniBlocksHashes) + receivedMiniBlocks, unreceivedMiniBlocksHashes := boot.miniBlockResolver.GetMiniBlocksFromPool(missingMiniBlocksHashes) if len(unreceivedMiniBlocksHashes) > 0 { return nil, process.ErrMissingBody } @@ -1000,3 +999,13 @@ func (boot *ShardBootstrap) IsInterfaceNil() bool { } return false } + +func (boot *ShardBootstrap) getShardHeaderFromPoolWithNonce(nonce uint64) error { + _, _, err := process.GetShardHeaderFromPoolWithNonce( + nonce, + boot.shardCoordinator.SelfId(), + boot.headers, + boot.headersNonces) + + return err +} diff --git a/process/sync/shardblock_test.go b/process/sync/shardblock_test.go index f90f1e2b1c4..64f0d3a658b 100644 --- a/process/sync/shardblock_test.go +++ b/process/sync/shardblock_test.go @@ -58,6 +58,9 @@ func createMockResolversFinder() *mock.ResolversFinderStub { GetMiniBlocksCalled: func(hashes [][]byte) (block.MiniBlockSlice, [][]byte) { return make(block.MiniBlockSlice, 0), make([][]byte, 0) }, + GetMiniBlocksFromPoolCalled: func(hashes [][]byte) (block.MiniBlockSlice, [][]byte) { + return make(block.MiniBlockSlice, 0), make([][]byte, 0) + }, }, nil } @@ -91,6 +94,9 @@ func createMockResolversFinderNilMiniBlocks() *mock.ResolversFinderStub { GetMiniBlocksCalled: func(hashes [][]byte) (block.MiniBlockSlice, [][]byte) { return make(block.MiniBlockSlice, 0), [][]byte{[]byte("hash")} }, + GetMiniBlocksFromPoolCalled: func(hashes [][]byte) (block.MiniBlockSlice, [][]byte) { + return make(block.MiniBlockSlice, 0), [][]byte{[]byte("hash")} + }, }, nil } From 
9e9365e7495c709e9684a9ef575ac72960dcd998 Mon Sep 17 00:00:00 2001 From: miiu96 Date: Thu, 10 Oct 2019 12:25:29 +0300 Subject: [PATCH 229/234] EN-4270 : index metablocks in elastic search --- core/indexer/elasticsearch.go | 10 ++++++++++ core/indexer/interface.go | 1 + core/indexer/nilIndexer.go | 4 ++++ core/mock/indexerMock.go | 4 ++++ node/mock/indexerMock.go | 4 ++++ process/block/metablock.go | 2 ++ process/block/metrics.go | 2 +- process/mock/indexerMock.go | 4 ++++ 8 files changed, 30 insertions(+), 1 deletion(-) diff --git a/core/indexer/elasticsearch.go b/core/indexer/elasticsearch.go index d671d8747fd..6892d7437ed 100644 --- a/core/indexer/elasticsearch.go +++ b/core/indexer/elasticsearch.go @@ -240,6 +240,16 @@ func (ei *elasticIndexer) SaveBlock( } } +// SaveMetaBlock will index a meta block in elastic search +func (ei *elasticIndexer) SaveMetaBlock(header data.HeaderHandler, signersIndexes []uint64) { + if header == nil || header.IsInterfaceNil() { + ei.logger.Warn(ErrNoHeader.Error()) + return + } + + go ei.saveHeader(header, signersIndexes) +} + // SaveRoundInfo will save data about a round on elastic search func (ei *elasticIndexer) SaveRoundInfo(roundInfo RoundInfo) { var buff bytes.Buffer diff --git a/core/indexer/interface.go b/core/indexer/interface.go index ad29e9aa896..94ca33be898 100644 --- a/core/indexer/interface.go +++ b/core/indexer/interface.go @@ -9,6 +9,7 @@ import ( // This could be an elasticsearch index, a MySql database or any other external services. type Indexer interface { SaveBlock(body data.BodyHandler, header data.HeaderHandler, txPool map[string]data.TransactionHandler, signersIndexes []uint64) + SaveMetaBlock(header data.HeaderHandler, signersIndexes []uint64) SaveRoundInfo(roundInfo RoundInfo) UpdateTPS(tpsBenchmark statistics.TPSBenchmark) SaveValidatorsPubKeys(validatorsPubKeys map[uint32][][]byte) diff --git a/core/indexer/nilIndexer.go b/core/indexer/nilIndexer.go index a63e08462c9..de2ced5d168 100644 --- a/core/indexer/nilIndexer.go +++ b/core/indexer/nilIndexer.go @@ -19,6 +19,10 @@ func (ni *NilIndexer) SaveBlock(body data.BodyHandler, header data.HeaderHandler return } +func (im *NilIndexer) SaveMetaBlock(header data.HeaderHandler, signersIndexes []uint64) { + return +} + // SaveRoundInfo will do nothing func (ni *NilIndexer) SaveRoundInfo(info RoundInfo) { return diff --git a/core/mock/indexerMock.go b/core/mock/indexerMock.go index 7f43e764569..d89a1ac4085 100644 --- a/core/mock/indexerMock.go +++ b/core/mock/indexerMock.go @@ -16,6 +16,10 @@ func (im *IndexerMock) SaveBlock(body data.BodyHandler, header data.HeaderHandle panic("implement me") } +func (im *IndexerMock) SaveMetaBlock(header data.HeaderHandler, signersIndexes []uint64) { + return +} + func (im *IndexerMock) UpdateTPS(tpsBenchmark statistics.TPSBenchmark) { panic("implement me") } diff --git a/node/mock/indexerMock.go b/node/mock/indexerMock.go index 7f43e764569..10de9fbdaa3 100644 --- a/node/mock/indexerMock.go +++ b/node/mock/indexerMock.go @@ -16,6 +16,10 @@ func (im *IndexerMock) SaveBlock(body data.BodyHandler, header data.HeaderHandle panic("implement me") } +func (im *IndexerMock) SaveMetaBlock(header data.HeaderHandler, signersIndexes []uint64) { + panic("implement me") +} + func (im *IndexerMock) UpdateTPS(tpsBenchmark statistics.TPSBenchmark) { panic("implement me") } diff --git a/process/block/metablock.go b/process/block/metablock.go index 79cb2651b9a..5507db7ef57 100644 --- a/process/block/metablock.go +++ b/process/block/metablock.go @@ -281,6 +281,8 @@ func 
(mp *metaProcessor) indexBlock( } signersIndexes := mp.nodesCoordinator.GetValidatorsIndexes(publicKeys) + go mp.core.Indexer().SaveMetaBlock(metaBlock, signersIndexes) + saveRoundInfoInElastic(mp.core.Indexer(), mp.nodesCoordinator, sharding.MetachainShardId, metaBlock, lastMetaBlock, signersIndexes) } diff --git a/process/block/metrics.go b/process/block/metrics.go index 5d9f7cf2d54..9d031db97e4 100644 --- a/process/block/metrics.go +++ b/process/block/metrics.go @@ -110,7 +110,7 @@ func saveRoundInfoInElastic( roundInfo = indexer.RoundInfo{ Index: i, SignersIndexes: signersIndexes, - BlockWasProposed: true, + BlockWasProposed: false, ShardId: shardId, Timestamp: time.Duration(header.GetTimeStamp() - ((currentBlockRound - i) * roundDuration)), } diff --git a/process/mock/indexerMock.go b/process/mock/indexerMock.go index 2d2552a6da5..b9fe13016ac 100644 --- a/process/mock/indexerMock.go +++ b/process/mock/indexerMock.go @@ -17,6 +17,10 @@ func (im *IndexerMock) SaveBlock(body data.BodyHandler, header data.HeaderHandle } } +func (im *IndexerMock) SaveMetaBlock(header data.HeaderHandler, signersIndexes []uint64) { + return +} + func (im *IndexerMock) UpdateTPS(tpsBenchmark statistics.TPSBenchmark) { panic("implement me") } From 51f8e74d58aef8c17ffbe08ca995d4b441331672 Mon Sep 17 00:00:00 2001 From: Sebastian Marian Date: Thu, 10 Oct 2019 13:34:42 +0300 Subject: [PATCH 230/234] * Fixed after review --- process/block/export_test.go | 10 +++++--- process/block/metablock.go | 44 ++++++++++++++++++++------------- process/block/metablock_test.go | 35 +++++++++++++++++++++++--- process/block/shardblock.go | 12 ++++----- 4 files changed, 70 insertions(+), 31 deletions(-) diff --git a/process/block/export_test.go b/process/block/export_test.go index ba5a98aa534..d21f56d655c 100644 --- a/process/block/export_test.go +++ b/process/block/export_test.go @@ -106,8 +106,8 @@ func (mp *metaProcessor) RequestBlockHeaders(header *block.MetaBlock) (uint32, u return mp.requestShardHeaders(header) } -func (mp *metaProcessor) RemoveBlockInfoFromPool() error { - return mp.removeBlockInfoFromPool() +func (mp *metaProcessor) RemoveBlockInfoFromPool(header *block.MetaBlock) error { + return mp.removeBlockInfoFromPool(header) } func (mp *metaProcessor) ReceivedShardHeader(shardHeaderHash []byte) { @@ -178,11 +178,13 @@ func (bp *baseProcessor) SetHasher(hasher hashing.Hasher) { } func (mp *metaProcessor) SetShardBlockFinality(val uint32) { + mp.hdrsForCurrBlock.mutHdrsForBlock.Lock() mp.shardBlockFinality = val + mp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() } -func (mp *metaProcessor) SaveLastNotarizedHeader() error { - return mp.saveLastNotarizedHeader() +func (mp *metaProcessor) SaveLastNotarizedHeader(header *block.MetaBlock) error { + return mp.saveLastNotarizedHeader(header) } func (mp *metaProcessor) CheckShardHeadersValidity() (map[uint32]data.HeaderHandler, error) { diff --git a/process/block/metablock.go b/process/block/metablock.go index ed9b9f094cd..95457f464ab 100644 --- a/process/block/metablock.go +++ b/process/block/metablock.go @@ -279,7 +279,11 @@ func (mp *metaProcessor) indexBlock() { } // removeBlockInfoFromPool removes the block info from associated pools -func (mp *metaProcessor) removeBlockInfoFromPool() error { +func (mp *metaProcessor) removeBlockInfoFromPool(header *block.MetaBlock) error { + if header == nil || header.IsInterfaceNil() { + return process.ErrNilMetaBlockHeader + } + headerPool := mp.dataPool.ShardHeaders() if headerPool == nil || headerPool.IsInterfaceNil() { return 
process.ErrNilHeadersDataPool @@ -291,9 +295,12 @@ func (mp *metaProcessor) removeBlockInfoFromPool() error { } mp.hdrsForCurrBlock.mutHdrsForBlock.RLock() - for shardBlockHash, hdrInfo := range mp.hdrsForCurrBlock.hdrHashAndInfo { - if !hdrInfo.usedInBlock { - continue + for i := 0; i < len(header.ShardInfo); i++ { + shardHeaderHash := header.ShardInfo[i].HeaderHash + hdrInfo, ok := mp.hdrsForCurrBlock.hdrHashAndInfo[string(shardHeaderHash)] + if !ok { + mp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() + return process.ErrMissingHeader } shardBlock, ok := hdrInfo.hdr.(*block.Header) @@ -302,7 +309,7 @@ func (mp *metaProcessor) removeBlockInfoFromPool() error { return process.ErrWrongTypeAssertion } - headerPool.Remove([]byte(shardBlockHash)) + headerPool.Remove([]byte(shardHeaderHash)) headerNoncesPool.Remove(shardBlock.Nonce, shardBlock.ShardId) } mp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() @@ -509,7 +516,7 @@ func (mp *metaProcessor) CommitBlock( mp.saveMetricCrossCheckBlockHeight() - err = mp.saveLastNotarizedHeader() + err = mp.saveLastNotarizedHeader(header) if err != nil { return err } @@ -523,7 +530,7 @@ func (mp *metaProcessor) CommitBlock( header.Nonce, core.ToB64(headerHash))) - errNotCritical = mp.removeBlockInfoFromPool() + errNotCritical = mp.removeBlockInfoFromPool(header) if errNotCritical != nil { log.Info(errNotCritical.Error()) } @@ -608,7 +615,7 @@ func (mp *metaProcessor) saveMetricCrossCheckBlockHeight() { mp.appStatusHandler.SetStringValue(core.MetricCrossCheckBlockHeight, crossCheckBlockHeight) } -func (mp *metaProcessor) saveLastNotarizedHeader() error { +func (mp *metaProcessor) saveLastNotarizedHeader(header *block.MetaBlock) error { mp.mutNotarizedHdrs.Lock() defer mp.mutNotarizedHdrs.Unlock() @@ -622,9 +629,12 @@ func (mp *metaProcessor) saveLastNotarizedHeader() error { } mp.hdrsForCurrBlock.mutHdrsForBlock.RLock() - for _, hdrInfo := range mp.hdrsForCurrBlock.hdrHashAndInfo { - if !hdrInfo.usedInBlock { - continue + for i := 0; i < len(header.ShardInfo); i++ { + shardHeaderHash := header.ShardInfo[i].HeaderHash + hdrInfo, ok := mp.hdrsForCurrBlock.hdrHashAndInfo[string(shardHeaderHash)] + if !ok { + mp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() + return process.ErrMissingHeader } shardHdr, ok := hdrInfo.hdr.(*block.Header) @@ -837,13 +847,13 @@ func (mp *metaProcessor) receivedShardHeader(shardHeaderHash []byte) { func (mp *metaProcessor) requestMissingFinalityAttestingHeaders() uint32 { requestedBlockHeaders := uint32(0) for shardId := uint32(0); shardId < mp.shardCoordinator.NumberOfShards(); shardId++ { - firstFinalityAttestingHeader := mp.hdrsForCurrBlock.highestHdrNonce[shardId] + 1 - lastFinalityAttestingHeader := mp.hdrsForCurrBlock.highestHdrNonce[shardId] + uint64(mp.shardBlockFinality) - for i := firstFinalityAttestingHeader; i <= lastFinalityAttestingHeader; i++ { - if mp.hdrsForCurrBlock.highestHdrNonce[shardId] == uint64(0) { - continue - } + highestHdrNonce := mp.hdrsForCurrBlock.highestHdrNonce[shardId] + if highestHdrNonce == uint64(0) { + continue + } + lastFinalityAttestingHeader := mp.hdrsForCurrBlock.highestHdrNonce[shardId] + uint64(mp.shardBlockFinality) + for i := highestHdrNonce + 1; i <= lastFinalityAttestingHeader; i++ { shardHeader, shardHeaderHash, err := process.GetShardHeaderFromPoolWithNonce( i, shardId, diff --git a/process/block/metablock_test.go b/process/block/metablock_test.go index 5342270817d..abb31ca7898 100644 --- a/process/block/metablock_test.go +++ b/process/block/metablock_test.go @@ -735,6 +735,7 @@ func 
TestMetaProcessor_CommitBlockStorageFailsForHeaderShouldErr(t *testing.T) { blkc, _ := blockchain.NewMetaChain( generateTestCache(), ) + mp.SetHdrForCurrentBlock([]byte("hdr_hash1"), &block.Header{}, true) err := mp.CommitBlock(blkc, hdr, body) assert.True(t, wasCalled) assert.Nil(t, err) @@ -901,6 +902,30 @@ func TestBlockProc_RequestTransactionFromNetwork(t *testing.T) { assert.Equal(t, uint32(1), hdrsRequested) } +func TestMetaProcessor_RemoveBlockInfoFromPoolShouldErrNilMetaBlockHeader(t *testing.T) { + t.Parallel() + + mdp := initMetaDataPool() + mp, _ := blproc.NewMetaProcessor( + &mock.ServiceContainerMock{}, + &mock.AccountsStub{}, + mdp, + &mock.ForkDetectorMock{}, + mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.SpecialAddressHandlerMock{}, + &mock.HasherStub{}, + &mock.MarshalizerMock{}, + initStore(), + createGenesisBlocks(mock.NewOneShardCoordinatorMock()), + &mock.RequestHandlerMock{}, + &mock.Uint64ByteSliceConverterMock{}, + ) + err := mp.RemoveBlockInfoFromPool(nil) + assert.NotNil(t, err) + assert.Equal(t, err, process.ErrNilMetaBlockHeader) +} + func TestMetaProcessor_RemoveBlockInfoFromPoolShouldWork(t *testing.T) { t.Parallel() @@ -921,7 +946,8 @@ func TestMetaProcessor_RemoveBlockInfoFromPoolShouldWork(t *testing.T) { &mock.Uint64ByteSliceConverterMock{}, ) mp.SetHdrForCurrentBlock([]byte("hdr_hash1"), &block.Header{}, true) - err := mp.RemoveBlockInfoFromPool() + header := createMetaBlockHeader() + err := mp.RemoveBlockInfoFromPool(header) assert.Nil(t, err) } @@ -1783,7 +1809,8 @@ func TestMetaProcessor_CreateLastNotarizedHdrs(t *testing.T) { metaHdr.ShardInfo = append(metaHdr.ShardInfo, shDataPrev) // test header not in pool and defer called - err := mp.SaveLastNotarizedHeader() + err := mp.SaveLastNotarizedHeader(metaHdr) + assert.Equal(t, process.ErrMissingHeader, err) notarizedHdrs = mp.NotarizedHdrs() assert.Equal(t, firstNonce, mp.LastNotarizedHdrForShard(currHdr.ShardId).GetNonce()) @@ -1793,7 +1820,7 @@ func TestMetaProcessor_CreateLastNotarizedHdrs(t *testing.T) { mp.SetHdrForCurrentBlock(currHash, metaHdr, true) mp.SetHdrForCurrentBlock(prevHash, prevHdr, true) - err = mp.SaveLastNotarizedHeader() + err = mp.SaveLastNotarizedHeader(metaHdr) assert.Equal(t, process.ErrWrongTypeAssertion, err) notarizedHdrs = mp.NotarizedHdrs() assert.Equal(t, firstNonce, mp.LastNotarizedHdrForShard(currHdr.ShardId).GetNonce()) @@ -1805,7 +1832,7 @@ func TestMetaProcessor_CreateLastNotarizedHdrs(t *testing.T) { mp.SetHdrForCurrentBlock(currHash, currHdr, true) mp.SetHdrForCurrentBlock(prevHash, prevHdr, true) - err = mp.SaveLastNotarizedHeader() + err = mp.SaveLastNotarizedHeader(metaHdr) assert.Nil(t, err) notarizedHdrs = mp.NotarizedHdrs() assert.Equal(t, currHdr, mp.LastNotarizedHdrForShard(currHdr.ShardId)) diff --git a/process/block/shardblock.go b/process/block/shardblock.go index 143f3ec5c2e..dcc389ae678 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -1153,13 +1153,13 @@ func (sp *shardProcessor) receivedMetaBlock(metaBlockHash []byte) { // which should be processed func (sp *shardProcessor) requestMissingFinalityAttestingHeaders() uint32 { requestedBlockHeaders := uint32(0) - firstFinalityAttestingHeader := sp.hdrsForCurrBlock.highestHdrNonce[sharding.MetachainShardId] + 1 - lastFinalityAttestingHeader := sp.hdrsForCurrBlock.highestHdrNonce[sharding.MetachainShardId] + uint64(sp.metaBlockFinality) - for i := firstFinalityAttestingHeader; i <= lastFinalityAttestingHeader; i++ { - if 
sp.hdrsForCurrBlock.highestHdrNonce[sharding.MetachainShardId] == uint64(0) { - continue - } + highestHdrNonce := sp.hdrsForCurrBlock.highestHdrNonce[sharding.MetachainShardId] + if highestHdrNonce == uint64(0) { + return requestedBlockHeaders + } + lastFinalityAttestingHeader := sp.hdrsForCurrBlock.highestHdrNonce[sharding.MetachainShardId] + uint64(sp.metaBlockFinality) + for i := highestHdrNonce + 1; i <= lastFinalityAttestingHeader; i++ { metaBlock, metaBlockHash, err := process.GetMetaHeaderFromPoolWithNonce( i, sp.dataPool.MetaBlocks(), From 3003f382d357c9b2be896618bd94c5098ade3fb2 Mon Sep 17 00:00:00 2001 From: miiu96 Date: Fri, 11 Oct 2019 13:53:56 +0300 Subject: [PATCH 231/234] EN-4222 : reformat code --- process/block/metablock.go | 8 ++++---- process/block/shardblock.go | 14 +++++++------- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/process/block/metablock.go b/process/block/metablock.go index 5bd3c5f1af1..cd9c74ff26d 100644 --- a/process/block/metablock.go +++ b/process/block/metablock.go @@ -20,12 +20,12 @@ import ( // metaProcessor implements metaProcessor interface and actually it tries to execute block type metaProcessor struct { *baseProcessor - core serviceContainer.Core - dataPool dataRetriever.MetaPoolsHolder + core serviceContainer.Core + dataPool dataRetriever.MetaPoolsHolder shardsHeadersNonce *sync.Map shardBlockFinality uint32 - chRcvAllHdrs chan bool - headersCounter *headersCounter + chRcvAllHdrs chan bool + headersCounter *headersCounter } // NewMetaProcessor creates a new metaProcessor object diff --git a/process/block/shardblock.go b/process/block/shardblock.go index dd312a3d543..d744aa47f7c 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -24,15 +24,15 @@ const maxCleanTime = time.Second // shardProcessor implements shardProcessor interface and actually it tries to execute block type shardProcessor struct { *baseProcessor - dataPool dataRetriever.PoolsHolder - metaBlockFinality uint32 - chRcvAllMetaHdrs chan bool + dataPool dataRetriever.PoolsHolder + metaBlockFinality uint32 + chRcvAllMetaHdrs chan bool processedMiniBlocks map[string]map[string]struct{} mutProcessedMiniBlocks sync.RWMutex - core serviceContainer.Core - txCoordinator process.TransactionCoordinator - txCounter *transactionCounter - txsPoolsCleaner process.PoolsCleaner + core serviceContainer.Core + txCoordinator process.TransactionCoordinator + txCounter *transactionCounter + txsPoolsCleaner process.PoolsCleaner } // NewShardProcessor creates a new shardProcessor object From d03d961a2f3be1d9b3a2c4eeeb3d68c714c39ad7 Mon Sep 17 00:00:00 2001 From: miiu96 Date: Fri, 11 Oct 2019 14:01:53 +0300 Subject: [PATCH 232/234] EN-4270 : fix after review --- process/block/metablock.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/process/block/metablock.go b/process/block/metablock.go index 5507db7ef57..a4a6d878605 100644 --- a/process/block/metablock.go +++ b/process/block/metablock.go @@ -279,8 +279,8 @@ func (mp *metaProcessor) indexBlock( if err != nil { return } - signersIndexes := mp.nodesCoordinator.GetValidatorsIndexes(publicKeys) + signersIndexes := mp.nodesCoordinator.GetValidatorsIndexes(publicKeys) go mp.core.Indexer().SaveMetaBlock(metaBlock, signersIndexes) saveRoundInfoInElastic(mp.core.Indexer(), mp.nodesCoordinator, sharding.MetachainShardId, metaBlock, lastMetaBlock, signersIndexes) From d538921eb9a03705f7b3a8372f18714ae8a9dc2b Mon Sep 17 00:00:00 2001 From: miiu96 Date: Fri, 11 Oct 2019 14:50:18 +0300 Subject: [PATCH 
233/234] EN-4270 : fix after review --- core/indexer/data.go | 2 +- core/indexer/elasticsearch.go | 6 +++--- process/block/export_test.go | 9 --------- process/block/metablock.go | 3 +-- 4 files changed, 5 insertions(+), 15 deletions(-) diff --git a/core/indexer/data.go b/core/indexer/data.go index 2bce4546333..4041e166202 100644 --- a/core/indexer/data.go +++ b/core/indexer/data.go @@ -14,7 +14,7 @@ type Transaction struct { BlockHash string `json:"blockHash"` Nonce uint64 `json:"nonce"` Round uint64 `json:"round"` - Value *big.Int `json:"value"` + Value string `json:"value"` Receiver string `json:"receiver"` Sender string `json:"sender"` ReceiverShard uint32 `json:"receiverShard"` diff --git a/core/indexer/elasticsearch.go b/core/indexer/elasticsearch.go index 6892d7437ed..264bd4aa7ca 100644 --- a/core/indexer/elasticsearch.go +++ b/core/indexer/elasticsearch.go @@ -639,7 +639,7 @@ func buildTransaction( BlockHash: hex.EncodeToString(blockHash), Nonce: tx.Nonce, Round: header.GetRound(), - Value: tx.Value, + Value: tx.Value.String(), Receiver: hex.EncodeToString(tx.RcvAddr), Sender: hex.EncodeToString(tx.SndAddr), ReceiverShard: mb.ReceiverShardID, @@ -667,7 +667,7 @@ func buildSmartContractResult( BlockHash: hex.EncodeToString(blockHash), Nonce: scr.Nonce, Round: header.GetRound(), - Value: scr.Value, + Value: scr.Value.String(), Receiver: hex.EncodeToString(scr.RcvAddr), Sender: hex.EncodeToString(scr.SndAddr), ReceiverShard: mb.ReceiverShardID, @@ -698,7 +698,7 @@ func buildRewardTransaction( BlockHash: hex.EncodeToString(blockHash), Nonce: 0, Round: rTx.Round, - Value: rTx.Value, + Value: rTx.Value.String(), Receiver: hex.EncodeToString(rTx.RcvAddr), Sender: shardIdStr, ReceiverShard: mb.ReceiverShardID, diff --git a/process/block/export_test.go b/process/block/export_test.go index 4babf9d51f5..e7b49ca62f7 100644 --- a/process/block/export_test.go +++ b/process/block/export_test.go @@ -318,15 +318,6 @@ func (bp *baseProcessor) SetHighestHdrNonceForCurrentBlock(shardId uint32, value bp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() } -func (sp *shardProcessor) CalculateRoundDuration( - lastBlockTimestamp uint64, - currentBlockTimestamp uint64, - lastBlockRound uint64, - currentBlockRound uint64, -) uint64 { - return sp.calculateRoundDuration(lastBlockTimestamp, currentBlockTimestamp, lastBlockRound, currentBlockRound) -} - func (bp *baseProcessor) CreateBlockStarted() { bp.createBlockStarted() } diff --git a/process/block/metablock.go b/process/block/metablock.go index 317122b7627..1bbd77343ce 100644 --- a/process/block/metablock.go +++ b/process/block/metablock.go @@ -266,7 +266,6 @@ func (mp *metaProcessor) checkAndRequestIfShardHeadersMissing(round uint64) { func (mp *metaProcessor) indexBlock( metaBlock data.HeaderHandler, - headerPool map[string]*block.Header, lastMetaBlock data.HeaderHandler, ) { if mp.core == nil || mp.core.Indexer() == nil { @@ -579,7 +578,7 @@ func (mp *metaProcessor) CommitBlock( mp.core.TPSBenchmark().Update(header) } - mp.indexBlock(header, tempHeaderPool, lastMetaBlock) + mp.indexBlock(header, lastMetaBlock) mp.appStatusHandler.SetStringValue(core.MetricCurrentBlockHash, core.ToB64(headerHash)) From fa57aa3bd1f62e2f85c8a10785a1cf3a691e7998 Mon Sep 17 00:00:00 2001 From: miiu96 Date: Mon, 14 Oct 2019 11:19:55 +0300 Subject: [PATCH 234/234] EN-4222 : add import that missing --- process/block/metablock.go | 1 + 1 file changed, 1 insertion(+) diff --git a/process/block/metablock.go b/process/block/metablock.go index 735c24aa137..7dbace5ea5c 100644 --- 
a/process/block/metablock.go +++ b/process/block/metablock.go @@ -14,6 +14,7 @@ import ( "github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/throttle" + "github.com/ElrondNetwork/elrond-go/sharding" "github.com/ElrondNetwork/elrond-go/statusHandler" )
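
The recurring change across these last patches is the reworked MiniBlocksResolver contract: instead of returning nil as soon as one mini block cannot be found, both GetMiniBlocks and GetMiniBlocksFromPool now return the mini blocks that were recovered together with the hashes that are still missing, and callers such as getMiniBlocksRequestingIfMissing in process/sync/shardblock.go request only the missing hashes and retry once. The snippet below is a minimal caller-side sketch of that pattern, assuming hypothetical names (fetchMiniBlocks, the wait callback, errMissingBody, the trimmed-down resolver interface and MiniBlock types) that are not part of these patches; it only illustrates the intended flow, not the actual bootstrapper code.

package sketch

import "errors"

// MiniBlock and MiniBlockSlice stand in for the real data/block types.
type MiniBlock struct {
	Hash []byte
}

type MiniBlockSlice []*MiniBlock

// miniBlocksResolver mirrors only the two calls this sketch needs from the
// reworked resolver contract introduced in this patch set.
type miniBlocksResolver interface {
	GetMiniBlocksFromPool(hashes [][]byte) (MiniBlockSlice, [][]byte)
	RequestDataFromHashArray(hashes [][]byte) error
}

// errMissingBody is an illustrative stand-in for process.ErrMissingBody.
var errMissingBody = errors.New("missing body")

// fetchMiniBlocks shows the intended usage: take what the pool already holds,
// request only the still-missing hashes, wait for delivery, then retry once.
func fetchMiniBlocks(res miniBlocksResolver, hashes [][]byte, wait func() error) (MiniBlockSlice, error) {
	miniBlocks, missing := res.GetMiniBlocksFromPool(hashes)
	if len(missing) == 0 {
		return miniBlocks, nil
	}

	// ask the network only for what is actually missing
	if err := res.RequestDataFromHashArray(missing); err != nil {
		return nil, err
	}

	// wait is expected to block until the requested data arrives or a timeout hits
	if err := wait(); err != nil {
		return nil, err
	}

	received, stillMissing := res.GetMiniBlocksFromPool(missing)
	if len(stillMissing) > 0 {
		return nil, errMissingBody
	}

	return append(miniBlocks, received...), nil
}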